From aed8ec8e2a247d70d2553238caca27098f1858c3 Mon Sep 17 00:00:00 2001 From: Travis Reeder Date: Fri, 7 Jul 2017 16:41:45 -0700 Subject: [PATCH] Remove logrus from fn --- fn/glide.lock | 17 +- fn/glide.yaml | 4 +- fn/start.go | 14 +- fn/update.go | 12 +- .../github.com/aws/aws-sdk-go/CHANGELOG.md | 14 + .../aws-sdk-go/aws/client/default_retryer.go | 4 +- .../github.com/aws/aws-sdk-go/aws/config.go | 2 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 41 +- .../aws/aws-sdk-go/aws/request/retryer.go | 2 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../models/apis/ds/2015-04-16/api-2.json | 123 +- .../models/apis/ds/2015-04-16/docs-2.json | 93 +- .../apis/ds/2015-04-16/paginators-1.json | 9 + .../models/apis/kinesis/2013-12-02/api-2.json | 151 +- .../apis/kinesis/2013-12-02/docs-2.json | 77 +- .../models/apis/kms/2014-11-01/api-2.json | 10 +- .../models/apis/kms/2014-11-01/docs-2.json | 6 + .../models/apis/ssm/2014-11-06/api-2.json | 73 +- .../models/apis/ssm/2014-11-06/docs-2.json | 72 +- .../models/endpoints/endpoints.json | 33 +- .../service/directoryservice/api.go | 569 + .../directoryserviceiface/interface.go | 11 + .../service/directoryservice/errors.go | 7 + .../aws/aws-sdk-go/service/kinesis/api.go | 636 +- .../aws/aws-sdk-go/service/kinesis/errors.go | 43 + .../service/kinesis/kinesisiface/interface.go | 8 + .../aws/aws-sdk-go/service/kms/api.go | 19 + .../aws/aws-sdk-go/service/ssm/api.go | 244 +- .../aws/aws-sdk-go/service/ssm/errors.go | 8 + fn/vendor/github.com/docker/docker/.gitignore | 13 - fn/vendor/github.com/docker/docker/.mailmap | 49 +- fn/vendor/github.com/docker/docker/AUTHORS | 108 +- .../github.com/docker/docker/CHANGELOG.md | 10 +- .../github.com/docker/docker/CONTRIBUTING.md | 4 +- fn/vendor/github.com/docker/docker/Dockerfile | 57 +- .../docker/docker/Dockerfile.aarch64 | 22 +- .../github.com/docker/docker/Dockerfile.armhf | 20 +- .../docker/docker/Dockerfile.ppc64le | 20 +- .../github.com/docker/docker/Dockerfile.s390x | 18 +- 
.../docker/docker/Dockerfile.simple | 5 +- .../docker/docker/Dockerfile.solaris | 1 - .../docker/docker/Dockerfile.windows | 2 +- .../github.com/docker/docker/MAINTAINERS | 2 +- fn/vendor/github.com/docker/docker/Makefile | 32 +- fn/vendor/github.com/docker/docker/README.md | 316 +- fn/vendor/github.com/docker/docker/VERSION | 2 +- .../github.com/docker/docker/api/README.md | 4 +- .../github.com/docker/docker/api/common.go | 105 +- .../docker/docker/api/common_test.go | 264 - .../docker/docker/api/errors/errors_test.go | 64 + .../api/server/backend/build/backend.go | 90 + .../docker/api/server/backend/build/tag.go | 84 + .../docker/api/server/httputils/errors.go | 41 + .../docker/api/server/httputils/httputils.go | 15 +- .../api/server/httputils/httputils_test.go | 18 + .../server/httputils/httputils_write_json.go | 2 - .../httputils/httputils_write_json_go16.go | 16 - .../api/server/httputils/write_log_stream.go | 2 + .../docker/api/server/middleware/debug.go | 28 +- .../api/server/middleware/debug_test.go | 58 + .../docker/api/server/router/build/backend.go | 17 +- .../docker/api/server/router/build/build.go | 10 +- .../api/server/router/build/build_routes.go | 196 +- .../server/router/checkpoint/checkpoint.go | 6 +- .../api/server/router/container/backend.go | 8 +- .../api/server/router/container/container.go | 8 +- .../router/container/container_routes.go | 47 +- .../api/server/router/container/copy.go | 4 +- .../api/server/router/distribution/backend.go | 14 + .../router/distribution/distribution.go | 31 + .../distribution/distribution_routes.go | 138 + .../docker/api/server/router/experimental.go | 2 +- .../docker/api/server/router/image/backend.go | 6 +- .../docker/api/server/router/image/image.go | 6 +- .../api/server/router/image/image_routes.go | 54 +- .../docker/docker/api/server/router/local.go | 40 +- .../api/server/router/network/backend.go | 4 +- .../api/server/router/network/network.go | 2 +- .../server/router/network/network_routes.go | 37 +- 
.../api/server/router/plugin/backend.go | 3 +- .../docker/api/server/router/plugin/plugin.go | 6 +- .../api/server/router/plugin/plugin_routes.go | 6 +- .../api/server/router/session/backend.go | 12 + .../api/server/router/session/session.go | 29 + .../server/router/session/session_routes.go | 16 + .../docker/api/server/router/swarm/backend.go | 15 +- .../docker/api/server/router/swarm/cluster.go | 14 +- .../api/server/router/swarm/cluster_routes.go | 86 +- .../docker/api/server/router/swarm/helpers.go | 2 +- .../api/server/router/system/backend.go | 2 +- .../docker/api/server/router/system/system.go | 9 +- .../api/server/router/system/system_routes.go | 8 +- .../api/server/router/volume/backend.go | 4 +- .../docker/api/server/router/volume/volume.go | 2 +- .../api/server/router/volume/volume_routes.go | 2 +- .../docker/docker/api/server/server.go | 3 +- .../github.com/docker/docker/api/swagger.yaml | 400 +- .../docker/api/types/backend/backend.go | 14 +- .../docker/docker/api/types/backend/build.go | 44 + .../docker/docker/api/types/client.go | 20 +- .../docker/docker/api/types/configs.go | 1 + .../docker/api/types/container/config.go | 6 + .../api/types/container/waitcondition.go | 22 + .../docker/docker/api/types/events/events.go | 8 + .../docker/api/types/filters/parse_test.go | 80 +- .../docker/api/types/network/network.go | 6 + .../docker/api/types/registry/registry.go | 21 +- .../docker/docker/api/types/swarm/common.go | 13 + .../docker/docker/api/types/swarm/config.go | 31 + .../docker/api/types/swarm/container.go | 1 + .../docker/docker/api/types/swarm/network.go | 23 +- .../docker/docker/api/types/swarm/node.go | 1 + .../docker/docker/api/types/swarm/runtime.go | 19 + .../docker/api/types/swarm/runtime/gen.go | 3 + .../api/types/swarm/runtime/plugin.pb.go | 712 + .../api/types/swarm/runtime/plugin.proto | 18 + .../docker/docker/api/types/swarm/swarm.go | 22 +- .../docker/docker/api/types/swarm/task.go | 19 +- .../docker/docker/api/types/types.go | 56 +- 
.../docker/docker/api/types/volume.go | 3 + .../docker/docker/builder/builder.go | 167 +- .../docker/docker/builder/dockerfile/bflag.go | 7 + .../docker/builder/dockerfile/buildargs.go | 44 +- .../builder/dockerfile/buildargs_test.go | 49 +- .../docker/builder/dockerfile/builder.go | 521 +- .../docker/builder/dockerfile/builder_test.go | 13 +- .../docker/builder/dockerfile/builder_unix.go | 4 +- .../builder/dockerfile/builder_windows.go | 7 +- .../builder/dockerfile/clientsession.go | 77 + .../builder/dockerfile/containerbackend.go | 144 + .../docker/docker/builder/dockerfile/copy.go | 444 + .../docker/builder/dockerfile/copy_test.go | 45 + .../docker/builder/dockerfile/copy_unix.go | 36 + .../docker/builder/dockerfile/copy_windows.go | 8 + .../docker/builder/dockerfile/dispatchers.go | 718 +- .../builder/dockerfile/dispatchers_test.go | 640 +- .../builder/dockerfile/dispatchers_unix.go | 6 + .../builder/dockerfile/dispatchers_windows.go | 6 + .../docker/builder/dockerfile/envVarTest | 27 +- .../docker/builder/dockerfile/evaluator.go | 280 +- .../builder/dockerfile/evaluator_test.go | 29 +- .../docker/builder/dockerfile/imagecontext.go | 272 +- .../docker/builder/dockerfile/imageprobe.go | 63 + .../docker/builder/dockerfile/internals.go | 840 +- .../builder/dockerfile/internals_test.go | 87 +- .../builder/dockerfile/internals_unix.go | 6 +- .../builder/dockerfile/internals_windows.go | 35 +- .../dockerfile/internals_windows_test.go | 36 +- .../docker/builder/dockerfile/metrics.go | 44 + .../builder/dockerfile/mockbackend_test.go | 99 +- .../builder/dockerfile/parser/dumper/main.go | 8 +- .../builder/dockerfile/parser/json_test.go | 10 +- .../builder/dockerfile/parser/line_parsers.go | 6 +- .../dockerfile/parser/line_parsers_test.go | 21 +- .../builder/dockerfile/parser/parser.go | 331 +- .../builder/dockerfile/parser/parser_test.go | 123 +- .../testfiles/ADD-COPY-with-JSON/Dockerfile | 2 +- .../testfiles/ADD-COPY-with-JSON/result | 2 +- 
.../testfiles/brimstone-consuldock/Dockerfile | 2 +- .../testfiles/brimstone-consuldock/result | 2 +- .../testfiles/continue-at-eof/Dockerfile | 3 + .../parser/testfiles/continue-at-eof/result | 2 + .../parser/testfiles/docker/Dockerfile | 2 +- .../dockerfile/parser/testfiles/docker/result | 2 +- .../testfiles/escape-after-comment/Dockerfile | 2 +- .../testfiles/escape-after-comment/result | 2 +- .../testfiles/escape-nonewline/Dockerfile | 2 +- .../parser/testfiles/escape-nonewline/result | 2 +- .../parser/testfiles/escape/Dockerfile | 2 +- .../dockerfile/parser/testfiles/escape/result | 2 +- .../parser/testfiles/escapes/Dockerfile | 2 +- .../parser/testfiles/escapes/result | 2 +- .../kartar-entrypoint-oddities/Dockerfile | 2 +- .../kartar-entrypoint-oddities/result | 2 +- .../lk4d4-the-edge-case-generator/Dockerfile | 2 +- .../lk4d4-the-edge-case-generator/result | 2 +- .../parser/testfiles/nginx/Dockerfile | 2 +- .../dockerfile/parser/testfiles/nginx/result | 2 +- .../parser/testfiles/znc/Dockerfile | 2 +- .../dockerfile/parser/testfiles/znc/result | 2 +- .../docker/builder/dockerfile/shell_parser.go | 262 +- .../builder/dockerfile/shell_parser_test.go | 30 +- .../docker/builder/dockerfile/wordsTest | 7 +- .../docker/docker/builder/dockerignore.go | 48 - .../builder/dockerignore/dockerignore.go | 19 +- .../builder/dockerignore/dockerignore_test.go | 22 +- .../docker/docker/builder/fscache/fscache.go | 602 + .../docker/builder/fscache/fscache_test.go | 131 + .../docker/builder/fscache/naivedriver.go | 28 + .../docker/builder/remotecontext/archive.go | 128 + .../docker/builder/remotecontext/detect.go | 184 + .../detect_test.go} | 46 +- .../docker/builder/remotecontext/filehash.go | 13 +- .../docker/builder/remotecontext/generate.go | 3 + .../docker/builder/{ => remotecontext}/git.go | 11 +- .../builder/remotecontext/git/gitutils.go | 159 + .../remotecontext/git/gitutils_test.go | 238 + .../builder/remotecontext/lazycontext.go | 111 +- .../remotecontext}/mimetype.go 
| 12 +- .../builder/remotecontext/mimetype_test.go | 16 + .../builder/{ => remotecontext}/remote.go | 73 +- .../{ => remotecontext}/remote_test.go | 78 +- .../docker/builder/remotecontext/tarsum.go | 174 + .../docker/builder/remotecontext/tarsum.pb.go | 525 + .../docker/builder/remotecontext/tarsum.proto | 7 + .../builder/remotecontext/tarsum_test.go | 157 + .../builder/{ => remotecontext}/utils_test.go | 8 +- .../docker/docker/builder/tarsum.go | 159 - .../docker/docker/builder/tarsum_test.go | 265 - .../cli/command/bundlefile/bundlefile.go | 70 - .../cli/command/bundlefile/bundlefile_test.go | 77 - .../docker/cli/command/checkpoint/cmd.go | 24 - .../docker/cli/command/checkpoint/create.go | 58 - .../docker/cli/command/checkpoint/list.go | 54 - .../docker/cli/command/checkpoint/remove.go | 44 - .../docker/docker/cli/command/cli.go | 303 - .../docker/cli/command/commands/commands.go | 121 - .../docker/cli/command/container/attach.go | 129 - .../docker/cli/command/container/cmd.go | 45 - .../docker/cli/command/container/commit.go | 75 - .../docker/docker/cli/command/container/cp.go | 302 - .../docker/cli/command/container/create.go | 224 - .../docker/cli/command/container/diff.go | 46 - .../docker/cli/command/container/exec.go | 205 - .../docker/cli/command/container/exec_test.go | 116 - .../docker/cli/command/container/export.go | 58 - .../docker/cli/command/container/hijack.go | 124 - .../docker/cli/command/container/inspect.go | 46 - .../docker/cli/command/container/kill.go | 56 - .../docker/cli/command/container/list.go | 140 - .../docker/cli/command/container/logs.go | 76 - .../docker/cli/command/container/opts.go | 900 - .../docker/cli/command/container/opts_test.go | 869 - .../docker/cli/command/container/pause.go | 49 - .../docker/cli/command/container/port.go | 78 - .../docker/cli/command/container/prune.go | 78 - .../docker/cli/command/container/ps_test.go | 118 - .../docker/cli/command/container/rename.go | 51 - .../docker/cli/command/container/restart.go 
| 62 - .../docker/docker/cli/command/container/rm.go | 73 - .../docker/cli/command/container/run.go | 296 - .../docker/cli/command/container/start.go | 179 - .../docker/cli/command/container/stats.go | 242 - .../cli/command/container/stats_helpers.go | 229 - .../cli/command/container/stats_unit_test.go | 20 - .../docker/cli/command/container/stop.go | 67 - .../cli/command/container/testdata/utf16.env | Bin 54 -> 0 bytes .../command/container/testdata/utf16be.env | Bin 54 -> 0 bytes .../cli/command/container/testdata/utf8.env | 3 - .../cli/command/container/testdata/valid.env | 1 - .../command/container/testdata/valid.label | 1 - .../docker/cli/command/container/top.go | 57 - .../docker/cli/command/container/tty.go | 103 - .../docker/cli/command/container/unpause.go | 50 - .../docker/cli/command/container/update.go | 134 - .../docker/cli/command/container/utils.go | 142 - .../docker/cli/command/container/wait.go | 50 - .../docker/docker/cli/command/events_utils.go | 49 - .../cli/command/formatter/checkpoint.go | 52 - .../cli/command/formatter/checkpoint_test.go | 55 - .../docker/cli/command/formatter/container.go | 259 - .../cli/command/formatter/container_test.go | 385 - .../docker/cli/command/formatter/custom.go | 35 - .../cli/command/formatter/custom_test.go | 28 - .../docker/cli/command/formatter/diff.go | 72 - .../docker/cli/command/formatter/diff_test.go | 59 - .../cli/command/formatter/disk_usage.go | 339 - .../cli/command/formatter/disk_usage_test.go | 47 - .../docker/cli/command/formatter/formatter.go | 119 - .../docker/cli/command/formatter/image.go | 272 - .../cli/command/formatter/image_test.go | 327 - .../docker/cli/command/formatter/network.go | 129 - .../cli/command/formatter/network_test.go | 213 - .../docker/cli/command/formatter/node.go | 292 - .../docker/cli/command/formatter/node_test.go | 188 - .../docker/cli/command/formatter/plugin.go | 95 - .../cli/command/formatter/plugin_test.go | 182 - .../docker/cli/command/formatter/reflect.go | 66 - 
.../cli/command/formatter/reflect_test.go | 66 - .../docker/cli/command/formatter/secret.go | 101 - .../cli/command/formatter/secret_test.go | 63 - .../docker/cli/command/formatter/service.go | 535 - .../cli/command/formatter/service_test.go | 239 - .../docker/cli/command/formatter/stats.go | 220 - .../cli/command/formatter/stats_test.go | 266 - .../docker/cli/command/formatter/task.go | 150 - .../docker/cli/command/formatter/task_test.go | 107 - .../docker/cli/command/formatter/volume.go | 131 - .../cli/command/formatter/volume_test.go | 183 - .../cli/command/idresolver/client_test.go | 28 - .../cli/command/idresolver/idresolver.go | 70 - .../cli/command/idresolver/idresolver_test.go | 144 - .../docker/docker/cli/command/image/build.go | 530 - .../docker/cli/command/image/build/context.go | 275 - .../cli/command/image/build/context_test.go | 383 - .../cli/command/image/build/context_unix.go | 11 - .../command/image/build/context_windows.go | 17 - .../docker/docker/cli/command/image/cmd.go | 33 - .../docker/cli/command/image/history.go | 99 - .../docker/docker/cli/command/image/import.go | 88 - .../docker/cli/command/image/inspect.go | 44 - .../docker/docker/cli/command/image/list.go | 96 - .../docker/docker/cli/command/image/load.go | 77 - .../docker/docker/cli/command/image/prune.go | 95 - .../docker/docker/cli/command/image/pull.go | 85 - .../docker/docker/cli/command/image/push.go | 61 - .../docker/docker/cli/command/image/remove.go | 78 - .../docker/docker/cli/command/image/save.go | 56 - .../docker/docker/cli/command/image/tag.go | 41 - .../docker/docker/cli/command/image/trust.go | 382 - .../docker/cli/command/image/trust_test.go | 57 - .../docker/docker/cli/command/in.go | 75 - .../docker/cli/command/inspect/inspector.go | 198 - .../cli/command/inspect/inspector_test.go | 221 - .../docker/docker/cli/command/network/cmd.go | 28 - .../docker/cli/command/network/connect.go | 63 - .../docker/cli/command/network/create.go | 232 - 
.../docker/cli/command/network/disconnect.go | 41 - .../docker/cli/command/network/inspect.go | 47 - .../docker/docker/cli/command/network/list.go | 76 - .../docker/cli/command/network/prune.go | 77 - .../docker/cli/command/network/remove.go | 53 - .../docker/cli/command/node/client_test.go | 68 - .../docker/docker/cli/command/node/cmd.go | 57 - .../docker/docker/cli/command/node/demote.go | 36 - .../docker/cli/command/node/demote_test.go | 88 - .../docker/docker/cli/command/node/inspect.go | 72 - .../docker/cli/command/node/inspect_test.go | 123 - .../docker/docker/cli/command/node/list.go | 73 - .../docker/cli/command/node/list_test.go | 162 - .../docker/docker/cli/command/node/opts.go | 24 - .../docker/docker/cli/command/node/promote.go | 36 - .../docker/cli/command/node/promote_test.go | 88 - .../docker/docker/cli/command/node/ps.go | 109 - .../docker/docker/cli/command/node/ps_test.go | 133 - .../docker/docker/cli/command/node/remove.go | 57 - .../docker/cli/command/node/remove_test.go | 47 - .../node-inspect-pretty.manager-leader.golden | 25 - .../node-inspect-pretty.manager.golden | 25 - .../node-inspect-pretty.simple.golden | 23 - .../node/testdata/node-ps.simple.golden | 2 - .../node/testdata/node-ps.with-errors.golden | 4 - .../docker/docker/cli/command/node/update.go | 121 - .../docker/cli/command/node/update_test.go | 172 - .../docker/docker/cli/command/out.go | 69 - .../docker/docker/cli/command/plugin/cmd.go | 32 - .../docker/cli/command/plugin/create.go | 128 - .../docker/cli/command/plugin/disable.go | 36 - .../docker/cli/command/plugin/enable.go | 48 - .../docker/cli/command/plugin/inspect.go | 42 - .../docker/cli/command/plugin/install.go | 168 - .../docker/docker/cli/command/plugin/list.go | 63 - .../docker/docker/cli/command/plugin/push.go | 69 - .../docker/cli/command/plugin/remove.go | 55 - .../docker/docker/cli/command/plugin/set.go | 22 - .../docker/cli/command/plugin/upgrade.go | 90 - .../docker/docker/cli/command/prune/prune.go | 51 - 
.../docker/docker/cli/command/registry.go | 187 - .../docker/cli/command/registry/login.go | 87 - .../docker/cli/command/registry/logout.go | 77 - .../docker/cli/command/registry/search.go | 126 - .../docker/cli/command/secret/client_test.go | 44 - .../docker/docker/cli/command/secret/cmd.go | 26 - .../docker/cli/command/secret/create.go | 80 - .../docker/cli/command/secret/create_test.go | 126 - .../docker/cli/command/secret/inspect.go | 41 - .../docker/cli/command/secret/inspect_test.go | 149 - .../docker/docker/cli/command/secret/ls.go | 61 - .../docker/cli/command/secret/ls_test.go | 172 - .../docker/cli/command/secret/remove.go | 53 - .../docker/cli/command/secret/remove_test.go | 81 - .../testdata/secret-create-with-name.golden | 1 - ...t-inspect-with-format.json-template.golden | 1 - ...inspect-with-format.simple-template.golden | 1 - ...format.multiple-secrets-with-labels.golden | 26 - ...nspect-without-format.single-secret.golden | 12 - .../secret-list-with-config-format.golden | 2 - .../testdata/secret-list-with-filter.golden | 3 - .../testdata/secret-list-with-format.golden | 2 - .../secret-list-with-quiet-option.golden | 2 - .../secret/testdata/secret-list.golden | 3 - .../docker/docker/cli/command/service/cmd.go | 30 - .../docker/cli/command/service/create.go | 118 - .../docker/cli/command/service/helpers.go | 39 - .../docker/cli/command/service/inspect.go | 94 - .../cli/command/service/inspect_test.go | 140 - .../docker/docker/cli/command/service/list.go | 128 - .../docker/docker/cli/command/service/logs.go | 298 - .../docker/docker/cli/command/service/opts.go | 912 - .../docker/cli/command/service/opts_test.go | 109 - .../docker/cli/command/service/parse.go | 59 - .../cli/command/service/progress/progress.go | 409 - .../docker/docker/cli/command/service/ps.go | 123 - .../docker/cli/command/service/remove.go | 48 - .../docker/cli/command/service/scale.go | 97 - .../docker/cli/command/service/trust.go | 87 - .../docker/cli/command/service/update.go | 
1018 - .../docker/cli/command/service/update_test.go | 496 - .../docker/cli/command/stack/client_test.go | 153 - .../docker/docker/cli/command/stack/cmd.go | 35 - .../docker/docker/cli/command/stack/common.go | 60 - .../docker/docker/cli/command/stack/deploy.go | 97 - .../cli/command/stack/deploy_bundlefile.go | 91 - .../cli/command/stack/deploy_composefile.go | 315 - .../docker/cli/command/stack/deploy_test.go | 27 - .../docker/docker/cli/command/stack/list.go | 122 - .../docker/docker/cli/command/stack/opts.go | 51 - .../docker/docker/cli/command/stack/ps.go | 76 - .../docker/docker/cli/command/stack/remove.go | 121 - .../docker/cli/command/stack/remove_test.go | 107 - .../docker/cli/command/stack/services.go | 97 - .../docker/cli/command/swarm/client_test.go | 84 - .../docker/docker/cli/command/swarm/cmd.go | 29 - .../docker/docker/cli/command/swarm/init.go | 96 - .../docker/cli/command/swarm/init_test.go | 130 - .../docker/docker/cli/command/swarm/join.go | 85 - .../docker/cli/command/swarm/join_test.go | 102 - .../docker/cli/command/swarm/join_token.go | 119 - .../cli/command/swarm/join_token_test.go | 216 - .../docker/docker/cli/command/swarm/leave.go | 44 - .../docker/cli/command/swarm/leave_test.go | 52 - .../docker/docker/cli/command/swarm/opts.go | 212 - .../docker/cli/command/swarm/opts_test.go | 110 - .../swarm/testdata/init-init-autolock.golden | 11 - .../command/swarm/testdata/init-init.golden | 4 - .../testdata/jointoken-manager-quiet.golden | 1 - .../testdata/jointoken-manager-rotate.golden | 8 - .../swarm/testdata/jointoken-manager.golden | 6 - .../testdata/jointoken-worker-quiet.golden | 1 - .../swarm/testdata/jointoken-worker.golden | 6 - .../unlockkeys-unlock-key-quiet.golden | 1 - .../unlockkeys-unlock-key-rotate-quiet.golden | 1 - .../unlockkeys-unlock-key-rotate.golden | 9 - .../testdata/unlockkeys-unlock-key.golden | 7 - .../testdata/update-all-flags-quiet.golden | 1 - .../update-autolock-unlock-key.golden | 8 - 
.../swarm/testdata/update-noargs.golden | 13 - .../docker/docker/cli/command/swarm/unlock.go | 78 - .../docker/cli/command/swarm/unlock_key.go | 86 - .../cli/command/swarm/unlock_key_test.go | 176 - .../docker/cli/command/swarm/unlock_test.go | 101 - .../docker/docker/cli/command/swarm/update.go | 72 - .../docker/cli/command/swarm/update_test.go | 183 - .../docker/docker/cli/command/system/cmd.go | 26 - .../docker/docker/cli/command/system/df.go | 56 - .../docker/cli/command/system/events.go | 140 - .../docker/docker/cli/command/system/info.go | 365 - .../docker/cli/command/system/inspect.go | 216 - .../docker/docker/cli/command/system/prune.go | 96 - .../docker/cli/command/system/version.go | 131 - .../docker/docker/cli/command/task/print.go | 84 - .../docker/docker/cli/command/trust.go | 43 - .../docker/docker/cli/command/utils.go | 119 - .../docker/cli/command/volume/client_test.go | 53 - .../docker/docker/cli/command/volume/cmd.go | 26 - .../docker/cli/command/volume/create.go | 70 - .../docker/cli/command/volume/create_test.go | 142 - .../docker/cli/command/volume/inspect.go | 45 - .../docker/cli/command/volume/inspect_test.go | 151 - .../docker/docker/cli/command/volume/list.go | 73 - .../docker/cli/command/volume/list_test.go | 124 - .../docker/docker/cli/command/volume/prune.go | 78 - .../docker/cli/command/volume/prune_test.go | 133 - .../docker/cli/command/volume/remove.go | 69 - .../docker/cli/command/volume/remove_test.go | 47 - ...e-inspect-with-format.json-template.golden | 1 - ...inspect-with-format.simple-template.golden | 1 - ...-format.multiple-volume-with-labels.golden | 22 - ...nspect-without-format.single-volume.golden | 10 - .../volume-list-with-config-format.golden | 3 - .../testdata/volume-list-with-format.golden | 3 - .../volume-list-without-format.golden | 4 - .../volume/testdata/volume-prune-no.golden | 2 - .../volume/testdata/volume-prune-yes.golden | 7 - .../volume-prune.deletedVolumes.golden | 6 - 
.../volume/testdata/volume-prune.empty.golden | 1 - .../docker/cli/compose/convert/compose.go | 118 - .../cli/compose/convert/compose_test.go | 134 - .../docker/cli/compose/convert/service.go | 448 - .../cli/compose/convert/service_test.go | 279 - .../docker/cli/compose/convert/volume.go | 87 - .../docker/cli/compose/convert/volume_test.go | 192 - .../compose/interpolation/interpolation.go | 92 - .../interpolation/interpolation_test.go | 57 - .../docker/cli/compose/loader/example1.env | 8 - .../docker/cli/compose/loader/example2.env | 4 - .../cli/compose/loader/full-example.yml | 290 - .../docker/cli/compose/loader/loader.go | 707 - .../docker/cli/compose/loader/loader_test.go | 1135 - .../docker/cli/compose/loader/volume.go | 121 - .../docker/cli/compose/loader/volume_test.go | 148 - .../docker/cli/compose/schema/bindata.go | 283 - .../docker/cli/compose/schema/schema.go | 168 - .../docker/cli/compose/schema/schema_test.go | 52 - .../docker/cli/compose/template/template.go | 100 - .../cli/compose/template/template_test.go | 83 - .../docker/docker/cli/compose/types/types.go | 309 - .../docker/docker/cli/config/config.go | 120 - .../docker/docker/cli/config/config_test.go | 621 - .../docker/docker/cli/config/configdir.go | 25 + .../docker/cli/config/configfile/file.go | 189 - .../docker/cli/config/configfile/file_test.go | 27 - .../cli/config/credentials/credentials.go | 17 - .../cli/config/credentials/default_store.go | 22 - .../credentials/default_store_darwin.go | 3 - .../config/credentials/default_store_linux.go | 3 - .../credentials/default_store_unsupported.go | 5 - .../credentials/default_store_windows.go | 3 - .../cli/config/credentials/file_store.go | 53 - .../cli/config/credentials/file_store_test.go | 139 - .../cli/config/credentials/native_store.go | 144 - .../config/credentials/native_store_test.go | 356 - .../docker/docker/cli/flags/client.go | 13 - .../docker/cli/internal/test/builders/doc.go | 3 - .../docker/cli/internal/test/builders/node.go | 127 - 
.../cli/internal/test/builders/secret.go | 61 - .../cli/internal/test/builders/service.go | 32 - .../cli/internal/test/builders/swarm.go | 39 - .../docker/cli/internal/test/builders/task.go | 111 - .../cli/internal/test/builders/volume.go | 43 - .../docker/docker/cli/internal/test/cli.go | 71 - .../docker/docker/cli/internal/test/doc.go | 5 - .../github.com/docker/docker/cli/required.go | 69 - .../docker/docker/cli/trust/trust.go | 232 - .../docker/docker/client/build_prune.go | 30 + .../github.com/docker/docker/client/client.go | 69 +- .../docker/docker/client/client_test.go | 304 +- .../docker/docker/client/config_create.go | 25 + .../docker/client/config_create_test.go | 69 + .../docker/docker/client/config_inspect.go | 37 + .../docker/client/config_inspect_test.go | 78 + .../docker/docker/client/config_list.go | 38 + .../docker/docker/client/config_list_test.go | 106 + .../docker/docker/client/config_remove.go | 13 + .../docker/client/config_remove_test.go | 59 + .../docker/docker/client/config_update.go | 21 + .../docker/client/config_update_test.go | 60 + .../docker/docker/client/container_attach.go | 20 + .../docker/docker/client/container_copy.go | 11 +- .../docker/docker/client/container_logs.go | 20 + .../docker/client/container_prune_test.go | 12 +- .../docker/docker/client/container_wait.go | 82 +- .../docker/client/container_wait_test.go | 30 +- .../docker/docker/client/disk_usage_test.go | 55 + .../docker/client/distribution_inspect.go | 35 + .../client/distribution_inspect_test.go | 18 + .../github.com/docker/docker/client/errors.go | 22 + .../github.com/docker/docker/client/hijack.go | 85 +- .../docker/docker/client/image_build.go | 3 + .../docker/docker/client/image_prune_test.go | 12 +- .../docker/docker/client/interface.go | 29 +- .../docker/docker/client/network_inspect.go | 11 +- .../docker/client/network_inspect_test.go | 21 +- .../docker/client/network_prune_test.go | 8 +- .../docker/docker/client/parse_logs.go | 41 + 
.../docker/docker/client/parse_logs_test.go | 36 + .../github.com/docker/docker/client/ping.go | 18 +- .../docker/docker/client/ping_test.go | 82 + .../docker/docker/client/plugin_upgrade.go | 6 +- .../docker/docker/client/request.go | 77 +- .../docker/docker/client/secret_create.go | 3 + .../docker/client/secret_create_test.go | 16 +- .../docker/docker/client/secret_inspect.go | 3 + .../docker/client/secret_inspect_test.go | 19 +- .../docker/docker/client/secret_list.go | 3 + .../docker/docker/client/secret_list_test.go | 16 +- .../docker/docker/client/secret_remove.go | 3 + .../docker/client/secret_remove_test.go | 16 +- .../docker/docker/client/secret_update.go | 5 +- .../docker/client/secret_update_test.go | 19 +- .../docker/docker/client/service_create.go | 132 +- .../docker/client/service_create_test.go | 153 + .../docker/docker/client/service_update.go | 55 +- .../docker/docker/client/session.go | 19 + .../client/session/filesync/diffcopy.go | 31 + .../client/session/filesync/filesync.go | 183 + .../client/session/filesync/filesync.pb.go | 575 + .../client/session/filesync/filesync.proto | 15 + .../client/session/filesync/filesync_test.go | 71 + .../client/session/filesync/generate.go | 3 + .../client/session/filesync/tarstream.go | 83 + .../docker/docker/client/session/grpc.go | 62 + .../docker/docker/client/session/manager.go | 202 + .../docker/docker/client/session/session.go | 117 + .../client/session/testutil/testutil.go | 70 + .../client/swarm_get_unlock_key_test.go | 60 + .../docker/docker/client/swarm_unlock_test.go | 49 + .../github.com/docker/docker/client/utils.go | 3 +- .../docker/docker/cmd/docker/daemon_none.go | 29 - .../docker/cmd/docker/daemon_none_test.go | 17 - .../docker/cmd/docker/daemon_unit_test.go | 30 - .../docker/docker/cmd/docker/daemon_unix.go | 79 - .../docker/docker/cmd/docker/docker.go | 310 - .../docker/docker/cmd/docker/docker_test.go | 32 - .../docker/cmd/docker/docker_windows.go | 18 - 
.../docker/docker/cmd/dockerd/config.go | 18 +- .../docker/docker/cmd/dockerd/config_unix.go | 3 +- .../docker/cmd/dockerd/config_unix_test.go | 14 +- .../docker/docker/cmd/dockerd/daemon.go | 248 +- .../docker/docker/cmd/dockerd/daemon_test.go | 88 +- .../docker/docker/cmd/dockerd/daemon_unix.go | 12 - .../docker/cmd/dockerd/daemon_unix_test.go | 65 +- .../docker/docker/cmd/dockerd/docker.go | 19 +- .../common.go => cmd/dockerd/options.go} | 67 +- .../dockerd/options_test.go} | 23 +- .../docker/docker/container/container.go | 229 +- .../docker/container/container_notlinux.go | 4 +- .../docker/container/container_unit_test.go | 32 +- .../docker/docker/container/container_unix.go | 137 +- .../docker/container/container_windows.go | 117 +- .../docker/docker/container/health.go | 7 +- .../docker/docker/container/state.go | 193 +- .../docker/docker/container/state_test.go | 129 +- .../docker/docker/container/stream/attach.go | 68 +- .../docker/docker/container/view.go | 302 + .../docker/docker/container/view_test.go | 106 + .../deb/aarch64/debian-jessie/Dockerfile | 6 +- .../deb/aarch64/debian-stretch/Dockerfile | 6 +- .../contrib/builder/deb/aarch64/generate.sh | 3 +- .../deb/aarch64/ubuntu-trusty/Dockerfile | 6 +- .../deb/aarch64/ubuntu-xenial/Dockerfile | 6 +- .../deb/amd64/debian-jessie/Dockerfile | 6 +- .../deb/amd64/debian-stretch/Dockerfile | 6 +- .../deb/amd64/debian-wheezy/Dockerfile | 6 +- .../contrib/builder/deb/amd64/generate.sh | 3 +- .../deb/amd64/ubuntu-trusty/Dockerfile | 6 +- .../deb/amd64/ubuntu-xenial/Dockerfile | 6 +- .../deb/amd64/ubuntu-yakkety/Dockerfile | 6 +- .../builder/deb/amd64/ubuntu-zesty/Dockerfile | 6 +- .../deb/armhf/debian-jessie/Dockerfile | 6 +- .../contrib/builder/deb/armhf/generate.sh | 3 +- .../deb/armhf/raspbian-jessie/Dockerfile | 6 +- .../deb/armhf/ubuntu-trusty/Dockerfile | 6 +- .../deb/armhf/ubuntu-xenial/Dockerfile | 6 +- .../deb/armhf/ubuntu-yakkety/Dockerfile | 6 +- .../contrib/builder/deb/ppc64le/generate.sh | 7 +- 
.../deb/ppc64le/ubuntu-trusty/Dockerfile | 6 +- .../deb/ppc64le/ubuntu-xenial/Dockerfile | 6 +- .../deb/ppc64le/ubuntu-yakkety/Dockerfile | 6 +- .../contrib/builder/deb/s390x/generate.sh | 3 +- .../deb/s390x/ubuntu-xenial/Dockerfile | 6 +- .../deb/s390x/ubuntu-yakkety/Dockerfile | 6 +- .../rpm/amd64/amazonlinux-latest/Dockerfile | 4 +- .../builder/rpm/amd64/centos-7/Dockerfile | 4 +- .../builder/rpm/amd64/fedora-24/Dockerfile | 4 +- .../builder/rpm/amd64/fedora-25/Dockerfile | 4 +- .../contrib/builder/rpm/amd64/generate.sh | 5 +- .../rpm/amd64/opensuse-13.2/Dockerfile | 4 +- .../rpm/amd64/oraclelinux-6/Dockerfile | 4 +- .../rpm/amd64/oraclelinux-7/Dockerfile | 4 +- .../builder/rpm/amd64/photon-1.0/Dockerfile | 6 +- .../builder/rpm/armhf/centos-7/Dockerfile | 4 +- .../contrib/builder/rpm/armhf/generate.sh | 3 +- .../builder/rpm/ppc64le/centos-7/Dockerfile | 19 + .../builder/rpm/ppc64le/fedora-24/Dockerfile | 4 +- .../contrib/builder/rpm/ppc64le/generate.sh | 61 +- .../rpm/ppc64le/opensuse-42.1/Dockerfile | 20 + .../docker/contrib/builder/rpm/s390x/build.sh | 10 + .../rpm/s390x/clefos-base-s390x-7/Dockerfile | 19 + .../contrib/builder/rpm/s390x/generate.sh | 144 + .../s390x/opensuse-tumbleweed-1/Dockerfile | 20 + .../docker/contrib/completion/REVIEWERS | 2 - .../docker/contrib/completion/bash/docker | 4597 ---- .../contrib/completion/fish/docker.fish | 409 - .../contrib/completion/powershell/readme.txt | 1 - .../docker/contrib/completion/zsh/REVIEWERS | 2 - .../docker/contrib/completion/zsh/_docker | 3011 --- .../desktop-integration/chromium/Dockerfile | 2 +- .../desktop-integration/gparted/Dockerfile | 2 +- .../contrib/download-frozen-image-v2.sh | 54 +- .../docker/docker/contrib/mkimage-alpine.sh | 11 +- .../docker/docker/contrib/mkimage-busybox.sh | 43 - .../docker/contrib/mkimage-debootstrap.sh | 297 - .../docker/docker/contrib/mkimage-rinse.sh | 123 - .../selinux/docker-engine-selinux/LICENSE | 340 - .../selinux/docker-engine-selinux/Makefile | 16 - 
.../selinux/docker-engine-selinux/docker.fc | 18 - .../selinux/docker-engine-selinux/docker.if | 461 - .../selinux/docker-engine-selinux/docker.te | 407 - .../docker-engine-selinux/docker_selinux.8.gz | Bin 2847 -> 0 bytes .../Syntaxes/Dockerfile.tmLanguage | 17 + .../docker/contrib/syscall-test/Dockerfile | 3 +- .../docker/contrib/syscall-test/appletalk.c | 12 - .../docker/docker/daemon/apparmor_default.go | 2 +- .../docker/docker/daemon/archive.go | 155 +- .../docker/daemon/archive_tarcopyoptions.go | 15 + .../daemon/archive_tarcopyoptions_unix.go | 25 + .../daemon/archive_tarcopyoptions_windows.go | 12 + .../docker/docker/daemon/archive_unix.go | 35 - .../docker/docker/daemon/archive_windows.go | 5 - .../github.com/docker/docker/daemon/attach.go | 32 +- .../github.com/docker/docker/daemon/build.go | 196 + .../github.com/docker/docker/daemon/cache.go | 6 +- .../docker/docker/daemon/checkpoint.go | 6 +- .../docker/docker/daemon/cluster.go | 2 + .../docker/docker/daemon/cluster/cluster.go | 95 +- .../docker/docker/daemon/cluster/configs.go | 117 + .../cluster/controllers/plugin/controller.go | 261 + .../controllers/plugin/controller_test.go | 390 + .../docker/daemon/cluster/convert/config.go | 61 + .../daemon/cluster/convert/container.go | 60 +- .../docker/daemon/cluster/convert/network.go | 35 +- .../docker/daemon/cluster/convert/node.go | 5 + .../docker/daemon/cluster/convert/service.go | 151 +- .../daemon/cluster/convert/service_test.go | 150 + .../docker/daemon/cluster/convert/swarm.go | 27 + .../docker/daemon/cluster/convert/task.go | 15 +- .../docker/daemon/cluster/executor/backend.go | 10 +- .../cluster/executor/container/adapter.go | 59 +- .../cluster/executor/container/attachment.go | 4 +- .../cluster/executor/container/container.go | 68 +- .../cluster/executor/container/controller.go | 89 +- .../cluster/executor/container/executor.go | 70 +- .../cluster/executor/container/health_test.go | 14 +- .../docker/docker/daemon/cluster/filters.go | 18 + 
.../docker/daemon/cluster/filters_test.go | 47 +- .../docker/docker/daemon/cluster/helpers.go | 36 + .../docker/daemon/cluster/listen_addr.go | 95 +- .../docker/docker/daemon/cluster/networks.go | 40 +- .../docker/daemon/cluster/noderunner.go | 85 +- .../docker/docker/daemon/cluster/nodes.go | 2 +- .../docker/docker/daemon/cluster/secrets.go | 2 +- .../docker/docker/daemon/cluster/services.go | 293 +- .../docker/docker/daemon/cluster/swarm.go | 46 +- .../docker/docker/daemon/cluster/tasks.go | 20 +- .../github.com/docker/docker/daemon/commit.go | 69 +- .../docker/docker/daemon/config/config.go | 21 +- .../daemon/config/config_common_unix.go | 2 +- .../docker/daemon/config/config_test.go | 13 +- .../docker/daemon/config/config_unix_test.go | 148 +- .../daemon/config/config_windows_test.go | 39 +- .../docker/docker/daemon/configs.go | 23 + .../docker/docker/daemon/configs_linux.go | 7 + .../docker/daemon/configs_unsupported.go | 7 + .../docker/docker/daemon/configs_windows.go | 7 + .../docker/docker/daemon/container.go | 34 +- .../docker/daemon/container_operations.go | 32 +- .../daemon/container_operations_solaris.go | 3 +- .../daemon/container_operations_unix.go | 139 +- .../daemon/container_operations_windows.go | 148 +- .../github.com/docker/docker/daemon/create.go | 65 +- .../docker/docker/daemon/create_unix.go | 6 +- .../github.com/docker/docker/daemon/daemon.go | 407 +- .../docker/docker/daemon/daemon_linux.go | 5 + .../docker/docker/daemon/daemon_solaris.go | 12 +- .../docker/docker/daemon/daemon_test.go | 35 +- .../docker/docker/daemon/daemon_unix.go | 105 +- .../docker/docker/daemon/daemon_unix_test.go | 9 +- .../docker/docker/daemon/daemon_windows.go | 58 +- .../docker/docker/daemon/debugtrap.go | 62 - .../docker/docker/daemon/debugtrap_unix.go | 6 - .../docker/docker/daemon/debugtrap_windows.go | 6 - .../github.com/docker/docker/daemon/delete.go | 47 +- .../docker/docker/daemon/delete_test.go | 114 +- .../docker/docker/daemon/dependency.go | 17 + 
.../docker/daemon/discovery/discovery_test.go | 4 +- .../docker/docker/daemon/disk_usage.go | 78 +- .../github.com/docker/docker/daemon/events.go | 190 + .../docker/docker/daemon/events/events.go | 15 +- .../docker/daemon/events/events_test.go | 14 +- .../docker/docker/daemon/events/filter.go | 20 + .../docker/docker/daemon/events_test.go | 30 +- .../github.com/docker/docker/daemon/exec.go | 2 +- .../docker/docker/daemon/exec_windows.go | 6 +- .../github.com/docker/docker/daemon/export.go | 5 +- .../docker/docker/daemon/getsize_unix.go | 8 +- .../docker/daemon/graphdriver/aufs/aufs.go | 9 +- .../docker/daemon/graphdriver/btrfs/btrfs.go | 141 +- .../daemon/graphdriver/devmapper/README.md | 4 +- .../graphdriver/devmapper/device_setup.go | 247 + .../daemon/graphdriver/devmapper/deviceset.go | 123 +- .../graphdriver/devmapper/devmapper_test.go | 44 +- .../daemon/graphdriver/devmapper/driver.go | 3 +- .../docker/daemon/graphdriver/driver_linux.go | 2 +- .../daemon/graphdriver/driver_solaris.go | 2 +- .../graphdriver/graphtest/graphtest_unix.go | 52 +- .../graphdriver/graphtest/testutil_unix.go | 118 +- .../docker/daemon/graphdriver/lcow/lcow.go | 539 + .../daemon/graphdriver/overlay/overlay.go | 10 +- .../daemon/graphdriver/overlay2/overlay.go | 63 +- .../daemon/graphdriver/quota/projectquota.go | 2 +- .../graphdriver/register/register_windows.go | 3 +- .../docker/daemon/graphdriver/vfs/driver.go | 39 +- .../daemon/graphdriver/windows/windows.go | 32 +- .../docker/daemon/graphdriver/zfs/zfs.go | 2 +- .../github.com/docker/docker/daemon/health.go | 39 +- .../docker/docker/daemon/health_test.go | 43 +- .../github.com/docker/docker/daemon/image.go | 54 +- .../docker/docker/daemon/image_delete.go | 50 +- .../docker/docker/daemon/image_exporter.go | 16 +- .../docker/docker/daemon/image_history.go | 13 +- .../docker/docker/daemon/image_inspect.go | 26 +- .../docker/docker/daemon/image_pull.go | 51 +- .../docker/docker/daemon/image_push.go | 16 +- 
.../docker/docker/daemon/image_tag.go | 11 +- .../github.com/docker/docker/daemon/images.go | 53 +- .../github.com/docker/docker/daemon/import.go | 29 +- .../github.com/docker/docker/daemon/info.go | 49 +- .../docker/docker/daemon/info_unix.go | 15 +- .../docker/daemon/initlayer/setup_unix.go | 8 +- .../docker/daemon/initlayer/setup_windows.go | 6 +- .../docker/docker/daemon/inspect.go | 7 +- .../docker/docker/daemon/inspect_solaris.go | 14 - .../docker/docker/daemon/inspect_unix.go | 17 - .../docker/docker/daemon/inspect_windows.go | 15 - .../github.com/docker/docker/daemon/kill.go | 23 +- .../github.com/docker/docker/daemon/list.go | 223 +- .../docker/docker/daemon/list_unix.go | 2 +- .../docker/docker/daemon/list_windows.go | 2 +- .../docker/docker/daemon/logger/adapter.go | 4 +- .../docker/daemon/logger/adapter_test.go | 56 +- .../daemon/logger/awslogs/cloudwatchlogs.go | 230 +- .../logger/awslogs/cloudwatchlogs_test.go | 300 +- .../docker/docker/daemon/logger/factory.go | 17 + .../daemon/logger/gcplogs/gcplogging.go | 51 +- .../docker/daemon/logger/journald/journald.go | 6 +- .../docker/daemon/logger/journald/read.go | 24 +- .../daemon/logger/jsonfilelog/jsonfilelog.go | 55 +- .../jsonfilelog/multireader}/multireader.go | 10 +- .../multireader}/multireader_test.go | 2 +- .../docker/daemon/logger/jsonfilelog/read.go | 53 +- .../logger/loggerutils/rotatefilewriter.go | 21 +- .../docker/docker/daemon/logger/plugin.go | 1 + .../docker/docker/daemon/logger/ring.go | 2 +- .../docker/daemon/logger/syslog/syslog.go | 3 +- .../github.com/docker/docker/daemon/logs.go | 44 +- .../docker/docker/daemon/metrics.go | 142 +- .../docker/docker/daemon/metrics_unix.go | 86 + .../docker/daemon/metrics_unsupported.go | 12 + .../docker/docker/daemon/monitor.go | 80 +- .../github.com/docker/docker/daemon/mounts.go | 37 +- .../github.com/docker/docker/daemon/names.go | 4 - .../docker/docker/daemon/network.go | 46 +- .../docker/docker/daemon/oci_linux.go | 98 +- 
.../docker/docker/daemon/oci_solaris.go | 3 +- .../docker/docker/daemon/oci_windows.go | 146 +- .../github.com/docker/docker/daemon/prune.go | 231 +- .../github.com/docker/docker/daemon/reload.go | 34 +- .../docker/docker/daemon/reload_test.go | 56 + .../github.com/docker/docker/daemon/rename.go | 10 +- .../docker/docker/daemon/restart.go | 2 +- .../docker/docker/daemon/seccomp_linux.go | 2 +- .../docker/docker/daemon/secrets.go | 13 - .../docker/daemon/secrets_unsupported.go | 2 +- .../docker/docker/daemon/secrets_windows.go | 7 + .../docker/docker/daemon/selinux_linux.go | 6 +- .../github.com/docker/docker/daemon/start.go | 14 +- .../docker/docker/daemon/start_unix.go | 3 +- .../docker/docker/daemon/start_windows.go | 21 +- .../docker/daemon/stats/collector_unix.go | 6 +- .../github.com/docker/docker/daemon/stop.go | 18 +- .../github.com/docker/docker/daemon/update.go | 24 +- .../docker/docker/daemon/volumes.go | 164 +- .../docker/docker/daemon/volumes_unix.go | 23 +- .../docker/docker/daemon/volumes_unix_test.go | 256 + .../docker/docker/daemon/volumes_windows.go | 3 +- .../github.com/docker/docker/daemon/wait.go | 34 +- .../docker/docker/daemon/workdir.go | 3 +- .../docker/docker/distribution/config.go | 26 +- .../docker/docker/distribution/errors.go | 2 +- .../docker/distribution/metadata/metadata.go | 4 +- .../metadata/v1_id_service_test.go | 3 +- .../metadata/v2_metadata_service.go | 2 +- .../metadata/v2_metadata_service_test.go | 3 +- .../docker/docker/distribution/pull_v1.go | 2 +- .../docker/docker/distribution/pull_v2.go | 75 +- .../docker/distribution/pull_v2_windows.go | 20 +- .../docker/docker/distribution/push_v2.go | 21 +- .../docker/distribution/push_v2_test.go | 2 +- .../docker/distribution/registry_unit_test.go | 2 +- .../docker/distribution/utils/progress.go | 2 +- .../docker/distribution/xfer/download.go | 44 +- .../docker/distribution/xfer/download_test.go | 22 +- .../docker/docker/dockerversion/useragent.go | 4 +- 
.../github.com/docker/docker/docs/README.md | 30 - .../docker/docker/docs/api/v1.21.md | 12 +- .../docker/docker/docs/api/v1.22.md | 12 +- .../docker/docker/docs/api/v1.23.md | 12 +- .../docker/docker/docs/api/v1.24.md | 22 +- .../docker/docker/docs/api/version-history.md | 38 +- .../docker/docker/docs/deprecated.md | 321 - .../docker/docker/docs/extend/EBS_volume.md | 164 - .../docker/docker/docs/extend/config.md | 236 - .../extend/images/authz_additional_info.png | Bin 45916 -> 0 bytes .../docker/docs/extend/images/authz_allow.png | Bin 33505 -> 0 bytes .../docs/extend/images/authz_chunked.png | Bin 33168 -> 0 bytes .../extend/images/authz_connection_hijack.png | Bin 38780 -> 0 bytes .../docker/docs/extend/images/authz_deny.png | Bin 27099 -> 0 bytes .../docker/docker/docs/extend/index.md | 262 - .../docker/docs/extend/legacy_plugins.md | 100 - .../docker/docker/docs/extend/plugin_api.md | 196 - .../docs/extend/plugins_authorization.md | 260 - .../docker/docs/extend/plugins_graphdriver.md | 403 - .../docker/docs/extend/plugins_logging.md | 220 - .../docker/docs/extend/plugins_network.md | 77 - .../docker/docs/extend/plugins_services.md | 186 - .../docker/docs/extend/plugins_volume.md | 360 - .../docker/docker/docs/reference/builder.md | 1838 -- .../docs/reference/commandline/attach.md | 155 - .../docs/reference/commandline/build.md | 550 - .../docker/docs/reference/commandline/cli.md | 308 - .../docs/reference/commandline/commit.md | 117 - .../docs/reference/commandline/container.md | 61 - .../reference/commandline/container_prune.md | 120 - .../docker/docs/reference/commandline/cp.md | 115 - .../docs/reference/commandline/create.md | 260 - .../docs/reference/commandline/deploy.md | 112 - .../docker/docs/reference/commandline/diff.md | 67 - .../docs/reference/commandline/dockerd.md | 1371 -- .../docs/reference/commandline/events.md | 349 - .../docker/docs/reference/commandline/exec.md | 91 - .../docs/reference/commandline/export.md | 48 - 
.../docs/reference/commandline/history.md | 56 - .../docs/reference/commandline/image.md | 47 - .../docs/reference/commandline/image_prune.md | 165 - .../docs/reference/commandline/images.md | 342 - .../docs/reference/commandline/import.md | 89 - .../docs/reference/commandline/index.md | 184 - .../docker/docs/reference/commandline/info.md | 244 - .../docs/reference/commandline/inspect.md | 122 - .../docker/docs/reference/commandline/kill.md | 35 - .../docker/docs/reference/commandline/load.md | 62 - .../docs/reference/commandline/login.md | 158 - .../docs/reference/commandline/logout.md | 32 - .../docker/docs/reference/commandline/logs.md | 68 - .../docs/reference/commandline/network.md | 51 - .../reference/commandline/network_connect.md | 117 - .../reference/commandline/network_create.md | 224 - .../commandline/network_disconnect.md | 48 - .../reference/commandline/network_inspect.md | 307 - .../docs/reference/commandline/network_ls.md | 250 - .../reference/commandline/network_prune.md | 98 - .../docs/reference/commandline/network_rm.md | 68 - .../docker/docs/reference/commandline/node.md | 42 - .../docs/reference/commandline/node_demote.md | 47 - .../reference/commandline/node_inspect.md | 147 - .../docs/reference/commandline/node_ls.md | 169 - .../reference/commandline/node_promote.md | 45 - .../docs/reference/commandline/node_ps.md | 148 - .../docs/reference/commandline/node_rm.md | 80 - .../docs/reference/commandline/node_update.md | 77 - .../docs/reference/commandline/pause.md | 48 - .../docs/reference/commandline/plugin.md | 44 - .../reference/commandline/plugin_create.md | 65 - .../reference/commandline/plugin_disable.md | 69 - .../reference/commandline/plugin_enable.md | 68 - .../reference/commandline/plugin_inspect.md | 167 - .../reference/commandline/plugin_install.md | 75 - .../docs/reference/commandline/plugin_ls.md | 120 - .../docs/reference/commandline/plugin_push.md | 56 - .../docs/reference/commandline/plugin_rm.md | 61 - 
.../docs/reference/commandline/plugin_set.md | 167 - .../reference/commandline/plugin_upgrade.md | 88 - .../docker/docs/reference/commandline/port.md | 47 - .../docker/docs/reference/commandline/ps.md | 432 - .../docker/docs/reference/commandline/pull.md | 254 - .../docker/docs/reference/commandline/push.md | 82 - .../docs/reference/commandline/rename.md | 35 - .../docs/reference/commandline/restart.md | 32 - .../docker/docs/reference/commandline/rm.md | 100 - .../docker/docs/reference/commandline/rmi.md | 105 - .../docker/docs/reference/commandline/run.md | 803 - .../docker/docs/reference/commandline/save.md | 62 - .../docs/reference/commandline/search.md | 149 - .../docs/reference/commandline/secret.md | 45 - .../reference/commandline/secret_create.md | 99 - .../reference/commandline/secret_inspect.md | 95 - .../docs/reference/commandline/secret_ls.md | 157 - .../docs/reference/commandline/secret_rm.md | 54 - .../docs/reference/commandline/service.md | 42 - .../reference/commandline/service_create.md | 846 - .../reference/commandline/service_inspect.md | 170 - .../reference/commandline/service_logs.md | 85 - .../docs/reference/commandline/service_ls.md | 164 - .../docs/reference/commandline/service_ps.md | 194 - .../docs/reference/commandline/service_rm.md | 60 - .../reference/commandline/service_scale.md | 104 - .../reference/commandline/service_update.md | 265 - .../docs/reference/commandline/stack.md | 39 - .../reference/commandline/stack_deploy.md | 107 - .../docs/reference/commandline/stack_ls.md | 51 - .../docs/reference/commandline/stack_ps.md | 230 - .../docs/reference/commandline/stack_rm.md | 78 - .../reference/commandline/stack_services.md | 105 - .../docs/reference/commandline/start.md | 34 - .../docs/reference/commandline/stats.md | 140 - .../docker/docs/reference/commandline/stop.md | 37 - .../docs/reference/commandline/swarm.md | 40 - .../docs/reference/commandline/swarm_init.md | 157 - .../docs/reference/commandline/swarm_join.md | 119 - 
.../reference/commandline/swarm_join_token.md | 114 - .../docs/reference/commandline/swarm_leave.md | 71 - .../reference/commandline/swarm_unlock.md | 48 - .../reference/commandline/swarm_unlock_key.md | 91 - .../reference/commandline/swarm_update.md | 51 - .../docs/reference/commandline/system.md | 37 - .../docs/reference/commandline/system_df.md | 94 - .../reference/commandline/system_prune.md | 104 - .../docker/docs/reference/commandline/tag.md | 84 - .../docker/docs/reference/commandline/top.md | 25 - .../docs/reference/commandline/unpause.md | 44 - .../docs/reference/commandline/update.md | 123 - .../docs/reference/commandline/version.md | 74 - .../docs/reference/commandline/volume.md | 48 - .../reference/commandline/volume_create.md | 125 - .../reference/commandline/volume_inspect.md | 61 - .../docs/reference/commandline/volume_ls.md | 199 - .../reference/commandline/volume_prune.md | 57 - .../docs/reference/commandline/volume_rm.md | 48 - .../docker/docs/reference/commandline/wait.md | 58 - .../docker/docker/docs/reference/glossary.md | 374 - .../docker/docker/docs/reference/index.md | 21 - .../docker/docker/docs/reference/run.md | 1601 -- .../docs/static_files/moby-project-logo.png | Bin 0 -> 20458 bytes .../docker/docker/docs/yaml/Dockerfile | 4 - .../docker/docker/docs/yaml/generate.go | 86 - .../docker/docker/docs/yaml/yaml.go | 212 - .../docker/docker/experimental/README.md | 54 - .../docker/experimental/checkpoint-restore.md | 88 - .../experimental/docker-stacks-and-bundles.md | 205 - .../experimental/images/ipvlan-l3.gliffy | 1 - .../docker/experimental/images/ipvlan-l3.png | Bin 18260 -> 0 bytes .../docker/experimental/images/ipvlan-l3.svg | 1 - .../images/ipvlan_l2_simple.gliffy | 1 - .../experimental/images/ipvlan_l2_simple.png | Bin 20145 -> 0 bytes .../experimental/images/ipvlan_l2_simple.svg | 1 - .../images/macvlan-bridge-ipvlan-l2.gliffy | 1 - .../images/macvlan-bridge-ipvlan-l2.png | Bin 14527 -> 0 bytes 
.../images/macvlan-bridge-ipvlan-l2.svg | 1 - .../images/multi_tenant_8021q_vlans.gliffy | 1 - .../images/multi_tenant_8021q_vlans.png | Bin 17879 -> 0 bytes .../images/multi_tenant_8021q_vlans.svg | 1 - .../images/vlans-deeper-look.gliffy | 1 - .../experimental/images/vlans-deeper-look.png | Bin 38837 -> 0 bytes .../experimental/images/vlans-deeper-look.svg | 1 - .../docker/experimental/vlan-networks.md | 475 - .../docker/docker/hack/Jenkins/W2L/setup.sh | 4 +- .../github.com/docker/docker/hack/README.md | 68 + .../docker/hack/dockerfile/binaries-commits | 11 +- .../hack/dockerfile/install-binaries.sh | 30 +- .../github.com/docker/docker/hack/install.sh | 2 +- .../hack/integration-cli-on-swarm/README.md | 2 + .../agent/worker/executor.go | 8 +- .../host/enumerate.go | 2 +- .../integration-cli-on-swarm/host/host.go | 12 +- .../github.com/docker/docker/hack/make.ps1 | 54 +- .../github.com/docker/docker/hack/make.sh | 5 +- .../docker/docker/hack/make/.binary | 13 +- .../docker/docker/hack/make/.binary-setup | 1 - .../docker/docker/hack/make/.build-deb/rules | 2 - .../hack/make/.build-rpm/docker-engine.spec | 30 +- .../hack/make/.integration-daemon-setup | 2 +- .../hack/make/.integration-daemon-start | 25 +- .../hack/make/.integration-test-helpers | 3 + .../github.com/docker/docker/hack/make/binary | 5 - .../docker/docker/hack/make/binary-client | 12 - .../docker/docker/hack/make/binary-daemon | 6 +- .../hack/make/build-integration-test-binary | 2 + .../docker/docker/hack/make/build-rpm | 19 +- .../github.com/docker/docker/hack/make/cross | 36 +- .../docker/docker/hack/make/dynbinary | 5 - .../docker/docker/hack/make/dynbinary-client | 12 - .../docker/docker/hack/make/dynbinary-daemon | 2 - .../docker/hack/make/generate-index-listing | 2 +- .../docker/docker/hack/make/install-binary | 4 - .../docker/hack/make/install-binary-client | 10 - .../docker/docker/hack/make/test-unit | 3 + .../github.com/docker/docker/hack/make/tgz | 92 +- 
.../docker/hack/make/yaml-docs-generator | 12 - .../docker/hack/validate/compose-bindata | 28 - .../docker/docker/hack/validate/default | 1 - .../docker/docker/hack/validate/gofmt | 4 +- .../docker/docker/hack/validate/lint | 2 +- .../github.com/docker/docker/hooks/post_build | 19 - .../docker/docker/image/cache/compare.go | 4 +- .../docker/docker/image/cache/compare_test.go | 46 +- .../github.com/docker/docker/image/fs_test.go | 91 +- .../github.com/docker/docker/image/image.go | 70 + .../docker/docker/image/image_test.go | 11 +- .../docker/docker/image/spec/v1.1.md | 2 +- .../docker/docker/image/spec/v1.2.md | 2 +- .../github.com/docker/docker/image/store.go | 50 +- .../docker/docker/image/store_test.go | 120 +- .../docker/docker/image/tarexport/load.go | 65 +- .../docker/docker/image/tarexport/save.go | 87 +- .../docker/integration-cli/check_test.go | 8 +- .../docker/integration-cli/cli/build/build.go | 52 + .../cli/build/fakecontext/context.go | 124 + .../cli/build/fakegit/fakegit.go | 125 + .../build/fakestorage/fixtures.go} | 8 +- .../cli/build/fakestorage/storage.go | 176 + .../docker/docker/integration-cli/cli/cli.go | 53 + .../docker/integration-cli/daemon/daemon.go | 2 +- .../integration-cli/daemon/daemon_swarm.go | 116 +- .../integration-cli/docker_api_attach_test.go | 2 +- .../integration-cli/docker_api_build_test.go | 307 +- .../docker_api_containers_test.go | 55 +- .../integration-cli/docker_api_create_test.go | 44 +- .../integration-cli/docker_api_events_test.go | 2 +- .../integration-cli/docker_api_images_test.go | 10 +- .../integration-cli/docker_api_info_test.go | 22 + .../integration-cli/docker_api_logs_test.go | 17 +- .../integration-cli/docker_api_resize_test.go | 4 +- .../docker_api_session_test.go | 49 + .../integration-cli/docker_api_stats_test.go | 11 +- .../docker_api_swarm_config_test.go | 118 + .../docker_api_swarm_secret_test.go | 20 +- .../docker_api_swarm_service_test.go | 80 +- .../integration-cli/docker_api_swarm_test.go | 135 +- 
.../docker_api_volumes_test.go | 13 + .../integration-cli/docker_cli_attach_test.go | 8 +- .../integration-cli/docker_cli_build_test.go | 1565 +- .../docker_cli_build_unix_test.go | 14 +- .../docker_cli_by_digest_test.go | 15 +- .../integration-cli/docker_cli_commit_test.go | 5 +- .../docker_cli_config_create_test.go | 131 + .../docker_cli_config_inspect_test.go | 68 + .../docker_cli_config_ls_test.go | 125 + .../integration-cli/docker_cli_config_test.go | 3 +- .../docker_cli_cp_from_container_test.go | 52 +- .../integration-cli/docker_cli_cp_test.go | 2 +- .../docker_cli_cp_to_container_test.go | 56 +- .../docker_cli_cp_to_container_unix_test.go | 25 +- ...p_utils.go => docker_cli_cp_utils_test.go} | 76 +- .../integration-cli/docker_cli_create_test.go | 81 +- .../integration-cli/docker_cli_daemon_test.go | 124 +- .../integration-cli/docker_cli_diff_test.go | 8 +- .../integration-cli/docker_cli_events_test.go | 29 +- .../docker_cli_events_unix_test.go | 2 +- .../integration-cli/docker_cli_exec_test.go | 15 +- ...cker_cli_external_graphdriver_unix_test.go | 2 +- ...er_cli_external_volume_driver_unix_test.go | 15 + .../integration-cli/docker_cli_health_test.go | 23 + .../integration-cli/docker_cli_images_test.go | 2 +- .../integration-cli/docker_cli_info_test.go | 2 +- .../docker_cli_inspect_test.go | 5 +- .../integration-cli/docker_cli_kill_test.go | 57 +- .../integration-cli/docker_cli_login_test.go | 16 +- .../integration-cli/docker_cli_logout_test.go | 20 +- .../integration-cli/docker_cli_nat_test.go | 7 +- .../docker_cli_netmode_test.go | 2 +- .../docker_cli_network_unix_test.go | 16 +- .../integration-cli/docker_cli_pause_test.go | 23 +- .../docker_cli_plugins_logdriver_test.go | 22 + .../docker_cli_plugins_test.go | 51 +- .../docker_cli_prune_unix_test.go | 46 +- .../integration-cli/docker_cli_ps_test.go | 60 +- .../integration-cli/docker_cli_pull_test.go | 9 +- .../docker_cli_pull_trusted_test.go | 137 +- .../integration-cli/docker_cli_push_test.go | 121 +- 
.../docker_cli_registry_user_agent_test.go | 73 +- .../integration-cli/docker_cli_rename_test.go | 6 +- .../docker_cli_restart_test.go | 8 +- .../integration-cli/docker_cli_rmi_test.go | 67 +- .../integration-cli/docker_cli_run_test.go | 109 +- .../docker_cli_run_unix_test.go | 12 - .../docker_cli_save_load_unix_test.go | 2 +- .../docker_cli_service_create_test.go | 306 +- .../docker_cli_service_health_test.go | 4 +- .../docker_cli_service_logs_test.go | 122 +- .../docker_cli_service_scale_test.go | 4 +- .../docker_cli_service_update_test.go | 48 +- .../integration-cli/docker_cli_stack_test.go | 19 +- .../integration-cli/docker_cli_start_test.go | 15 +- .../integration-cli/docker_cli_stats_test.go | 17 +- .../integration-cli/docker_cli_swarm_test.go | 388 +- .../docker_cli_swarm_unix_test.go | 6 +- .../integration-cli/docker_cli_top_test.go | 6 +- .../integration-cli/docker_cli_update_test.go | 18 +- .../docker_cli_v2_only_test.go | 50 +- .../integration-cli/docker_cli_volume_test.go | 10 +- .../docker_experimental_network_test.go | 2 +- .../integration-cli/docker_utils_test.go | 415 +- .../integration-cli/environment/clean.go | 8 +- .../environment/environment.go | 20 +- .../fixtures/plugin/basic/basic.go | 34 + .../integration-cli/fixtures/plugin/plugin.go | 183 + .../fixtures_linux_daemon_test.go | 2 +- .../docker/integration-cli/request/request.go | 24 +- .../integration-cli/trust_server_test.go | 50 +- .../docker/docker/keys/launchpad-ppa-zfs.asc | 13 - .../github.com/docker/docker/layer/empty.go | 9 + .../docker/docker/layer/filestore.go | 1 + .../docker/docker/layer/filestore_unix.go | 13 + .../docker/docker/layer/filestore_windows.go | 35 + .../github.com/docker/docker/layer/layer.go | 17 +- .../docker/docker/layer/layer_store.go | 40 +- .../docker/layer/layer_store_windows.go | 4 +- .../docker/docker/layer/layer_test.go | 21 +- .../docker/docker/layer/migration_test.go | 12 +- .../docker/docker/layer/ro_layer.go | 4 + 
.../docker/docker/layer/ro_layer_unix.go | 7 + .../docker/docker/layer/ro_layer_windows.go | 7 + .../docker/libcontainerd/client_linux.go | 12 +- .../docker/libcontainerd/client_unix.go | 9 +- .../docker/libcontainerd/client_windows.go | 137 +- .../docker/libcontainerd/container_unix.go | 10 +- .../docker/libcontainerd/container_windows.go | 31 +- .../docker/libcontainerd/process_unix.go | 2 +- .../docker/docker/libcontainerd/queue_unix.go | 6 + .../docker/libcontainerd/queue_unix_test.go | 33 + .../docker/libcontainerd/remote_unix.go | 54 +- .../docker/docker/libcontainerd/types.go | 2 +- .../docker/libcontainerd/types_linux.go | 4 +- .../docker/libcontainerd/types_solaris.go | 2 +- .../docker/libcontainerd/types_windows.go | 5 +- .../docker/libcontainerd/utils_linux.go | 6 +- .../docker/libcontainerd/utils_solaris.go | 2 +- .../github.com/docker/docker/man/Dockerfile | 24 - .../docker/docker/man/Dockerfile.5.md | 474 - .../docker/docker/man/Dockerfile.aarch64 | 34 - .../docker/docker/man/Dockerfile.armhf | 43 - .../docker/docker/man/Dockerfile.ppc64le | 35 - .../docker/docker/man/Dockerfile.s390x | 35 - .../github.com/docker/docker/man/README.md | 15 - .../docker/docker/man/docker-build.1.md | 347 - .../docker/docker/man/docker-config-json.5.md | 72 - .../docker/docker/man/docker-run.1.md | 1108 - .../github.com/docker/docker/man/docker.1.md | 70 - .../github.com/docker/docker/man/dockerd.8.md | 734 - .../github.com/docker/docker/man/generate.go | 106 - .../github.com/docker/docker/man/generate.sh | 15 - .../github.com/docker/docker/man/glide.lock | 52 - .../github.com/docker/docker/man/glide.yaml | 12 - .../docker/docker/man/md2man-all.sh | 22 - .../docker/docker/man/src/attach.md | 2 - .../docker/docker/man/src/commit.md | 1 - .../docker/docker/man/src/container/attach.md | 66 - .../docker/docker/man/src/container/commit.md | 30 - .../docker/docker/man/src/container/cp.md | 145 - .../man/src/container/create-example.md | 35 - 
.../docker/docker/man/src/container/create.md | 87 - .../docker/docker/man/src/container/diff.md | 39 - .../docker/docker/man/src/container/exec.md | 25 - .../docker/docker/man/src/container/export.md | 20 - .../docker/docker/man/src/container/kill.md | 2 - .../docker/docker/man/src/container/logs.md | 28 - .../docker/docker/man/src/container/ls.md | 110 - .../docker/docker/man/src/container/pause.md | 12 - .../docker/docker/man/src/container/port.md | 26 - .../docker/docker/man/src/container/rename.md | 1 - .../docker/man/src/container/restart.md | 1 - .../docker/docker/man/src/container/rm.md | 37 - .../docker/docker/man/src/container/run.md | 1 - .../docker/docker/man/src/container/start.md | 1 - .../docker/docker/man/src/container/stats.md | 43 - .../docker/docker/man/src/container/stop.md | 1 - .../docker/docker/man/src/container/top.md | 11 - .../docker/man/src/container/unpause.md | 6 - .../docker/docker/man/src/container/update.md | 102 - .../docker/docker/man/src/container/wait.md | 8 - .../github.com/docker/docker/man/src/cp.md | 1 - .../docker/docker/man/src/create.md | 1 - .../github.com/docker/docker/man/src/diff.md | 1 - .../docker/docker/man/src/events.md | 1 - .../github.com/docker/docker/man/src/exec.md | 1 - .../docker/docker/man/src/export.md | 1 - .../docker/docker/man/src/history.md | 1 - .../docker/docker/man/src/image/build.md | 1 - .../docker/docker/man/src/image/history.md | 18 - .../docker/docker/man/src/image/import.md | 42 - .../docker/docker/man/src/image/load.md | 25 - .../docker/docker/man/src/image/ls.md | 118 - .../docker/docker/man/src/image/pull.md | 189 - .../docker/docker/man/src/image/push.md | 34 - .../docker/docker/man/src/image/rm.md | 11 - .../docker/docker/man/src/image/save.md | 19 - .../docker/docker/man/src/image/tag.md | 54 - .../docker/docker/man/src/images.md | 1 - .../docker/docker/man/src/import.md | 1 - .../github.com/docker/docker/man/src/info.md | 1 - .../docker/docker/man/src/inspect.md | 286 - 
.../github.com/docker/docker/man/src/kill.md | 1 - .../github.com/docker/docker/man/src/load.md | 1 - .../github.com/docker/docker/man/src/login.md | 22 - .../docker/docker/man/src/logout.md | 13 - .../github.com/docker/docker/man/src/logs.md | 1 - .../docker/docker/man/src/network/connect.md | 39 - .../docker/docker/man/src/network/create.md | 136 - .../docker/man/src/network/disconnect.md | 5 - .../docker/docker/man/src/network/inspect.md | 183 - .../docker/docker/man/src/network/ls.md | 182 - .../docker/docker/man/src/network/rm.md | 20 - .../github.com/docker/docker/man/src/pause.md | 1 - .../docker/docker/man/src/plugin/ls.md | 43 - .../github.com/docker/docker/man/src/port.md | 1 - .../github.com/docker/docker/man/src/ps.md | 1 - .../github.com/docker/docker/man/src/pull.md | 1 - .../github.com/docker/docker/man/src/push.md | 1 - .../docker/docker/man/src/rename.md | 1 - .../docker/docker/man/src/restart.md | 1 - .../github.com/docker/docker/man/src/rm.md | 1 - .../github.com/docker/docker/man/src/rmi.md | 1 - .../github.com/docker/docker/man/src/save.md | 1 - .../docker/docker/man/src/search.md | 36 - .../github.com/docker/docker/man/src/start.md | 1 - .../github.com/docker/docker/man/src/stats.md | 1 - .../github.com/docker/docker/man/src/stop.md | 1 - .../docker/docker/man/src/system/events.md | 134 - .../docker/docker/man/src/system/info.md | 163 - .../github.com/docker/docker/man/src/tag.md | 1 - .../github.com/docker/docker/man/src/top.md | 1 - .../docker/docker/man/src/unpause.md | 1 - .../docker/docker/man/src/update.md | 1 - .../docker/docker/man/src/version.md | 37 - .../docker/docker/man/src/volume.md | 14 - .../docker/docker/man/src/volume/create.md | 35 - .../docker/docker/man/src/volume/inspect.md | 4 - .../docker/docker/man/src/volume/ls.md | 11 - .../github.com/docker/docker/man/src/wait.md | 1 - .../docker/migrate/v1/migratev1_test.go | 10 +- .../oci/{defaults_linux.go => defaults.go} | 129 +- .../docker/docker/oci/defaults_solaris.go | 20 - 
.../docker/docker/oci/defaults_windows.go | 19 - .../docker/docker/oci/devices_linux.go | 16 +- .../docker/docker/oci/devices_unsupported.go | 4 +- .../docker/docker/oci/namespaces.go | 2 +- fn/vendor/github.com/docker/docker/opts/ip.go | 2 +- .../github.com/docker/docker/opts/mount.go | 173 - .../docker/docker/opts/mount_test.go | 184 - .../github.com/docker/docker/opts/opts.go | 161 - .../docker/docker/opts/opts_test.go | 45 +- .../github.com/docker/docker/opts/port.go | 162 - .../docker/docker/opts/port_test.go | 295 - .../docker/docker/opts/quotedstring_test.go | 17 +- .../github.com/docker/docker/opts/secret.go | 103 - .../docker/docker/opts/secret_test.go | 79 - .../docker/docker/opts/throttledevice.go | 111 - .../github.com/docker/docker/opts/ulimit.go | 24 + .../docker/docker/opts/weightdevice.go | 89 - .../docker/docker/pkg/aaparser/aaparser.go | 7 +- .../docker/docker/pkg/archive/archive.go | 286 +- .../docker/docker/pkg/archive/archive_test.go | 85 +- .../docker/docker/pkg/archive/archive_unix.go | 31 +- .../docker/pkg/archive/archive_windows.go | 14 +- .../pkg/archive/archive_windows_test.go | 4 +- .../docker/docker/pkg/archive/changes.go | 9 +- .../docker/docker/pkg/archive/changes_test.go | 6 +- .../docker/docker/pkg/archive/diff.go | 30 +- .../docker/pkg/authorization/api_test.go | 75 + .../pkg/authorization/authz_unix_test.go | 2 +- .../docker/pkg/authorization/middleware.go | 27 +- .../pkg/authorization/middleware_test.go | 53 + .../pkg/authorization/middleware_unix_test.go | 65 + .../docker/pkg/chrootarchive/archive.go | 47 +- .../docker/pkg/chrootarchive/archive_test.go | 42 +- .../docker/pkg/devicemapper/devmapper.go | 35 +- .../docker/docker/pkg/filenotify/fsnotify.go | 2 +- .../docker/pkg/filenotify/poller_test.go | 2 +- .../docker/pkg/fileutils/fileutils_test.go | 41 +- .../docker/docker/pkg/gitutils/gitutils.go | 100 - .../docker/pkg/gitutils/gitutils_test.go | 220 - .../docker/docker/pkg/httputils/httputils.go | 56 - 
.../docker/pkg/httputils/httputils_test.go | 115 - .../docker/pkg/httputils/mimetype_test.go | 13 - .../docker/docker/pkg/idtools/idtools.go | 146 +- .../docker/docker/pkg/idtools/idtools_unix.go | 8 +- .../docker/pkg/idtools/idtools_unix_test.go | 54 +- .../docker/pkg/idtools/idtools_windows.go | 4 +- .../docker/docker/pkg/ioutils/fmt.go | 22 - .../docker/docker/pkg/ioutils/fmt_test.go | 17 - .../pkg/jsonlog/jsonlog_marshalling_test.go | 20 +- .../docker/pkg/jsonlog/jsonlogbytes_test.go | 26 +- .../docker/pkg/jsonmessage/jsonmessage.go | 30 +- .../pkg/jsonmessage/jsonmessage_test.go | 44 +- .../docker/pkg/listeners/listeners_solaris.go | 8 +- .../docker/pkg/listeners/listeners_unix.go | 8 +- .../docker/docker/pkg/mount/mount.go | 31 + .../docker/docker/pkg/mount/mountinfo.go | 14 + .../pkg/namesgenerator/names-generator.go | 8 +- .../docker/docker/pkg/pidfile/pidfile.go | 2 +- .../docker/pkg/platform/utsname_int8_test.go | 16 + .../docker/pkg/platform/utsname_uint8_test.go | 16 + .../docker/docker/pkg/plugins/client.go | 18 +- .../docker/docker/pkg/plugins/client_test.go | 108 +- .../docker/pkg/plugins/discovery_unix_test.go | 39 + .../docker/docker/pkg/plugins/plugin_test.go | 112 + .../pkg/plugins/pluginrpc-gen/parser_test.go | 2 +- .../docker/pkg/plugins/transport/http_test.go | 20 + .../docker/docker/pkg/pools/pools.go | 31 +- .../docker/docker/pkg/pools/pools_test.go | 5 + .../docker/docker/pkg/progress/progress.go | 2 + .../docker/docker/pkg/promise/promise_test.go | 25 + .../docker/docker/pkg/pubsub/publisher.go | 10 + .../docker/docker/pkg/random/random.go | 71 - .../docker/docker/pkg/random/random_test.go | 22 - .../docker/docker/pkg/reexec/reexec_test.go | 53 + .../docker/pkg/registrar/registrar_test.go | 2 +- .../docker/pkg/signal/signal_linux_test.go | 58 + .../docker/docker/pkg/signal/signal_test.go | 33 + .../docker/docker/pkg/stdcopy/stdcopy_test.go | 2 +- .../pkg/streamformatter/streamformatter.go | 187 +- 
.../streamformatter/streamformatter_test.go | 153 +- .../pkg/streamformatter/streamwriter.go | 47 + .../pkg/streamformatter/streamwriter_test.go | 35 + .../docker/docker/pkg/stringid/stringid.go | 38 +- .../docker/pkg/stringutils/stringutils.go | 4 +- .../docker/docker/pkg/symlink/fs.go | 2 +- .../docker/pkg/sysinfo/sysinfo_linux_test.go | 74 +- .../docker/docker/pkg/system/chtimes.go | 17 - .../docker/docker/pkg/system/filesys.go | 9 +- .../docker/pkg/system/filesys_windows.go | 29 +- .../docker/docker/pkg/system/init.go | 22 + .../docker/docker/pkg/system/init_windows.go | 17 + .../docker/docker/pkg/system/lcow_unix.go | 8 + .../docker/docker/pkg/system/lcow_windows.go | 6 + .../docker/docker/pkg/system/path.go | 21 + .../docker/docker/pkg/system/path_unix.go | 5 - .../docker/docker/pkg/system/path_windows.go | 6 +- .../github.com/docker/docker/pkg/system/rm.go | 80 + .../docker/docker/pkg/system/rm_test.go | 84 + .../docker/pkg/system/syscall_windows.go | 21 +- .../docker/docker/pkg/templates/templates.go | 11 +- .../docker/pkg/templates/templates_test.go | 51 +- .../docker/docker/pkg/term/proxy.go | 74 + .../docker/docker/pkg/term/proxy_test.go | 92 + .../docker/pkg/term/{tc_other.go => tc.go} | 7 +- .../docker/docker/pkg/term/tc_linux_cgo.go | 50 - .../docker/docker/pkg/term/tc_solaris_cgo.go | 16 +- .../github.com/docker/docker/pkg/term/term.go | 5 +- .../docker/docker/pkg/term/term_linux_test.go | 120 + .../docker/docker/pkg/term/term_windows.go | 12 +- .../docker/docker/pkg/term/termios_bsd.go | 42 + .../docker/docker/pkg/term/termios_darwin.go | 69 - .../docker/docker/pkg/term/termios_freebsd.go | 69 - .../docker/docker/pkg/term/termios_linux.go | 33 +- .../docker/docker/pkg/term/termios_openbsd.go | 69 - .../docker/pkg/term/windows/ansi_reader.go | 2 +- .../docker/pkg/term/windows/ansi_writer.go | 2 +- .../docker/docker/pkg/term/windows/console.go | 2 +- .../docker/docker/pkg/term/windows/windows.go | 2 +- .../docker/pkg/term/windows/windows_test.go | 2 
+- .../pkg/term/{term_unix.go => winsize.go} | 7 +- ...term_solaris.go => winsize_solaris_cgo.go} | 9 +- .../docker/pkg/testutil/assert/assert.go | 132 - .../docker/docker/pkg/testutil/cmd/command.go | 2 +- .../docker/pkg/testutil/cmd/command_test.go | 4 +- .../docker/docker/pkg/testutil/helpers.go | 33 + .../docker/pkg/testutil/tempfile/tempfile.go | 29 +- .../docker/docker/pkg/testutil/utils.go | 15 - .../docker/docker/pkg/testutil/utils_test.go | 22 - .../pkg/tlsconfig/tlsconfig_clone_go16.go | 31 - .../docker/docker/pkg/urlutil/urlutil.go | 6 - .../docker/docker/pkg/urlutil/urlutil_test.go | 14 - .../docker/docker/plugin/backend_linux.go | 74 +- .../docker/plugin/backend_unsupported.go | 2 +- .../docker/docker/plugin/blobstore.go | 8 +- .../github.com/docker/docker/plugin/defs.go | 11 + .../github.com/docker/docker/plugin/events.go | 111 + .../docker/docker/plugin/manager.go | 49 +- .../docker/docker/plugin/manager_linux.go | 12 +- .../github.com/docker/docker/plugin/store.go | 11 +- .../docker/docker/plugin/v2/plugin.go | 2 + .../docker/docker/plugin/v2/plugin_linux.go | 18 +- .../docker/docker/plugin/v2/settable_test.go | 2 +- fn/vendor/github.com/docker/docker/poule.yml | 7 - .../docker/profiles/seccomp/default.json | 255 +- .../docker/docker/profiles/seccomp/seccomp.go | 26 +- .../profiles/seccomp/seccomp_default.go | 173 +- .../docker/project/RELEASE-CHECKLIST.md | 33 +- .../docker/docker/reference/store.go | 8 +- .../docker/docker/reference/store_test.go | 9 +- .../docker/docker/registry/config.go | 83 +- .../docker/docker/registry/config_test.go | 119 + .../docker/docker/registry/config_unix.go | 2 +- .../docker/docker/registry/endpoint_v1.go | 2 +- .../docker/docker/registry/registry_test.go | 42 + .../resumable}/resumablerequestreader.go | 24 +- .../resumable}/resumablerequestreader_test.go | 132 +- .../docker/docker/registry/service.go | 32 +- .../docker/docker/registry/service_v2.go | 14 +- .../docker/docker/registry/session.go | 53 +- 
.../docker/docker/reports/2017-05-01.md | 35 + .../docker/docker/reports/2017-05-08.md | 34 + .../docker/docker/reports/2017-05-15.md | 52 + .../docker/docker/reports/2017-06-05.md | 36 + .../docker/docker/reports/2017-06-12.md | 78 + .../docker/docker/reports/2017-06-26.md | 120 + .../docker/reports/builder/2017-05-01.md | 47 + .../docker/reports/builder/2017-05-08.md | 57 + .../docker/reports/builder/2017-05-15.md | 64 + .../docker/reports/builder/2017-05-22.md | 47 + .../docker/reports/builder/2017-05-29.md | 52 + .../docker/reports/builder/2017-06-05.md | 58 + .../docker/reports/builder/2017-06-12.md | 58 + .../docker/reports/builder/2017-06-26.md | 78 + .../docker/docker/runconfig/config.go | 5 + .../docker/docker/runconfig/config_test.go | 8 +- .../docker/docker/runconfig/hostconfig.go | 2 +- .../docker/runconfig/hostconfig_test.go | 10 +- .../docker/runconfig/hostconfig_unix.go | 17 +- .../docker/runconfig/hostconfig_windows.go | 22 +- .../runconfig/hostconfig_windows_test.go | 2 +- .../docker/docker/runconfig/opts/envfile.go | 81 - .../docker/runconfig/opts/envfile_test.go | 141 - .../docker/docker/runconfig/opts/parse.go | 67 - .../github.com/docker/docker/vendor.conf | 68 +- .../docker/docker/volume/drivers/adapter.go | 11 +- .../docker/docker/volume/local/local.go | 12 +- .../docker/docker/volume/local/local_test.go | 19 +- .../docker/docker/volume/local/local_unix.go | 12 + .../docker/volume/local/local_windows.go | 12 + .../docker/docker/volume/store/store_test.go | 2 +- .../docker/volume/testutils/testutils.go | 11 +- .../github.com/docker/docker/volume/volume.go | 75 +- .../docker/docker/volume/volume_linux.go | 2 +- .../docker/docker/volume/volume_test.go | 8 +- .../github.com/go-resty/resty/.travis.yml | 3 +- fn/vendor/github.com/go-resty/resty/README.md | 7 + fn/vendor/github.com/go-resty/resty/client.go | 44 +- .../github.com/go-resty/resty/client_test.go | 21 +- .../github.com/go-resty/resty/default.go | 5 + 
.../github.com/go-resty/resty/middleware.go | 3 +- .../github.com/go-resty/resty/request.go | 2 +- .../github.com/go-resty/resty/resty_test.go | 6 + fn/vendor/golang.org/x/net/bpf/asm.go | 41 + fn/vendor/golang.org/x/net/bpf/constants.go | 218 + fn/vendor/golang.org/x/net/bpf/doc.go | 82 + .../golang.org/x/net/bpf/instructions.go | 704 + .../golang.org/x/net/bpf/instructions_test.go | 525 + fn/vendor/golang.org/x/net/bpf/setter.go | 10 + .../x/net/bpf/testdata/all_instructions.bpf | 1 + .../x/net/bpf/testdata/all_instructions.txt | 79 + fn/vendor/golang.org/x/net/bpf/vm.go | 140 + .../golang.org/x/net/bpf/vm_aluop_test.go | 512 + fn/vendor/golang.org/x/net/bpf/vm_bpf_test.go | 192 + .../golang.org/x/net/bpf/vm_extension_test.go | 49 + .../golang.org/x/net/bpf/vm_instructions.go | 174 + .../golang.org/x/net/bpf/vm_jump_test.go | 380 + .../golang.org/x/net/bpf/vm_load_test.go | 246 + fn/vendor/golang.org/x/net/bpf/vm_ret_test.go | 115 + .../golang.org/x/net/bpf/vm_scratch_test.go | 247 + fn/vendor/golang.org/x/net/bpf/vm_test.go | 144 + fn/vendor/golang.org/x/net/context/context.go | 325 +- .../golang.org/x/net/context/context_test.go | 46 +- .../x/net/context/ctxhttp/cancelreq.go | 19 - .../x/net/context/ctxhttp/cancelreq_go14.go | 23 - .../x/net/context/ctxhttp/ctxhttp.go | 100 +- .../x/net/context/ctxhttp/ctxhttp_17_test.go | 29 + .../x/net/context/ctxhttp/ctxhttp_pre17.go | 147 + .../net/context/ctxhttp/ctxhttp_pre17_test.go | 79 + .../x/net/context/ctxhttp/ctxhttp_test.go | 149 +- fn/vendor/golang.org/x/net/context/go17.go | 72 + .../golang.org/x/net/context/pre_go17.go | 300 + .../x/net/context/withtimeout_test.go | 9 +- fn/vendor/golang.org/x/net/dict/dict.go | 2 +- .../x/net/dns/dnsmessage/example_test.go | 132 + .../x/net/dns/dnsmessage/message.go | 1997 ++ .../x/net/dns/dnsmessage/message_test.go | 1009 + fn/vendor/golang.org/x/net/http2/ciphers.go | 641 + .../golang.org/x/net/http2/ciphers_test.go | 309 + .../x/net/http2/client_conn_pool.go | 35 +- 
.../x/net/http2/configure_transport.go | 13 +- .../golang.org/x/net/http2/databuffer.go | 146 + .../golang.org/x/net/http2/databuffer_test.go | 157 + fn/vendor/golang.org/x/net/http2/errors.go | 55 +- .../golang.org/x/net/http2/fixed_buffer.go | 60 - .../x/net/http2/fixed_buffer_test.go | 128 - fn/vendor/golang.org/x/net/http2/frame.go | 394 +- .../golang.org/x/net/http2/frame_test.go | 460 +- fn/vendor/golang.org/x/net/http2/go15.go | 11 - fn/vendor/golang.org/x/net/http2/go16.go | 16 + fn/vendor/golang.org/x/net/http2/go17.go | 106 + .../golang.org/x/net/http2/go17_not18.go | 36 + fn/vendor/golang.org/x/net/http2/go18.go | 56 + fn/vendor/golang.org/x/net/http2/go18_test.go | 79 + .../x/net/http2/{not_go15.go => go19.go} | 11 +- fn/vendor/golang.org/x/net/http2/go19_test.go | 60 + .../golang.org/x/net/http2/h2demo/h2demo.go | 142 +- .../golang.org/x/net/http2/h2demo/launch.go | 4 +- .../golang.org/x/net/http2/h2demo/tmpl.go | 1991 ++ fn/vendor/golang.org/x/net/http2/h2i/h2i.go | 30 +- .../golang.org/x/net/http2/hpack/encode.go | 31 +- .../x/net/http2/hpack/encode_test.go | 70 +- .../golang.org/x/net/http2/hpack/hpack.go | 117 +- .../x/net/http2/hpack/hpack_test.go | 191 +- .../golang.org/x/net/http2/hpack/huffman.go | 42 +- .../golang.org/x/net/http2/hpack/tables.go | 255 +- .../x/net/http2/hpack/tables_test.go | 214 + fn/vendor/golang.org/x/net/http2/http2.go | 221 +- .../golang.org/x/net/http2/http2_test.go | 29 +- fn/vendor/golang.org/x/net/http2/not_go16.go | 10 +- fn/vendor/golang.org/x/net/http2/not_go17.go | 87 + fn/vendor/golang.org/x/net/http2/not_go18.go | 29 + fn/vendor/golang.org/x/net/http2/not_go19.go | 16 + fn/vendor/golang.org/x/net/http2/pipe.go | 24 +- fn/vendor/golang.org/x/net/http2/pipe_test.go | 21 + .../golang.org/x/net/http2/priority_test.go | 118 - fn/vendor/golang.org/x/net/http2/server.go | 1802 +- .../x/net/http2/server_push_test.go | 521 + .../golang.org/x/net/http2/server_test.go | 1033 +- fn/vendor/golang.org/x/net/http2/transport.go 
| 1279 +- .../golang.org/x/net/http2/transport_test.go | 1652 +- fn/vendor/golang.org/x/net/http2/write.go | 201 +- .../golang.org/x/net/http2/writesched.go | 429 +- .../x/net/http2/writesched_priority.go | 452 + .../x/net/http2/writesched_priority_test.go | 541 + .../x/net/http2/writesched_random.go | 72 + .../x/net/http2/writesched_random_test.go | 44 + .../golang.org/x/net/http2/writesched_test.go | 125 + .../golang.org/x/net/http2/z_spec_test.go | 4 +- fn/vendor/golang.org/x/net/icmp/echo.go | 10 +- fn/vendor/golang.org/x/net/icmp/endpoint.go | 2 +- fn/vendor/golang.org/x/net/icmp/extension.go | 6 +- fn/vendor/golang.org/x/net/icmp/interface.go | 17 +- fn/vendor/golang.org/x/net/icmp/ipv4.go | 20 +- fn/vendor/golang.org/x/net/icmp/ipv4_test.go | 46 +- fn/vendor/golang.org/x/net/icmp/ipv6.go | 2 +- .../golang.org/x/net/icmp/listen_posix.go | 6 +- fn/vendor/golang.org/x/net/icmp/message.go | 9 +- .../golang.org/x/net/icmp/messagebody.go | 2 +- fn/vendor/golang.org/x/net/icmp/mpls.go | 4 +- .../golang.org/x/net/icmp/packettoobig.go | 6 +- fn/vendor/golang.org/x/net/icmp/paramprob.go | 9 +- fn/vendor/golang.org/x/net/icmp/ping_test.go | 34 + .../golang.org/x/net/idna/example_test.go | 70 + fn/vendor/golang.org/x/net/idna/idna.go | 686 +- fn/vendor/golang.org/x/net/idna/punycode.go | 23 +- fn/vendor/golang.org/x/net/idna/tables.go | 4477 ++++ fn/vendor/golang.org/x/net/idna/trie.go | 72 + fn/vendor/golang.org/x/net/idna/trieval.go | 114 + .../golang.org/x/net/internal/iana/const.go | 7 +- .../golang.org/x/net/internal/iana/gen.go | 2 +- .../x/net/internal/nettest/error_stub.go | 11 - .../x/net/internal/nettest/helper_bsd.go | 53 + .../x/net/internal/nettest/helper_nobsd.go | 15 + .../{error_posix.go => helper_posix.go} | 0 .../x/net/internal/nettest/helper_stub.go | 32 + .../{rlimit_unix.go => helper_unix.go} | 14 +- .../{stack_windows.go => helper_windows.go} | 16 +- .../x/net/internal/nettest/interface.go | 2 +- .../x/net/internal/nettest/rlimit_stub.go | 9 - 
.../x/net/internal/nettest/rlimit_windows.go | 7 - .../x/net/internal/nettest/stack.go | 145 +- .../x/net/internal/nettest/stack_stub.go | 18 - .../x/net/internal/nettest/stack_unix.go | 22 - .../x/net/internal/socket/cmsghdr.go | 11 + .../x/net/internal/socket/cmsghdr_bsd.go | 13 + .../internal/socket/cmsghdr_linux_32bit.go | 14 + .../internal/socket/cmsghdr_linux_64bit.go | 14 + .../internal/socket/cmsghdr_solaris_64bit.go | 14 + .../x/net/internal/socket/cmsghdr_stub.go | 17 + .../x/net/internal/socket/defs_darwin.go | 44 + .../x/net/internal/socket/defs_dragonfly.go | 44 + .../x/net/internal/socket/defs_freebsd.go | 44 + .../x/net/internal/socket/defs_linux.go | 49 + .../x/net/internal/socket/defs_netbsd.go | 47 + .../x/net/internal/socket/defs_openbsd.go | 44 + .../x/net/internal/socket/defs_solaris.go | 44 + .../x/net/internal/socket/error_unix.go | 31 + .../x/net/internal/socket/error_windows.go | 26 + .../x/net/internal/socket/iovec_32bit.go | 15 + .../x/net/internal/socket/iovec_64bit.go | 15 + .../internal/socket/iovec_solaris_64bit.go | 15 + .../x/net/internal/socket/iovec_stub.go | 11 + .../x/net/internal/socket/mmsghdr_stub.go | 21 + .../x/net/internal/socket/mmsghdr_unix.go | 42 + .../x/net/internal/socket/msghdr_bsd.go | 39 + .../x/net/internal/socket/msghdr_bsdvar.go | 12 + .../x/net/internal/socket/msghdr_linux.go | 36 + .../net/internal/socket/msghdr_linux_32bit.go | 20 + .../net/internal/socket/msghdr_linux_64bit.go | 20 + .../x/net/internal/socket/msghdr_openbsd.go | 10 + .../internal/socket/msghdr_solaris_64bit.go | 34 + .../x/net/internal/socket/msghdr_stub.go | 14 + .../x/net/internal/socket/rawconn.go | 66 + .../x/net/internal/socket/rawconn_mmsg.go | 74 + .../x/net/internal/socket/rawconn_msg.go | 77 + .../x/net/internal/socket/rawconn_nommsg.go | 18 + .../x/net/internal/socket/rawconn_nomsg.go | 18 + .../x/net/internal/socket/rawconn_stub.go | 25 + .../x/net/internal/socket/reflect.go | 62 + .../x/net/internal/socket/socket.go | 285 + 
.../net/internal/socket/socket_go1_9_test.go | 256 + .../x/net/internal/socket/socket_test.go | 46 + .../golang.org/x/net/internal/socket/sys.go | 33 + .../x/net/internal/socket/sys_bsd.go | 17 + .../x/net/internal/socket/sys_bsdvar.go | 14 + .../x/net/internal/socket/sys_darwin.go | 7 + .../x/net/internal/socket/sys_dragonfly.go | 7 + .../x/net/internal/socket/sys_linux.go | 27 + .../x/net/internal/socket/sys_linux_386.go | 55 + .../socket/sys_linux_386.s} | 7 +- .../x/net/internal/socket/sys_linux_amd64.go | 10 + .../x/net/internal/socket/sys_linux_arm.go | 10 + .../x/net/internal/socket/sys_linux_arm64.go | 10 + .../x/net/internal/socket/sys_linux_mips.go | 10 + .../x/net/internal/socket/sys_linux_mips64.go | 10 + .../net/internal/socket/sys_linux_mips64le.go | 10 + .../x/net/internal/socket/sys_linux_mipsle.go | 10 + .../x/net/internal/socket/sys_linux_ppc64.go | 10 + .../net/internal/socket/sys_linux_ppc64le.go | 10 + .../x/net/internal/socket/sys_linux_s390x.go | 55 + .../x/net/internal/socket/sys_linux_s390x.s | 11 + .../x/net/internal/socket/sys_netbsd.go | 25 + .../x/net/internal/socket/sys_posix.go | 168 + .../x/net/internal/socket/sys_solaris.go | 71 + .../x/net/internal/socket/sys_solaris_amd64.s | 11 + .../x/net/internal/socket/sys_stub.go | 64 + .../x/net/internal/socket/sys_unix.go | 33 + .../x/net/internal/socket/sys_windows.go | 70 + .../x/net/internal/socket/zsys_darwin_386.go | 59 + .../net/internal/socket/zsys_darwin_amd64.go | 61 + .../x/net/internal/socket/zsys_darwin_arm.go | 59 + .../internal/socket/zsys_dragonfly_amd64.go | 61 + .../x/net/internal/socket/zsys_freebsd_386.go | 59 + .../net/internal/socket/zsys_freebsd_amd64.go | 61 + .../x/net/internal/socket/zsys_freebsd_arm.go | 59 + .../x/net/internal/socket/zsys_linux_386.go | 63 + .../x/net/internal/socket/zsys_linux_amd64.go | 66 + .../x/net/internal/socket/zsys_linux_arm.go | 63 + .../x/net/internal/socket/zsys_linux_arm64.go | 66 + .../x/net/internal/socket/zsys_linux_mips.go | 63 + 
.../net/internal/socket/zsys_linux_mips64.go | 66 + .../internal/socket/zsys_linux_mips64le.go | 66 + .../net/internal/socket/zsys_linux_mipsle.go | 63 + .../x/net/internal/socket/zsys_linux_ppc64.go | 66 + .../net/internal/socket/zsys_linux_ppc64le.go | 66 + .../x/net/internal/socket/zsys_linux_s390x.go | 66 + .../x/net/internal/socket/zsys_netbsd_386.go | 65 + .../net/internal/socket/zsys_netbsd_amd64.go | 68 + .../x/net/internal/socket/zsys_netbsd_arm.go | 59 + .../x/net/internal/socket/zsys_openbsd_386.go | 59 + .../net/internal/socket/zsys_openbsd_amd64.go | 61 + .../x/net/internal/socket/zsys_openbsd_arm.go | 59 + .../net/internal/socket/zsys_solaris_amd64.go | 60 + .../x/net/internal/timeseries/timeseries.go | 2 +- fn/vendor/golang.org/x/net/ipv4/batch.go | 191 + fn/vendor/golang.org/x/net/ipv4/bpf_test.go | 93 + fn/vendor/golang.org/x/net/ipv4/control.go | 76 +- .../golang.org/x/net/ipv4/control_bsd.go | 24 +- .../golang.org/x/net/ipv4/control_pktinfo.go | 22 +- .../golang.org/x/net/ipv4/control_stub.go | 20 +- .../golang.org/x/net/ipv4/control_test.go | 21 + .../golang.org/x/net/ipv4/control_unix.go | 121 +- .../golang.org/x/net/ipv4/control_windows.go | 25 +- .../golang.org/x/net/ipv4/defs_darwin.go | 32 +- .../golang.org/x/net/ipv4/defs_dragonfly.go | 4 +- .../golang.org/x/net/ipv4/defs_freebsd.go | 28 +- fn/vendor/golang.org/x/net/ipv4/defs_linux.go | 51 +- .../golang.org/x/net/ipv4/defs_netbsd.go | 4 +- .../golang.org/x/net/ipv4/defs_openbsd.go | 4 +- .../golang.org/x/net/ipv4/defs_solaris.go | 77 +- .../ipv4/{dgramopt_posix.go => dgramopt.go} | 132 +- .../golang.org/x/net/ipv4/dgramopt_stub.go | 106 - fn/vendor/golang.org/x/net/ipv4/doc.go | 24 +- fn/vendor/golang.org/x/net/ipv4/endpoint.go | 76 +- .../golang.org/x/net/ipv4/example_test.go | 2 +- fn/vendor/golang.org/x/net/ipv4/gen.go | 11 +- .../{genericopt_posix.go => genericopt.go} | 36 +- .../golang.org/x/net/ipv4/genericopt_stub.go | 29 - fn/vendor/golang.org/x/net/ipv4/header.go | 109 +- 
.../golang.org/x/net/ipv4/header_test.go | 291 +- fn/vendor/golang.org/x/net/ipv4/helper.go | 23 +- .../golang.org/x/net/ipv4/helper_stub.go | 23 - .../golang.org/x/net/ipv4/helper_unix.go | 50 - .../golang.org/x/net/ipv4/helper_windows.go | 49 - fn/vendor/golang.org/x/net/ipv4/icmp.go | 4 +- fn/vendor/golang.org/x/net/ipv4/icmp_linux.go | 8 +- fn/vendor/golang.org/x/net/ipv4/icmp_stub.go | 12 +- .../x/net/ipv4/mocktransponder_test.go | 21 - .../golang.org/x/net/ipv4/multicast_test.go | 8 +- .../x/net/ipv4/multicastlistener_test.go | 38 +- .../x/net/ipv4/multicastsockopt_test.go | 6 +- fn/vendor/golang.org/x/net/ipv4/packet.go | 58 +- .../golang.org/x/net/ipv4/packet_go1_8.go | 56 + .../golang.org/x/net/ipv4/packet_go1_9.go | 67 + fn/vendor/golang.org/x/net/ipv4/payload.go | 14 +- .../golang.org/x/net/ipv4/payload_cmsg.go | 61 +- .../x/net/ipv4/payload_cmsg_go1_8.go | 59 + .../x/net/ipv4/payload_cmsg_go1_9.go | 67 + .../golang.org/x/net/ipv4/payload_nocmsg.go | 12 +- .../x/net/ipv4/readwrite_go1_8_test.go | 248 + .../x/net/ipv4/readwrite_go1_9_test.go | 388 + .../golang.org/x/net/ipv4/readwrite_test.go | 112 +- fn/vendor/golang.org/x/net/ipv4/sockopt.go | 14 +- .../x/net/ipv4/sockopt_asmreq_stub.go | 21 - .../x/net/ipv4/sockopt_asmreq_unix.go | 46 - .../x/net/ipv4/sockopt_asmreq_windows.go | 45 - .../x/net/ipv4/sockopt_asmreqn_stub.go | 17 - .../golang.org/x/net/ipv4/sockopt_posix.go | 71 + .../x/net/ipv4/sockopt_ssmreq_stub.go | 17 - .../x/net/ipv4/sockopt_ssmreq_unix.go | 61 - .../golang.org/x/net/ipv4/sockopt_stub.go | 37 +- .../golang.org/x/net/ipv4/sockopt_unix.go | 122 - .../golang.org/x/net/ipv4/sockopt_windows.go | 68 - .../ipv4/{sockopt_asmreq.go => sys_asmreq.go} | 44 +- .../golang.org/x/net/ipv4/sys_asmreq_stub.go | 25 + ...sockopt_asmreqn_unix.go => sys_asmreqn.go} | 20 +- .../sys_asmreqn_stub.go} | 14 +- fn/vendor/golang.org/x/net/ipv4/sys_bpf.go | 23 + .../golang.org/x/net/ipv4/sys_bpf_stub.go | 16 + fn/vendor/golang.org/x/net/ipv4/sys_bsd.go | 31 +- 
fn/vendor/golang.org/x/net/ipv4/sys_darwin.go | 99 +- .../golang.org/x/net/ipv4/sys_dragonfly.go | 35 + .../golang.org/x/net/ipv4/sys_freebsd.go | 55 +- fn/vendor/golang.org/x/net/ipv4/sys_linux.go | 52 +- .../golang.org/x/net/ipv4/sys_openbsd.go | 34 - .../golang.org/x/net/ipv4/sys_solaris.go | 57 + fn/vendor/golang.org/x/net/ipv4/sys_ssmreq.go | 54 + .../golang.org/x/net/ipv4/sys_ssmreq_stub.go | 21 + fn/vendor/golang.org/x/net/ipv4/sys_stub.go | 6 +- .../golang.org/x/net/ipv4/sys_windows.go | 36 +- .../x/net/ipv4/syscall_linux_386.go | 31 - .../golang.org/x/net/ipv4/syscall_unix.go | 26 - .../golang.org/x/net/ipv4/thunk_linux_386.s | 8 - .../golang.org/x/net/ipv4/unicast_test.go | 23 +- .../x/net/ipv4/unicastsockopt_test.go | 23 +- .../golang.org/x/net/ipv4/zsys_darwin.go | 32 +- .../golang.org/x/net/ipv4/zsys_dragonfly.go | 6 +- .../golang.org/x/net/ipv4/zsys_freebsd_386.go | 34 +- .../x/net/ipv4/zsys_freebsd_amd64.go | 34 +- .../golang.org/x/net/ipv4/zsys_freebsd_arm.go | 34 +- .../golang.org/x/net/ipv4/zsys_linux_386.go | 64 +- .../golang.org/x/net/ipv4/zsys_linux_amd64.go | 64 +- .../golang.org/x/net/ipv4/zsys_linux_arm.go | 64 +- .../golang.org/x/net/ipv4/zsys_linux_arm64.go | 66 +- .../golang.org/x/net/ipv4/zsys_linux_mips.go | 148 + .../x/net/ipv4/zsys_linux_mips64.go | 66 +- .../x/net/ipv4/zsys_linux_mips64le.go | 66 +- .../x/net/ipv4/zsys_linux_mipsle.go | 148 + .../golang.org/x/net/ipv4/zsys_linux_ppc.go | 148 + .../golang.org/x/net/ipv4/zsys_linux_ppc64.go | 66 +- .../x/net/ipv4/zsys_linux_ppc64le.go | 66 +- .../golang.org/x/net/ipv4/zsys_linux_s390x.go | 150 + .../golang.org/x/net/ipv4/zsys_netbsd.go | 4 +- .../golang.org/x/net/ipv4/zsys_openbsd.go | 4 +- .../golang.org/x/net/ipv4/zsys_solaris.go | 94 +- fn/vendor/golang.org/x/net/ipv6/batch.go | 119 + fn/vendor/golang.org/x/net/ipv6/bpf_test.go | 96 + fn/vendor/golang.org/x/net/ipv6/control.go | 106 +- .../x/net/ipv6/control_rfc2292_unix.go | 36 +- .../x/net/ipv6/control_rfc3542_unix.go | 79 +- 
.../golang.org/x/net/ipv6/control_stub.go | 20 +- .../golang.org/x/net/ipv6/control_test.go | 21 + .../golang.org/x/net/ipv6/control_unix.go | 135 +- .../golang.org/x/net/ipv6/control_windows.go | 25 +- .../golang.org/x/net/ipv6/defs_darwin.go | 32 +- .../golang.org/x/net/ipv6/defs_dragonfly.go | 20 +- .../golang.org/x/net/ipv6/defs_freebsd.go | 32 +- fn/vendor/golang.org/x/net/ipv6/defs_linux.go | 47 +- .../golang.org/x/net/ipv6/defs_netbsd.go | 20 +- .../golang.org/x/net/ipv6/defs_openbsd.go | 20 +- .../golang.org/x/net/ipv6/defs_solaris.go | 38 +- .../ipv6/{dgramopt_posix.go => dgramopt.go} | 152 +- .../golang.org/x/net/ipv6/dgramopt_stub.go | 119 - fn/vendor/golang.org/x/net/ipv6/doc.go | 33 +- fn/vendor/golang.org/x/net/ipv6/endpoint.go | 45 +- fn/vendor/golang.org/x/net/ipv6/gen.go | 11 +- .../{genericopt_posix.go => genericopt.go} | 36 +- .../golang.org/x/net/ipv6/genericopt_stub.go | 30 - fn/vendor/golang.org/x/net/ipv6/header.go | 3 +- fn/vendor/golang.org/x/net/ipv6/helper.go | 20 +- .../golang.org/x/net/ipv6/helper_stub.go | 19 - .../golang.org/x/net/ipv6/helper_unix.go | 46 - .../golang.org/x/net/ipv6/helper_windows.go | 45 - fn/vendor/golang.org/x/net/ipv6/icmp.go | 7 +- fn/vendor/golang.org/x/net/ipv6/icmp_bsd.go | 10 +- fn/vendor/golang.org/x/net/ipv6/icmp_linux.go | 10 +- .../golang.org/x/net/ipv6/icmp_solaris.go | 27 +- fn/vendor/golang.org/x/net/ipv6/icmp_stub.go | 14 +- fn/vendor/golang.org/x/net/ipv6/icmp_test.go | 6 +- .../golang.org/x/net/ipv6/icmp_windows.go | 14 +- .../x/net/ipv6/mocktransponder_test.go | 2 +- .../golang.org/x/net/ipv6/multicast_test.go | 24 +- .../x/net/ipv6/multicastlistener_test.go | 41 +- .../x/net/ipv6/multicastsockopt_test.go | 4 +- fn/vendor/golang.org/x/net/ipv6/payload.go | 14 +- .../golang.org/x/net/ipv6/payload_cmsg.go | 49 +- .../x/net/ipv6/payload_cmsg_go1_8.go | 55 + .../x/net/ipv6/payload_cmsg_go1_9.go | 57 + .../golang.org/x/net/ipv6/payload_nocmsg.go | 10 +- .../x/net/ipv6/readwrite_go1_8_test.go | 242 + 
.../x/net/ipv6/readwrite_go1_9_test.go | 373 + .../golang.org/x/net/ipv6/readwrite_test.go | 123 +- fn/vendor/golang.org/x/net/ipv6/sockopt.go | 15 +- .../x/net/ipv6/sockopt_asmreq_unix.go | 22 - .../x/net/ipv6/sockopt_asmreq_windows.go | 21 - .../golang.org/x/net/ipv6/sockopt_posix.go | 87 + .../x/net/ipv6/sockopt_ssmreq_unix.go | 59 - .../golang.org/x/net/ipv6/sockopt_stub.go | 41 +- .../golang.org/x/net/ipv6/sockopt_test.go | 8 +- .../golang.org/x/net/ipv6/sockopt_unix.go | 122 - .../golang.org/x/net/ipv6/sockopt_windows.go | 86 - fn/vendor/golang.org/x/net/ipv6/sys_asmreq.go | 24 + .../golang.org/x/net/ipv6/sys_asmreq_stub.go | 17 + fn/vendor/golang.org/x/net/ipv6/sys_bpf.go | 23 + .../golang.org/x/net/ipv6/sys_bpf_stub.go | 16 + fn/vendor/golang.org/x/net/ipv6/sys_bsd.go | 49 +- fn/vendor/golang.org/x/net/ipv6/sys_darwin.go | 135 +- .../golang.org/x/net/ipv6/sys_freebsd.go | 73 +- fn/vendor/golang.org/x/net/ipv6/sys_linux.go | 64 +- .../golang.org/x/net/ipv6/sys_solaris.go | 74 + fn/vendor/golang.org/x/net/ipv6/sys_ssmreq.go | 54 + .../golang.org/x/net/ipv6/sys_ssmreq_stub.go | 21 + fn/vendor/golang.org/x/net/ipv6/sys_stub.go | 6 +- .../golang.org/x/net/ipv6/sys_windows.go | 40 +- .../x/net/ipv6/syscall_linux_386.go | 31 - .../golang.org/x/net/ipv6/syscall_unix.go | 26 - .../golang.org/x/net/ipv6/unicast_test.go | 26 +- .../x/net/ipv6/unicastsockopt_test.go | 21 +- .../golang.org/x/net/ipv6/zsys_darwin.go | 34 +- .../golang.org/x/net/ipv6/zsys_dragonfly.go | 24 +- .../golang.org/x/net/ipv6/zsys_freebsd_386.go | 40 +- .../x/net/ipv6/zsys_freebsd_amd64.go | 40 +- .../golang.org/x/net/ipv6/zsys_freebsd_arm.go | 40 +- .../golang.org/x/net/ipv6/zsys_linux_386.go | 62 +- .../golang.org/x/net/ipv6/zsys_linux_amd64.go | 62 +- .../golang.org/x/net/ipv6/zsys_linux_arm.go | 62 +- .../golang.org/x/net/ipv6/zsys_linux_arm64.go | 64 +- .../golang.org/x/net/ipv6/zsys_linux_mips.go | 170 + .../x/net/ipv6/zsys_linux_mips64.go | 64 +- .../x/net/ipv6/zsys_linux_mips64le.go | 64 
+- .../x/net/ipv6/zsys_linux_mipsle.go | 170 + .../golang.org/x/net/ipv6/zsys_linux_ppc.go | 170 + .../golang.org/x/net/ipv6/zsys_linux_ppc64.go | 64 +- .../x/net/ipv6/zsys_linux_ppc64le.go | 64 +- .../golang.org/x/net/ipv6/zsys_linux_s390x.go | 172 + .../golang.org/x/net/ipv6/zsys_netbsd.go | 22 +- .../golang.org/x/net/ipv6/zsys_openbsd.go | 22 +- .../golang.org/x/net/ipv6/zsys_solaris.go | 52 +- .../golang.org/x/net/lex/httplex/httplex.go | 351 + .../x/net/lex/httplex/httplex_test.go | 119 + fn/vendor/golang.org/x/net/lif/address.go | 105 + .../golang.org/x/net/lif/address_test.go | 123 + fn/vendor/golang.org/x/net/lif/binary.go | 115 + .../golang.org/x/net/lif/defs_solaris.go | 90 + fn/vendor/golang.org/x/net/lif/lif.go | 43 + fn/vendor/golang.org/x/net/lif/link.go | 126 + fn/vendor/golang.org/x/net/lif/link_test.go | 63 + fn/vendor/golang.org/x/net/lif/sys.go | 21 + .../golang.org/x/net/lif/sys_solaris_amd64.s | 8 + fn/vendor/golang.org/x/net/lif/syscall.go | 28 + .../x/net/lif/zsys_solaris_amd64.go | 103 + .../golang.org/x/net/nettest/conntest.go | 456 + .../golang.org/x/net/nettest/conntest_go16.go | 24 + .../golang.org/x/net/nettest/conntest_go17.go | 24 + .../golang.org/x/net/nettest/conntest_test.go | 76 + fn/vendor/golang.org/x/net/netutil/listen.go | 2 +- .../golang.org/x/net/netutil/listen_test.go | 2 +- fn/vendor/golang.org/x/net/proxy/proxy.go | 44 +- .../golang.org/x/net/proxy/proxy_test.go | 73 + fn/vendor/golang.org/x/net/proxy/socks5.go | 59 +- .../golang.org/x/net/publicsuffix/gen.go | 305 +- .../golang.org/x/net/publicsuffix/list.go | 2 + .../x/net/publicsuffix/list_test.go | 14 +- .../golang.org/x/net/publicsuffix/table.go | 17894 ++++++++-------- .../x/net/publicsuffix/table_test.go | 1556 +- fn/vendor/golang.org/x/net/route/address.go | 425 + .../x/net/route/address_darwin_test.go | 63 + .../golang.org/x/net/route/address_test.go | 103 + fn/vendor/golang.org/x/net/route/binary.go | 90 + .../golang.org/x/net/route/defs_darwin.go | 114 + 
.../golang.org/x/net/route/defs_dragonfly.go | 113 + .../golang.org/x/net/route/defs_freebsd.go | 337 + .../golang.org/x/net/route/defs_netbsd.go | 112 + .../golang.org/x/net/route/defs_openbsd.go | 105 + fn/vendor/golang.org/x/net/route/interface.go | 64 + .../x/net/route/interface_announce.go | 32 + .../x/net/route/interface_classic.go | 66 + .../x/net/route/interface_freebsd.go | 78 + .../x/net/route/interface_multicast.go | 30 + .../x/net/route/interface_openbsd.go | 90 + fn/vendor/golang.org/x/net/route/message.go | 72 + .../x/net/route/message_darwin_test.go | 34 + .../x/net/route/message_freebsd_test.go | 92 + .../golang.org/x/net/route/message_test.go | 239 + fn/vendor/golang.org/x/net/route/route.go | 123 + .../golang.org/x/net/route/route_classic.go | 67 + .../golang.org/x/net/route/route_openbsd.go | 65 + .../golang.org/x/net/route/route_test.go | 386 + fn/vendor/golang.org/x/net/route/sys.go | 39 + .../golang.org/x/net/route/sys_darwin.go | 87 + .../golang.org/x/net/route/sys_dragonfly.go | 76 + .../golang.org/x/net/route/sys_freebsd.go | 155 + .../golang.org/x/net/route/sys_netbsd.go | 71 + .../golang.org/x/net/route/sys_openbsd.go | 79 + fn/vendor/golang.org/x/net/route/syscall.go | 28 + .../golang.org/x/net/route/zsys_darwin.go | 99 + .../golang.org/x/net/route/zsys_dragonfly.go | 98 + .../x/net/route/zsys_freebsd_386.go | 126 + .../x/net/route/zsys_freebsd_amd64.go | 123 + .../x/net/route/zsys_freebsd_arm.go | 123 + .../golang.org/x/net/route/zsys_netbsd.go | 97 + .../golang.org/x/net/route/zsys_openbsd.go | 90 + fn/vendor/golang.org/x/net/trace/events.go | 26 +- fn/vendor/golang.org/x/net/trace/histogram.go | 15 +- fn/vendor/golang.org/x/net/trace/trace.go | 131 +- .../golang.org/x/net/trace/trace_go16.go | 21 + .../golang.org/x/net/trace/trace_go17.go | 21 + .../golang.org/x/net/trace/trace_test.go | 132 + fn/vendor/golang.org/x/net/webdav/file.go | 63 +- .../golang.org/x/net/webdav/file_go1.6.go | 17 + .../golang.org/x/net/webdav/file_go1.7.go | 
16 + .../golang.org/x/net/webdav/file_test.go | 85 +- .../x/net/webdav/internal/xml/example_test.go | 2 +- .../x/net/webdav/internal/xml/marshal.go | 8 +- .../x/net/webdav/internal/xml/marshal_test.go | 2 +- .../x/net/webdav/internal/xml/read.go | 10 +- .../x/net/webdav/internal/xml/typeinfo.go | 2 +- .../x/net/webdav/internal/xml/xml.go | 6 +- .../x/net/webdav/internal/xml/xml_test.go | 2 +- fn/vendor/golang.org/x/net/webdav/prop.go | 93 +- .../golang.org/x/net/webdav/prop_test.go | 60 +- fn/vendor/golang.org/x/net/webdav/webdav.go | 79 +- .../golang.org/x/net/webdav/webdav_test.go | 192 +- fn/vendor/golang.org/x/net/webdav/xml.go | 156 +- fn/vendor/golang.org/x/net/webdav/xml_test.go | 47 +- .../golang.org/x/net/websocket/client.go | 15 +- fn/vendor/golang.org/x/net/websocket/dial.go | 24 + .../golang.org/x/net/websocket/dial_test.go | 43 + fn/vendor/golang.org/x/net/websocket/hybi.go | 3 - .../golang.org/x/net/websocket/websocket.go | 44 +- .../x/net/websocket/websocket_test.go | 78 + fn/vendor/golang.org/x/net/xsrftoken/xsrf.go | 14 +- .../golang.org/x/net/xsrftoken/xsrf_test.go | 2 +- .../golang.org/x/sys/unix/linux/types.go | 3 + fn/vendor/golang.org/x/sys/unix/mkerrors.sh | 2 + .../x/sys/unix/zerrors_linux_386.go | 10 + .../x/sys/unix/zerrors_linux_amd64.go | 10 + .../x/sys/unix/zerrors_linux_arm.go | 10 + .../x/sys/unix/zerrors_linux_arm64.go | 10 + .../x/sys/unix/zerrors_linux_mips.go | 10 + .../x/sys/unix/zerrors_linux_mips64.go | 10 + .../x/sys/unix/zerrors_linux_mips64le.go | 10 + .../x/sys/unix/zerrors_linux_mipsle.go | 10 + .../x/sys/unix/zerrors_linux_ppc64.go | 10 + .../x/sys/unix/zerrors_linux_ppc64le.go | 10 + .../x/sys/unix/zerrors_linux_s390x.go | 10 + .../golang.org/x/sys/unix/ztypes_linux_386.go | 2 + .../x/sys/unix/ztypes_linux_amd64.go | 2 + .../golang.org/x/sys/unix/ztypes_linux_arm.go | 2 + .../x/sys/unix/ztypes_linux_arm64.go | 2 + .../x/sys/unix/ztypes_linux_mips.go | 2 + .../x/sys/unix/ztypes_linux_mips64.go | 2 + 
.../x/sys/unix/ztypes_linux_mips64le.go | 2 + .../x/sys/unix/ztypes_linux_mipsle.go | 2 + .../x/sys/unix/ztypes_linux_ppc64.go | 2 + .../x/sys/unix/ztypes_linux_ppc64le.go | 2 + .../x/sys/unix/ztypes_linux_s390x.go | 2 + fn/version.go | 2 +- 2041 files changed, 87557 insertions(+), 107499 deletions(-) create mode 100644 fn/vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/paginators-1.json create mode 100644 fn/vendor/github.com/docker/docker/api/errors/errors_test.go create mode 100644 fn/vendor/github.com/docker/docker/api/server/backend/build/backend.go create mode 100644 fn/vendor/github.com/docker/docker/api/server/backend/build/tag.go create mode 100644 fn/vendor/github.com/docker/docker/api/server/httputils/httputils_test.go delete mode 100644 fn/vendor/github.com/docker/docker/api/server/httputils/httputils_write_json_go16.go create mode 100644 fn/vendor/github.com/docker/docker/api/server/middleware/debug_test.go create mode 100644 fn/vendor/github.com/docker/docker/api/server/router/distribution/backend.go create mode 100644 fn/vendor/github.com/docker/docker/api/server/router/distribution/distribution.go create mode 100644 fn/vendor/github.com/docker/docker/api/server/router/distribution/distribution_routes.go create mode 100644 fn/vendor/github.com/docker/docker/api/server/router/session/backend.go create mode 100644 fn/vendor/github.com/docker/docker/api/server/router/session/session.go create mode 100644 fn/vendor/github.com/docker/docker/api/server/router/session/session_routes.go create mode 100644 fn/vendor/github.com/docker/docker/api/types/backend/build.go create mode 100644 fn/vendor/github.com/docker/docker/api/types/container/waitcondition.go create mode 100644 fn/vendor/github.com/docker/docker/api/types/swarm/config.go create mode 100644 fn/vendor/github.com/docker/docker/api/types/swarm/runtime.go create mode 100644 fn/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go create mode 100644 
fn/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go create mode 100644 fn/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto create mode 100644 fn/vendor/github.com/docker/docker/builder/dockerfile/clientsession.go create mode 100644 fn/vendor/github.com/docker/docker/builder/dockerfile/containerbackend.go create mode 100644 fn/vendor/github.com/docker/docker/builder/dockerfile/copy.go create mode 100644 fn/vendor/github.com/docker/docker/builder/dockerfile/copy_test.go create mode 100644 fn/vendor/github.com/docker/docker/builder/dockerfile/copy_unix.go create mode 100644 fn/vendor/github.com/docker/docker/builder/dockerfile/copy_windows.go create mode 100644 fn/vendor/github.com/docker/docker/builder/dockerfile/imageprobe.go create mode 100644 fn/vendor/github.com/docker/docker/builder/dockerfile/metrics.go create mode 100644 fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continue-at-eof/Dockerfile create mode 100644 fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continue-at-eof/result delete mode 100644 fn/vendor/github.com/docker/docker/builder/dockerignore.go create mode 100644 fn/vendor/github.com/docker/docker/builder/fscache/fscache.go create mode 100644 fn/vendor/github.com/docker/docker/builder/fscache/fscache_test.go create mode 100644 fn/vendor/github.com/docker/docker/builder/fscache/naivedriver.go create mode 100644 fn/vendor/github.com/docker/docker/builder/remotecontext/archive.go create mode 100644 fn/vendor/github.com/docker/docker/builder/remotecontext/detect.go rename fn/vendor/github.com/docker/docker/builder/{dockerignore_test.go => remotecontext/detect_test.go} (62%) create mode 100644 fn/vendor/github.com/docker/docker/builder/remotecontext/generate.go rename fn/vendor/github.com/docker/docker/builder/{ => remotecontext}/git.go (61%) create mode 100644 fn/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils.go create mode 100644 
fn/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils_test.go rename fn/vendor/github.com/docker/docker/{pkg/httputils => builder/remotecontext}/mimetype.go (73%) create mode 100644 fn/vendor/github.com/docker/docker/builder/remotecontext/mimetype_test.go rename fn/vendor/github.com/docker/docker/builder/{ => remotecontext}/remote.go (63%) rename fn/vendor/github.com/docker/docker/builder/{ => remotecontext}/remote_test.go (75%) create mode 100644 fn/vendor/github.com/docker/docker/builder/remotecontext/tarsum.go create mode 100644 fn/vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go create mode 100644 fn/vendor/github.com/docker/docker/builder/remotecontext/tarsum.proto create mode 100644 fn/vendor/github.com/docker/docker/builder/remotecontext/tarsum_test.go rename fn/vendor/github.com/docker/docker/builder/{ => remotecontext}/utils_test.go (92%) delete mode 100644 fn/vendor/github.com/docker/docker/builder/tarsum.go delete mode 100644 fn/vendor/github.com/docker/docker/builder/tarsum_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/checkpoint/cmd.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/checkpoint/create.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/checkpoint/list.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/checkpoint/remove.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/cli.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/commands/commands.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/attach.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/cmd.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/commit.go delete mode 
100644 fn/vendor/github.com/docker/docker/cli/command/container/cp.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/create.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/diff.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/exec.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/exec_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/export.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/hijack.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/inspect.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/kill.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/list.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/logs.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/opts.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/opts_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/pause.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/port.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/prune.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/ps_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/rename.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/restart.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/rm.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/run.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/start.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/stats.go delete mode 100644 
fn/vendor/github.com/docker/docker/cli/command/container/stats_helpers.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/stats_unit_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/stop.go delete mode 100755 fn/vendor/github.com/docker/docker/cli/command/container/testdata/utf16.env delete mode 100755 fn/vendor/github.com/docker/docker/cli/command/container/testdata/utf16be.env delete mode 100755 fn/vendor/github.com/docker/docker/cli/command/container/testdata/utf8.env delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/testdata/valid.env delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/testdata/valid.label delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/top.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/tty.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/unpause.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/update.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/utils.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/container/wait.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/events_utils.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/checkpoint.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/checkpoint_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/container.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/container_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/custom.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/custom_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/diff.go delete mode 100644 
fn/vendor/github.com/docker/docker/cli/command/formatter/diff_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/disk_usage.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/disk_usage_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/formatter.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/image.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/image_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/network.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/network_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/node.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/node_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/plugin.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/plugin_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/reflect.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/reflect_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/secret.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/secret_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/service.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/service_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/stats.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/stats_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/task.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/task_test.go delete mode 100644 
fn/vendor/github.com/docker/docker/cli/command/formatter/volume.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/formatter/volume_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/idresolver/client_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/idresolver/idresolver.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/idresolver/idresolver_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/image/build.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/image/build/context.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/image/build/context_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/image/build/context_unix.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/image/build/context_windows.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/image/cmd.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/image/history.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/image/import.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/image/inspect.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/image/list.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/image/load.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/image/prune.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/image/pull.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/image/push.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/image/remove.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/image/save.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/image/tag.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/image/trust.go delete mode 100644 
fn/vendor/github.com/docker/docker/cli/command/image/trust_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/in.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/inspect/inspector.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/inspect/inspector_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/network/cmd.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/network/connect.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/network/create.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/network/disconnect.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/network/inspect.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/network/list.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/network/prune.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/network/remove.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/node/client_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/node/cmd.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/node/demote.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/node/demote_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/node/inspect.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/node/inspect_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/node/list.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/node/list_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/node/opts.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/node/promote.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/node/promote_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/node/ps.go delete mode 100644 
fn/vendor/github.com/docker/docker/cli/command/node/ps_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/node/remove.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/node/remove_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/node/testdata/node-inspect-pretty.manager-leader.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/node/testdata/node-inspect-pretty.manager.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/node/testdata/node-inspect-pretty.simple.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/node/testdata/node-ps.simple.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/node/testdata/node-ps.with-errors.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/node/update.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/node/update_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/out.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/plugin/cmd.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/plugin/create.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/plugin/disable.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/plugin/enable.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/plugin/inspect.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/plugin/install.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/plugin/list.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/plugin/push.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/plugin/remove.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/plugin/set.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/plugin/upgrade.go delete mode 100644 
fn/vendor/github.com/docker/docker/cli/command/prune/prune.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/registry.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/registry/login.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/registry/logout.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/registry/search.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/secret/client_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/secret/cmd.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/secret/create.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/secret/create_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/secret/inspect.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/secret/inspect_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/secret/ls.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/secret/ls_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/secret/remove.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/secret/remove_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-create-with-name.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-inspect-with-format.json-template.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-inspect-with-format.simple-template.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-inspect-without-format.multiple-secrets-with-labels.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-inspect-without-format.single-secret.golden delete mode 100644 
fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-list-with-config-format.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-list-with-filter.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-list-with-format.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-list-with-quiet-option.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-list.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/service/cmd.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/service/create.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/service/helpers.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/service/inspect.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/service/inspect_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/service/list.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/service/logs.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/service/opts.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/service/opts_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/service/parse.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/service/progress/progress.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/service/ps.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/service/remove.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/service/scale.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/service/trust.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/service/update.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/service/update_test.go delete mode 
100644 fn/vendor/github.com/docker/docker/cli/command/stack/client_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/stack/cmd.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/stack/common.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/stack/deploy.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/stack/deploy_bundlefile.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/stack/deploy_composefile.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/stack/deploy_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/stack/list.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/stack/opts.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/stack/ps.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/stack/remove.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/stack/remove_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/stack/services.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/client_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/cmd.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/init.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/init_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/join.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/join_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/join_token.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/join_token_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/leave.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/leave_test.go delete mode 100644 
fn/vendor/github.com/docker/docker/cli/command/swarm/opts.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/opts_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/init-init-autolock.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/init-init.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/jointoken-manager-quiet.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/jointoken-manager-rotate.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/jointoken-manager.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/jointoken-worker-quiet.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/jointoken-worker.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/unlockkeys-unlock-key-quiet.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate-quiet.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/unlockkeys-unlock-key.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/update-all-flags-quiet.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/update-autolock-unlock-key.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/update-noargs.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/unlock.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/unlock_key.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/unlock_key_test.go delete mode 100644 
fn/vendor/github.com/docker/docker/cli/command/swarm/unlock_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/update.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/swarm/update_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/system/cmd.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/system/df.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/system/events.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/system/info.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/system/inspect.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/system/prune.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/system/version.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/task/print.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/trust.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/utils.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/volume/client_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/volume/cmd.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/volume/create.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/volume/create_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/volume/inspect.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/volume/inspect_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/volume/list.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/volume/list_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/volume/prune.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/volume/prune_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/volume/remove.go delete mode 100644 
fn/vendor/github.com/docker/docker/cli/command/volume/remove_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/volume/testdata/volume-inspect-with-format.json-template.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/volume/testdata/volume-inspect-with-format.simple-template.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/volume/testdata/volume-inspect-without-format.multiple-volume-with-labels.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/volume/testdata/volume-inspect-without-format.single-volume.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/volume/testdata/volume-list-with-config-format.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/volume/testdata/volume-list-with-format.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/volume/testdata/volume-list-without-format.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/volume/testdata/volume-prune-no.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/volume/testdata/volume-prune-yes.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/volume/testdata/volume-prune.deletedVolumes.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/command/volume/testdata/volume-prune.empty.golden delete mode 100644 fn/vendor/github.com/docker/docker/cli/compose/convert/compose.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/compose/convert/compose_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/compose/convert/service.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/compose/convert/service_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/compose/convert/volume.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/compose/convert/volume_test.go delete mode 100644 
fn/vendor/github.com/docker/docker/cli/compose/interpolation/interpolation.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/compose/interpolation/interpolation_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/compose/loader/example1.env delete mode 100644 fn/vendor/github.com/docker/docker/cli/compose/loader/example2.env delete mode 100644 fn/vendor/github.com/docker/docker/cli/compose/loader/full-example.yml delete mode 100644 fn/vendor/github.com/docker/docker/cli/compose/loader/loader.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/compose/loader/loader_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/compose/loader/volume.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/compose/loader/volume_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/compose/schema/bindata.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/compose/schema/schema.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/compose/schema/schema_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/compose/template/template.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/compose/template/template_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/compose/types/types.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/config/config.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/config/config_test.go create mode 100644 fn/vendor/github.com/docker/docker/cli/config/configdir.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/config/configfile/file.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/config/configfile/file_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/config/credentials/credentials.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/config/credentials/default_store.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/config/credentials/default_store_darwin.go delete 
mode 100644 fn/vendor/github.com/docker/docker/cli/config/credentials/default_store_linux.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/config/credentials/default_store_unsupported.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/config/credentials/default_store_windows.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/config/credentials/file_store.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/config/credentials/file_store_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/config/credentials/native_store.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/config/credentials/native_store_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/flags/client.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/internal/test/builders/doc.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/internal/test/builders/node.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/internal/test/builders/secret.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/internal/test/builders/service.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/internal/test/builders/swarm.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/internal/test/builders/task.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/internal/test/builders/volume.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/internal/test/cli.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/internal/test/doc.go delete mode 100644 fn/vendor/github.com/docker/docker/cli/trust/trust.go create mode 100644 fn/vendor/github.com/docker/docker/client/build_prune.go create mode 100644 fn/vendor/github.com/docker/docker/client/config_create.go create mode 100644 fn/vendor/github.com/docker/docker/client/config_create_test.go create mode 100644 fn/vendor/github.com/docker/docker/client/config_inspect.go create mode 100644 
fn/vendor/github.com/docker/docker/client/config_inspect_test.go create mode 100644 fn/vendor/github.com/docker/docker/client/config_list.go create mode 100644 fn/vendor/github.com/docker/docker/client/config_list_test.go create mode 100644 fn/vendor/github.com/docker/docker/client/config_remove.go create mode 100644 fn/vendor/github.com/docker/docker/client/config_remove_test.go create mode 100644 fn/vendor/github.com/docker/docker/client/config_update.go create mode 100644 fn/vendor/github.com/docker/docker/client/config_update_test.go create mode 100644 fn/vendor/github.com/docker/docker/client/disk_usage_test.go create mode 100644 fn/vendor/github.com/docker/docker/client/distribution_inspect.go create mode 100644 fn/vendor/github.com/docker/docker/client/distribution_inspect_test.go create mode 100644 fn/vendor/github.com/docker/docker/client/parse_logs.go create mode 100644 fn/vendor/github.com/docker/docker/client/parse_logs_test.go create mode 100644 fn/vendor/github.com/docker/docker/client/ping_test.go create mode 100644 fn/vendor/github.com/docker/docker/client/session.go create mode 100644 fn/vendor/github.com/docker/docker/client/session/filesync/diffcopy.go create mode 100644 fn/vendor/github.com/docker/docker/client/session/filesync/filesync.go create mode 100644 fn/vendor/github.com/docker/docker/client/session/filesync/filesync.pb.go create mode 100644 fn/vendor/github.com/docker/docker/client/session/filesync/filesync.proto create mode 100644 fn/vendor/github.com/docker/docker/client/session/filesync/filesync_test.go create mode 100644 fn/vendor/github.com/docker/docker/client/session/filesync/generate.go create mode 100644 fn/vendor/github.com/docker/docker/client/session/filesync/tarstream.go create mode 100644 fn/vendor/github.com/docker/docker/client/session/grpc.go create mode 100644 fn/vendor/github.com/docker/docker/client/session/manager.go create mode 100644 fn/vendor/github.com/docker/docker/client/session/session.go create mode 100644 
fn/vendor/github.com/docker/docker/client/session/testutil/testutil.go create mode 100644 fn/vendor/github.com/docker/docker/client/swarm_get_unlock_key_test.go create mode 100644 fn/vendor/github.com/docker/docker/client/swarm_unlock_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cmd/docker/daemon_none.go delete mode 100644 fn/vendor/github.com/docker/docker/cmd/docker/daemon_none_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cmd/docker/daemon_unit_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cmd/docker/daemon_unix.go delete mode 100644 fn/vendor/github.com/docker/docker/cmd/docker/docker.go delete mode 100644 fn/vendor/github.com/docker/docker/cmd/docker/docker_test.go delete mode 100644 fn/vendor/github.com/docker/docker/cmd/docker/docker_windows.go rename fn/vendor/github.com/docker/docker/{cli/flags/common.go => cmd/dockerd/options.go} (62%) rename fn/vendor/github.com/docker/docker/{cli/flags/common_test.go => cmd/dockerd/options_test.go} (57%) create mode 100644 fn/vendor/github.com/docker/docker/container/view.go create mode 100644 fn/vendor/github.com/docker/docker/container/view_test.go create mode 100644 fn/vendor/github.com/docker/docker/contrib/builder/rpm/ppc64le/centos-7/Dockerfile create mode 100644 fn/vendor/github.com/docker/docker/contrib/builder/rpm/ppc64le/opensuse-42.1/Dockerfile create mode 100755 fn/vendor/github.com/docker/docker/contrib/builder/rpm/s390x/build.sh create mode 100644 fn/vendor/github.com/docker/docker/contrib/builder/rpm/s390x/clefos-base-s390x-7/Dockerfile create mode 100755 fn/vendor/github.com/docker/docker/contrib/builder/rpm/s390x/generate.sh create mode 100644 fn/vendor/github.com/docker/docker/contrib/builder/rpm/s390x/opensuse-tumbleweed-1/Dockerfile delete mode 100644 fn/vendor/github.com/docker/docker/contrib/completion/REVIEWERS delete mode 100644 fn/vendor/github.com/docker/docker/contrib/completion/bash/docker delete mode 100644 
fn/vendor/github.com/docker/docker/contrib/completion/fish/docker.fish delete mode 100644 fn/vendor/github.com/docker/docker/contrib/completion/powershell/readme.txt delete mode 100644 fn/vendor/github.com/docker/docker/contrib/completion/zsh/REVIEWERS delete mode 100644 fn/vendor/github.com/docker/docker/contrib/completion/zsh/_docker delete mode 100755 fn/vendor/github.com/docker/docker/contrib/mkimage-busybox.sh delete mode 100755 fn/vendor/github.com/docker/docker/contrib/mkimage-debootstrap.sh delete mode 100755 fn/vendor/github.com/docker/docker/contrib/mkimage-rinse.sh delete mode 100644 fn/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/LICENSE delete mode 100644 fn/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/Makefile delete mode 100644 fn/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.fc delete mode 100644 fn/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.if delete mode 100644 fn/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.te delete mode 100644 fn/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker_selinux.8.gz delete mode 100644 fn/vendor/github.com/docker/docker/contrib/syscall-test/appletalk.c create mode 100644 fn/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions.go create mode 100644 fn/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_unix.go create mode 100644 fn/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_windows.go create mode 100644 fn/vendor/github.com/docker/docker/daemon/build.go create mode 100644 fn/vendor/github.com/docker/docker/daemon/cluster/configs.go create mode 100644 fn/vendor/github.com/docker/docker/daemon/cluster/controllers/plugin/controller.go create mode 100644 fn/vendor/github.com/docker/docker/daemon/cluster/controllers/plugin/controller_test.go create mode 100644 
fn/vendor/github.com/docker/docker/daemon/cluster/convert/config.go create mode 100644 fn/vendor/github.com/docker/docker/daemon/cluster/convert/service_test.go create mode 100644 fn/vendor/github.com/docker/docker/daemon/configs.go create mode 100644 fn/vendor/github.com/docker/docker/daemon/configs_linux.go create mode 100644 fn/vendor/github.com/docker/docker/daemon/configs_unsupported.go create mode 100644 fn/vendor/github.com/docker/docker/daemon/configs_windows.go delete mode 100644 fn/vendor/github.com/docker/docker/daemon/debugtrap.go create mode 100644 fn/vendor/github.com/docker/docker/daemon/dependency.go create mode 100644 fn/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/device_setup.go create mode 100644 fn/vendor/github.com/docker/docker/daemon/graphdriver/lcow/lcow.go rename fn/vendor/github.com/docker/docker/{pkg/ioutils => daemon/logger/jsonfilelog/multireader}/multireader.go (96%) rename fn/vendor/github.com/docker/docker/{pkg/ioutils => daemon/logger/jsonfilelog/multireader}/multireader_test.go (99%) create mode 100644 fn/vendor/github.com/docker/docker/daemon/metrics_unix.go create mode 100644 fn/vendor/github.com/docker/docker/daemon/metrics_unsupported.go create mode 100644 fn/vendor/github.com/docker/docker/daemon/secrets_windows.go create mode 100644 fn/vendor/github.com/docker/docker/daemon/volumes_unix_test.go delete mode 100644 fn/vendor/github.com/docker/docker/docs/README.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/deprecated.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/extend/EBS_volume.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/extend/config.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/extend/images/authz_additional_info.png delete mode 100644 fn/vendor/github.com/docker/docker/docs/extend/images/authz_allow.png delete mode 100644 fn/vendor/github.com/docker/docker/docs/extend/images/authz_chunked.png delete mode 100644 
fn/vendor/github.com/docker/docker/docs/extend/images/authz_connection_hijack.png delete mode 100644 fn/vendor/github.com/docker/docker/docs/extend/images/authz_deny.png delete mode 100644 fn/vendor/github.com/docker/docker/docs/extend/index.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/extend/legacy_plugins.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/extend/plugin_api.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/extend/plugins_authorization.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/extend/plugins_graphdriver.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/extend/plugins_logging.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/extend/plugins_network.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/extend/plugins_services.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/extend/plugins_volume.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/builder.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/attach.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/build.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/cli.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/commit.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/container.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/container_prune.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/cp.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/create.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/deploy.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/diff.md delete mode 100644 
fn/vendor/github.com/docker/docker/docs/reference/commandline/dockerd.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/events.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/exec.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/export.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/history.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/image.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/image_prune.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/images.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/import.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/index.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/info.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/inspect.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/kill.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/load.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/login.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/logout.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/logs.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/network.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/network_connect.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/network_create.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/network_disconnect.md delete mode 100644 
fn/vendor/github.com/docker/docker/docs/reference/commandline/network_inspect.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/network_ls.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/network_prune.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/network_rm.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/node.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/node_demote.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/node_inspect.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/node_ls.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/node_promote.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/node_ps.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/node_rm.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/node_update.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/pause.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_create.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_disable.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_enable.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_inspect.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_install.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_ls.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_push.md delete mode 
100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_rm.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_set.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_upgrade.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/port.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/ps.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/pull.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/push.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/rename.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/restart.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/rm.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/rmi.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/run.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/save.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/search.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/secret.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/secret_create.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/secret_inspect.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/secret_ls.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/secret_rm.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/service.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/service_create.md delete mode 100644 
fn/vendor/github.com/docker/docker/docs/reference/commandline/service_inspect.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/service_logs.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/service_ls.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/service_ps.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/service_rm.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/service_scale.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/service_update.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/stack.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/stack_deploy.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/stack_ls.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/stack_ps.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/stack_rm.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/stack_services.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/start.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/stats.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/stop.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_init.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join_token.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_leave.md delete mode 100644 
fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock_key.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_update.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/system.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/system_df.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/system_prune.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/tag.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/top.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/unpause.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/update.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/version.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/volume.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/volume_create.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/volume_inspect.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/volume_ls.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/volume_prune.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/volume_rm.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/commandline/wait.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/glossary.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/index.md delete mode 100644 fn/vendor/github.com/docker/docker/docs/reference/run.md create mode 100644 fn/vendor/github.com/docker/docker/docs/static_files/moby-project-logo.png 
delete mode 100644 fn/vendor/github.com/docker/docker/docs/yaml/Dockerfile delete mode 100644 fn/vendor/github.com/docker/docker/docs/yaml/generate.go delete mode 100644 fn/vendor/github.com/docker/docker/docs/yaml/yaml.go delete mode 100644 fn/vendor/github.com/docker/docker/experimental/README.md delete mode 100644 fn/vendor/github.com/docker/docker/experimental/checkpoint-restore.md delete mode 100644 fn/vendor/github.com/docker/docker/experimental/docker-stacks-and-bundles.md delete mode 100644 fn/vendor/github.com/docker/docker/experimental/images/ipvlan-l3.gliffy delete mode 100644 fn/vendor/github.com/docker/docker/experimental/images/ipvlan-l3.png delete mode 100644 fn/vendor/github.com/docker/docker/experimental/images/ipvlan-l3.svg delete mode 100644 fn/vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.gliffy delete mode 100644 fn/vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.png delete mode 100644 fn/vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.svg delete mode 100644 fn/vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.gliffy delete mode 100644 fn/vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.png delete mode 100644 fn/vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.svg delete mode 100644 fn/vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.gliffy delete mode 100644 fn/vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.png delete mode 100644 fn/vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.svg delete mode 100644 fn/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.gliffy delete mode 100644 fn/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.png delete mode 100644 fn/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.svg delete mode 100644 
fn/vendor/github.com/docker/docker/experimental/vlan-networks.md create mode 100644 fn/vendor/github.com/docker/docker/hack/README.md delete mode 100644 fn/vendor/github.com/docker/docker/hack/make/binary-client delete mode 100644 fn/vendor/github.com/docker/docker/hack/make/dynbinary-client mode change 100644 => 100755 fn/vendor/github.com/docker/docker/hack/make/install-binary delete mode 100644 fn/vendor/github.com/docker/docker/hack/make/install-binary-client delete mode 100644 fn/vendor/github.com/docker/docker/hack/make/yaml-docs-generator delete mode 100755 fn/vendor/github.com/docker/docker/hack/validate/compose-bindata delete mode 100755 fn/vendor/github.com/docker/docker/hooks/post_build create mode 100644 fn/vendor/github.com/docker/docker/integration-cli/cli/build/fakecontext/context.go create mode 100644 fn/vendor/github.com/docker/docker/integration-cli/cli/build/fakegit/fakegit.go rename fn/vendor/github.com/docker/docker/integration-cli/{fixtures_test.go => cli/build/fakestorage/fixtures.go} (88%) create mode 100644 fn/vendor/github.com/docker/docker/integration-cli/cli/build/fakestorage/storage.go create mode 100644 fn/vendor/github.com/docker/docker/integration-cli/docker_api_session_test.go create mode 100644 fn/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_config_test.go create mode 100644 fn/vendor/github.com/docker/docker/integration-cli/docker_cli_config_create_test.go create mode 100644 fn/vendor/github.com/docker/docker/integration-cli/docker_cli_config_inspect_test.go create mode 100644 fn/vendor/github.com/docker/docker/integration-cli/docker_cli_config_ls_test.go rename fn/vendor/github.com/docker/docker/integration-cli/{docker_cli_cp_utils.go => docker_cli_cp_utils_test.go} (78%) create mode 100644 fn/vendor/github.com/docker/docker/integration-cli/fixtures/plugin/basic/basic.go create mode 100644 fn/vendor/github.com/docker/docker/integration-cli/fixtures/plugin/plugin.go delete mode 100644 
fn/vendor/github.com/docker/docker/keys/launchpad-ppa-zfs.asc create mode 100644 fn/vendor/github.com/docker/docker/layer/filestore_unix.go create mode 100644 fn/vendor/github.com/docker/docker/layer/filestore_windows.go create mode 100644 fn/vendor/github.com/docker/docker/layer/ro_layer_unix.go create mode 100644 fn/vendor/github.com/docker/docker/libcontainerd/queue_unix_test.go delete mode 100644 fn/vendor/github.com/docker/docker/man/Dockerfile delete mode 100644 fn/vendor/github.com/docker/docker/man/Dockerfile.5.md delete mode 100644 fn/vendor/github.com/docker/docker/man/Dockerfile.aarch64 delete mode 100644 fn/vendor/github.com/docker/docker/man/Dockerfile.armhf delete mode 100644 fn/vendor/github.com/docker/docker/man/Dockerfile.ppc64le delete mode 100644 fn/vendor/github.com/docker/docker/man/Dockerfile.s390x delete mode 100644 fn/vendor/github.com/docker/docker/man/README.md delete mode 100644 fn/vendor/github.com/docker/docker/man/docker-build.1.md delete mode 100644 fn/vendor/github.com/docker/docker/man/docker-config-json.5.md delete mode 100644 fn/vendor/github.com/docker/docker/man/docker-run.1.md delete mode 100644 fn/vendor/github.com/docker/docker/man/docker.1.md delete mode 100644 fn/vendor/github.com/docker/docker/man/dockerd.8.md delete mode 100644 fn/vendor/github.com/docker/docker/man/generate.go delete mode 100755 fn/vendor/github.com/docker/docker/man/generate.sh delete mode 100644 fn/vendor/github.com/docker/docker/man/glide.lock delete mode 100644 fn/vendor/github.com/docker/docker/man/glide.yaml delete mode 100755 fn/vendor/github.com/docker/docker/man/md2man-all.sh delete mode 100644 fn/vendor/github.com/docker/docker/man/src/attach.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/commit.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/container/attach.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/container/commit.md delete mode 100644 
fn/vendor/github.com/docker/docker/man/src/container/cp.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/container/create-example.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/container/create.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/container/diff.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/container/exec.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/container/export.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/container/kill.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/container/logs.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/container/ls.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/container/pause.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/container/port.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/container/rename.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/container/restart.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/container/rm.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/container/run.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/container/start.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/container/stats.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/container/stop.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/container/top.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/container/unpause.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/container/update.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/container/wait.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/cp.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/create.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/diff.md delete mode 100644 
fn/vendor/github.com/docker/docker/man/src/events.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/exec.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/export.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/history.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/image/build.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/image/history.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/image/import.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/image/load.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/image/ls.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/image/pull.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/image/push.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/image/rm.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/image/save.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/image/tag.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/images.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/import.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/info.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/inspect.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/kill.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/load.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/login.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/logout.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/logs.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/network/connect.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/network/create.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/network/disconnect.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/network/inspect.md 
delete mode 100644 fn/vendor/github.com/docker/docker/man/src/network/ls.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/network/rm.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/pause.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/plugin/ls.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/port.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/ps.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/pull.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/push.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/rename.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/restart.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/rm.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/rmi.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/save.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/search.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/start.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/stats.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/stop.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/system/events.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/system/info.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/tag.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/top.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/unpause.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/update.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/version.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/volume.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/volume/create.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/volume/inspect.md delete mode 100644 
fn/vendor/github.com/docker/docker/man/src/volume/ls.md delete mode 100644 fn/vendor/github.com/docker/docker/man/src/wait.md rename fn/vendor/github.com/docker/docker/oci/{defaults_linux.go => defaults.go} (60%) delete mode 100644 fn/vendor/github.com/docker/docker/oci/defaults_solaris.go delete mode 100644 fn/vendor/github.com/docker/docker/oci/defaults_windows.go delete mode 100644 fn/vendor/github.com/docker/docker/opts/mount.go delete mode 100644 fn/vendor/github.com/docker/docker/opts/mount_test.go delete mode 100644 fn/vendor/github.com/docker/docker/opts/port.go delete mode 100644 fn/vendor/github.com/docker/docker/opts/port_test.go delete mode 100644 fn/vendor/github.com/docker/docker/opts/secret.go delete mode 100644 fn/vendor/github.com/docker/docker/opts/secret_test.go delete mode 100644 fn/vendor/github.com/docker/docker/opts/throttledevice.go delete mode 100644 fn/vendor/github.com/docker/docker/opts/weightdevice.go create mode 100644 fn/vendor/github.com/docker/docker/pkg/authorization/api_test.go create mode 100644 fn/vendor/github.com/docker/docker/pkg/authorization/middleware_test.go create mode 100644 fn/vendor/github.com/docker/docker/pkg/authorization/middleware_unix_test.go delete mode 100644 fn/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go delete mode 100644 fn/vendor/github.com/docker/docker/pkg/gitutils/gitutils_test.go delete mode 100644 fn/vendor/github.com/docker/docker/pkg/httputils/httputils.go delete mode 100644 fn/vendor/github.com/docker/docker/pkg/httputils/httputils_test.go delete mode 100644 fn/vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go delete mode 100644 fn/vendor/github.com/docker/docker/pkg/ioutils/fmt.go delete mode 100644 fn/vendor/github.com/docker/docker/pkg/ioutils/fmt_test.go create mode 100644 fn/vendor/github.com/docker/docker/pkg/platform/utsname_int8_test.go create mode 100644 fn/vendor/github.com/docker/docker/pkg/platform/utsname_uint8_test.go create mode 100644 
fn/vendor/github.com/docker/docker/pkg/plugins/transport/http_test.go create mode 100644 fn/vendor/github.com/docker/docker/pkg/promise/promise_test.go delete mode 100644 fn/vendor/github.com/docker/docker/pkg/random/random.go delete mode 100644 fn/vendor/github.com/docker/docker/pkg/random/random_test.go create mode 100644 fn/vendor/github.com/docker/docker/pkg/reexec/reexec_test.go create mode 100644 fn/vendor/github.com/docker/docker/pkg/signal/signal_linux_test.go create mode 100644 fn/vendor/github.com/docker/docker/pkg/signal/signal_test.go create mode 100644 fn/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go create mode 100644 fn/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter_test.go create mode 100644 fn/vendor/github.com/docker/docker/pkg/system/init.go create mode 100644 fn/vendor/github.com/docker/docker/pkg/system/init_windows.go create mode 100644 fn/vendor/github.com/docker/docker/pkg/system/lcow_unix.go create mode 100644 fn/vendor/github.com/docker/docker/pkg/system/lcow_windows.go create mode 100644 fn/vendor/github.com/docker/docker/pkg/system/path.go create mode 100644 fn/vendor/github.com/docker/docker/pkg/system/rm.go create mode 100644 fn/vendor/github.com/docker/docker/pkg/system/rm_test.go create mode 100644 fn/vendor/github.com/docker/docker/pkg/term/proxy.go create mode 100644 fn/vendor/github.com/docker/docker/pkg/term/proxy_test.go rename fn/vendor/github.com/docker/docker/pkg/term/{tc_other.go => tc.go} (50%) delete mode 100644 fn/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go create mode 100644 fn/vendor/github.com/docker/docker/pkg/term/term_linux_test.go create mode 100644 fn/vendor/github.com/docker/docker/pkg/term/termios_bsd.go delete mode 100644 fn/vendor/github.com/docker/docker/pkg/term/termios_darwin.go delete mode 100644 fn/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go delete mode 100644 fn/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go rename 
fn/vendor/github.com/docker/docker/pkg/term/{term_unix.go => winsize.go} (67%) rename fn/vendor/github.com/docker/docker/pkg/term/{term_solaris.go => winsize_solaris_cgo.go} (73%) delete mode 100644 fn/vendor/github.com/docker/docker/pkg/testutil/assert/assert.go create mode 100644 fn/vendor/github.com/docker/docker/pkg/testutil/helpers.go delete mode 100644 fn/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go create mode 100644 fn/vendor/github.com/docker/docker/plugin/events.go rename fn/vendor/github.com/docker/docker/{pkg/httputils => registry/resumable}/resumablerequestreader.go (66%) rename fn/vendor/github.com/docker/docker/{pkg/httputils => registry/resumable}/resumablerequestreader_test.go (69%) create mode 100644 fn/vendor/github.com/docker/docker/reports/2017-05-01.md create mode 100644 fn/vendor/github.com/docker/docker/reports/2017-05-08.md create mode 100644 fn/vendor/github.com/docker/docker/reports/2017-05-15.md create mode 100644 fn/vendor/github.com/docker/docker/reports/2017-06-05.md create mode 100644 fn/vendor/github.com/docker/docker/reports/2017-06-12.md create mode 100644 fn/vendor/github.com/docker/docker/reports/2017-06-26.md create mode 100644 fn/vendor/github.com/docker/docker/reports/builder/2017-05-01.md create mode 100644 fn/vendor/github.com/docker/docker/reports/builder/2017-05-08.md create mode 100644 fn/vendor/github.com/docker/docker/reports/builder/2017-05-15.md create mode 100644 fn/vendor/github.com/docker/docker/reports/builder/2017-05-22.md create mode 100644 fn/vendor/github.com/docker/docker/reports/builder/2017-05-29.md create mode 100644 fn/vendor/github.com/docker/docker/reports/builder/2017-06-05.md create mode 100644 fn/vendor/github.com/docker/docker/reports/builder/2017-06-12.md create mode 100644 fn/vendor/github.com/docker/docker/reports/builder/2017-06-26.md delete mode 100644 fn/vendor/github.com/docker/docker/runconfig/opts/envfile.go delete mode 100644 
fn/vendor/github.com/docker/docker/runconfig/opts/envfile_test.go create mode 100644 fn/vendor/golang.org/x/net/bpf/asm.go create mode 100644 fn/vendor/golang.org/x/net/bpf/constants.go create mode 100644 fn/vendor/golang.org/x/net/bpf/doc.go create mode 100644 fn/vendor/golang.org/x/net/bpf/instructions.go create mode 100644 fn/vendor/golang.org/x/net/bpf/instructions_test.go create mode 100644 fn/vendor/golang.org/x/net/bpf/setter.go create mode 100644 fn/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf create mode 100644 fn/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt create mode 100644 fn/vendor/golang.org/x/net/bpf/vm.go create mode 100644 fn/vendor/golang.org/x/net/bpf/vm_aluop_test.go create mode 100644 fn/vendor/golang.org/x/net/bpf/vm_bpf_test.go create mode 100644 fn/vendor/golang.org/x/net/bpf/vm_extension_test.go create mode 100644 fn/vendor/golang.org/x/net/bpf/vm_instructions.go create mode 100644 fn/vendor/golang.org/x/net/bpf/vm_jump_test.go create mode 100644 fn/vendor/golang.org/x/net/bpf/vm_load_test.go create mode 100644 fn/vendor/golang.org/x/net/bpf/vm_ret_test.go create mode 100644 fn/vendor/golang.org/x/net/bpf/vm_scratch_test.go create mode 100644 fn/vendor/golang.org/x/net/bpf/vm_test.go delete mode 100644 fn/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go delete mode 100644 fn/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go create mode 100644 fn/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go create mode 100644 fn/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go create mode 100644 fn/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go create mode 100644 fn/vendor/golang.org/x/net/context/go17.go create mode 100644 fn/vendor/golang.org/x/net/context/pre_go17.go create mode 100644 fn/vendor/golang.org/x/net/dns/dnsmessage/example_test.go create mode 100644 fn/vendor/golang.org/x/net/dns/dnsmessage/message.go create mode 100644 
fn/vendor/golang.org/x/net/dns/dnsmessage/message_test.go create mode 100644 fn/vendor/golang.org/x/net/http2/ciphers.go create mode 100644 fn/vendor/golang.org/x/net/http2/ciphers_test.go create mode 100644 fn/vendor/golang.org/x/net/http2/databuffer.go create mode 100644 fn/vendor/golang.org/x/net/http2/databuffer_test.go delete mode 100644 fn/vendor/golang.org/x/net/http2/fixed_buffer.go delete mode 100644 fn/vendor/golang.org/x/net/http2/fixed_buffer_test.go delete mode 100644 fn/vendor/golang.org/x/net/http2/go15.go create mode 100644 fn/vendor/golang.org/x/net/http2/go16.go create mode 100644 fn/vendor/golang.org/x/net/http2/go17.go create mode 100644 fn/vendor/golang.org/x/net/http2/go17_not18.go create mode 100644 fn/vendor/golang.org/x/net/http2/go18.go create mode 100644 fn/vendor/golang.org/x/net/http2/go18_test.go rename fn/vendor/golang.org/x/net/http2/{not_go15.go => go19.go} (51%) create mode 100644 fn/vendor/golang.org/x/net/http2/go19_test.go create mode 100644 fn/vendor/golang.org/x/net/http2/h2demo/tmpl.go create mode 100644 fn/vendor/golang.org/x/net/http2/hpack/tables_test.go create mode 100644 fn/vendor/golang.org/x/net/http2/not_go17.go create mode 100644 fn/vendor/golang.org/x/net/http2/not_go18.go create mode 100644 fn/vendor/golang.org/x/net/http2/not_go19.go delete mode 100644 fn/vendor/golang.org/x/net/http2/priority_test.go create mode 100644 fn/vendor/golang.org/x/net/http2/server_push_test.go create mode 100644 fn/vendor/golang.org/x/net/http2/writesched_priority.go create mode 100644 fn/vendor/golang.org/x/net/http2/writesched_priority_test.go create mode 100644 fn/vendor/golang.org/x/net/http2/writesched_random.go create mode 100644 fn/vendor/golang.org/x/net/http2/writesched_random_test.go create mode 100644 fn/vendor/golang.org/x/net/http2/writesched_test.go create mode 100644 fn/vendor/golang.org/x/net/idna/example_test.go create mode 100644 fn/vendor/golang.org/x/net/idna/tables.go create mode 100644 
fn/vendor/golang.org/x/net/idna/trie.go create mode 100644 fn/vendor/golang.org/x/net/idna/trieval.go delete mode 100644 fn/vendor/golang.org/x/net/internal/nettest/error_stub.go create mode 100644 fn/vendor/golang.org/x/net/internal/nettest/helper_bsd.go create mode 100644 fn/vendor/golang.org/x/net/internal/nettest/helper_nobsd.go rename fn/vendor/golang.org/x/net/internal/nettest/{error_posix.go => helper_posix.go} (100%) create mode 100644 fn/vendor/golang.org/x/net/internal/nettest/helper_stub.go rename fn/vendor/golang.org/x/net/internal/nettest/{rlimit_unix.go => helper_unix.go} (67%) rename fn/vendor/golang.org/x/net/internal/nettest/{stack_windows.go => helper_windows.go} (80%) delete mode 100644 fn/vendor/golang.org/x/net/internal/nettest/rlimit_stub.go delete mode 100644 fn/vendor/golang.org/x/net/internal/nettest/rlimit_windows.go delete mode 100644 fn/vendor/golang.org/x/net/internal/nettest/stack_stub.go delete mode 100644 fn/vendor/golang.org/x/net/internal/nettest/stack_unix.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/cmsghdr.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/defs_darwin.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/defs_freebsd.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/defs_linux.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/defs_netbsd.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/defs_openbsd.go create mode 100644 
fn/vendor/golang.org/x/net/internal/socket/defs_solaris.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/error_unix.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/error_windows.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/iovec_32bit.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/iovec_64bit.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/iovec_stub.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/msghdr_linux.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/msghdr_stub.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/rawconn.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/rawconn_msg.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/rawconn_stub.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/reflect.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/socket.go create mode 100644 
fn/vendor/golang.org/x/net/internal/socket/socket_go1_9_test.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/socket_test.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys_bsd.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys_darwin.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys_linux.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys_linux_386.go rename fn/vendor/golang.org/x/net/{ipv6/thunk_linux_386.s => internal/socket/sys_linux_386.s} (59%) create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys_netbsd.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys_posix.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys_solaris.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s create mode 100644 
fn/vendor/golang.org/x/net/internal/socket/sys_stub.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys_unix.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/sys_windows.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go create mode 100644 
fn/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go create mode 100644 fn/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go create mode 100644 fn/vendor/golang.org/x/net/ipv4/batch.go create mode 100644 fn/vendor/golang.org/x/net/ipv4/bpf_test.go create mode 100644 fn/vendor/golang.org/x/net/ipv4/control_test.go rename fn/vendor/golang.org/x/net/ipv4/{dgramopt_posix.go => dgramopt.go} (70%) delete mode 100644 fn/vendor/golang.org/x/net/ipv4/dgramopt_stub.go rename fn/vendor/golang.org/x/net/ipv4/{genericopt_posix.go => genericopt.go} (61%) delete mode 100644 fn/vendor/golang.org/x/net/ipv4/genericopt_stub.go delete mode 100644 fn/vendor/golang.org/x/net/ipv4/helper_stub.go delete mode 100644 fn/vendor/golang.org/x/net/ipv4/helper_unix.go delete mode 100644 fn/vendor/golang.org/x/net/ipv4/helper_windows.go delete mode 100644 fn/vendor/golang.org/x/net/ipv4/mocktransponder_test.go create mode 100644 fn/vendor/golang.org/x/net/ipv4/packet_go1_8.go create mode 100644 fn/vendor/golang.org/x/net/ipv4/packet_go1_9.go create mode 100644 fn/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go create mode 100644 fn/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go create mode 100644 fn/vendor/golang.org/x/net/ipv4/readwrite_go1_8_test.go create mode 100644 fn/vendor/golang.org/x/net/ipv4/readwrite_go1_9_test.go delete mode 100644 fn/vendor/golang.org/x/net/ipv4/sockopt_asmreq_stub.go delete mode 100644 fn/vendor/golang.org/x/net/ipv4/sockopt_asmreq_unix.go delete mode 100644 fn/vendor/golang.org/x/net/ipv4/sockopt_asmreq_windows.go delete mode 100644 fn/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_stub.go create mode 100644 fn/vendor/golang.org/x/net/ipv4/sockopt_posix.go delete mode 100644 fn/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_stub.go delete mode 100644 
fn/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go delete mode 100644 fn/vendor/golang.org/x/net/ipv4/sockopt_unix.go delete mode 100644 fn/vendor/golang.org/x/net/ipv4/sockopt_windows.go rename fn/vendor/golang.org/x/net/ipv4/{sockopt_asmreq.go => sys_asmreq.go} (57%) create mode 100644 fn/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go rename fn/vendor/golang.org/x/net/ipv4/{sockopt_asmreqn_unix.go => sys_asmreqn.go} (50%) rename fn/vendor/golang.org/x/net/{ipv6/sockopt_ssmreq_stub.go => ipv4/sys_asmreqn_stub.go} (51%) create mode 100644 fn/vendor/golang.org/x/net/ipv4/sys_bpf.go create mode 100644 fn/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go create mode 100644 fn/vendor/golang.org/x/net/ipv4/sys_dragonfly.go delete mode 100644 fn/vendor/golang.org/x/net/ipv4/sys_openbsd.go create mode 100644 fn/vendor/golang.org/x/net/ipv4/sys_solaris.go create mode 100644 fn/vendor/golang.org/x/net/ipv4/sys_ssmreq.go create mode 100644 fn/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go delete mode 100644 fn/vendor/golang.org/x/net/ipv4/syscall_linux_386.go delete mode 100644 fn/vendor/golang.org/x/net/ipv4/syscall_unix.go delete mode 100644 fn/vendor/golang.org/x/net/ipv4/thunk_linux_386.s create mode 100644 fn/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go create mode 100644 fn/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go create mode 100644 fn/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go create mode 100644 fn/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go create mode 100644 fn/vendor/golang.org/x/net/ipv6/batch.go create mode 100644 fn/vendor/golang.org/x/net/ipv6/bpf_test.go create mode 100644 fn/vendor/golang.org/x/net/ipv6/control_test.go rename fn/vendor/golang.org/x/net/ipv6/{dgramopt_posix.go => dgramopt.go} (69%) delete mode 100644 fn/vendor/golang.org/x/net/ipv6/dgramopt_stub.go rename fn/vendor/golang.org/x/net/ipv6/{genericopt_posix.go => genericopt.go} (61%) delete mode 100644 fn/vendor/golang.org/x/net/ipv6/genericopt_stub.go delete mode 100644 
fn/vendor/golang.org/x/net/ipv6/helper_stub.go delete mode 100644 fn/vendor/golang.org/x/net/ipv6/helper_unix.go delete mode 100644 fn/vendor/golang.org/x/net/ipv6/helper_windows.go create mode 100644 fn/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go create mode 100644 fn/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go create mode 100644 fn/vendor/golang.org/x/net/ipv6/readwrite_go1_8_test.go create mode 100644 fn/vendor/golang.org/x/net/ipv6/readwrite_go1_9_test.go delete mode 100644 fn/vendor/golang.org/x/net/ipv6/sockopt_asmreq_unix.go delete mode 100644 fn/vendor/golang.org/x/net/ipv6/sockopt_asmreq_windows.go create mode 100644 fn/vendor/golang.org/x/net/ipv6/sockopt_posix.go delete mode 100644 fn/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go delete mode 100644 fn/vendor/golang.org/x/net/ipv6/sockopt_unix.go delete mode 100644 fn/vendor/golang.org/x/net/ipv6/sockopt_windows.go create mode 100644 fn/vendor/golang.org/x/net/ipv6/sys_asmreq.go create mode 100644 fn/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go create mode 100644 fn/vendor/golang.org/x/net/ipv6/sys_bpf.go create mode 100644 fn/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go create mode 100644 fn/vendor/golang.org/x/net/ipv6/sys_solaris.go create mode 100644 fn/vendor/golang.org/x/net/ipv6/sys_ssmreq.go create mode 100644 fn/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go delete mode 100644 fn/vendor/golang.org/x/net/ipv6/syscall_linux_386.go delete mode 100644 fn/vendor/golang.org/x/net/ipv6/syscall_unix.go create mode 100644 fn/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go create mode 100644 fn/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go create mode 100644 fn/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go create mode 100644 fn/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go create mode 100644 fn/vendor/golang.org/x/net/lex/httplex/httplex.go create mode 100644 fn/vendor/golang.org/x/net/lex/httplex/httplex_test.go create mode 100644 fn/vendor/golang.org/x/net/lif/address.go 
create mode 100644 fn/vendor/golang.org/x/net/lif/address_test.go create mode 100644 fn/vendor/golang.org/x/net/lif/binary.go create mode 100644 fn/vendor/golang.org/x/net/lif/defs_solaris.go create mode 100644 fn/vendor/golang.org/x/net/lif/lif.go create mode 100644 fn/vendor/golang.org/x/net/lif/link.go create mode 100644 fn/vendor/golang.org/x/net/lif/link_test.go create mode 100644 fn/vendor/golang.org/x/net/lif/sys.go create mode 100644 fn/vendor/golang.org/x/net/lif/sys_solaris_amd64.s create mode 100644 fn/vendor/golang.org/x/net/lif/syscall.go create mode 100644 fn/vendor/golang.org/x/net/lif/zsys_solaris_amd64.go create mode 100644 fn/vendor/golang.org/x/net/nettest/conntest.go create mode 100644 fn/vendor/golang.org/x/net/nettest/conntest_go16.go create mode 100644 fn/vendor/golang.org/x/net/nettest/conntest_go17.go create mode 100644 fn/vendor/golang.org/x/net/nettest/conntest_test.go create mode 100644 fn/vendor/golang.org/x/net/route/address.go create mode 100644 fn/vendor/golang.org/x/net/route/address_darwin_test.go create mode 100644 fn/vendor/golang.org/x/net/route/address_test.go create mode 100644 fn/vendor/golang.org/x/net/route/binary.go create mode 100644 fn/vendor/golang.org/x/net/route/defs_darwin.go create mode 100644 fn/vendor/golang.org/x/net/route/defs_dragonfly.go create mode 100644 fn/vendor/golang.org/x/net/route/defs_freebsd.go create mode 100644 fn/vendor/golang.org/x/net/route/defs_netbsd.go create mode 100644 fn/vendor/golang.org/x/net/route/defs_openbsd.go create mode 100644 fn/vendor/golang.org/x/net/route/interface.go create mode 100644 fn/vendor/golang.org/x/net/route/interface_announce.go create mode 100644 fn/vendor/golang.org/x/net/route/interface_classic.go create mode 100644 fn/vendor/golang.org/x/net/route/interface_freebsd.go create mode 100644 fn/vendor/golang.org/x/net/route/interface_multicast.go create mode 100644 fn/vendor/golang.org/x/net/route/interface_openbsd.go create mode 100644 
fn/vendor/golang.org/x/net/route/message.go create mode 100644 fn/vendor/golang.org/x/net/route/message_darwin_test.go create mode 100644 fn/vendor/golang.org/x/net/route/message_freebsd_test.go create mode 100644 fn/vendor/golang.org/x/net/route/message_test.go create mode 100644 fn/vendor/golang.org/x/net/route/route.go create mode 100644 fn/vendor/golang.org/x/net/route/route_classic.go create mode 100644 fn/vendor/golang.org/x/net/route/route_openbsd.go create mode 100644 fn/vendor/golang.org/x/net/route/route_test.go create mode 100644 fn/vendor/golang.org/x/net/route/sys.go create mode 100644 fn/vendor/golang.org/x/net/route/sys_darwin.go create mode 100644 fn/vendor/golang.org/x/net/route/sys_dragonfly.go create mode 100644 fn/vendor/golang.org/x/net/route/sys_freebsd.go create mode 100644 fn/vendor/golang.org/x/net/route/sys_netbsd.go create mode 100644 fn/vendor/golang.org/x/net/route/sys_openbsd.go create mode 100644 fn/vendor/golang.org/x/net/route/syscall.go create mode 100644 fn/vendor/golang.org/x/net/route/zsys_darwin.go create mode 100644 fn/vendor/golang.org/x/net/route/zsys_dragonfly.go create mode 100644 fn/vendor/golang.org/x/net/route/zsys_freebsd_386.go create mode 100644 fn/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go create mode 100644 fn/vendor/golang.org/x/net/route/zsys_freebsd_arm.go create mode 100644 fn/vendor/golang.org/x/net/route/zsys_netbsd.go create mode 100644 fn/vendor/golang.org/x/net/route/zsys_openbsd.go create mode 100644 fn/vendor/golang.org/x/net/trace/trace_go16.go create mode 100644 fn/vendor/golang.org/x/net/trace/trace_go17.go create mode 100644 fn/vendor/golang.org/x/net/webdav/file_go1.6.go create mode 100644 fn/vendor/golang.org/x/net/webdav/file_go1.7.go create mode 100644 fn/vendor/golang.org/x/net/websocket/dial.go create mode 100644 fn/vendor/golang.org/x/net/websocket/dial_test.go diff --git a/fn/glide.lock b/fn/glide.lock index 7392248bc..4b747fdef 100644 --- a/fn/glide.lock +++ b/fn/glide.lock @@ -1,10 
+1,10 @@ -hash: aac209bf551382f2b8637d3f771a7e94b9ec9749acad17f76f4406a1f4d4fefd -updated: 2017-07-06T12:45:48.209197618-07:00 +hash: aa34bfe25a59519219d2f4b1c274ba576a15f906a05a54cb1fd086a52e24788a +updated: 2017-07-07T15:30:24.342413836-07:00 imports: - name: github.com/asaskevich/govalidator version: aa5cce4a76edb1a5acecab1870c17abbffb5419e - name: github.com/aws/aws-sdk-go - version: a5f553bccbe022ff6ac8079509d9d91da1992e2e + version: b1a7b51924b90a6ecdbaeb17e96418740ff07a1e subpackages: - aws - aws/awserr @@ -42,7 +42,7 @@ imports: subpackages: - semver - name: github.com/docker/docker - version: 89658bed64c2a8fe05a978e5b87dbec409d57a0f + version: 05c7c311390911daebcf5d9519dee813fc02a887 subpackages: - pkg/jsonlog - pkg/term @@ -90,7 +90,7 @@ imports: - name: github.com/go-openapi/validate version: 8a82927c942c94794a5cd8b8b50ce2f48a955c0c - name: github.com/go-resty/resty - version: 52992d9da3c3eff6bfa873ca253ea1e03b352356 + version: f214013978f4ea5632ef88a5371b2028699a9d19 - name: github.com/jmespath/go-jmespath version: bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d - name: github.com/jmoiron/jsonq @@ -128,20 +128,23 @@ imports: - name: github.com/urfave/cli version: 0bdeddeeb0f650497d603c4ad7b20cfe685682f6 - name: golang.org/x/net - version: f315505cf3349909cdf013ea56690da34e96a451 + version: 054b33e6527139ad5b1ec2f6232c3b175bd9a30c subpackages: - context - context/ctxhttp - idna - publicsuffix - name: golang.org/x/sys - version: 6faef541c73732f438fb660a212750a9ba9f9362 + version: 739734461d1c916b6c72a63d7efda2b27edb369f subpackages: - unix + - windows - name: golang.org/x/text version: cfdf022e86b4ecfb646e1efbd7db175dd623a8fa subpackages: + - secure/bidirule - transform + - unicode/bidi - unicode/norm - width - name: gopkg.in/mgo.v2 diff --git a/fn/glide.yaml b/fn/glide.yaml index c7aaa08c5..1299f16c7 100644 --- a/fn/glide.yaml +++ b/fn/glide.yaml @@ -4,8 +4,8 @@ import: repo: https://github.com/sirupsen/logrus vcs: git version: v0.11.5 - subpackages: - - 
hooks/syslog +- package: github.com/sirupsen/logrus + version: v0.11.5 - package: github.com/aws/aws-sdk-go version: ^1.8.36 subpackages: diff --git a/fn/start.go b/fn/start.go index f53404c1e..a6d979b83 100644 --- a/fn/start.go +++ b/fn/start.go @@ -2,12 +2,12 @@ package main import ( "fmt" + "log" "os" "os/exec" "os/signal" "syscall" - "github.com/Sirupsen/logrus" "github.com/urfave/cli" ) @@ -34,7 +34,7 @@ func start(c *cli.Context) error { // OR dind: docker run --rm -it --name functions -v ${PWD}/data:/app/data --privileged -p 8080:8080 funcy/functions wd, err := os.Getwd() if err != nil { - logrus.WithError(err).Fatalln("Getwd failed") + log.Fatalln("Getwd failed:", err) } args := []string{"run", "--rm", "-i", "--name", "functions", @@ -51,7 +51,7 @@ func start(c *cli.Context) error { cmd.Stderr = os.Stderr err = cmd.Start() if err != nil { - logrus.WithError(err).Fatalln("starting command failed") + log.Fatalln("starting command failed:", err) } done := make(chan error, 1) @@ -64,16 +64,16 @@ func start(c *cli.Context) error { select { case <-sigC: - logrus.Infoln("interrupt caught, exiting") + log.Println("interrupt caught, exiting") err = cmd.Process.Kill() if err != nil { - logrus.WithError(err).Errorln("Could not kill process") + log.Println("error: could not kill process:", err) } case err := <-done: if err != nil { - logrus.WithError(err).Errorln("processed finished with error") + log.Println("error: process finished with error:", err) } else { - logrus.Println("process done gracefully without error") + log.Println("process finished gracefully without error") } } return nil diff --git a/fn/update.go b/fn/update.go index edfcf674f..6ab445e0d 100644 --- a/fn/update.go +++ b/fn/update.go @@ -1,12 +1,12 @@ package main import ( + "log" "os" "os/exec" "os/signal" "syscall" - "github.com/Sirupsen/logrus" "github.com/urfave/cli" ) @@ -27,7 +27,7 @@ func update(c *cli.Context) error { cmd.Stderr = os.Stderr err := cmd.Start() if err != nil { - 
logrus.WithError(err).Fatalln("starting command failed") + log.Fatalln("starting command failed:", err) } done := make(chan error, 1) @@ -39,16 +39,16 @@ func update(c *cli.Context) error { signal.Notify(sigC, os.Interrupt, syscall.SIGTERM) select { case <-sigC: - logrus.Infoln("interrupt caught, exiting") + log.Println("interrupt caught, exiting") err = cmd.Process.Kill() if err != nil { - logrus.WithError(err).Errorln("Could not kill process") + log.Println("error: could not kill process:", err) } case err := <-done: if err != nil { - logrus.WithError(err).Errorln("processed finished with error") + log.Println("error: process finished with error:", err) } else { - logrus.Println("process done gracefully without error") + log.Println("process finished gracefully without error") } } return nil diff --git a/fn/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md b/fn/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md index 28cbcc832..16afa62e6 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md +++ b/fn/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md @@ -1,3 +1,17 @@ +Release v1.10.8 (2017-07-06) +=== + +### Service Client Updates +* `service/ds`: Updates service API, documentation, and paginators + * You can now improve the resilience and performance of your Microsoft AD directory by deploying additional domain controllers. Added UpdateNumberofDomainControllers API that allows you to update the number of domain controllers you want for your directory, and DescribeDomainControllers API that allows you to describe the detailed information of each domain controller of your directory. Also added the 'DesiredNumberOfDomainControllers' field to the DescribeDirectories API output for Microsoft AD. +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/kinesis`: Updates service API and documentation + * You can now encrypt your data at rest within an Amazon Kinesis Stream using server-side encryption. 
Server-side encryption via AWS KMS makes it easy for customers to meet strict data management requirements by encrypting their data at rest within the Amazon Kinesis Streams, a fully managed real-time data processing service. +* `service/kms`: Updates service API and documentation + * This release of AWS Key Management Service introduces the ability to determine whether a key is AWS managed or customer managed. +* `service/ssm`: Updates service API and documentation + * Amazon EC2 Systems Manager now expands Patching support to Amazon Linux, Red Hat and Ubuntu in addition to the already supported Windows Server. + Release v1.10.7 (2017-07-05) === diff --git a/fn/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/fn/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go index 1313478f2..e25a460fb 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ b/fn/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -15,11 +15,11 @@ import ( // the MaxRetries method: // // type retryer struct { -// service.DefaultRetryer +// client.DefaultRetryer // } // // // This implementation always has 100 max retries -// func (d retryer) MaxRetries() uint { return 100 } +// func (d retryer) MaxRetries() int { return 100 } type DefaultRetryer struct { NumMaxRetries int } diff --git a/fn/vendor/github.com/aws/aws-sdk-go/aws/config.go b/fn/vendor/github.com/aws/aws-sdk-go/aws/config.go index d1f31f1c6..ae3a28696 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/fn/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -95,7 +95,7 @@ type Config struct { // recoverable failures. // // When nil or the value does not implement the request.Retryer interface, - // the request.DefaultRetryer will be used. + // the client.DefaultRetryer will be used. // // When both Retryer and MaxRetries are non-nil, the former is used and // the latter ignored. 
diff --git a/fn/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/fn/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index cc2937158..ba0c07b25 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/fn/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -314,9 +314,12 @@ var awsPartition = partition{ "athena": service{ Endpoints: endpoints{ - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "autoscaling": service{ @@ -466,10 +469,13 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -478,8 +484,10 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, @@ -515,11 +523,14 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1883,6 +1894,18 @@ var awscnPartition = partition{ }, }, }, + "ecr": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, 
"elasticache": service{ Endpoints: endpoints{ @@ -1996,6 +2019,12 @@ var awscnPartition = partition{ "cn-north-1": endpoint{}, }, }, + "ssm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "storagegateway": service{ Endpoints: endpoints{ @@ -2270,6 +2299,12 @@ var awsusgovPartition = partition{ }, }, }, + "ssm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "streams.dynamodb": service{ Defaults: endpoint{ CredentialScope: credentialScope{ diff --git a/fn/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/fn/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index 8d369c1b8..2c05dbdc2 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/fn/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -8,7 +8,7 @@ import ( ) // Retryer is an interface to control retry logic for a given service. -// The default implementation used by most services is the service.DefaultRetryer +// The default implementation used by most services is the client.DefaultRetryer // structure, which contains basic retry logic using exponential backoff. 
type Retryer interface { RetryRules(*Request) time.Duration diff --git a/fn/vendor/github.com/aws/aws-sdk-go/aws/version.go b/fn/vendor/github.com/aws/aws-sdk-go/aws/version.go index 4f8aff41a..74b51a1e1 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/fn/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.10.7" +const SDKVersion = "1.10.8" diff --git a/fn/vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/api-2.json b/fn/vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/api-2.json index 9eb831a32..2bde9f4b0 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/api-2.json +++ b/fn/vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/api-2.json @@ -302,6 +302,23 @@ {"shape":"ServiceException"} ] }, + "DescribeDomainControllers":{ + "name":"DescribeDomainControllers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDomainControllersRequest"}, + "output":{"shape":"DescribeDomainControllersResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"}, + {"shape":"UnsupportedOperationException"} + ] + }, "DescribeEventTopics":{ "name":"DescribeEventTopics", "http":{ @@ -582,6 +599,24 @@ {"shape":"ServiceException"} ] }, + "UpdateNumberOfDomainControllers":{ + "name":"UpdateNumberOfDomainControllers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateNumberOfDomainControllersRequest"}, + "output":{"shape":"UpdateNumberOfDomainControllersResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"DirectoryUnavailableException"}, + {"shape":"DomainControllerLimitExceededException"}, + {"shape":"InvalidParameterException"}, + {"shape":"UnsupportedOperationException"}, + 
{"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, "UpdateRadius":{ "name":"UpdateRadius", "http":{ @@ -1025,6 +1060,23 @@ "NextToken":{"shape":"NextToken"} } }, + "DescribeDomainControllersRequest":{ + "type":"structure", + "required":["DirectoryId"], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "DomainControllerIds":{"shape":"DomainControllerIds"}, + "NextToken":{"shape":"NextToken"}, + "Limit":{"shape":"Limit"} + } + }, + "DescribeDomainControllersResult":{ + "type":"structure", + "members":{ + "DomainControllers":{"shape":"DomainControllers"}, + "NextToken":{"shape":"NextToken"} + } + }, "DescribeEventTopicsRequest":{ "type":"structure", "members":{ @@ -1076,6 +1128,10 @@ "min":0, "pattern":"^([a-zA-Z0-9_])[\\\\a-zA-Z0-9_@#%*+=:?./!\\s-]*$" }, + "DesiredNumberOfDomainControllers":{ + "type":"integer", + "min":2 + }, "DirectoryConnectSettings":{ "type":"structure", "required":[ @@ -1122,7 +1178,8 @@ "RadiusSettings":{"shape":"RadiusSettings"}, "RadiusStatus":{"shape":"RadiusStatus"}, "StageReason":{"shape":"StageReason"}, - "SsoEnabled":{"shape":"SsoEnabled"} + "SsoEnabled":{"shape":"SsoEnabled"}, + "DesiredNumberOfDomainControllers":{"shape":"DesiredNumberOfDomainControllers"} } }, "DirectoryDescriptions":{ @@ -1256,6 +1313,54 @@ "type":"list", "member":{"shape":"IpAddr"} }, + "DomainController":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "DomainControllerId":{"shape":"DomainControllerId"}, + "DnsIpAddr":{"shape":"IpAddr"}, + "VpcId":{"shape":"VpcId"}, + "SubnetId":{"shape":"SubnetId"}, + "AvailabilityZone":{"shape":"AvailabilityZone"}, + "Status":{"shape":"DomainControllerStatus"}, + "StatusReason":{"shape":"DomainControllerStatusReason"}, + "LaunchTime":{"shape":"LaunchTime"}, + "StatusLastUpdatedDateTime":{"shape":"LastUpdatedDateTime"} + } + }, + "DomainControllerId":{ + "type":"string", + "pattern":"^dc-[0-9a-f]{10}$" + }, + "DomainControllerIds":{ + "type":"list", + 
"member":{"shape":"DomainControllerId"} + }, + "DomainControllerLimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "DomainControllerStatus":{ + "type":"string", + "enum":[ + "Creating", + "Active", + "Impaired", + "Restoring", + "Deleting", + "Deleted", + "Failed" + ] + }, + "DomainControllerStatusReason":{"type":"string"}, + "DomainControllers":{ + "type":"list", + "member":{"shape":"DomainController"} + }, "EnableRadiusRequest":{ "type":"structure", "required":[ @@ -1928,6 +2033,22 @@ "members":{ } }, + "UpdateNumberOfDomainControllersRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "DesiredNumber" + ], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "DesiredNumber":{"shape":"DesiredNumberOfDomainControllers"} + } + }, + "UpdateNumberOfDomainControllersResult":{ + "type":"structure", + "members":{ + } + }, "UpdateRadiusRequest":{ "type":"structure", "required":[ diff --git a/fn/vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/docs-2.json b/fn/vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/docs-2.json index 90cce4638..81e33240d 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/docs-2.json +++ b/fn/vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/docs-2.json @@ -20,6 +20,7 @@ "DeregisterEventTopic": "

Removes the specified directory as a publisher to the specified SNS topic.

", "DescribeConditionalForwarders": "

Obtains information about the conditional forwarders for this account.

If no input parameters are provided for RemoteDomainNames, this request describes all conditional forwarders for the specified directory ID.

", "DescribeDirectories": "

Obtains information about the directories that belong to this account.

You can retrieve information about specific directories by passing the directory identifiers in the DirectoryIds parameter. Otherwise, all directories that belong to the current account are returned.

This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the DescribeDirectoriesResult.NextToken member contains a token that you pass in the next call to DescribeDirectories to retrieve the next set of items.

You can also specify a maximum number of return results with the Limit parameter.

", + "DescribeDomainControllers": "

Provides information about any domain controllers in your directory.

", "DescribeEventTopics": "

Obtains information about which SNS topics receive status messages from the specified directory.

If no input parameters are provided, such as DirectoryId or TopicName, this request describes all of the associations in the account.

", "DescribeSnapshots": "

Obtains information about the directory snapshots that belong to this account.

This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the DescribeSnapshots.NextToken member contains a token that you pass in the next call to DescribeSnapshots to retrieve the next set of items.

You can also specify a maximum number of return results with the Limit parameter.

", "DescribeTrusts": "

Obtains information about the trust relationships for this account.

If no input parameters are provided, such as DirectoryId or TrustIds, this request describes all the trust relationships belonging to the account.

", @@ -38,6 +39,7 @@ "RestoreFromSnapshot": "

Restores a directory using an existing directory snapshot.

When you restore a directory from a snapshot, any changes made to the directory after the snapshot date are overwritten.

This action returns as soon as the restore operation is initiated. You can monitor the progress of the restore operation by calling the DescribeDirectories operation with the directory identifier. When the DirectoryDescription.Stage value changes to Active, the restore operation is complete.

", "StartSchemaExtension": "

Applies a schema extension to a Microsoft AD directory.

", "UpdateConditionalForwarder": "

Updates a conditional forwarder that has been set up for your AWS directory.

", + "UpdateNumberOfDomainControllers": "

Adds or removes domain controllers to or from the directory. Based on the difference between current value and new value (provided through this API call), domain controllers will be added or removed. It may take up to 45 minutes for any new domain controllers to become fully active once the requested number of domain controllers is updated. During this time, you cannot make another update request.

", "UpdateRadius": "

Updates the Remote Authentication Dial In User Service (RADIUS) server information for an AD Connector directory.

", "VerifyTrust": "

AWS Directory Service for Microsoft Active Directory allows you to configure and verify trust relationships.

This action verifies a trust relationship between your Microsoft AD in the AWS cloud and an external domain.

" }, @@ -115,7 +117,8 @@ "AvailabilityZone": { "base": null, "refs": { - "AvailabilityZones$member": null + "AvailabilityZones$member": null, + "DomainController$AvailabilityZone": "

The Availability Zone where the domain controller is located.

" } }, "AvailabilityZones": { @@ -375,6 +378,16 @@ "refs": { } }, + "DescribeDomainControllersRequest": { + "base": null, + "refs": { + } + }, + "DescribeDomainControllersResult": { + "base": null, + "refs": { + } + }, "DescribeEventTopicsRequest": { "base": "

Describes event topics.

", "refs": { @@ -418,6 +431,13 @@ "StartSchemaExtensionRequest$Description": "

A description of the schema extension.

" } }, + "DesiredNumberOfDomainControllers": { + "base": null, + "refs": { + "DirectoryDescription$DesiredNumberOfDomainControllers": "

The desired number of domain controllers in the directory if the directory is Microsoft AD.

", + "UpdateNumberOfDomainControllersRequest$DesiredNumber": "

The number of domain controllers desired in the directory.

" + } + }, "DirectoryConnectSettings": { "base": "

Contains information for the ConnectDirectory operation when an AD Connector directory is being created.

", "refs": { @@ -461,6 +481,7 @@ "DeleteDirectoryResult$DirectoryId": "

The directory identifier.

", "DeregisterEventTopicRequest$DirectoryId": "

The Directory ID to remove as a publisher. This directory will no longer send messages to the specified SNS topic.

", "DescribeConditionalForwardersRequest$DirectoryId": "

The directory ID for which to get the list of associated conditional forwarders.

", + "DescribeDomainControllersRequest$DirectoryId": "

Identifier of the directory for which to retrieve the domain controller information.

", "DescribeEventTopicsRequest$DirectoryId": "

The Directory ID for which to get the list of associated SNS topics. If this member is null, associations for all Directory IDs are returned.

", "DescribeSnapshotsRequest$DirectoryId": "

The identifier of the directory for which to retrieve snapshot information.

", "DescribeTrustsRequest$DirectoryId": "

The Directory ID of the AWS directory that is a part of the requested trust relationship.

", @@ -468,6 +489,7 @@ "DirectoryIds$member": null, "DisableRadiusRequest$DirectoryId": "

The identifier of the directory for which to disable MFA.

", "DisableSsoRequest$DirectoryId": "

The identifier of the directory for which to disable single-sign on.

", + "DomainController$DirectoryId": "

Identifier of the directory where the domain controller resides.

", "EnableRadiusRequest$DirectoryId": "

The identifier of the directory for which to enable MFA.

", "EnableSsoRequest$DirectoryId": "

The identifier of the directory for which to enable single-sign on.

", "EventTopic$DirectoryId": "

The Directory ID of an AWS Directory Service directory that will publish status messages to an SNS topic.

", @@ -482,6 +504,7 @@ "StartSchemaExtensionRequest$DirectoryId": "

The identifier of the directory for which the schema extension will be applied to.

", "Trust$DirectoryId": "

The Directory ID of the AWS directory involved in the trust relationship.

", "UpdateConditionalForwarderRequest$DirectoryId": "

The directory ID of the AWS directory for which to update the conditional forwarder.

", + "UpdateNumberOfDomainControllersRequest$DirectoryId": "

Identifier of the directory to which the domain controllers will be added or removed.

", "UpdateRadiusRequest$DirectoryId": "

The identifier of the directory for which to update the RADIUS server information.

" } }, @@ -549,7 +572,7 @@ "base": "

Contains VPC information for the CreateDirectory or CreateMicrosoftAD operation.

", "refs": { "CreateDirectoryRequest$VpcSettings": "

A DirectoryVpcSettings object that contains additional information for the operation.

", - "CreateMicrosoftADRequest$VpcSettings": null + "CreateMicrosoftADRequest$VpcSettings": "

Contains VPC information for the CreateDirectory or CreateMicrosoftAD operation.

" } }, "DirectoryVpcSettingsDescription": { @@ -589,6 +612,48 @@ "UpdateConditionalForwarderRequest$DnsIpAddrs": "

The updated IP addresses of the remote DNS server associated with the conditional forwarder.

" } }, + "DomainController": { + "base": "

Contains information about the domain controllers for a specified directory.

", + "refs": { + "DomainControllers$member": null + } + }, + "DomainControllerId": { + "base": null, + "refs": { + "DomainController$DomainControllerId": "

Identifies a specific domain controller in the directory.

", + "DomainControllerIds$member": null + } + }, + "DomainControllerIds": { + "base": null, + "refs": { + "DescribeDomainControllersRequest$DomainControllerIds": "

A list of identifiers for the domain controllers whose information will be provided.

" + } + }, + "DomainControllerLimitExceededException": { + "base": "

The maximum allowed number of domain controllers per directory was exceeded. The default limit per directory is 20 domain controllers.

", + "refs": { + } + }, + "DomainControllerStatus": { + "base": null, + "refs": { + "DomainController$Status": "

The status of the domain controller.

" + } + }, + "DomainControllerStatusReason": { + "base": null, + "refs": { + "DomainController$StatusReason": "

A description of the domain controller state.

" + } + }, + "DomainControllers": { + "base": null, + "refs": { + "DescribeDomainControllersResult$DomainControllers": "

List of the DomainController objects that were retrieved.

" + } + }, "EnableRadiusRequest": { "base": "

Contains the inputs for the EnableRadius operation.

", "refs": { @@ -644,6 +709,7 @@ "ClientException$Message": null, "DirectoryLimitExceededException$Message": null, "DirectoryUnavailableException$Message": null, + "DomainControllerLimitExceededException$Message": null, "EntityAlreadyExistsException$Message": null, "EntityDoesNotExistException$Message": null, "InsufficientPermissionsException$Message": null, @@ -695,6 +761,7 @@ "base": null, "refs": { "DnsIpAddrs$member": null, + "DomainController$DnsIpAddr": "

The IP address of the domain controller.

", "IpAddrs$member": null } }, @@ -749,13 +816,15 @@ "base": null, "refs": { "DirectoryDescription$StageLastUpdatedDateTime": "

The date and time that the stage was last updated.

", + "DomainController$StatusLastUpdatedDateTime": "

The date and time that the status was last updated.

", "Trust$LastUpdatedDateTime": "

The date and time that the trust relationship was last updated.

" } }, "LaunchTime": { "base": null, "refs": { - "DirectoryDescription$LaunchTime": "

Specifies when the directory was created.

" + "DirectoryDescription$LaunchTime": "

Specifies when the directory was created.

", + "DomainController$LaunchTime": "

Specifies when the domain controller was created.

" } }, "LdifContent": { @@ -768,6 +837,7 @@ "base": null, "refs": { "DescribeDirectoriesRequest$Limit": "

The maximum number of items to return. If this value is zero, the maximum number of items is specified by the limitations of the operation.

", + "DescribeDomainControllersRequest$Limit": "

The maximum number of items to return.

", "DescribeSnapshotsRequest$Limit": "

The maximum number of objects to return.

", "DescribeTrustsRequest$Limit": "

The maximum number of objects to return.

", "DirectoryLimits$CloudOnlyDirectoriesLimit": "

The maximum number of cloud directories allowed in the region.

", @@ -824,6 +894,8 @@ "refs": { "DescribeDirectoriesRequest$NextToken": "

The DescribeDirectoriesResult.NextToken value from a previous call to DescribeDirectories. Pass null if this is the first call.

", "DescribeDirectoriesResult$NextToken": "

If not null, more results are available. Pass this value for the NextToken parameter in a subsequent call to DescribeDirectories to retrieve the next set of items.

", + "DescribeDomainControllersRequest$NextToken": "

The DescribeDomainControllers.NextToken value from a previous call to DescribeDomainControllers. Pass null if this is the first call.

", + "DescribeDomainControllersResult$NextToken": "

If not null, more results are available. Pass this value for the NextToken parameter in a subsequent call to DescribeDomainControllers retrieve the next set of items.

", "DescribeSnapshotsRequest$NextToken": "

The DescribeSnapshotsResult.NextToken value from a previous call to DescribeSnapshots. Pass null if this is the first call.

", "DescribeSnapshotsResult$NextToken": "

If not null, more results are available. Pass this value in the NextToken member of a subsequent call to DescribeSnapshots.

", "DescribeTrustsRequest$NextToken": "

The DescribeTrustsResult.NextToken value from a previous call to DescribeTrusts. Pass null if this is the first call.

", @@ -960,6 +1032,7 @@ "ClientException$RequestId": null, "DirectoryLimitExceededException$RequestId": null, "DirectoryUnavailableException$RequestId": null, + "DomainControllerLimitExceededException$RequestId": null, "EntityAlreadyExistsException$RequestId": null, "EntityDoesNotExistException$RequestId": null, "InsufficientPermissionsException$RequestId": null, @@ -1154,6 +1227,7 @@ "SubnetId": { "base": null, "refs": { + "DomainController$SubnetId": "

Identifier of the subnet in the VPC that contains the domain controller.

", "SubnetIds$member": null } }, @@ -1307,6 +1381,16 @@ "refs": { } }, + "UpdateNumberOfDomainControllersRequest": { + "base": null, + "refs": { + } + }, + "UpdateNumberOfDomainControllersResult": { + "base": null, + "refs": { + } + }, "UpdateRadiusRequest": { "base": "

Contains the inputs for the UpdateRadius operation.

", "refs": { @@ -1354,7 +1438,8 @@ "DirectoryConnectSettings$VpcId": "

The identifier of the VPC in which the AD Connector is created.

", "DirectoryConnectSettingsDescription$VpcId": "

The identifier of the VPC that the AD Connector is in.

", "DirectoryVpcSettings$VpcId": "

The identifier of the VPC in which to create the directory.

", - "DirectoryVpcSettingsDescription$VpcId": "

The identifier of the VPC that the directory is in.

" + "DirectoryVpcSettingsDescription$VpcId": "

The identifier of the VPC that the directory is in.

", + "DomainController$VpcId": "

The identifier of the VPC that contains the domain controller.

" } } } diff --git a/fn/vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/paginators-1.json b/fn/vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/paginators-1.json new file mode 100644 index 000000000..da0b8729d --- /dev/null +++ b/fn/vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/paginators-1.json @@ -0,0 +1,9 @@ +{ + "pagination": { + "DescribeDomainControllers": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "Limit" + } + } +} diff --git a/fn/vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/api-2.json b/fn/vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/api-2.json index 31eaf9519..7548f0122 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/api-2.json +++ b/fn/vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/api-2.json @@ -131,7 +131,13 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArgumentException"}, {"shape":"ProvisionedThroughputExceededException"}, - {"shape":"ExpiredIteratorException"} + {"shape":"ExpiredIteratorException"}, + {"shape":"KMSDisabledException"}, + {"shape":"KMSInvalidStateException"}, + {"shape":"KMSAccessDeniedException"}, + {"shape":"KMSNotFoundException"}, + {"shape":"KMSOptInRequired"}, + {"shape":"KMSThrottlingException"} ] }, "GetShardIterator":{ @@ -212,7 +218,13 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArgumentException"}, - {"shape":"ProvisionedThroughputExceededException"} + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"KMSDisabledException"}, + {"shape":"KMSInvalidStateException"}, + {"shape":"KMSAccessDeniedException"}, + {"shape":"KMSNotFoundException"}, + {"shape":"KMSOptInRequired"}, + {"shape":"KMSThrottlingException"} ] }, "PutRecords":{ @@ -226,7 +238,13 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArgumentException"}, - {"shape":"ProvisionedThroughputExceededException"} + 
{"shape":"ProvisionedThroughputExceededException"}, + {"shape":"KMSDisabledException"}, + {"shape":"KMSInvalidStateException"}, + {"shape":"KMSAccessDeniedException"}, + {"shape":"KMSNotFoundException"}, + {"shape":"KMSOptInRequired"}, + {"shape":"KMSThrottlingException"} ] }, "RemoveTagsFromStream":{ @@ -257,6 +275,40 @@ {"shape":"LimitExceededException"} ] }, + "StartStreamEncryption":{ + "name":"StartStreamEncryption", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartStreamEncryptionInput"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"KMSDisabledException"}, + {"shape":"KMSInvalidStateException"}, + {"shape":"KMSAccessDeniedException"}, + {"shape":"KMSNotFoundException"}, + {"shape":"KMSOptInRequired"}, + {"shape":"KMSThrottlingException"} + ] + }, + "StopStreamEncryption":{ + "name":"StopStreamEncryption", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopStreamEncryptionInput"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"} + ] + }, "UpdateShardCount":{ "name":"UpdateShardCount", "http":{ @@ -379,6 +431,13 @@ "ShardLevelMetrics":{"shape":"MetricsNameList"} } }, + "EncryptionType":{ + "type":"string", + "enum":[ + "NONE", + "KMS" + ] + }, "EnhancedMetrics":{ "type":"structure", "members":{ @@ -482,6 +541,53 @@ }, "exception":true }, + "KMSAccessDeniedException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "KMSDisabledException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "KMSInvalidStateException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "KMSNotFoundException":{ + 
"type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "KMSOptInRequired":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "KMSThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "KeyId":{ + "type":"string", + "max":2048, + "min":1 + }, "LimitExceededException":{ "type":"structure", "members":{ @@ -613,7 +719,8 @@ ], "members":{ "ShardId":{"shape":"ShardId"}, - "SequenceNumber":{"shape":"SequenceNumber"} + "SequenceNumber":{"shape":"SequenceNumber"}, + "EncryptionType":{"shape":"EncryptionType"} } }, "PutRecordsInput":{ @@ -632,7 +739,8 @@ "required":["Records"], "members":{ "FailedRecordCount":{"shape":"PositiveIntegerObject"}, - "Records":{"shape":"PutRecordsResultEntryList"} + "Records":{"shape":"PutRecordsResultEntryList"}, + "EncryptionType":{"shape":"EncryptionType"} } }, "PutRecordsRequestEntry":{ @@ -679,7 +787,8 @@ "SequenceNumber":{"shape":"SequenceNumber"}, "ApproximateArrivalTimestamp":{"shape":"Timestamp"}, "Data":{"shape":"Data"}, - "PartitionKey":{"shape":"PartitionKey"} + "PartitionKey":{"shape":"PartitionKey"}, + "EncryptionType":{"shape":"EncryptionType"} } }, "RecordList":{ @@ -785,6 +894,32 @@ "NewStartingHashKey":{"shape":"HashKey"} } }, + "StartStreamEncryptionInput":{ + "type":"structure", + "required":[ + "StreamName", + "EncryptionType", + "KeyId" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "EncryptionType":{"shape":"EncryptionType"}, + "KeyId":{"shape":"KeyId"} + } + }, + "StopStreamEncryptionInput":{ + "type":"structure", + "required":[ + "StreamName", + "EncryptionType", + "KeyId" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "EncryptionType":{"shape":"EncryptionType"}, + "KeyId":{"shape":"KeyId"} + } + }, "StreamARN":{"type":"string"}, "StreamDescription":{ "type":"structure", @@ -806,7 +941,9 @@ 
"HasMoreShards":{"shape":"BooleanObject"}, "RetentionPeriodHours":{"shape":"PositiveIntegerObject"}, "StreamCreationTimestamp":{"shape":"Timestamp"}, - "EnhancedMonitoring":{"shape":"EnhancedMonitoringList"} + "EnhancedMonitoring":{"shape":"EnhancedMonitoringList"}, + "EncryptionType":{"shape":"EncryptionType"}, + "KeyId":{"shape":"KeyId"} } }, "StreamName":{ diff --git a/fn/vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/docs-2.json b/fn/vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/docs-2.json index c81ef01da..0c1e4eb60 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/docs-2.json +++ b/fn/vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/docs-2.json @@ -16,11 +16,13 @@ "ListStreams": "

Lists your Amazon Kinesis streams.

The number of streams may be too large to return from a single call to ListStreams. You can limit the number of returned streams using the Limit parameter. If you do not specify a value for the Limit parameter, Amazon Kinesis uses the default limit, which is currently 10.

You can detect if there are more streams available to list by using the HasMoreStreams flag from the returned output. If there are more streams available, you can request more streams by using the name of the last stream returned by the ListStreams request in the ExclusiveStartStreamName parameter in a subsequent request to ListStreams. The group of stream names returned by the subsequent request is then added to the list. You can continue this process until all the stream names have been collected in the list.

ListStreams has a limit of 5 transactions per second per account.

", "ListTagsForStream": "

Lists the tags for the specified Amazon Kinesis stream.

", "MergeShards": "

Merges two adjacent shards in an Amazon Kinesis stream and combines them into a single shard to reduce the stream's capacity to ingest and transport data. Two shards are considered adjacent if the union of the hash key ranges for the two shards form a contiguous set with no gaps. For example, if you have two shards, one with a hash key range of 276...381 and the other with a hash key range of 382...454, then you could merge these two shards into a single shard that would have a hash key range of 276...454. After the merge, the single child shard receives data for all hash key values covered by the two parent shards.

MergeShards is called when there is a need to reduce the overall capacity of a stream because of excess capacity that is not being used. You must specify the shard to be merged and the adjacent shard for a stream. For more information about merging shards, see Merge Two Shards in the Amazon Kinesis Streams Developer Guide.

If the stream is in the ACTIVE state, you can call MergeShards. If a stream is in the CREATING, UPDATING, or DELETING state, MergeShards returns a ResourceInUseException. If the specified stream does not exist, MergeShards returns a ResourceNotFoundException.

You can use DescribeStream to check the state of the stream, which is returned in StreamStatus.

MergeShards is an asynchronous operation. Upon receiving a MergeShards request, Amazon Kinesis immediately returns a response and sets the StreamStatus to UPDATING. After the operation is completed, Amazon Kinesis sets the StreamStatus to ACTIVE. Read and write operations continue to work while the stream is in the UPDATING state.

You use DescribeStream to determine the shard IDs that are specified in the MergeShards request.

If you try to operate on too many streams in parallel using CreateStream, DeleteStream, MergeShards or SplitShard, you will receive a LimitExceededException.

MergeShards has limit of 5 transactions per second per account.

", - "PutRecord": "

Writes a single data record into an Amazon Kinesis stream. Call PutRecord to send data into the stream for real-time ingestion and subsequent processing, one record at a time. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second.

You must specify the name of the stream that captures, stores, and transports the data; a partition key; and the data blob itself.

The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on.

The partition key is used by Amazon Kinesis to distribute data across shards. Amazon Kinesis segregates the data records that belong to a stream into multiple shards, using the partition key associated with each data record to determine which shard a given data record belongs to.

Partition keys are Unicode strings, with a maximum length limit of 256 characters for each key. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards using the hash key ranges of the shards. You can override hashing the partition key to determine the shard by explicitly specifying a hash value using the ExplicitHashKey parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide.

PutRecord returns the shard ID of where the data record was placed and the sequence number that was assigned to the data record.

Sequence numbers increase over time and are specific to a shard within a stream, not across all shards within a stream. To guarantee strictly increasing ordering, write serially to a shard and use the SequenceNumberForOrdering parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide.

If a PutRecord request cannot be processed because of insufficient provisioned throughput on the shard involved in the request, PutRecord throws ProvisionedThroughputExceededException.

Data records are accessible for only 24 hours from the time that they are added to a stream.

", - "PutRecords": "

Writes multiple data records into an Amazon Kinesis stream in a single call (also referred to as a PutRecords request). Use this operation to send data into the stream for data ingestion and processing.

Each PutRecords request can support up to 500 records. Each record in the request can be as large as 1 MB, up to a limit of 5 MB for the entire request, including partition keys. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second.

You must specify the name of the stream that captures, stores, and transports the data; and an array of request Records, with each record in the array requiring a partition key and data blob. The record size limit applies to the total size of the partition key and data blob.

The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on.

The partition key is used by Amazon Kinesis as input to a hash function that maps the partition key and associated data to a specific shard. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide.

Each record in the Records array may include an optional parameter, ExplicitHashKey, which overrides the partition key to shard mapping. This parameter allows a data producer to determine explicitly the shard where the record is stored. For more information, see Adding Multiple Records with PutRecords in the Amazon Kinesis Streams Developer Guide.

The PutRecords response includes an array of response Records. Each record in the response array directly correlates with a record in the request array using natural ordering, from the top to the bottom of the request and response. The response Records array always includes the same number of records as the request array.

The response Records array includes both successfully and unsuccessfully processed records. Amazon Kinesis attempts to process all records in each PutRecords request. A single record failure does not stop the processing of subsequent records.

A successfully-processed record includes ShardId and SequenceNumber values. The ShardId parameter identifies the shard in the stream where the record is stored. The SequenceNumber parameter is an identifier assigned to the put record, unique to all records in the stream.

An unsuccessfully-processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error and can be one of the following values: ProvisionedThroughputExceededException or InternalFailure. ErrorMessage provides more detailed information about the ProvisionedThroughputExceededException exception including the account ID, stream name, and shard ID of the record that was throttled. For more information about partially successful responses, see Adding Multiple Records with PutRecords in the Amazon Kinesis Streams Developer Guide.

By default, data records are accessible for only 24 hours from the time that they are added to an Amazon Kinesis stream. This retention period can be modified using the DecreaseStreamRetentionPeriod and IncreaseStreamRetentionPeriod operations.

", + "PutRecord": "

Writes a single data record into an Amazon Kinesis stream. Call PutRecord to send data into the stream for real-time ingestion and subsequent processing, one record at a time. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second.

You must specify the name of the stream that captures, stores, and transports the data; a partition key; and the data blob itself.

The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on.

The partition key is used by Amazon Kinesis to distribute data across shards. Amazon Kinesis segregates the data records that belong to a stream into multiple shards, using the partition key associated with each data record to determine which shard a given data record belongs to.

Partition keys are Unicode strings, with a maximum length limit of 256 characters for each key. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards using the hash key ranges of the shards. You can override hashing the partition key to determine the shard by explicitly specifying a hash value using the ExplicitHashKey parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide.

PutRecord returns the shard ID of where the data record was placed and the sequence number that was assigned to the data record.

Sequence numbers increase over time and are specific to a shard within a stream, not across all shards within a stream. To guarantee strictly increasing ordering, write serially to a shard and use the SequenceNumberForOrdering parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide.

If a PutRecord request cannot be processed because of insufficient provisioned throughput on the shard involved in the request, PutRecord throws ProvisionedThroughputExceededException.

By default, data records are accessible for 24 hours from the time that they are added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period.

", + "PutRecords": "

Writes multiple data records into an Amazon Kinesis stream in a single call (also referred to as a PutRecords request). Use this operation to send data into the stream for data ingestion and processing.

Each PutRecords request can support up to 500 records. Each record in the request can be as large as 1 MB, up to a limit of 5 MB for the entire request, including partition keys. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second.

You must specify the name of the stream that captures, stores, and transports the data; and an array of request Records, with each record in the array requiring a partition key and data blob. The record size limit applies to the total size of the partition key and data blob.

The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on.

The partition key is used by Amazon Kinesis as input to a hash function that maps the partition key and associated data to a specific shard. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide.

Each record in the Records array may include an optional parameter, ExplicitHashKey, which overrides the partition key to shard mapping. This parameter allows a data producer to determine explicitly the shard where the record is stored. For more information, see Adding Multiple Records with PutRecords in the Amazon Kinesis Streams Developer Guide.

The PutRecords response includes an array of response Records. Each record in the response array directly correlates with a record in the request array using natural ordering, from the top to the bottom of the request and response. The response Records array always includes the same number of records as the request array.

The response Records array includes both successfully and unsuccessfully processed records. Amazon Kinesis attempts to process all records in each PutRecords request. A single record failure does not stop the processing of subsequent records.

A successfully-processed record includes ShardId and SequenceNumber values. The ShardId parameter identifies the shard in the stream where the record is stored. The SequenceNumber parameter is an identifier assigned to the put record, unique to all records in the stream.

An unsuccessfully-processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error and can be one of the following values: ProvisionedThroughputExceededException or InternalFailure. ErrorMessage provides more detailed information about the ProvisionedThroughputExceededException exception including the account ID, stream name, and shard ID of the record that was throttled. For more information about partially successful responses, see Adding Multiple Records with PutRecords in the Amazon Kinesis Streams Developer Guide.

By default, data records are accessible for 24 hours from the time that they are added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period.

", "RemoveTagsFromStream": "

Removes tags from the specified Amazon Kinesis stream. Removed tags are deleted and cannot be recovered after this operation successfully completes.

If you specify a tag that does not exist, it is ignored.

", "SplitShard": "

Splits a shard into two new shards in the Amazon Kinesis stream to increase the stream's capacity to ingest and transport data. SplitShard is called when there is a need to increase the overall capacity of a stream because of an expected increase in the volume of data records being ingested.

You can also use SplitShard when a shard appears to be approaching its maximum utilization; for example, the producers sending data into the specific shard are suddenly sending more than previously anticipated. You can also call SplitShard to increase stream capacity, so that more Amazon Kinesis applications can simultaneously read data from the stream for real-time processing.

You must specify the shard to be split and the new hash key, which is the position in the shard where the shard gets split in two. In many cases, the new hash key might simply be the average of the beginning and ending hash key, but it can be any hash key value in the range being mapped into the shard. For more information about splitting shards, see Split a Shard in the Amazon Kinesis Streams Developer Guide.

You can use DescribeStream to determine the shard ID and hash key values for the ShardToSplit and NewStartingHashKey parameters that are specified in the SplitShard request.

SplitShard is an asynchronous operation. Upon receiving a SplitShard request, Amazon Kinesis immediately returns a response and sets the stream status to UPDATING. After the operation is completed, Amazon Kinesis sets the stream status to ACTIVE. Read and write operations continue to work while the stream is in the UPDATING state.

You can use DescribeStream to check the status of the stream, which is returned in StreamStatus. If the stream is in the ACTIVE state, you can call SplitShard. If a stream is in CREATING or UPDATING or DELETING states, DescribeStream returns a ResourceInUseException.

If the specified stream does not exist, DescribeStream returns a ResourceNotFoundException. If you try to create more shards than are authorized for your account, you receive a LimitExceededException.

For the default shard limit for an AWS account, see Streams Limits in the Amazon Kinesis Streams Developer Guide. If you need to increase this limit, contact AWS Support.

If you try to operate on too many streams simultaneously using CreateStream, DeleteStream, MergeShards, and/or SplitShard, you receive a LimitExceededException.

SplitShard has a limit of 5 transactions per second per account.

", - "UpdateShardCount": "

Updates the shard count of the specified stream to the specified number of shards.

Updating the shard count is an asynchronous operation. Upon receiving the request, Amazon Kinesis returns immediately and sets the status of the stream to UPDATING. After the update is complete, Amazon Kinesis sets the status of the stream back to ACTIVE. Depending on the size of the stream, the scaling action could take a few minutes to complete. You can continue to read and write data to your stream while its status is UPDATING.

To update the shard count, Amazon Kinesis performs splits and merges and individual shards. This can cause short-lived shards to be created, in addition to the final shards. We recommend that you double or halve the shard count, as this results in the fewest number of splits or merges.

This operation has a rate limit of twice per rolling 24 hour period. You cannot scale above double your current shard count, scale below half your current shard count, or exceed the shard limits for your account.

For the default limits for an AWS account, see Streams Limits in the Amazon Kinesis Streams Developer Guide. If you need to increase a limit, contact AWS Support.

" + "StartStreamEncryption": "

Enables or updates server-side encryption using an AWS KMS key for a specified stream.

Starting encryption is an asynchronous operation. Upon receiving the request, Amazon Kinesis returns immediately and sets the status of the stream to UPDATING. After the update is complete, Amazon Kinesis sets the status of the stream back to ACTIVE. Updating or applying encryption normally takes a few seconds to complete but it can take minutes. You can continue to read and write data to your stream while its status is UPDATING. Once the status of the stream is ACTIVE, records written to the stream will begin to be encrypted.

API Limits: You can successfully apply a new AWS KMS key for server-side encryption 25 times in a rolling 24 hour period.

Note: It can take up to 5 seconds after the stream is in an ACTIVE status before all records written to the stream are encrypted. After you’ve enabled encryption, you can verify encryption was applied by inspecting the API response from PutRecord or PutRecords.

", + "StopStreamEncryption": "

Disables server-side encryption for a specified stream.

Stopping encryption is an asynchronous operation. Upon receiving the request, Amazon Kinesis returns immediately and sets the status of the stream to UPDATING. After the update is complete, Amazon Kinesis sets the status of the stream back to ACTIVE. Stopping encryption normally takes a few seconds to complete but it can take minutes. You can continue to read and write data to your stream while its status is UPDATING. Once the status of the stream is ACTIVE, records written to the stream will no longer be encrypted by the Amazon Kinesis Streams service.

API Limits: You can successfully disable server-side encryption 25 times in a rolling 24 hour period.

Note: It can take up to 5 seconds after the stream is in an ACTIVE status before all records written to the stream are no longer subject to encryption. After you’ve disabled encryption, you can verify encryption was not applied by inspecting the API response from PutRecord or PutRecords.

", + "UpdateShardCount": "

Updates the shard count of the specified stream to the specified number of shards.

Updating the shard count is an asynchronous operation. Upon receiving the request, Amazon Kinesis returns immediately and sets the status of the stream to UPDATING. After the update is complete, Amazon Kinesis sets the status of the stream back to ACTIVE. Depending on the size of the stream, the scaling action could take a few minutes to complete. You can continue to read and write data to your stream while its status is UPDATING.

To update the shard count, Amazon Kinesis performs splits or merges on individual shards. This can cause short-lived shards to be created, in addition to the final shards. We recommend that you double or halve the shard count, as this results in the fewest number of splits or merges.

This operation has the following limits, which are per region per account unless otherwise noted:

For the default limits for an AWS account, see Streams Limits in the Amazon Kinesis Streams Developer Guide. If you need to increase a limit, contact AWS Support.

" }, "shapes": { "AddTagsToStreamInput": { @@ -95,6 +97,17 @@ "refs": { } }, + "EncryptionType": { + "base": null, + "refs": { + "PutRecordOutput$EncryptionType": "

The encryption type to use on the record. This parameter can be one of the following values:

", + "PutRecordsOutput$EncryptionType": "

The encryption type used on the records. This parameter can be one of the following values:

", + "Record$EncryptionType": "

The encryption type used on the record. This parameter can be one of the following values:

", + "StartStreamEncryptionInput$EncryptionType": "

The encryption type to use. This parameter can be one of the following values:

", + "StopStreamEncryptionInput$EncryptionType": "

The encryption type. This parameter can be one of the following values:

", + "StreamDescription$EncryptionType": "

The server-side encryption type used on the stream. This parameter can be one of the following values:

" + } + }, "EnhancedMetrics": { "base": "

Represents enhanced metrics types.

", "refs": { @@ -123,6 +136,12 @@ "refs": { "ExpiredIteratorException$message": "

A message that provides information about the error.

", "InvalidArgumentException$message": "

A message that provides information about the error.

", + "KMSAccessDeniedException$message": "

A message that provides information about the error.

", + "KMSDisabledException$message": "

A message that provides information about the error.

", + "KMSInvalidStateException$message": "

A message that provides information about the error.

", + "KMSNotFoundException$message": "

A message that provides information about the error.

", + "KMSOptInRequired$message": "

A message that provides information about the error.

", + "KMSThrottlingException$message": "

A message that provides information about the error.

", "LimitExceededException$message": "

A message that provides information about the error.

", "ProvisionedThroughputExceededException$message": "

A message that provides information about the error.

", "PutRecordsResultEntry$ErrorMessage": "

The error message for an individual record result. An ErrorCode value of ProvisionedThroughputExceededException has an error message that includes the account ID, stream name, and shard ID. An ErrorCode value of InternalFailure has the error message \"Internal Service Failure\".

", @@ -187,6 +206,44 @@ "refs": { } }, + "KMSAccessDeniedException": { + "base": "

The ciphertext references a key that doesn't exist or that you don't have access to.

", + "refs": { + } + }, + "KMSDisabledException": { + "base": "

The request was rejected because the specified CMK isn't enabled.

", + "refs": { + } + }, + "KMSInvalidStateException": { + "base": "

The request was rejected because the state of the specified resource isn't valid for this request. For more information, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

", + "refs": { + } + }, + "KMSNotFoundException": { + "base": "

The request was rejected because the specified entity or resource couldn't be found.

", + "refs": { + } + }, + "KMSOptInRequired": { + "base": "

The AWS access key ID needs a subscription for the service.

", + "refs": { + } + }, + "KMSThrottlingException": { + "base": "

The request was denied due to request throttling. For more information about throttling, see Limits in the AWS Key Management Service Developer Guide.

", + "refs": { + } + }, + "KeyId": { + "base": null, + "refs": { + "StartStreamEncryptionInput$KeyId": "

The GUID for the customer-managed KMS key to use for encryption. You can also use a Kinesis-owned master key by specifying the alias aws/kinesis.

", + "StopStreamEncryptionInput$KeyId": "

The GUID for the customer-managed key that was used for encryption.

", + "StreamDescription$KeyId": "

The GUID for the customer-managed KMS key used for encryption on the stream.

" + } + }, "LimitExceededException": { "base": "

The requested resource exceeds the maximum number allowed, or the number of concurrent stream requests exceeds the maximum number allowed (5).

", "refs": { @@ -361,7 +418,7 @@ "PutRecordInput$SequenceNumberForOrdering": "

Guarantees strictly increasing sequence numbers, for puts from the same client and to the same partition key. Usage: set the SequenceNumberForOrdering of record n to the sequence number of record n-1 (as returned in the result when putting record n-1). If this parameter is not set, records will be coarsely ordered based on arrival time.

", "PutRecordOutput$SequenceNumber": "

The sequence number identifier that was assigned to the put data record. The sequence number for the record is unique across all records in the stream. A sequence number is the identifier associated with every record put into the stream.

", "PutRecordsResultEntry$SequenceNumber": "

The sequence number for an individual record result.

", - "Record$SequenceNumber": "

The unique identifier of the record in the stream.

", + "Record$SequenceNumber": "

The unique identifier of the record within its shard.

", "SequenceNumberRange$StartingSequenceNumber": "

The starting sequence number for the range.

", "SequenceNumberRange$EndingSequenceNumber": "

The ending sequence number for the range. Shards that are in the OPEN state have an ending sequence number of null.

" } @@ -425,6 +482,16 @@ "refs": { } }, + "StartStreamEncryptionInput": { + "base": null, + "refs": { + } + }, + "StopStreamEncryptionInput": { + "base": null, + "refs": { + } + }, "StreamARN": { "base": null, "refs": { @@ -457,6 +524,8 @@ "PutRecordsInput$StreamName": "

The stream name associated with the request.

", "RemoveTagsFromStreamInput$StreamName": "

The name of the stream.

", "SplitShardInput$StreamName": "

The name of the stream for the shard split.

", + "StartStreamEncryptionInput$StreamName": "

The name of the stream for which to start encrypting records.

", + "StopStreamEncryptionInput$StreamName": "

The name of the stream on which to stop encrypting records.

", "StreamDescription$StreamName": "

The name of the stream being described.

", "StreamNameList$member": null, "UpdateShardCountInput$StreamName": "

The name of the stream.

", diff --git a/fn/vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/api-2.json b/fn/vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/api-2.json index 5e3f38f0d..4790acd94 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/api-2.json +++ b/fn/vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/api-2.json @@ -1122,6 +1122,13 @@ "KeyArn":{"shape":"ArnType"} } }, + "KeyManagerType":{ + "type":"string", + "enum":[ + "AWS", + "CUSTOMER" + ] + }, "KeyMetadata":{ "type":"structure", "required":["KeyId"], @@ -1137,7 +1144,8 @@ "DeletionDate":{"shape":"DateType"}, "ValidTo":{"shape":"DateType"}, "Origin":{"shape":"OriginType"}, - "ExpirationModel":{"shape":"ExpirationModelType"} + "ExpirationModel":{"shape":"ExpirationModelType"}, + "KeyManager":{"shape":"KeyManagerType"} } }, "KeyState":{ diff --git a/fn/vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/docs-2.json b/fn/vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/docs-2.json index e1fd02215..568a22203 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/docs-2.json +++ b/fn/vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/docs-2.json @@ -559,6 +559,12 @@ "KeyList$member": null } }, + "KeyManagerType": { + "base": null, + "refs": { + "KeyMetadata$KeyManager": "

The CMK's manager. CMKs are either customer-managed or AWS-managed. For more information about the difference, see Customer Master Keys in the AWS Key Management Service Developer Guide.

" + } + }, "KeyMetadata": { "base": "

Contains metadata about a customer master key (CMK).

This data type is used as a response element for the CreateKey and DescribeKey operations.

", "refs": { diff --git a/fn/vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/api-2.json b/fn/vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/api-2.json index 6348a099b..572264823 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/api-2.json +++ b/fn/vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/api-2.json @@ -425,6 +425,7 @@ "errors":[ {"shape":"InvalidResourceId"}, {"shape":"DoesNotExistException"}, + {"shape":"UnsupportedOperatingSystem"}, {"shape":"InternalServerError"} ] }, @@ -679,7 +680,8 @@ "input":{"shape":"GetDeployablePatchSnapshotForInstanceRequest"}, "output":{"shape":"GetDeployablePatchSnapshotForInstanceResult"}, "errors":[ - {"shape":"InternalServerError"} + {"shape":"InternalServerError"}, + {"shape":"UnsupportedOperatingSystem"} ] }, "GetDocument":{ @@ -1965,10 +1967,12 @@ "type":"structure", "required":["Name"], "members":{ + "OperatingSystem":{"shape":"OperatingSystem"}, "Name":{"shape":"BaselineName"}, "GlobalFilters":{"shape":"PatchFilterGroup"}, "ApprovalRules":{"shape":"PatchRuleGroup"}, "ApprovedPatches":{"shape":"PatchIdList"}, + "ApprovedPatchesComplianceLevel":{"shape":"PatchComplianceLevel"}, "RejectedPatches":{"shape":"PatchIdList"}, "Description":{"shape":"BaselineDescription"}, "ClientToken":{ @@ -2619,6 +2623,7 @@ "shape":"PatchBaselineMaxResults", "box":true }, + "Filters":{"shape":"PatchOrchestratorFilterList"}, "NextToken":{"shape":"NextToken"} } }, @@ -2950,12 +2955,14 @@ "GetDefaultPatchBaselineRequest":{ "type":"structure", "members":{ + "OperatingSystem":{"shape":"OperatingSystem"} } }, "GetDefaultPatchBaselineResult":{ "type":"structure", "members":{ - "BaselineId":{"shape":"BaselineId"} + "BaselineId":{"shape":"BaselineId"}, + "OperatingSystem":{"shape":"OperatingSystem"} } }, "GetDeployablePatchSnapshotForInstanceRequest":{ @@ -2974,7 +2981,8 @@ "members":{ "InstanceId":{"shape":"InstanceId"}, "SnapshotId":{"shape":"SnapshotId"}, - 
"SnapshotDownloadUrl":{"shape":"SnapshotDownloadUrl"} + "SnapshotDownloadUrl":{"shape":"SnapshotDownloadUrl"}, + "Product":{"shape":"Product"} } }, "GetDocumentRequest":{ @@ -3199,14 +3207,16 @@ "type":"structure", "required":["PatchGroup"], "members":{ - "PatchGroup":{"shape":"PatchGroup"} + "PatchGroup":{"shape":"PatchGroup"}, + "OperatingSystem":{"shape":"OperatingSystem"} } }, "GetPatchBaselineForPatchGroupResult":{ "type":"structure", "members":{ "BaselineId":{"shape":"BaselineId"}, - "PatchGroup":{"shape":"PatchGroup"} + "PatchGroup":{"shape":"PatchGroup"}, + "OperatingSystem":{"shape":"OperatingSystem"} } }, "GetPatchBaselineRequest":{ @@ -3221,9 +3231,11 @@ "members":{ "BaselineId":{"shape":"BaselineId"}, "Name":{"shape":"BaselineName"}, + "OperatingSystem":{"shape":"OperatingSystem"}, "GlobalFilters":{"shape":"PatchFilterGroup"}, "ApprovalRules":{"shape":"PatchRuleGroup"}, "ApprovedPatches":{"shape":"PatchIdList"}, + "ApprovedPatchesComplianceLevel":{"shape":"PatchComplianceLevel"}, "RejectedPatches":{"shape":"PatchIdList"}, "PatchGroups":{"shape":"PatchGroupList"}, "CreatedDate":{"shape":"DateTime"}, @@ -3461,8 +3473,8 @@ "MissingCount":{"shape":"PatchMissingCount"}, "FailedCount":{"shape":"PatchFailedCount"}, "NotApplicableCount":{"shape":"PatchNotApplicableCount"}, - "OperationStartTime":{"shape":"PatchOperationStartTime"}, - "OperationEndTime":{"shape":"PatchOperationEndTime"}, + "OperationStartTime":{"shape":"DateTime"}, + "OperationEndTime":{"shape":"DateTime"}, "Operation":{"shape":"PatchOperationType"} } }, @@ -4510,6 +4522,15 @@ "Invocation" ] }, + "OperatingSystem":{ + "type":"string", + "enum":[ + "WINDOWS", + "AMAZON_LINUX", + "UBUNTU", + "REDHAT_ENTERPRISE_LINUX" + ] + }, "OwnerInformation":{ "type":"string", "max":128, @@ -4727,6 +4748,7 @@ "members":{ "BaselineId":{"shape":"BaselineId"}, "BaselineName":{"shape":"BaselineName"}, + "OperatingSystem":{"shape":"OperatingSystem"}, "BaselineDescription":{"shape":"BaselineDescription"}, 
"DefaultBaseline":{"shape":"DefaultBaseline"} } @@ -4757,7 +4779,7 @@ "Classification":{"shape":"PatchClassification"}, "Severity":{"shape":"PatchSeverity"}, "State":{"shape":"PatchComplianceDataState"}, - "InstalledTime":{"shape":"PatchInstalledTime"} + "InstalledTime":{"shape":"DateTime"} } }, "PatchComplianceDataList":{ @@ -4774,6 +4796,17 @@ "FAILED" ] }, + "PatchComplianceLevel":{ + "type":"string", + "enum":[ + "CRITICAL", + "HIGH", + "MEDIUM", + "LOW", + "INFORMATIONAL", + "UNSPECIFIED" + ] + }, "PatchComplianceMaxResults":{ "type":"integer", "max":100, @@ -4815,7 +4848,10 @@ "PRODUCT", "CLASSIFICATION", "MSRC_SEVERITY", - "PATCH_ID" + "PATCH_ID", + "SECTION", + "PRIORITY", + "SEVERITY" ] }, "PatchFilterList":{ @@ -4858,7 +4894,8 @@ }, "PatchId":{ "type":"string", - "pattern":"(^KB[0-9]{1,7}$)|(^MS[0-9]{2}\\-[0-9]{3}$)" + "max":100, + "min":1 }, "PatchIdList":{ "type":"list", @@ -4868,7 +4905,6 @@ }, "PatchInstalledCount":{"type":"integer"}, "PatchInstalledOtherCount":{"type":"integer"}, - "PatchInstalledTime":{"type":"timestamp"}, "PatchKbNumber":{"type":"string"}, "PatchLanguage":{"type":"string"}, "PatchList":{ @@ -4879,8 +4915,6 @@ "PatchMsrcNumber":{"type":"string"}, "PatchMsrcSeverity":{"type":"string"}, "PatchNotApplicableCount":{"type":"integer"}, - "PatchOperationEndTime":{"type":"timestamp"}, - "PatchOperationStartTime":{"type":"timestamp"}, "PatchOperationType":{ "type":"string", "enum":[ @@ -4925,6 +4959,7 @@ ], "members":{ "PatchFilterGroup":{"shape":"PatchFilterGroup"}, + "ComplianceLevel":{"shape":"PatchComplianceLevel"}, "ApproveAfterDays":{ "shape":"ApproveAfterDays", "box":true @@ -4949,6 +4984,7 @@ "type":"structure", "members":{ "DeploymentStatus":{"shape":"PatchDeploymentStatus"}, + "ComplianceLevel":{"shape":"PatchComplianceLevel"}, "ApprovalDate":{"shape":"DateTime"} } }, @@ -4976,6 +5012,7 @@ "locationName":"PlatformType" } }, + "Product":{"type":"string"}, "PutInventoryRequest":{ "type":"structure", "required":[ @@ -5514,6 +5551,13 
@@ }, "exception":true }, + "UnsupportedOperatingSystem":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "exception":true + }, "UnsupportedParameterType":{ "type":"structure", "members":{ @@ -5663,6 +5707,7 @@ "GlobalFilters":{"shape":"PatchFilterGroup"}, "ApprovalRules":{"shape":"PatchRuleGroup"}, "ApprovedPatches":{"shape":"PatchIdList"}, + "ApprovedPatchesComplianceLevel":{"shape":"PatchComplianceLevel"}, "RejectedPatches":{"shape":"PatchIdList"}, "Description":{"shape":"BaselineDescription"} } @@ -5672,9 +5717,11 @@ "members":{ "BaselineId":{"shape":"BaselineId"}, "Name":{"shape":"BaselineName"}, + "OperatingSystem":{"shape":"OperatingSystem"}, "GlobalFilters":{"shape":"PatchFilterGroup"}, "ApprovalRules":{"shape":"PatchRuleGroup"}, "ApprovedPatches":{"shape":"PatchIdList"}, + "ApprovedPatchesComplianceLevel":{"shape":"PatchComplianceLevel"}, "RejectedPatches":{"shape":"PatchIdList"}, "CreatedDate":{"shape":"DateTime"}, "ModifiedDate":{"shape":"DateTime"}, diff --git a/fn/vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/docs-2.json b/fn/vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/docs-2.json index b3dee5c7e..789a773ff 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/docs-2.json +++ b/fn/vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/docs-2.json @@ -10,7 +10,7 @@ "CreateDocument": "

Creates a Systems Manager document.

After you create a document, you can use CreateAssociation to associate it with one or more running instances.

", "CreateMaintenanceWindow": "

Creates a new Maintenance Window.

", "CreatePatchBaseline": "

Creates a patch baseline.

", - "CreateResourceDataSync": "

Creates a resource data sync configuration to a single bucket in Amazon S3. This is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data to the Amazon S3 bucket. To check the status of the sync, use the ListResourceDataSync operation.

By default, data is not encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy. To view an example of a restrictive Amazon S3 bucket policy for Resource Data Sync, see Creating a Resource Data Sync.

", + "CreateResourceDataSync": "

Creates a resource data sync configuration to a single bucket in Amazon S3. This is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data to the Amazon S3 bucket. To check the status of the sync, use the ListResourceDataSync operation.

By default, data is not encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy. To view an example of a restrictive Amazon S3 bucket policy for Resource Data Sync, see Configuring Resource Data Sync for Inventory.

", "DeleteActivation": "

Deletes an activation. You are not required to delete an activation. If you delete an activation, you can no longer use it to register additional managed instances. Deleting an activation does not de-register managed instances. You must manually de-register managed instances.

", "DeleteAssociation": "

Disassociates the specified Systems Manager document from the specified instance.

When you disassociate a document from an instance, it does not change the configuration of the instance. To change the configuration state of an instance after you disassociate a document, you must create a new document with the desired configuration and associate it with the instance.

", "DeleteDocument": "

Deletes the Systems Manager document and all instance associations to the document.

Before you delete the document, we recommend that you use DeleteAssociation to disassociate all instances that are associated with the document.

", @@ -30,7 +30,7 @@ "DescribeDocument": "

Describes the specified SSM document.

", "DescribeDocumentPermission": "

Describes the permissions for a Systems Manager document. If you created the document, you are the owner. If a document is shared, it can either be shared privately (by specifying a user's AWS account ID) or publicly (All).

", "DescribeEffectiveInstanceAssociations": "

All associations for the instance(s).

", - "DescribeEffectivePatchesForPatchBaseline": "

Retrieves the current effective patches (the patch and the approval state) for the specified patch baseline.

", + "DescribeEffectivePatchesForPatchBaseline": "

Retrieves the current effective patches (the patch and the approval state) for the specified patch baseline. Note that this API applies only to Windows patch baselines.

", "DescribeInstanceAssociationsStatus": "

The status of the associations for the instance(s).

", "DescribeInstanceInformation": "

Describes one or more of your instances. You can use this to get information about instances like the operating system platform, the SSM Agent version (Linux), status etc. If you specify one or more instance IDs, it returns information for those instances. If you do not specify instance IDs, it returns information for all your instances. If you specify an instance ID that is not valid or an instance that you do not own, you receive an error.

", "DescribeInstancePatchStates": "

Retrieves the high-level patch state of one or more instances.

", @@ -48,8 +48,8 @@ "DescribePatchGroups": "

Lists all patch groups that have been registered with patch baselines.

", "GetAutomationExecution": "

Get detailed information about a particular Automation execution.

", "GetCommandInvocation": "

Returns detailed information about command execution for an invocation or plugin.

", - "GetDefaultPatchBaseline": "

Retrieves the default patch baseline.

", - "GetDeployablePatchSnapshotForInstance": "

Retrieves the current snapshot for the patch baseline the instance uses. This API is primarily used by the AWS-ApplyPatchBaseline Systems Manager document.

", + "GetDefaultPatchBaseline": "

Retrieves the default patch baseline. Note that Systems Manager supports creating multiple default patch baselines. For example, you can create a default patch baseline for each operating system.

", + "GetDeployablePatchSnapshotForInstance": "

Retrieves the current snapshot for the patch baseline the instance uses. This API is primarily used by the AWS-RunPatchBaseline Systems Manager document.

", "GetDocument": "

Gets the contents of the specified SSM document.

", "GetInventory": "

Query inventory information.

", "GetInventorySchema": "

Return a list of inventory type names for the account, or return a list of attribute names for a specific Inventory item type.

", @@ -761,6 +761,8 @@ "InstanceInformation$RegistrationDate": "

The date the server or VM was registered with AWS as a managed instance.

", "InstanceInformation$LastAssociationExecutionDate": "

The date the association was last executed.

", "InstanceInformation$LastSuccessfulAssociationExecutionDate": "

The last date the association was successfully run.

", + "InstancePatchState$OperationStartTime": "

The time the most recent patching operation was started on the instance.

", + "InstancePatchState$OperationEndTime": "

The time the most recent patching operation completed on the instance.

", "MaintenanceWindowExecution$StartTime": "

The time the execution started.

", "MaintenanceWindowExecution$EndTime": "

The time the execution finished.

", "MaintenanceWindowExecutionTaskIdentity$StartTime": "

The time the task execution started.

", @@ -770,6 +772,7 @@ "ParameterHistory$LastModifiedDate": "

Date the parameter was last changed or updated.

", "ParameterMetadata$LastModifiedDate": "

Date the parameter was last changed or updated.

", "Patch$ReleaseDate": "

The date the patch was released.

", + "PatchComplianceData$InstalledTime": "

The date/time the patch was installed on the instance. Note that not all operating systems provide this level of information.

", "PatchStatus$ApprovalDate": "

The date the patch was approved (or will be approved if the status is PENDING_APPROVAL).

", "StepExecution$ExecutionStartTime": "

If a step has begun execution, this contains the time the step started. If the step is in Pending status, this field is not populated.

", "StepExecution$ExecutionEndTime": "

If a step has finished execution, this contains the time the execution ended. If the step has not yet concluded, this field is not populated.

", @@ -780,7 +783,7 @@ "DefaultBaseline": { "base": null, "refs": { - "PatchBaselineIdentity$DefaultBaseline": "

Whether this is the default baseline.

" + "PatchBaselineIdentity$DefaultBaseline": "

Whether this is the default baseline. Note that Systems Manager supports creating multiple default patch baselines. For example, you can create a default patch baseline for each operating system.

" } }, "DefaultInstanceName": { @@ -2937,6 +2940,19 @@ "NotificationConfig$NotificationType": "

Command: Receive notification when the status of a command changes. Invocation: For commands sent to multiple instances, receive notification on a per-instance basis when the status of a command changes.

" } }, + "OperatingSystem": { + "base": null, + "refs": { + "CreatePatchBaselineRequest$OperatingSystem": "

Defines the operating system the patch baseline applies to. Supported operating systems include WINDOWS, AMAZON_LINUX, UBUNTU and REDHAT_ENTERPRISE_LINUX. The Default value is WINDOWS.

", + "GetDefaultPatchBaselineRequest$OperatingSystem": "

Returns the default patch baseline for the specified operating system.

", + "GetDefaultPatchBaselineResult$OperatingSystem": "

The operating system for the returned patch baseline.

", + "GetPatchBaselineForPatchGroupRequest$OperatingSystem": "

Returns he operating system rule specified for patch groups using the patch baseline.

", + "GetPatchBaselineForPatchGroupResult$OperatingSystem": "

The operating system rule specified for patch groups using the patch baseline.

", + "GetPatchBaselineResult$OperatingSystem": "

Returns the operating system specified for the patch baseline.

", + "PatchBaselineIdentity$OperatingSystem": "

Defines the operating system the patch baseline applies to. Supported operating systems include WINDOWS, AMAZON_LINUX, UBUNTU and REDHAT_ENTERPRISE_LINUX. The Default value is WINDOWS.

", + "UpdatePatchBaselineResult$OperatingSystem": "

The operating system rule used by the updated patch baseline.

" + } + }, "OwnerInformation": { "base": null, "refs": { @@ -3211,6 +3227,17 @@ "PatchComplianceData$State": "

The state of the patch on the instance (INSTALLED, INSTALLED_OTHER, MISSING, NOT_APPLICABLE or FAILED).

" } }, + "PatchComplianceLevel": { + "base": null, + "refs": { + "CreatePatchBaselineRequest$ApprovedPatchesComplianceLevel": "

Defines the compliance level for approved patches. This means that if an approved patch is reported as missing, this is the severity of the compliance violation. Valid compliance severity levels include the following: CRITICAL, HIGH, MEDIUM, LOW, INFORMATIONAL, UNSPECIFIED. The default value is UNSPECIFIED.

", + "GetPatchBaselineResult$ApprovedPatchesComplianceLevel": "

Returns the specified compliance severity level for approved patches in the patch baseline.

", + "PatchRule$ComplianceLevel": "

A compliance severity level for all approved patches in a patch baseline. Valid compliance severity levels include the following: Unspecified, Critical, High, Medium, Low, and Informational.

", + "PatchStatus$ComplianceLevel": "

The compliance severity level for a patch.

", + "UpdatePatchBaselineRequest$ApprovedPatchesComplianceLevel": "

Assigns a new compliance severity level to an existing patch baseline.

", + "UpdatePatchBaselineResult$ApprovedPatchesComplianceLevel": "

The compliance severity level assigned to the patch baseline after the update completed.

" + } + }, "PatchComplianceMaxResults": { "base": null, "refs": { @@ -3349,17 +3376,11 @@ "InstancePatchState$InstalledOtherCount": "

The number of patches not specified in the patch baseline that are installed on the instance.

" } }, - "PatchInstalledTime": { - "base": null, - "refs": { - "PatchComplianceData$InstalledTime": "

The date/time the patch was installed on the instance.

" - } - }, "PatchKbNumber": { "base": null, "refs": { "Patch$KbNumber": "

The Microsoft Knowledge Base ID of the patch.

", - "PatchComplianceData$KBId": "

The Microsoft Knowledge Base ID of the patch.

" + "PatchComplianceData$KBId": "

The operating system-specific ID of the patch.

" } }, "PatchLanguage": { @@ -3398,18 +3419,6 @@ "InstancePatchState$NotApplicableCount": "

The number of patches from the patch baseline that aren't applicable for the instance and hence aren't installed on the instance.

" } }, - "PatchOperationEndTime": { - "base": null, - "refs": { - "InstancePatchState$OperationEndTime": "

The time the most recent patching operation completed on the instance.

" - } - }, - "PatchOperationStartTime": { - "base": null, - "refs": { - "InstancePatchState$OperationStartTime": "

The time the most recent patching operation was started on the instance.

" - } - }, "PatchOperationType": { "base": null, "refs": { @@ -3433,7 +3442,8 @@ "refs": { "DescribeAvailablePatchesRequest$Filters": "

Filters used to scope down the returned patches.

", "DescribeInstancePatchesRequest$Filters": "

Each entry in the array is a structure containing:

Key (string, between 1 and 128 characters)

Values (array of strings, each string between 1 and 256 characters)

", - "DescribePatchBaselinesRequest$Filters": "

Each element in the array is a structure containing:

Key: (string, \"NAME_PREFIX\" or \"OWNER\")

Value: (array of strings, exactly 1 entry, between 1 and 255 characters)

" + "DescribePatchBaselinesRequest$Filters": "

Each element in the array is a structure containing:

Key: (string, \"NAME_PREFIX\" or \"OWNER\")

Value: (array of strings, exactly 1 entry, between 1 and 255 characters)

", + "DescribePatchGroupsRequest$Filters": "

One or more filters. Use a filter to return a more specific list of results.

" } }, "PatchOrchestratorFilterValue": { @@ -3526,6 +3536,12 @@ "DocumentIdentifier$PlatformTypes": "

The operating system platform.

" } }, + "Product": { + "base": null, + "refs": { + "GetDeployablePatchSnapshotForInstanceResult$Product": "

Returns the specific operating system (for example Windows Server 2012 or Amazon Linux 2015.09) on the instance for the specified patch snapshot.

" + } + }, "PutInventoryRequest": { "base": null, "refs": { @@ -3986,6 +4002,7 @@ "TooManyUpdates$Message": null, "TotalSizeLimitExceededException$Message": null, "UnsupportedInventorySchemaVersionException$Message": null, + "UnsupportedOperatingSystem$Message": null, "UnsupportedParameterType$message": null, "UnsupportedPlatformType$Message": null } @@ -4102,6 +4119,11 @@ "refs": { } }, + "UnsupportedOperatingSystem": { + "base": "

The operating systems you specified is not supported, or the operation is not supported for the operating system. Valid operating systems include: Windows, AmazonLinux, RedhatEnterpriseLinux, and Ubuntu.

", + "refs": { + } + }, "UnsupportedParameterType": { "base": "

The parameter type is not supported.

", "refs": { diff --git a/fn/vendor/github.com/aws/aws-sdk-go/models/endpoints/endpoints.json b/fn/vendor/github.com/aws/aws-sdk-go/models/endpoints/endpoints.json index 656464642..204f5b1dd 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/models/endpoints/endpoints.json +++ b/fn/vendor/github.com/aws/aws-sdk-go/models/endpoints/endpoints.json @@ -131,6 +131,9 @@ }, "athena" : { "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "eu-west-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -274,10 +277,13 @@ "ap-northeast-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -285,8 +291,10 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, @@ -320,11 +328,14 @@ "ap-northeast-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -1604,6 +1615,16 @@ "cn-north-1" : { } } }, + "ecr" : { + "endpoints" : { + "cn-north-1" : { } + } + }, + "ecs" : { + "endpoints" : { + "cn-north-1" : { } + } + }, "elasticache" : { "endpoints" : { "cn-north-1" : { } @@ -1709,6 +1730,11 @@ "cn-north-1" : { } } }, + "ssm" : { + "endpoints" : { + "cn-north-1" : { } + } + }, "storagegateway" : { "endpoints" : { "cn-north-1" : { } @@ -1929,6 +1955,11 @@ } } }, + "ssm" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, "streams.dynamodb" : { "defaults" : { "credentialScope" : { @@ -1952,4 +1983,4 @@ } } ], "version" : 3 -} \ No newline at end of file +} diff --git 
a/fn/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go b/fn/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go index cfc8bb057..549f25724 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go +++ b/fn/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go @@ -1748,6 +1748,157 @@ func (c *DirectoryService) DescribeDirectoriesWithContext(ctx aws.Context, input return out, req.Send() } +const opDescribeDomainControllers = "DescribeDomainControllers" + +// DescribeDomainControllersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDomainControllers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DescribeDomainControllers for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDomainControllers method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDomainControllersRequest method. 
+// req, resp := client.DescribeDomainControllersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ds-2015-04-16/DescribeDomainControllers +func (c *DirectoryService) DescribeDomainControllersRequest(input *DescribeDomainControllersInput) (req *request.Request, output *DescribeDomainControllersOutput) { + op := &request.Operation{ + Name: opDescribeDomainControllers, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDomainControllersInput{} + } + + output = &DescribeDomainControllersOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDomainControllers API operation for AWS Directory Service. +// +// Provides information about any domain controllers in your directory. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Directory Service's +// API operation DescribeDomainControllers for usage and error information. +// +// Returned Error Codes: +// * ErrCodeEntityDoesNotExistException "EntityDoesNotExistException" +// The specified entity could not be found. +// +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The NextToken value is not valid. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// One or more parameters are not valid. +// +// * ErrCodeClientException "ClientException" +// A client exception has occurred. +// +// * ErrCodeServiceException "ServiceException" +// An exception has occurred in AWS Directory Service. 
+// +// * ErrCodeUnsupportedOperationException "UnsupportedOperationException" +// The operation is not supported. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ds-2015-04-16/DescribeDomainControllers +func (c *DirectoryService) DescribeDomainControllers(input *DescribeDomainControllersInput) (*DescribeDomainControllersOutput, error) { + req, out := c.DescribeDomainControllersRequest(input) + return out, req.Send() +} + +// DescribeDomainControllersWithContext is the same as DescribeDomainControllers with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDomainControllers for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DirectoryService) DescribeDomainControllersWithContext(ctx aws.Context, input *DescribeDomainControllersInput, opts ...request.Option) (*DescribeDomainControllersOutput, error) { + req, out := c.DescribeDomainControllersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeDomainControllersPages iterates over the pages of a DescribeDomainControllers operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDomainControllers method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDomainControllers operation. 
+// pageNum := 0 +// err := client.DescribeDomainControllersPages(params, +// func(page *DescribeDomainControllersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DirectoryService) DescribeDomainControllersPages(input *DescribeDomainControllersInput, fn func(*DescribeDomainControllersOutput, bool) bool) error { + return c.DescribeDomainControllersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeDomainControllersPagesWithContext same as DescribeDomainControllersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DirectoryService) DescribeDomainControllersPagesWithContext(ctx aws.Context, input *DescribeDomainControllersInput, fn func(*DescribeDomainControllersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeDomainControllersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeDomainControllersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeDomainControllersOutput), !p.HasNextPage()) + } + return p.Err() +} + const opDescribeEventTopics = "DescribeEventTopics" // DescribeEventTopicsRequest generates a "aws/request.Request" representing the @@ -3411,6 +3562,110 @@ func (c *DirectoryService) UpdateConditionalForwarderWithContext(ctx aws.Context return out, req.Send() } +const opUpdateNumberOfDomainControllers = "UpdateNumberOfDomainControllers" + +// UpdateNumberOfDomainControllersRequest generates a "aws/request.Request" representing the +// client's request for the UpdateNumberOfDomainControllers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See UpdateNumberOfDomainControllers for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateNumberOfDomainControllers method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateNumberOfDomainControllersRequest method. 
+// req, resp := client.UpdateNumberOfDomainControllersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ds-2015-04-16/UpdateNumberOfDomainControllers +func (c *DirectoryService) UpdateNumberOfDomainControllersRequest(input *UpdateNumberOfDomainControllersInput) (req *request.Request, output *UpdateNumberOfDomainControllersOutput) { + op := &request.Operation{ + Name: opUpdateNumberOfDomainControllers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateNumberOfDomainControllersInput{} + } + + output = &UpdateNumberOfDomainControllersOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateNumberOfDomainControllers API operation for AWS Directory Service. +// +// Adds or removes domain controllers to or from the directory. Based on the +// difference between current value and new value (provided through this API +// call), domain controllers will be added or removed. It may take up to 45 +// minutes for any new domain controllers to become fully active once the requested +// number of domain controllers is updated. During this time, you cannot make +// another update request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Directory Service's +// API operation UpdateNumberOfDomainControllers for usage and error information. +// +// Returned Error Codes: +// * ErrCodeEntityDoesNotExistException "EntityDoesNotExistException" +// The specified entity could not be found. +// +// * ErrCodeDirectoryUnavailableException "DirectoryUnavailableException" +// The specified directory is unavailable or could not be found. 
+// +// * ErrCodeDomainControllerLimitExceededException "DomainControllerLimitExceededException" +// The maximum allowed number of domain controllers per directory was exceeded. +// The default limit per directory is 20 domain controllers. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// One or more parameters are not valid. +// +// * ErrCodeUnsupportedOperationException "UnsupportedOperationException" +// The operation is not supported. +// +// * ErrCodeClientException "ClientException" +// A client exception has occurred. +// +// * ErrCodeServiceException "ServiceException" +// An exception has occurred in AWS Directory Service. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ds-2015-04-16/UpdateNumberOfDomainControllers +func (c *DirectoryService) UpdateNumberOfDomainControllers(input *UpdateNumberOfDomainControllersInput) (*UpdateNumberOfDomainControllersOutput, error) { + req, out := c.UpdateNumberOfDomainControllersRequest(input) + return out, req.Send() +} + +// UpdateNumberOfDomainControllersWithContext is the same as UpdateNumberOfDomainControllers with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateNumberOfDomainControllers for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DirectoryService) UpdateNumberOfDomainControllersWithContext(ctx aws.Context, input *UpdateNumberOfDomainControllersInput, opts ...request.Option) (*UpdateNumberOfDomainControllersOutput, error) { + req, out := c.UpdateNumberOfDomainControllersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opUpdateRadius = "UpdateRadius" // UpdateRadiusRequest generates a "aws/request.Request" representing the @@ -5430,6 +5685,109 @@ func (s *DescribeDirectoriesOutput) SetNextToken(v string) *DescribeDirectoriesO return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ds-2015-04-16/DescribeDomainControllersRequest +type DescribeDomainControllersInput struct { + _ struct{} `type:"structure"` + + // Identifier of the directory for which to retrieve the domain controller information. + // + // DirectoryId is a required field + DirectoryId *string `type:"string" required:"true"` + + // A list of identifiers for the domain controllers whose information will be + // provided. + DomainControllerIds []*string `type:"list"` + + // The maximum number of items to return. + Limit *int64 `type:"integer"` + + // The DescribeDomainControllers.NextToken value from a previous call to DescribeDomainControllers. + // Pass null if this is the first call. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDomainControllersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDomainControllersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDomainControllersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDomainControllersInput"} + if s.DirectoryId == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDirectoryId sets the DirectoryId field's value. +func (s *DescribeDomainControllersInput) SetDirectoryId(v string) *DescribeDomainControllersInput { + s.DirectoryId = &v + return s +} + +// SetDomainControllerIds sets the DomainControllerIds field's value. 
+func (s *DescribeDomainControllersInput) SetDomainControllerIds(v []*string) *DescribeDomainControllersInput { + s.DomainControllerIds = v + return s +} + +// SetLimit sets the Limit field's value. +func (s *DescribeDomainControllersInput) SetLimit(v int64) *DescribeDomainControllersInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeDomainControllersInput) SetNextToken(v string) *DescribeDomainControllersInput { + s.NextToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ds-2015-04-16/DescribeDomainControllersResult +type DescribeDomainControllersOutput struct { + _ struct{} `type:"structure"` + + // List of the DomainController objects that were retrieved. + DomainControllers []*DomainController `type:"list"` + + // If not null, more results are available. Pass this value for the NextToken + // parameter in a subsequent call to DescribeDomainControllers retrieve the + // next set of items. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDomainControllersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDomainControllersOutput) GoString() string { + return s.String() +} + +// SetDomainControllers sets the DomainControllers field's value. +func (s *DescribeDomainControllersOutput) SetDomainControllers(v []*DomainController) *DescribeDomainControllersOutput { + s.DomainControllers = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeDomainControllersOutput) SetNextToken(v string) *DescribeDomainControllersOutput { + s.NextToken = &v + return s +} + // Describes event topics. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ds-2015-04-16/DescribeEventTopicsRequest type DescribeEventTopicsInput struct { @@ -5876,6 +6234,10 @@ type DirectoryDescription struct { // The textual description for the directory. 
Description *string `type:"string"` + // The desired number of domain controllers in the directory if the directory + // is Microsoft AD. + DesiredNumberOfDomainControllers *int64 `min:"2" type:"integer"` + // The directory identifier. DirectoryId *string `type:"string"` @@ -5961,6 +6323,12 @@ func (s *DirectoryDescription) SetDescription(v string) *DirectoryDescription { return s } +// SetDesiredNumberOfDomainControllers sets the DesiredNumberOfDomainControllers field's value. +func (s *DirectoryDescription) SetDesiredNumberOfDomainControllers(v int64) *DirectoryDescription { + s.DesiredNumberOfDomainControllers = &v + return s +} + // SetDirectoryId sets the DirectoryId field's value. func (s *DirectoryDescription) SetDirectoryId(v string) *DirectoryDescription { s.DirectoryId = &v @@ -6399,6 +6767,112 @@ func (s DisableSsoOutput) GoString() string { return s.String() } +// Contains information about the domain controllers for a specified directory. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ds-2015-04-16/DomainController +type DomainController struct { + _ struct{} `type:"structure"` + + // The Availability Zone where the domain controller is located. + AvailabilityZone *string `type:"string"` + + // Identifier of the directory where the domain controller resides. + DirectoryId *string `type:"string"` + + // The IP address of the domain controller. + DnsIpAddr *string `type:"string"` + + // Identifies a specific domain controller in the directory. + DomainControllerId *string `type:"string"` + + // Specifies when the domain controller was created. + LaunchTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The status of the domain controller. + Status *string `type:"string" enum:"DomainControllerStatus"` + + // The date and time that the status was last updated. + StatusLastUpdatedDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A description of the domain controller state. 
+ StatusReason *string `type:"string"` + + // Identifier of the subnet in the VPC that contains the domain controller. + SubnetId *string `type:"string"` + + // The identifier of the VPC that contains the domain controller. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s DomainController) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainController) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *DomainController) SetAvailabilityZone(v string) *DomainController { + s.AvailabilityZone = &v + return s +} + +// SetDirectoryId sets the DirectoryId field's value. +func (s *DomainController) SetDirectoryId(v string) *DomainController { + s.DirectoryId = &v + return s +} + +// SetDnsIpAddr sets the DnsIpAddr field's value. +func (s *DomainController) SetDnsIpAddr(v string) *DomainController { + s.DnsIpAddr = &v + return s +} + +// SetDomainControllerId sets the DomainControllerId field's value. +func (s *DomainController) SetDomainControllerId(v string) *DomainController { + s.DomainControllerId = &v + return s +} + +// SetLaunchTime sets the LaunchTime field's value. +func (s *DomainController) SetLaunchTime(v time.Time) *DomainController { + s.LaunchTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DomainController) SetStatus(v string) *DomainController { + s.Status = &v + return s +} + +// SetStatusLastUpdatedDateTime sets the StatusLastUpdatedDateTime field's value. +func (s *DomainController) SetStatusLastUpdatedDateTime(v time.Time) *DomainController { + s.StatusLastUpdatedDateTime = &v + return s +} + +// SetStatusReason sets the StatusReason field's value. +func (s *DomainController) SetStatusReason(v string) *DomainController { + s.StatusReason = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. 
+func (s *DomainController) SetSubnetId(v string) *DomainController { + s.SubnetId = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *DomainController) SetVpcId(v string) *DomainController { + s.VpcId = &v + return s +} + // Contains the inputs for the EnableRadius operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ds-2015-04-16/EnableRadiusRequest type EnableRadiusInput struct { @@ -8061,6 +8535,78 @@ func (s UpdateConditionalForwarderOutput) GoString() string { return s.String() } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ds-2015-04-16/UpdateNumberOfDomainControllersRequest +type UpdateNumberOfDomainControllersInput struct { + _ struct{} `type:"structure"` + + // The number of domain controllers desired in the directory. + // + // DesiredNumber is a required field + DesiredNumber *int64 `min:"2" type:"integer" required:"true"` + + // Identifier of the directory to which the domain controllers will be added + // or removed. + // + // DirectoryId is a required field + DirectoryId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateNumberOfDomainControllersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateNumberOfDomainControllersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateNumberOfDomainControllersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateNumberOfDomainControllersInput"} + if s.DesiredNumber == nil { + invalidParams.Add(request.NewErrParamRequired("DesiredNumber")) + } + if s.DesiredNumber != nil && *s.DesiredNumber < 2 { + invalidParams.Add(request.NewErrParamMinValue("DesiredNumber", 2)) + } + if s.DirectoryId == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDesiredNumber sets the DesiredNumber field's value. +func (s *UpdateNumberOfDomainControllersInput) SetDesiredNumber(v int64) *UpdateNumberOfDomainControllersInput { + s.DesiredNumber = &v + return s +} + +// SetDirectoryId sets the DirectoryId field's value. +func (s *UpdateNumberOfDomainControllersInput) SetDirectoryId(v string) *UpdateNumberOfDomainControllersInput { + s.DirectoryId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ds-2015-04-16/UpdateNumberOfDomainControllersResult +type UpdateNumberOfDomainControllersOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateNumberOfDomainControllersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateNumberOfDomainControllersOutput) GoString() string { + return s.String() +} + // Contains the inputs for the UpdateRadius operation. 
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ds-2015-04-16/UpdateRadiusRequest type UpdateRadiusInput struct { @@ -8256,6 +8802,29 @@ const ( DirectoryTypeMicrosoftAd = "MicrosoftAD" ) +const ( + // DomainControllerStatusCreating is a DomainControllerStatus enum value + DomainControllerStatusCreating = "Creating" + + // DomainControllerStatusActive is a DomainControllerStatus enum value + DomainControllerStatusActive = "Active" + + // DomainControllerStatusImpaired is a DomainControllerStatus enum value + DomainControllerStatusImpaired = "Impaired" + + // DomainControllerStatusRestoring is a DomainControllerStatus enum value + DomainControllerStatusRestoring = "Restoring" + + // DomainControllerStatusDeleting is a DomainControllerStatus enum value + DomainControllerStatusDeleting = "Deleting" + + // DomainControllerStatusDeleted is a DomainControllerStatus enum value + DomainControllerStatusDeleted = "Deleted" + + // DomainControllerStatusFailed is a DomainControllerStatus enum value + DomainControllerStatusFailed = "Failed" +) + const ( // IpRouteStatusMsgAdding is a IpRouteStatusMsg enum value IpRouteStatusMsgAdding = "Adding" diff --git a/fn/vendor/github.com/aws/aws-sdk-go/service/directoryservice/directoryserviceiface/interface.go b/fn/vendor/github.com/aws/aws-sdk-go/service/directoryservice/directoryserviceiface/interface.go index a51f66988..4c65c7011 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/service/directoryservice/directoryserviceiface/interface.go +++ b/fn/vendor/github.com/aws/aws-sdk-go/service/directoryservice/directoryserviceiface/interface.go @@ -132,6 +132,13 @@ type DirectoryServiceAPI interface { DescribeDirectoriesWithContext(aws.Context, *directoryservice.DescribeDirectoriesInput, ...request.Option) (*directoryservice.DescribeDirectoriesOutput, error) DescribeDirectoriesRequest(*directoryservice.DescribeDirectoriesInput) (*request.Request, *directoryservice.DescribeDirectoriesOutput) + 
DescribeDomainControllers(*directoryservice.DescribeDomainControllersInput) (*directoryservice.DescribeDomainControllersOutput, error) + DescribeDomainControllersWithContext(aws.Context, *directoryservice.DescribeDomainControllersInput, ...request.Option) (*directoryservice.DescribeDomainControllersOutput, error) + DescribeDomainControllersRequest(*directoryservice.DescribeDomainControllersInput) (*request.Request, *directoryservice.DescribeDomainControllersOutput) + + DescribeDomainControllersPages(*directoryservice.DescribeDomainControllersInput, func(*directoryservice.DescribeDomainControllersOutput, bool) bool) error + DescribeDomainControllersPagesWithContext(aws.Context, *directoryservice.DescribeDomainControllersInput, func(*directoryservice.DescribeDomainControllersOutput, bool) bool, ...request.Option) error + DescribeEventTopics(*directoryservice.DescribeEventTopicsInput) (*directoryservice.DescribeEventTopicsOutput, error) DescribeEventTopicsWithContext(aws.Context, *directoryservice.DescribeEventTopicsInput, ...request.Option) (*directoryservice.DescribeEventTopicsOutput, error) DescribeEventTopicsRequest(*directoryservice.DescribeEventTopicsInput) (*request.Request, *directoryservice.DescribeEventTopicsOutput) @@ -204,6 +211,10 @@ type DirectoryServiceAPI interface { UpdateConditionalForwarderWithContext(aws.Context, *directoryservice.UpdateConditionalForwarderInput, ...request.Option) (*directoryservice.UpdateConditionalForwarderOutput, error) UpdateConditionalForwarderRequest(*directoryservice.UpdateConditionalForwarderInput) (*request.Request, *directoryservice.UpdateConditionalForwarderOutput) + UpdateNumberOfDomainControllers(*directoryservice.UpdateNumberOfDomainControllersInput) (*directoryservice.UpdateNumberOfDomainControllersOutput, error) + UpdateNumberOfDomainControllersWithContext(aws.Context, *directoryservice.UpdateNumberOfDomainControllersInput, ...request.Option) (*directoryservice.UpdateNumberOfDomainControllersOutput, error) + 
UpdateNumberOfDomainControllersRequest(*directoryservice.UpdateNumberOfDomainControllersInput) (*request.Request, *directoryservice.UpdateNumberOfDomainControllersOutput) + UpdateRadius(*directoryservice.UpdateRadiusInput) (*directoryservice.UpdateRadiusOutput, error) UpdateRadiusWithContext(aws.Context, *directoryservice.UpdateRadiusInput, ...request.Option) (*directoryservice.UpdateRadiusOutput, error) UpdateRadiusRequest(*directoryservice.UpdateRadiusInput) (*request.Request, *directoryservice.UpdateRadiusOutput) diff --git a/fn/vendor/github.com/aws/aws-sdk-go/service/directoryservice/errors.go b/fn/vendor/github.com/aws/aws-sdk-go/service/directoryservice/errors.go index 64ba6fd0a..f7560305b 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/service/directoryservice/errors.go +++ b/fn/vendor/github.com/aws/aws-sdk-go/service/directoryservice/errors.go @@ -30,6 +30,13 @@ const ( // The specified directory is unavailable or could not be found. ErrCodeDirectoryUnavailableException = "DirectoryUnavailableException" + // ErrCodeDomainControllerLimitExceededException for service response error code + // "DomainControllerLimitExceededException". + // + // The maximum allowed number of domain controllers per directory was exceeded. + // The default limit per directory is 20 domain controllers. + ErrCodeDomainControllerLimitExceededException = "DomainControllerLimitExceededException" + // ErrCodeEntityAlreadyExistsException for service response error code // "EntityAlreadyExistsException". 
// diff --git a/fn/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go b/fn/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go index 0cf5f919d..d47b28fd2 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go +++ b/fn/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go @@ -1000,6 +1000,31 @@ func (c *Kinesis) GetRecordsRequest(input *GetRecordsInput) (req *request.Reques // * ErrCodeExpiredIteratorException "ExpiredIteratorException" // The provided iterator exceeds the maximum age allowed. // +// * ErrCodeKMSDisabledException "KMSDisabledException" +// The request was rejected because the specified CMK isn't enabled. +// +// * ErrCodeKMSInvalidStateException "KMSInvalidStateException" +// The request was rejected because the state of the specified resource isn't +// valid for this request. For more information, see How Key State Affects Use +// of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the AWS Key Management Service Developer Guide. +// +// * ErrCodeKMSAccessDeniedException "KMSAccessDeniedException" +// The ciphertext references a key that doesn't exist or that you don't have +// access to. +// +// * ErrCodeKMSNotFoundException "KMSNotFoundException" +// The request was rejected because the specified entity or resource couldn't +// be found. +// +// * ErrCodeKMSOptInRequired "KMSOptInRequired" +// The AWS access key ID needs a subscription for the service. +// +// * ErrCodeKMSThrottlingException "KMSThrottlingException" +// The request was denied due to request throttling. For more information about +// throttling, see Limits (http://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) +// in the AWS Key Management Service Developer Guide. 
+// // Please also see https://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/GetRecords func (c *Kinesis) GetRecords(input *GetRecordsInput) (*GetRecordsOutput, error) { req, out := c.GetRecordsRequest(input) @@ -1705,8 +1730,9 @@ func (c *Kinesis) PutRecordRequest(input *PutRecordInput) (req *request.Request, // If a PutRecord request cannot be processed because of insufficient provisioned // throughput on the shard involved in the request, PutRecord throws ProvisionedThroughputExceededException. // -// Data records are accessible for only 24 hours from the time that they are -// added to a stream. +// By default, data records are accessible for 24 hours from the time that they +// are added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod +// to modify this retention period. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1732,6 +1758,31 @@ func (c *Kinesis) PutRecordRequest(input *PutRecordInput) (req *request.Request, // Backoff in AWS (http://docs.aws.amazon.com/general/latest/gr/api-retries.html) // in the AWS General Reference. // +// * ErrCodeKMSDisabledException "KMSDisabledException" +// The request was rejected because the specified CMK isn't enabled. +// +// * ErrCodeKMSInvalidStateException "KMSInvalidStateException" +// The request was rejected because the state of the specified resource isn't +// valid for this request. For more information, see How Key State Affects Use +// of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the AWS Key Management Service Developer Guide. +// +// * ErrCodeKMSAccessDeniedException "KMSAccessDeniedException" +// The ciphertext references a key that doesn't exist or that you don't have +// access to. 
+// +// * ErrCodeKMSNotFoundException "KMSNotFoundException" +// The request was rejected because the specified entity or resource couldn't +// be found. +// +// * ErrCodeKMSOptInRequired "KMSOptInRequired" +// The AWS access key ID needs a subscription for the service. +// +// * ErrCodeKMSThrottlingException "KMSThrottlingException" +// The request was denied due to request throttling. For more information about +// throttling, see Limits (http://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) +// in the AWS Key Management Service Developer Guide. +// // Please also see https://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/PutRecord func (c *Kinesis) PutRecord(input *PutRecordInput) (*PutRecordOutput, error) { req, out := c.PutRecordRequest(input) @@ -1855,10 +1906,9 @@ func (c *Kinesis) PutRecordsRequest(input *PutRecordsInput) (req *request.Reques // see Adding Multiple Records with PutRecords (http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-add-data-to-stream.html#kinesis-using-sdk-java-putrecords) // in the Amazon Kinesis Streams Developer Guide. // -// By default, data records are accessible for only 24 hours from the time that -// they are added to an Amazon Kinesis stream. This retention period can be -// modified using the DecreaseStreamRetentionPeriod and IncreaseStreamRetentionPeriod -// operations. +// By default, data records are accessible for 24 hours from the time that they +// are added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod +// to modify this retention period. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1884,6 +1934,31 @@ func (c *Kinesis) PutRecordsRequest(input *PutRecordsInput) (req *request.Reques // Backoff in AWS (http://docs.aws.amazon.com/general/latest/gr/api-retries.html) // in the AWS General Reference. 
// +// * ErrCodeKMSDisabledException "KMSDisabledException" +// The request was rejected because the specified CMK isn't enabled. +// +// * ErrCodeKMSInvalidStateException "KMSInvalidStateException" +// The request was rejected because the state of the specified resource isn't +// valid for this request. For more information, see How Key State Affects Use +// of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the AWS Key Management Service Developer Guide. +// +// * ErrCodeKMSAccessDeniedException "KMSAccessDeniedException" +// The ciphertext references a key that doesn't exist or that you don't have +// access to. +// +// * ErrCodeKMSNotFoundException "KMSNotFoundException" +// The request was rejected because the specified entity or resource couldn't +// be found. +// +// * ErrCodeKMSOptInRequired "KMSOptInRequired" +// The AWS access key ID needs a subscription for the service. +// +// * ErrCodeKMSThrottlingException "KMSThrottlingException" +// The request was denied due to request throttling. For more information about +// throttling, see Limits (http://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) +// in the AWS Key Management Service Developer Guide. +// // Please also see https://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/PutRecords func (c *Kinesis) PutRecords(input *PutRecordsInput) (*PutRecordsOutput, error) { req, out := c.PutRecordsRequest(input) @@ -2143,6 +2218,255 @@ func (c *Kinesis) SplitShardWithContext(ctx aws.Context, input *SplitShardInput, return out, req.Send() } +const opStartStreamEncryption = "StartStreamEncryption" + +// StartStreamEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the StartStreamEncryption operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See StartStreamEncryption for usage and error information. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StartStreamEncryption method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StartStreamEncryptionRequest method. +// req, resp := client.StartStreamEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/StartStreamEncryption +func (c *Kinesis) StartStreamEncryptionRequest(input *StartStreamEncryptionInput) (req *request.Request, output *StartStreamEncryptionOutput) { + op := &request.Operation{ + Name: opStartStreamEncryption, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartStreamEncryptionInput{} + } + + output = &StartStreamEncryptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// StartStreamEncryption API operation for Amazon Kinesis. +// +// Enables or updates server-side encryption using an AWS KMS key for a specified +// stream. +// +// Starting encryption is an asynchronous operation. Upon receiving the request, +// Amazon Kinesis returns immediately and sets the status of the stream to UPDATING. +// After the update is complete, Amazon Kinesis sets the status of the stream +// back to ACTIVE. Updating or applying encryption normally takes a few seconds +// to complete but it can take minutes. You can continue to read and write data +// to your stream while its status is UPDATING. 
Once the status of the stream +// is ACTIVE, records written to the stream will begin to be encrypted. +// +// API Limits: You can successfully apply a new AWS KMS key for server-side +// encryption 25 times in a rolling 24 hour period. +// +// Note: It can take up to 5 seconds after the stream is in an ACTIVE status +// before all records written to the stream are encrypted. After you’ve enabled +// encryption, you can verify encryption was applied by inspecting the API response +// from PutRecord or PutRecords. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Kinesis's +// API operation StartStreamEncryption for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidArgumentException "InvalidArgumentException" +// A specified parameter exceeds its restrictions, is not supported, or can't +// be used. For more information, see the returned message. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The requested resource exceeds the maximum number allowed, or the number +// of concurrent stream requests exceeds the maximum number allowed (5). +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The resource is not available for this operation. For successful operation, +// the resource needs to be in the ACTIVE state. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The requested resource could not be found. The stream might not be specified +// correctly. +// +// * ErrCodeKMSDisabledException "KMSDisabledException" +// The request was rejected because the specified CMK isn't enabled. +// +// * ErrCodeKMSInvalidStateException "KMSInvalidStateException" +// The request was rejected because the state of the specified resource isn't +// valid for this request. 
For more information, see How Key State Affects Use +// of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the AWS Key Management Service Developer Guide. +// +// * ErrCodeKMSAccessDeniedException "KMSAccessDeniedException" +// The ciphertext references a key that doesn't exist or that you don't have +// access to. +// +// * ErrCodeKMSNotFoundException "KMSNotFoundException" +// The request was rejected because the specified entity or resource couldn't +// be found. +// +// * ErrCodeKMSOptInRequired "KMSOptInRequired" +// The AWS access key ID needs a subscription for the service. +// +// * ErrCodeKMSThrottlingException "KMSThrottlingException" +// The request was denied due to request throttling. For more information about +// throttling, see Limits (http://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) +// in the AWS Key Management Service Developer Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/StartStreamEncryption +func (c *Kinesis) StartStreamEncryption(input *StartStreamEncryptionInput) (*StartStreamEncryptionOutput, error) { + req, out := c.StartStreamEncryptionRequest(input) + return out, req.Send() +} + +// StartStreamEncryptionWithContext is the same as StartStreamEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See StartStreamEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *Kinesis) StartStreamEncryptionWithContext(ctx aws.Context, input *StartStreamEncryptionInput, opts ...request.Option) (*StartStreamEncryptionOutput, error) { + req, out := c.StartStreamEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopStreamEncryption = "StopStreamEncryption" + +// StopStreamEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the StopStreamEncryption operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See StopStreamEncryption for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StopStreamEncryption method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StopStreamEncryptionRequest method. 
+// req, resp := client.StopStreamEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/StopStreamEncryption +func (c *Kinesis) StopStreamEncryptionRequest(input *StopStreamEncryptionInput) (req *request.Request, output *StopStreamEncryptionOutput) { + op := &request.Operation{ + Name: opStopStreamEncryption, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopStreamEncryptionInput{} + } + + output = &StopStreamEncryptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// StopStreamEncryption API operation for Amazon Kinesis. +// +// Disables server-side encryption for a specified stream. +// +// Stopping encryption is an asynchronous operation. Upon receiving the request, +// Amazon Kinesis returns immediately and sets the status of the stream to UPDATING. +// After the update is complete, Amazon Kinesis sets the status of the stream +// back to ACTIVE. Stopping encryption normally takes a few seconds to complete +// but it can take minutes. You can continue to read and write data to your +// stream while its status is UPDATING. Once the status of the stream is ACTIVE +// records written to the stream will no longer be encrypted by the Amazon Kinesis +// Streams service. +// +// API Limits: You can successfully disable server-side encryption 25 times +// in a rolling 24 hour period. +// +// Note: It can take up to 5 seconds after the stream is in an ACTIVE status +// before all records written to the stream are no longer subject to encryption. +// After you’ve disabled encryption, you can verify encryption was not applied +// by inspecting the API response from PutRecord or PutRecords. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Kinesis's +// API operation StopStreamEncryption for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidArgumentException "InvalidArgumentException" +// A specified parameter exceeds its restrictions, is not supported, or can't +// be used. For more information, see the returned message. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The requested resource exceeds the maximum number allowed, or the number +// of concurrent stream requests exceeds the maximum number allowed (5). +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The resource is not available for this operation. For successful operation, +// the resource needs to be in the ACTIVE state. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The requested resource could not be found. The stream might not be specified +// correctly. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/StopStreamEncryption +func (c *Kinesis) StopStreamEncryption(input *StopStreamEncryptionInput) (*StopStreamEncryptionOutput, error) { + req, out := c.StopStreamEncryptionRequest(input) + return out, req.Send() +} + +// StopStreamEncryptionWithContext is the same as StopStreamEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See StopStreamEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *Kinesis) StopStreamEncryptionWithContext(ctx aws.Context, input *StopStreamEncryptionInput, opts ...request.Option) (*StopStreamEncryptionOutput, error) { + req, out := c.StopStreamEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateShardCount = "UpdateShardCount" // UpdateShardCountRequest generates a "aws/request.Request" representing the @@ -2198,18 +2522,30 @@ func (c *Kinesis) UpdateShardCountRequest(input *UpdateShardCountInput) (req *re // action could take a few minutes to complete. You can continue to read and // write data to your stream while its status is UPDATING. // -// To update the shard count, Amazon Kinesis performs splits and merges and -// individual shards. This can cause short-lived shards to be created, in addition -// to the final shards. We recommend that you double or halve the shard count, -// as this results in the fewest number of splits or merges. +// To update the shard count, Amazon Kinesis performs splits or merges on individual +// shards. This can cause short-lived shards to be created, in addition to the +// final shards. We recommend that you double or halve the shard count, as this +// results in the fewest number of splits or merges. // -// This operation has a rate limit of twice per rolling 24 hour period. You -// cannot scale above double your current shard count, scale below half your -// current shard count, or exceed the shard limits for your account. +// This operation has the following limits, which are per region per account +// unless otherwise noted: // -// For the default limits for an AWS account, see Streams Limits (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) -// in the Amazon Kinesis Streams Developer Guide. If you need to increase a -// limit, contact AWS Support (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html). 
+// * scale more than twice per rolling 24 hour period +// +// * scale up above double your current shard count +// +// * scale down below half your current shard count +// +// * scale up above 200 shards in a stream +// +// * scale a stream with more than 200 shards down unless the result is less +// than 200 shards +// +// * scale up above the shard limits for your account +// +// * +// +// For the default limits for an AWS account, see Streams Limits (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)in the Amazon Kinesis Streams Developer Guide. If you need to increase a limit, contact AWS Support (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3728,6 +4064,15 @@ func (s *PutRecordInput) SetStreamName(v string) *PutRecordInput { type PutRecordOutput struct { _ struct{} `type:"structure"` + // The encryption type to use on the record. This parameter can be one of the + // following values: + // + // * NONE: Do not encrypt the records in the stream. + // + // * KMS: Use server-side encryption on the records in the stream using a + // customer-managed KMS key. + EncryptionType *string `type:"string" enum:"EncryptionType"` + // The sequence number identifier that was assigned to the put data record. // The sequence number for the record is unique across all records in the stream. // A sequence number is the identifier associated with every record put into @@ -3752,6 +4097,12 @@ func (s PutRecordOutput) GoString() string { return s.String() } +// SetEncryptionType sets the EncryptionType field's value. +func (s *PutRecordOutput) SetEncryptionType(v string) *PutRecordOutput { + s.EncryptionType = &v + return s +} + // SetSequenceNumber sets the SequenceNumber field's value. 
func (s *PutRecordOutput) SetSequenceNumber(v string) *PutRecordOutput { s.SequenceNumber = &v @@ -3839,6 +4190,15 @@ func (s *PutRecordsInput) SetStreamName(v string) *PutRecordsInput { type PutRecordsOutput struct { _ struct{} `type:"structure"` + // The encryption type used on the records. This parameter can be one of the + // following values: + // + // * NONE: Do not encrypt the records. + // + // * KMS: Use server-side encryption on the records using a customer-managed + // KMS key. + EncryptionType *string `type:"string" enum:"EncryptionType"` + // The number of unsuccessfully processed records in a PutRecords request. FailedRecordCount *int64 `min:"1" type:"integer"` @@ -3862,6 +4222,12 @@ func (s PutRecordsOutput) GoString() string { return s.String() } +// SetEncryptionType sets the EncryptionType field's value. +func (s *PutRecordsOutput) SetEncryptionType(v string) *PutRecordsOutput { + s.EncryptionType = &v + return s +} + // SetFailedRecordCount sets the FailedRecordCount field's value. func (s *PutRecordsOutput) SetFailedRecordCount(v int64) *PutRecordsOutput { s.FailedRecordCount = &v @@ -4032,12 +4398,21 @@ type Record struct { // Data is a required field Data []byte `type:"blob" required:"true"` + // The encryption type used on the record. This parameter can be one of the + // following values: + // + // * NONE: Do not encrypt the records in the stream. + // + // * KMS: Use server-side encryption on the records in the stream using a + // customer-managed KMS key. + EncryptionType *string `type:"string" enum:"EncryptionType"` + // Identifies which shard in the stream the data record is assigned to. // // PartitionKey is a required field PartitionKey *string `min:"1" type:"string" required:"true"` - // The unique identifier of the record in the stream. + // The unique identifier of the record within its shard. 
// // SequenceNumber is a required field SequenceNumber *string `type:"string" required:"true"` @@ -4065,6 +4440,12 @@ func (s *Record) SetData(v []byte) *Record { return s } +// SetEncryptionType sets the EncryptionType field's value. +func (s *Record) SetEncryptionType(v string) *Record { + s.EncryptionType = &v + return s +} + // SetPartitionKey sets the PartitionKey field's value. func (s *Record) SetPartitionKey(v string) *Record { s.PartitionKey = &v @@ -4352,11 +4733,209 @@ func (s SplitShardOutput) GoString() string { return s.String() } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/StartStreamEncryptionInput +type StartStreamEncryptionInput struct { + _ struct{} `type:"structure"` + + // The encryption type to use. This parameter can be one of the following values: + // + // * NONE: Not valid for this operation. An InvalidOperationException will + // be thrown. + // + // * KMS: Use server-side encryption on the records in the stream using a + // customer-managed KMS key. + // + // EncryptionType is a required field + EncryptionType *string `type:"string" required:"true" enum:"EncryptionType"` + + // The GUID for the customer-managed KMS key to use for encryption. You can + // also use a Kinesis-owned master key by specifying the alias aws/kinesis. + // + // KeyId is a required field + KeyId *string `min:"1" type:"string" required:"true"` + + // The name of the stream for which to start encrypting records. + // + // StreamName is a required field + StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartStreamEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartStreamEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *StartStreamEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartStreamEncryptionInput"} + if s.EncryptionType == nil { + invalidParams.Add(request.NewErrParamRequired("EncryptionType")) + } + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + if s.StreamName == nil { + invalidParams.Add(request.NewErrParamRequired("StreamName")) + } + if s.StreamName != nil && len(*s.StreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StreamName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncryptionType sets the EncryptionType field's value. +func (s *StartStreamEncryptionInput) SetEncryptionType(v string) *StartStreamEncryptionInput { + s.EncryptionType = &v + return s +} + +// SetKeyId sets the KeyId field's value. +func (s *StartStreamEncryptionInput) SetKeyId(v string) *StartStreamEncryptionInput { + s.KeyId = &v + return s +} + +// SetStreamName sets the StreamName field's value. +func (s *StartStreamEncryptionInput) SetStreamName(v string) *StartStreamEncryptionInput { + s.StreamName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/StartStreamEncryptionOutput +type StartStreamEncryptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StartStreamEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartStreamEncryptionOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/StopStreamEncryptionInput +type StopStreamEncryptionInput struct { + _ struct{} `type:"structure"` + + // The encryption type. 
This parameter can be one of the following values: + // + // * NONE: Not valid for this operation. An InvalidOperationException will + // be thrown. + // + // * KMS: Use server-side encryption on the records in the stream using a + // customer-managed KMS key. + // + // EncryptionType is a required field + EncryptionType *string `type:"string" required:"true" enum:"EncryptionType"` + + // The GUID for the customer-managed key that was used for encryption. + // + // KeyId is a required field + KeyId *string `min:"1" type:"string" required:"true"` + + // The name of the stream on which to stop encrypting records. + // + // StreamName is a required field + StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StopStreamEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopStreamEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopStreamEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopStreamEncryptionInput"} + if s.EncryptionType == nil { + invalidParams.Add(request.NewErrParamRequired("EncryptionType")) + } + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + if s.StreamName == nil { + invalidParams.Add(request.NewErrParamRequired("StreamName")) + } + if s.StreamName != nil && len(*s.StreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StreamName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncryptionType sets the EncryptionType field's value. 
+func (s *StopStreamEncryptionInput) SetEncryptionType(v string) *StopStreamEncryptionInput { + s.EncryptionType = &v + return s +} + +// SetKeyId sets the KeyId field's value. +func (s *StopStreamEncryptionInput) SetKeyId(v string) *StopStreamEncryptionInput { + s.KeyId = &v + return s +} + +// SetStreamName sets the StreamName field's value. +func (s *StopStreamEncryptionInput) SetStreamName(v string) *StopStreamEncryptionInput { + s.StreamName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/StopStreamEncryptionOutput +type StopStreamEncryptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StopStreamEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopStreamEncryptionOutput) GoString() string { + return s.String() +} + // Represents the output for DescribeStream. // Please also see https://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/StreamDescription type StreamDescription struct { _ struct{} `type:"structure"` + // The server-side encryption type used on the stream. This parameter can be + // one of the following values: + // + // * NONE: Do not encrypt the records in the stream. + // + // * KMS: Use server-side encryption on the records in the stream using a + // customer-managed KMS key. + EncryptionType *string `type:"string" enum:"EncryptionType"` + // Represents the current enhanced monitoring settings of the stream. // // EnhancedMonitoring is a required field @@ -4367,6 +4946,9 @@ type StreamDescription struct { // HasMoreShards is a required field HasMoreShards *bool `type:"boolean" required:"true"` + // The GUID for the customer-managed KMS key used for encryption on the stream. + KeyId *string `min:"1" type:"string"` + // The current retention period, in hours. 
// // RetentionPeriodHours is a required field @@ -4423,6 +5005,12 @@ func (s StreamDescription) GoString() string { return s.String() } +// SetEncryptionType sets the EncryptionType field's value. +func (s *StreamDescription) SetEncryptionType(v string) *StreamDescription { + s.EncryptionType = &v + return s +} + // SetEnhancedMonitoring sets the EnhancedMonitoring field's value. func (s *StreamDescription) SetEnhancedMonitoring(v []*EnhancedMetrics) *StreamDescription { s.EnhancedMonitoring = v @@ -4435,6 +5023,12 @@ func (s *StreamDescription) SetHasMoreShards(v bool) *StreamDescription { return s } +// SetKeyId sets the KeyId field's value. +func (s *StreamDescription) SetKeyId(v string) *StreamDescription { + s.KeyId = &v + return s +} + // SetRetentionPeriodHours sets the RetentionPeriodHours field's value. func (s *StreamDescription) SetRetentionPeriodHours(v int64) *StreamDescription { s.RetentionPeriodHours = &v @@ -4625,6 +5219,14 @@ func (s *UpdateShardCountOutput) SetTargetShardCount(v int64) *UpdateShardCountO return s } +const ( + // EncryptionTypeNone is a EncryptionType enum value + EncryptionTypeNone = "NONE" + + // EncryptionTypeKms is a EncryptionType enum value + EncryptionTypeKms = "KMS" +) + const ( // MetricsNameIncomingBytes is a MetricsName enum value MetricsNameIncomingBytes = "IncomingBytes" diff --git a/fn/vendor/github.com/aws/aws-sdk-go/service/kinesis/errors.go b/fn/vendor/github.com/aws/aws-sdk-go/service/kinesis/errors.go index 9c9beafe3..bf2872c7f 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/service/kinesis/errors.go +++ b/fn/vendor/github.com/aws/aws-sdk-go/service/kinesis/errors.go @@ -17,6 +17,49 @@ const ( // be used. For more information, see the returned message. ErrCodeInvalidArgumentException = "InvalidArgumentException" + // ErrCodeKMSAccessDeniedException for service response error code + // "KMSAccessDeniedException". + // + // The ciphertext references a key that doesn't exist or that you don't have + // access to. 
+ ErrCodeKMSAccessDeniedException = "KMSAccessDeniedException" + + // ErrCodeKMSDisabledException for service response error code + // "KMSDisabledException". + // + // The request was rejected because the specified CMK isn't enabled. + ErrCodeKMSDisabledException = "KMSDisabledException" + + // ErrCodeKMSInvalidStateException for service response error code + // "KMSInvalidStateException". + // + // The request was rejected because the state of the specified resource isn't + // valid for this request. For more information, see How Key State Affects Use + // of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) + // in the AWS Key Management Service Developer Guide. + ErrCodeKMSInvalidStateException = "KMSInvalidStateException" + + // ErrCodeKMSNotFoundException for service response error code + // "KMSNotFoundException". + // + // The request was rejected because the specified entity or resource couldn't + // be found. + ErrCodeKMSNotFoundException = "KMSNotFoundException" + + // ErrCodeKMSOptInRequired for service response error code + // "KMSOptInRequired". + // + // The AWS access key ID needs a subscription for the service. + ErrCodeKMSOptInRequired = "KMSOptInRequired" + + // ErrCodeKMSThrottlingException for service response error code + // "KMSThrottlingException". + // + // The request was denied due to request throttling. For more information about + // throttling, see Limits (http://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) + // in the AWS Key Management Service Developer Guide. + ErrCodeKMSThrottlingException = "KMSThrottlingException" + // ErrCodeLimitExceededException for service response error code // "LimitExceededException". 
// diff --git a/fn/vendor/github.com/aws/aws-sdk-go/service/kinesis/kinesisiface/interface.go b/fn/vendor/github.com/aws/aws-sdk-go/service/kinesis/kinesisiface/interface.go index e9644f00d..d4a70e5eb 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/service/kinesis/kinesisiface/interface.go +++ b/fn/vendor/github.com/aws/aws-sdk-go/service/kinesis/kinesisiface/interface.go @@ -138,6 +138,14 @@ type KinesisAPI interface { SplitShardWithContext(aws.Context, *kinesis.SplitShardInput, ...request.Option) (*kinesis.SplitShardOutput, error) SplitShardRequest(*kinesis.SplitShardInput) (*request.Request, *kinesis.SplitShardOutput) + StartStreamEncryption(*kinesis.StartStreamEncryptionInput) (*kinesis.StartStreamEncryptionOutput, error) + StartStreamEncryptionWithContext(aws.Context, *kinesis.StartStreamEncryptionInput, ...request.Option) (*kinesis.StartStreamEncryptionOutput, error) + StartStreamEncryptionRequest(*kinesis.StartStreamEncryptionInput) (*request.Request, *kinesis.StartStreamEncryptionOutput) + + StopStreamEncryption(*kinesis.StopStreamEncryptionInput) (*kinesis.StopStreamEncryptionOutput, error) + StopStreamEncryptionWithContext(aws.Context, *kinesis.StopStreamEncryptionInput, ...request.Option) (*kinesis.StopStreamEncryptionOutput, error) + StopStreamEncryptionRequest(*kinesis.StopStreamEncryptionInput) (*request.Request, *kinesis.StopStreamEncryptionOutput) + UpdateShardCount(*kinesis.UpdateShardCountInput) (*kinesis.UpdateShardCountOutput, error) UpdateShardCountWithContext(aws.Context, *kinesis.UpdateShardCountInput, ...request.Option) (*kinesis.UpdateShardCountOutput, error) UpdateShardCountRequest(*kinesis.UpdateShardCountInput) (*request.Request, *kinesis.UpdateShardCountOutput) diff --git a/fn/vendor/github.com/aws/aws-sdk-go/service/kms/api.go b/fn/vendor/github.com/aws/aws-sdk-go/service/kms/api.go index fa60d0247..f8960a3c8 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/service/kms/api.go +++ 
b/fn/vendor/github.com/aws/aws-sdk-go/service/kms/api.go @@ -6380,6 +6380,11 @@ type KeyMetadata struct { // KeyId is a required field KeyId *string `min:"1" type:"string" required:"true"` + // The CMK's manager. CMKs are either customer-managed or AWS-managed. For more + // information about the difference, see Customer Master Keys (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#master_keys) + // in the AWS Key Management Service Developer Guide. + KeyManager *string `type:"string" enum:"KeyManagerType"` + // The state of the CMK. // // For more information about how key state affects the use of a CMK, see How @@ -6463,6 +6468,12 @@ func (s *KeyMetadata) SetKeyId(v string) *KeyMetadata { return s } +// SetKeyManager sets the KeyManager field's value. +func (s *KeyMetadata) SetKeyManager(v string) *KeyMetadata { + s.KeyManager = &v + return s +} + // SetKeyState sets the KeyState field's value. func (s *KeyMetadata) SetKeyState(v string) *KeyMetadata { s.KeyState = &v @@ -8135,6 +8146,14 @@ const ( GrantOperationDescribeKey = "DescribeKey" ) +const ( + // KeyManagerTypeAws is a KeyManagerType enum value + KeyManagerTypeAws = "AWS" + + // KeyManagerTypeCustomer is a KeyManagerType enum value + KeyManagerTypeCustomer = "CUSTOMER" +) + const ( // KeyStateEnabled is a KeyState enum value KeyStateEnabled = "Enabled" diff --git a/fn/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go b/fn/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go index e3c2dfc90..e9bb6dd58 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go +++ b/fn/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go @@ -902,7 +902,8 @@ func (c *SSM) CreateResourceDataSyncRequest(input *CreateResourceDataSyncInput) // you enable encryption in Amazon S3 to ensure secure data storage. We also // recommend that you secure access to the Amazon S3 bucket by creating a restrictive // bucket policy. 
To view an example of a restrictive Amazon S3 bucket policy -// for Resource Data Sync, see Creating a Resource Data Sync (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-datasync-create.html). +// for Resource Data Sync, see Configuring Resource Data Sync for Inventory +// (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-configuring.html#sysman-inventory-datasync). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2733,7 +2734,8 @@ func (c *SSM) DescribeEffectivePatchesForPatchBaselineRequest(input *DescribeEff // DescribeEffectivePatchesForPatchBaseline API operation for Amazon Simple Systems Manager (SSM). // // Retrieves the current effective patches (the patch and the approval state) -// for the specified patch baseline. +// for the specified patch baseline. Note that this API applies only to Windows +// patch baselines. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2751,6 +2753,11 @@ func (c *SSM) DescribeEffectivePatchesForPatchBaselineRequest(input *DescribeEff // Error returned when the ID specified for a resource (e.g. a Maintenance Window) // doesn't exist. // +// * ErrCodeUnsupportedOperatingSystem "UnsupportedOperatingSystem" +// The operating systems you specified is not supported, or the operation is +// not supported for the operating system. Valid operating systems include: +// Windows, AmazonLinux, RedhatEnterpriseLinux, and Ubuntu. +// // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. // @@ -4438,7 +4445,9 @@ func (c *SSM) GetDefaultPatchBaselineRequest(input *GetDefaultPatchBaselineInput // GetDefaultPatchBaseline API operation for Amazon Simple Systems Manager (SSM). 
// -// Retrieves the default patch baseline. +// Retrieves the default patch baseline. Note that Systems Manager supports +// creating multiple default patch baselines. For example, you can create a +// default patch baseline for each operating system. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4519,8 +4528,7 @@ func (c *SSM) GetDeployablePatchSnapshotForInstanceRequest(input *GetDeployableP // GetDeployablePatchSnapshotForInstance API operation for Amazon Simple Systems Manager (SSM). // // Retrieves the current snapshot for the patch baseline the instance uses. -// This API is primarily used by the AWS-ApplyPatchBaseline Systems Manager -// document. +// This API is primarily used by the AWS-RunPatchBaseline Systems Manager document. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4533,6 +4541,11 @@ func (c *SSM) GetDeployablePatchSnapshotForInstanceRequest(input *GetDeployableP // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. // +// * ErrCodeUnsupportedOperatingSystem "UnsupportedOperatingSystem" +// The operating systems you specified is not supported, or the operation is +// not supported for the operating system. Valid operating systems include: +// Windows, AmazonLinux, RedhatEnterpriseLinux, and Ubuntu. +// // Please also see https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetDeployablePatchSnapshotForInstance func (c *SSM) GetDeployablePatchSnapshotForInstance(input *GetDeployablePatchSnapshotForInstanceInput) (*GetDeployablePatchSnapshotForInstanceOutput, error) { req, out := c.GetDeployablePatchSnapshotForInstanceRequest(input) @@ -10794,6 +10807,12 @@ type CreatePatchBaselineInput struct { // A list of explicitly approved patches for the baseline. 
ApprovedPatches []*string `type:"list"` + // Defines the compliance level for approved patches. This means that if an + // approved patch is reported as missing, this is the severity of the compliance + // violation. Valid compliance severity levels include the following: CRITICAL, + // HIGH, MEDIUM, LOW, INFORMATIONAL, UNSPECIFIED. The default value is UNSPECIFIED. + ApprovedPatchesComplianceLevel *string `type:"string" enum:"PatchComplianceLevel"` + // User-provided idempotency token. ClientToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -10808,6 +10827,11 @@ type CreatePatchBaselineInput struct { // Name is a required field Name *string `min:"3" type:"string" required:"true"` + // Defines the operating system the patch baseline applies to. Supported operating + // systems include WINDOWS, AMAZON_LINUX, UBUNTU and REDHAT_ENTERPRISE_LINUX. + // The Default value is WINDOWS. + OperatingSystem *string `type:"string" enum:"OperatingSystem"` + // A list of explicitly rejected patches for the baseline. RejectedPatches []*string `type:"list"` } @@ -10866,6 +10890,12 @@ func (s *CreatePatchBaselineInput) SetApprovedPatches(v []*string) *CreatePatchB return s } +// SetApprovedPatchesComplianceLevel sets the ApprovedPatchesComplianceLevel field's value. +func (s *CreatePatchBaselineInput) SetApprovedPatchesComplianceLevel(v string) *CreatePatchBaselineInput { + s.ApprovedPatchesComplianceLevel = &v + return s +} + // SetClientToken sets the ClientToken field's value. func (s *CreatePatchBaselineInput) SetClientToken(v string) *CreatePatchBaselineInput { s.ClientToken = &v @@ -10890,6 +10920,12 @@ func (s *CreatePatchBaselineInput) SetName(v string) *CreatePatchBaselineInput { return s } +// SetOperatingSystem sets the OperatingSystem field's value. +func (s *CreatePatchBaselineInput) SetOperatingSystem(v string) *CreatePatchBaselineInput { + s.OperatingSystem = &v + return s +} + // SetRejectedPatches sets the RejectedPatches field's value. 
func (s *CreatePatchBaselineInput) SetRejectedPatches(v []*string) *CreatePatchBaselineInput { s.RejectedPatches = v @@ -14199,6 +14235,9 @@ func (s *DescribePatchGroupStateOutput) SetInstancesWithNotApplicablePatches(v i type DescribePatchGroupsInput struct { _ struct{} `type:"structure"` + // One or more filters. Use a filter to return a more specific list of results. + Filters []*PatchOrchestratorFilter `type:"list"` + // The maximum number of patch groups to return (per page). MaxResults *int64 `min:"1" type:"integer"` @@ -14223,6 +14262,16 @@ func (s *DescribePatchGroupsInput) Validate() error { if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -14230,6 +14279,12 @@ func (s *DescribePatchGroupsInput) Validate() error { return nil } +// SetFilters sets the Filters field's value. +func (s *DescribePatchGroupsInput) SetFilters(v []*PatchOrchestratorFilter) *DescribePatchGroupsInput { + s.Filters = v + return s +} + // SetMaxResults sets the MaxResults field's value. func (s *DescribePatchGroupsInput) SetMaxResults(v int64) *DescribePatchGroupsInput { s.MaxResults = &v @@ -15195,6 +15250,9 @@ func (s *GetCommandInvocationOutput) SetStatusDetails(v string) *GetCommandInvoc // Please also see https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetDefaultPatchBaselineRequest type GetDefaultPatchBaselineInput struct { _ struct{} `type:"structure"` + + // Returns the default patch baseline for the specified operating system. 
+ OperatingSystem *string `type:"string" enum:"OperatingSystem"` } // String returns the string representation @@ -15207,12 +15265,21 @@ func (s GetDefaultPatchBaselineInput) GoString() string { return s.String() } +// SetOperatingSystem sets the OperatingSystem field's value. +func (s *GetDefaultPatchBaselineInput) SetOperatingSystem(v string) *GetDefaultPatchBaselineInput { + s.OperatingSystem = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetDefaultPatchBaselineResult type GetDefaultPatchBaselineOutput struct { _ struct{} `type:"structure"` // The ID of the default patch baseline. BaselineId *string `min:"20" type:"string"` + + // The operating system for the returned patch baseline. + OperatingSystem *string `type:"string" enum:"OperatingSystem"` } // String returns the string representation @@ -15231,6 +15298,12 @@ func (s *GetDefaultPatchBaselineOutput) SetBaselineId(v string) *GetDefaultPatch return s } +// SetOperatingSystem sets the OperatingSystem field's value. +func (s *GetDefaultPatchBaselineOutput) SetOperatingSystem(v string) *GetDefaultPatchBaselineOutput { + s.OperatingSystem = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetDeployablePatchSnapshotForInstanceRequest type GetDeployablePatchSnapshotForInstanceInput struct { _ struct{} `type:"structure"` @@ -15295,6 +15368,10 @@ type GetDeployablePatchSnapshotForInstanceOutput struct { // The ID of the instance. InstanceId *string `type:"string"` + // Returns the specific operating system (for example Windows Server 2012 or + // Amazon Linux 2015.09) on the instance for the specified patch snapshot. + Product *string `type:"string"` + // A pre-signed Amazon S3 URL that can be used to download the patch snapshot. 
 	SnapshotDownloadUrl *string `type:"string"`
@@ -15318,6 +15395,12 @@ func (s *GetDeployablePatchSnapshotForInstanceOutput) SetInstanceId(v string) *G
 	return s
 }
 
+// SetProduct sets the Product field's value.
+func (s *GetDeployablePatchSnapshotForInstanceOutput) SetProduct(v string) *GetDeployablePatchSnapshotForInstanceOutput {
+	s.Product = &v
+	return s
+}
+
 // SetSnapshotDownloadUrl sets the SnapshotDownloadUrl field's value.
 func (s *GetDeployablePatchSnapshotForInstanceOutput) SetSnapshotDownloadUrl(v string) *GetDeployablePatchSnapshotForInstanceOutput {
 	s.SnapshotDownloadUrl = &v
@@ -16515,6 +16598,10 @@ func (s *GetParametersOutput) SetParameters(v []*Parameter) *GetParametersOutput
 type GetPatchBaselineForPatchGroupInput struct {
 	_ struct{} `type:"structure"`
 
+	// Returns the operating system rule specified for patch groups using the patch
+	// baseline.
+	OperatingSystem *string `type:"string" enum:"OperatingSystem"`
+
 	// The name of the patch group whose patch baseline should be retrieved.
 	//
 	// PatchGroup is a required field
@@ -16547,6 +16634,12 @@ func (s *GetPatchBaselineForPatchGroupInput) Validate() error {
 	return nil
 }
 
+// SetOperatingSystem sets the OperatingSystem field's value.
+func (s *GetPatchBaselineForPatchGroupInput) SetOperatingSystem(v string) *GetPatchBaselineForPatchGroupInput {
+	s.OperatingSystem = &v
+	return s
+}
+
 // SetPatchGroup sets the PatchGroup field's value.
 func (s *GetPatchBaselineForPatchGroupInput) SetPatchGroup(v string) *GetPatchBaselineForPatchGroupInput {
 	s.PatchGroup = &v
@@ -16560,6 +16653,9 @@ type GetPatchBaselineForPatchGroupOutput struct {
 	// The ID of the patch baseline that should be used for the patch group.
 	BaselineId *string `min:"20" type:"string"`
 
+	// The operating system rule specified for patch groups using the patch baseline.
+	OperatingSystem *string `type:"string" enum:"OperatingSystem"`
+
 	// The name of the patch group.
PatchGroup *string `min:"1" type:"string"` } @@ -16580,6 +16676,12 @@ func (s *GetPatchBaselineForPatchGroupOutput) SetBaselineId(v string) *GetPatchB return s } +// SetOperatingSystem sets the OperatingSystem field's value. +func (s *GetPatchBaselineForPatchGroupOutput) SetOperatingSystem(v string) *GetPatchBaselineForPatchGroupOutput { + s.OperatingSystem = &v + return s +} + // SetPatchGroup sets the PatchGroup field's value. func (s *GetPatchBaselineForPatchGroupOutput) SetPatchGroup(v string) *GetPatchBaselineForPatchGroupOutput { s.PatchGroup = &v @@ -16638,6 +16740,10 @@ type GetPatchBaselineOutput struct { // A list of explicitly approved patches for the baseline. ApprovedPatches []*string `type:"list"` + // Returns the specified compliance severity level for approved patches in the + // patch baseline. + ApprovedPatchesComplianceLevel *string `type:"string" enum:"PatchComplianceLevel"` + // The ID of the retrieved patch baseline. BaselineId *string `min:"20" type:"string"` @@ -16656,6 +16762,9 @@ type GetPatchBaselineOutput struct { // The name of the patch baseline. Name *string `min:"3" type:"string"` + // Returns the operating system specified for the patch baseline. + OperatingSystem *string `type:"string" enum:"OperatingSystem"` + // Patch groups included in the patch baseline. PatchGroups []*string `type:"list"` @@ -16685,6 +16794,12 @@ func (s *GetPatchBaselineOutput) SetApprovedPatches(v []*string) *GetPatchBaseli return s } +// SetApprovedPatchesComplianceLevel sets the ApprovedPatchesComplianceLevel field's value. +func (s *GetPatchBaselineOutput) SetApprovedPatchesComplianceLevel(v string) *GetPatchBaselineOutput { + s.ApprovedPatchesComplianceLevel = &v + return s +} + // SetBaselineId sets the BaselineId field's value. 
func (s *GetPatchBaselineOutput) SetBaselineId(v string) *GetPatchBaselineOutput { s.BaselineId = &v @@ -16721,6 +16836,12 @@ func (s *GetPatchBaselineOutput) SetName(v string) *GetPatchBaselineOutput { return s } +// SetOperatingSystem sets the OperatingSystem field's value. +func (s *GetPatchBaselineOutput) SetOperatingSystem(v string) *GetPatchBaselineOutput { + s.OperatingSystem = &v + return s +} + // SetPatchGroups sets the PatchGroups field's value. func (s *GetPatchBaselineOutput) SetPatchGroups(v []*string) *GetPatchBaselineOutput { s.PatchGroups = v @@ -19954,7 +20075,7 @@ type Patch struct { // The ID of the patch (this is different than the Microsoft Knowledge Base // ID). - Id *string `type:"string"` + Id *string `min:"1" type:"string"` // The Microsoft Knowledge Base ID of the patch. KbNumber *string `type:"string"` @@ -20086,8 +20207,15 @@ type PatchBaselineIdentity struct { // The name of the patch baseline. BaselineName *string `min:"3" type:"string"` - // Whether this is the default baseline. + // Whether this is the default baseline. Note that Systems Manager supports + // creating multiple default patch baselines. For example, you can create a + // default patch baseline for each operating system. DefaultBaseline *bool `type:"boolean"` + + // Defines the operating system the patch baseline applies to. Supported operating + // systems include WINDOWS, AMAZON_LINUX, UBUNTU and REDHAT_ENTERPRISE_LINUX. + // The Default value is WINDOWS. + OperatingSystem *string `type:"string" enum:"OperatingSystem"` } // String returns the string representation @@ -20124,6 +20252,12 @@ func (s *PatchBaselineIdentity) SetDefaultBaseline(v bool) *PatchBaselineIdentit return s } +// SetOperatingSystem sets the OperatingSystem field's value. 
+func (s *PatchBaselineIdentity) SetOperatingSystem(v string) *PatchBaselineIdentity { + s.OperatingSystem = &v + return s +} + // Information about the state of a patch on a particular instance as it relates // to the patch baseline used to patch the instance. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/PatchComplianceData @@ -20135,12 +20269,13 @@ type PatchComplianceData struct { // Classification is a required field Classification *string `type:"string" required:"true"` - // The date/time the patch was installed on the instance. + // The date/time the patch was installed on the instance. Note that not all + // operating systems provide this level of information. // // InstalledTime is a required field InstalledTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` - // The Microsoft Knowledge Base ID of the patch. + // The operating system-specific ID of the patch. // // KBId is a required field KBId *string `type:"string" required:"true"` @@ -20408,6 +20543,11 @@ type PatchRule struct { // ApproveAfterDays is a required field ApproveAfterDays *int64 `type:"integer" required:"true"` + // A compliance severity level for all approved patches in a patch baseline. + // Valid compliance severity levels include the following: Unspecified, Critical, + // High, Medium, Low, and Informational. + ComplianceLevel *string `type:"string" enum:"PatchComplianceLevel"` + // The patch filter group that defines the criteria for the rule. // // PatchFilterGroup is a required field @@ -20451,6 +20591,12 @@ func (s *PatchRule) SetApproveAfterDays(v int64) *PatchRule { return s } +// SetComplianceLevel sets the ComplianceLevel field's value. +func (s *PatchRule) SetComplianceLevel(v string) *PatchRule { + s.ComplianceLevel = &v + return s +} + // SetPatchFilterGroup sets the PatchFilterGroup field's value. 
func (s *PatchRule) SetPatchFilterGroup(v *PatchFilterGroup) *PatchRule { s.PatchFilterGroup = v @@ -20515,6 +20661,9 @@ type PatchStatus struct { // The date the patch was approved (or will be approved if the status is PENDING_APPROVAL). ApprovalDate *time.Time `type:"timestamp" timestampFormat:"unix"` + // The compliance severity level for a patch. + ComplianceLevel *string `type:"string" enum:"PatchComplianceLevel"` + // The approval status of a patch (APPROVED, PENDING_APPROVAL, EXPLICIT_APPROVED, // EXPLICIT_REJECTED). DeploymentStatus *string `type:"string" enum:"PatchDeploymentStatus"` @@ -20536,6 +20685,12 @@ func (s *PatchStatus) SetApprovalDate(v time.Time) *PatchStatus { return s } +// SetComplianceLevel sets the ComplianceLevel field's value. +func (s *PatchStatus) SetComplianceLevel(v string) *PatchStatus { + s.ComplianceLevel = &v + return s +} + // SetDeploymentStatus sets the DeploymentStatus field's value. func (s *PatchStatus) SetDeploymentStatus(v string) *PatchStatus { s.DeploymentStatus = &v @@ -22915,6 +23070,9 @@ type UpdatePatchBaselineInput struct { // A list of explicitly approved patches for the baseline. ApprovedPatches []*string `type:"list"` + // Assigns a new compliance severity level to an existing patch baseline. + ApprovedPatchesComplianceLevel *string `type:"string" enum:"PatchComplianceLevel"` + // The ID of the patch baseline to update. // // BaselineId is a required field @@ -22987,6 +23145,12 @@ func (s *UpdatePatchBaselineInput) SetApprovedPatches(v []*string) *UpdatePatchB return s } +// SetApprovedPatchesComplianceLevel sets the ApprovedPatchesComplianceLevel field's value. +func (s *UpdatePatchBaselineInput) SetApprovedPatchesComplianceLevel(v string) *UpdatePatchBaselineInput { + s.ApprovedPatchesComplianceLevel = &v + return s +} + // SetBaselineId sets the BaselineId field's value. 
func (s *UpdatePatchBaselineInput) SetBaselineId(v string) *UpdatePatchBaselineInput { s.BaselineId = &v @@ -23027,6 +23191,10 @@ type UpdatePatchBaselineOutput struct { // A list of explicitly approved patches for the baseline. ApprovedPatches []*string `type:"list"` + // The compliance severity level assigned to the patch baseline after the update + // completed. + ApprovedPatchesComplianceLevel *string `type:"string" enum:"PatchComplianceLevel"` + // The ID of the deleted patch baseline. BaselineId *string `min:"20" type:"string"` @@ -23045,6 +23213,9 @@ type UpdatePatchBaselineOutput struct { // The name of the patch baseline. Name *string `min:"3" type:"string"` + // The operating system rule used by the updated patch baseline. + OperatingSystem *string `type:"string" enum:"OperatingSystem"` + // A list of explicitly rejected patches for the baseline. RejectedPatches []*string `type:"list"` } @@ -23071,6 +23242,12 @@ func (s *UpdatePatchBaselineOutput) SetApprovedPatches(v []*string) *UpdatePatch return s } +// SetApprovedPatchesComplianceLevel sets the ApprovedPatchesComplianceLevel field's value. +func (s *UpdatePatchBaselineOutput) SetApprovedPatchesComplianceLevel(v string) *UpdatePatchBaselineOutput { + s.ApprovedPatchesComplianceLevel = &v + return s +} + // SetBaselineId sets the BaselineId field's value. func (s *UpdatePatchBaselineOutput) SetBaselineId(v string) *UpdatePatchBaselineOutput { s.BaselineId = &v @@ -23107,6 +23284,12 @@ func (s *UpdatePatchBaselineOutput) SetName(v string) *UpdatePatchBaselineOutput return s } +// SetOperatingSystem sets the OperatingSystem field's value. +func (s *UpdatePatchBaselineOutput) SetOperatingSystem(v string) *UpdatePatchBaselineOutput { + s.OperatingSystem = &v + return s +} + // SetRejectedPatches sets the RejectedPatches field's value. 
func (s *UpdatePatchBaselineOutput) SetRejectedPatches(v []*string) *UpdatePatchBaselineOutput { s.RejectedPatches = v @@ -23474,6 +23657,20 @@ const ( NotificationTypeInvocation = "Invocation" ) +const ( + // OperatingSystemWindows is a OperatingSystem enum value + OperatingSystemWindows = "WINDOWS" + + // OperatingSystemAmazonLinux is a OperatingSystem enum value + OperatingSystemAmazonLinux = "AMAZON_LINUX" + + // OperatingSystemUbuntu is a OperatingSystem enum value + OperatingSystemUbuntu = "UBUNTU" + + // OperatingSystemRedhatEnterpriseLinux is a OperatingSystem enum value + OperatingSystemRedhatEnterpriseLinux = "REDHAT_ENTERPRISE_LINUX" +) + const ( // ParameterTypeString is a ParameterType enum value ParameterTypeString = "String" @@ -23513,6 +23710,26 @@ const ( PatchComplianceDataStateFailed = "FAILED" ) +const ( + // PatchComplianceLevelCritical is a PatchComplianceLevel enum value + PatchComplianceLevelCritical = "CRITICAL" + + // PatchComplianceLevelHigh is a PatchComplianceLevel enum value + PatchComplianceLevelHigh = "HIGH" + + // PatchComplianceLevelMedium is a PatchComplianceLevel enum value + PatchComplianceLevelMedium = "MEDIUM" + + // PatchComplianceLevelLow is a PatchComplianceLevel enum value + PatchComplianceLevelLow = "LOW" + + // PatchComplianceLevelInformational is a PatchComplianceLevel enum value + PatchComplianceLevelInformational = "INFORMATIONAL" + + // PatchComplianceLevelUnspecified is a PatchComplianceLevel enum value + PatchComplianceLevelUnspecified = "UNSPECIFIED" +) + const ( // PatchDeploymentStatusApproved is a PatchDeploymentStatus enum value PatchDeploymentStatusApproved = "APPROVED" @@ -23539,6 +23756,15 @@ const ( // PatchFilterKeyPatchId is a PatchFilterKey enum value PatchFilterKeyPatchId = "PATCH_ID" + + // PatchFilterKeySection is a PatchFilterKey enum value + PatchFilterKeySection = "SECTION" + + // PatchFilterKeyPriority is a PatchFilterKey enum value + PatchFilterKeyPriority = "PRIORITY" + + // 
PatchFilterKeySeverity is a PatchFilterKey enum value + PatchFilterKeySeverity = "SEVERITY" ) const ( diff --git a/fn/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go b/fn/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go index 8c6dc09e7..23ec18b70 100644 --- a/fn/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go +++ b/fn/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go @@ -494,6 +494,14 @@ const ( // for each type. ErrCodeUnsupportedInventorySchemaVersionException = "UnsupportedInventorySchemaVersionException" + // ErrCodeUnsupportedOperatingSystem for service response error code + // "UnsupportedOperatingSystem". + // + // The operating systems you specified is not supported, or the operation is + // not supported for the operating system. Valid operating systems include: + // Windows, AmazonLinux, RedhatEnterpriseLinux, and Ubuntu. + ErrCodeUnsupportedOperatingSystem = "UnsupportedOperatingSystem" + // ErrCodeUnsupportedParameterType for service response error code // "UnsupportedParameterType". // diff --git a/fn/vendor/github.com/docker/docker/.gitignore b/fn/vendor/github.com/docker/docker/.gitignore index 3e5ed1f82..943e7f3f6 100644 --- a/fn/vendor/github.com/docker/docker/.gitignore +++ b/fn/vendor/github.com/docker/docker/.gitignore @@ -15,21 +15,8 @@ autogen/ bundles/ cmd/dockerd/dockerd -cmd/docker/docker contrib/builder/rpm/*/changelog dockerversion/version_autogen.go dockerversion/version_autogen_unix.go -docs/AWS_S3_BUCKET -docs/GITCOMMIT -docs/GIT_BRANCH -docs/VERSION -docs/_build -docs/_static -docs/_templates -docs/changed-files -# generated by man/md2man-all.sh -man/man1 -man/man5 -man/man8 vendor/pkg/ hack/integration-cli-on-swarm/integration-cli-on-swarm diff --git a/fn/vendor/github.com/docker/docker/.mailmap b/fn/vendor/github.com/docker/docker/.mailmap index 350ecaf44..49275048f 100644 --- a/fn/vendor/github.com/docker/docker/.mailmap +++ b/fn/vendor/github.com/docker/docker/.mailmap @@ -36,8 +36,8 @@ Guillaume J. 
Charmes Thatcher Peskens Thatcher Peskens dhrp -Jérôme Petazzoni jpetazzo -Jérôme Petazzoni +Jérôme Petazzoni +Jérôme Petazzoni Joffrey F Joffrey F Joffrey F @@ -93,6 +93,7 @@ Sven Dowideit <¨SvenDowideit@home.org.au¨> Sven Dowideit Sven Dowideit Sven Dowideit +Akihiro Matsushima Alexander Morozov Alexander Morozov @@ -108,6 +109,7 @@ Roberto G. Hashioka Sridhar Ratnakumar Sridhar Ratnakumar Liang-Chi Hsieh +Aaron L. Xu Aleksa Sarai Aleksa Sarai Aleksa Sarai @@ -162,17 +164,31 @@ Darren Shepherd Deshi Xiao Deshi Xiao Doug Davis +Giampaolo Mancini +K. Heller Jacob Atzen Jeff Nickoloff +Jérôme Petazzoni +John Harris John Howard (VM) John Howard (VM) John Howard (VM) John Howard (VM) +John Howard (VM) +Kevin Feyrer +Liao Qingwei +Luke Marsden Madhu Venugopal +Mageee <21521230.zju.edu.cn> +Mansi Nahar +Mansi Nahar Mary Anthony Mary Anthony moxiegirl Mary Anthony mattyw +Michael Spetsiotis +Nik Nyby +Peter Jaffe resouer AJ Bowen soulshake AJ Bowen soulshake @@ -234,11 +250,13 @@ Stephen Day Toli Kuznets Tristan Carel -Vincent Demeester - + + +Vincent Demeester Vishnu Kannan xlgao-zju xlgao -yuchangchun y00277921 +Yu Changchun y00277921 +Yu Changchun @@ -284,38 +302,59 @@ Bingshen Wang Chen Chuanliang Chen Mingjie CUI Wei cuiwei13 +Daniel Grunwell +Daniel J Walsh Dattatraya Kumbhar +David Sheets Diego Siqueira +Eric G. Noriega Evelyn Xu Felix Ruess Gabriel Nicolas Avellaneda Gang Qiao <1373319223@qq.com> +Gustav Sinder +Harshal Patil Helen Xie +Hyzhou Zhy <1187766782@qq.com> +Hyzhou Zhy Jacob Tomlinson Jiuyue Ma Jose Diaz-Gonzalez Josh Eveleth Josh Wilson +Jim Galasyn Kevin Kern +Konstantin Gribov Kunal Kushwaha Lajos Papp Lyn +Markan Patel Michael Käufl Michal Minář Michael Hudson-Doyle +Mike Casas Milind Chawre Ma Müller +Philipp Gillé Roberto Muñoz Fernández +Sean Lee +Shukui Yang Stefan S. 
+Steve Desmond Sun Gengze <690388648@qq.com> Tim Zju <21651152@zju.edu.cn> Tõnis Tiigi +Wayne Song +Wang Jie Wang Ping Wang Yuexiao Wewang Xiaorenfine Wei Wu cizixs Ying Li +Yong Tang +Yu Chengxia Yu Peng Yu Peng +Yao Zaiyong Zhenkun Bi Zhu Kunjia diff --git a/fn/vendor/github.com/docker/docker/AUTHORS b/fn/vendor/github.com/docker/docker/AUTHORS index b2cd9ecbe..4f908078a 100644 --- a/fn/vendor/github.com/docker/docker/AUTHORS +++ b/fn/vendor/github.com/docker/docker/AUTHORS @@ -5,6 +5,7 @@ Aanand Prasad Aaron Davidson Aaron Feng Aaron Huslage +Aaron L. Xu Aaron Lehmann Aaron Welch Aaron.L.Xu @@ -37,6 +38,7 @@ Aidan Hobson Sayers AJ Bowen Ajey Charantimath ajneu +Akihiro Matsushima Akihiro Suda Akira Koyasu Akshay Karle @@ -66,6 +68,7 @@ Alexander Larsson Alexander Morozov Alexander Shopov Alexandre Beslic +Alexandre Garnier Alexandre González Alexandru Sfirlogea Alexey Guskov @@ -80,6 +83,7 @@ Allen Madsen Allen Sun almoehi Alvaro Saurin +Alvin Deng Alvin Richards amangoel Amen Belayneh @@ -180,6 +184,7 @@ Ben Severson Ben Toews Ben Wiklund Benjamin Atkin +Benjamin Boudreau Benoit Chesneau Bernerd Schaefer Bert Goethals @@ -193,6 +198,7 @@ Bingshen Wang Blake Geno Boaz Shuster bobby abbott +Boris Pruessmann Boshi Lian boucher Bouke Haarsma @@ -278,6 +284,7 @@ Chris Armstrong Chris Dituri Chris Fordham Chris Gavin +Chris Gibson Chris Khoo Chris McKinnel Chris Seto @@ -308,11 +315,13 @@ Clayton Coleman Clinton Kitson Coenraad Loubser Colin Dunklau +Colin Hebert Colin Rice Colin Walters Collin Guarino Colm Hally companycy +Corey Farrell Cory Forsyth cressie176 CrimsonGlory @@ -340,14 +349,15 @@ Dan Keder Dan Levy Dan McPherson Dan Stine -Dan Walsh Dan Williams Daniel Antlinger Daniel Exner Daniel Farrell Daniel Garcia Daniel Gasienica +Daniel Grunwell Daniel Hiltgen +Daniel J Walsh Daniel Menet Daniel Mizyrycki Daniel Nephin @@ -386,10 +396,11 @@ David M. Karr David Mackey David Mat David Mcanulty +David McKay David Pelaez David R. 
Jenni David Röthlisberger -David Sheets +David Sheets David Sissitka David Trott David Williamson @@ -403,6 +414,7 @@ decadent deed02392 Deng Guangxing Deni Bertovic +Denis Defreyne Denis Gladkikh Denis Ollier Dennis Chen @@ -466,6 +478,7 @@ Eivin Giske Skaaren Eivind Uggedal Elan Ruusamäe Elena Morozova +Elias Faxö Elias Probst Elijah Zupancic eluck @@ -479,6 +492,7 @@ Eohyung Lee epeterso Eric Barch Eric Curtin +Eric G. Noriega Eric Hanchrow Eric Lee Eric Myhre @@ -486,9 +500,9 @@ Eric Paris Eric Rafaloff Eric Rosenberg Eric Sage -Erica Windisch Eric Yang Eric-Olivier Lamey +Erica Windisch Erik Bray Erik Dubbelboer Erik Hollensbe @@ -516,6 +530,7 @@ Ewa Czechowska Eystein Måløy Stenberg ezbercih Ezra Silvera +Fabian Lauer Fabiano Rosas Fabio Falci Fabio Rapposelli @@ -535,6 +550,7 @@ Felix Hupfeld Felix Rabe Felix Ruess Felix Schindler +Fengtu Wang Ferenc Szabo Fernando Fero Volar @@ -543,6 +559,7 @@ Filipe Brandenburger Filipe Oliveira fl0yd Flavio Castelli +Flavio Crisciani FLGMwt Florian Florian Klein @@ -551,6 +568,7 @@ Florian Weingarten Florin Asavoaie fonglh fortinux +Foysal Iqbal Francesc Campoy Francis Chuang Francisco Carriedo @@ -578,9 +596,11 @@ Galen Sampson Gang Qiao Gareth Rushgrove Garrett Barboza +Gary Schaetz Gaurav gautam, prasanna Gaël PORTAY +Genki Takiuchi GennadySpb Geoffrey Bachelet George MacRorie @@ -590,6 +610,7 @@ Gereon Frey German DZ Gert van Valkenhoef Gerwim +Giampaolo Mancini Gianluca Borello Gildas Cuisinier gissehel @@ -605,10 +626,9 @@ Govinda Fichtner Grant Reaber Graydon Hoare Greg Fausak +Greg Pflaum Greg Thornton -grossws -grunny -gs11 +Grzegorz Jaśkiewicz Guilhem Lettron Guilherme Salgado Guillaume Dufour @@ -616,6 +636,7 @@ Guillaume J. 
Charmes guoxiuyan Gurjeet Singh Guruprasad +Gustav Sinder gwx296173 Günter Zöchbauer Hans Kristian Flaatten @@ -626,6 +647,7 @@ Harald Albers Harley Laue Harold Cooper Harry Zhang +Harshal Patil Harshal Patil He Simei He Xin @@ -649,7 +671,7 @@ huqun Huu Nguyen hyeongkyu.lee hyp3rdino -Hyzhou <1187766782@qq.com> +Hyzhou Zhy Ian Babrou Ian Bishop Ian Bull @@ -657,9 +679,11 @@ Ian Calvert Ian Campbell Ian Lee Ian Main +Ian Philpot Ian Truslove Iavael Icaro Seara +Ignacio Capurro Igor Dolzhikov Iliana Weller Ilkka Laukkanen @@ -675,6 +699,7 @@ Isao Jonas Ivan Babrou Ivan Fraixedes Ivan Grcic +Ivan Markin J Bruni J. Nunn Jack Danger Canty @@ -694,6 +719,7 @@ James Kyburz James Kyle James Lal James Mills +James Nesbitt James Nugent James Turnbull Jamie Hannaford @@ -747,10 +773,12 @@ Jeffrey Bolle Jeffrey Morgan Jeffrey van Gogh Jenny Gebske +Jeremy Chambers Jeremy Grosser Jeremy Price Jeremy Qian Jeremy Unruh +Jeremy Yallop Jeroen Jacobs Jesse Dearing Jesse Dubay @@ -764,10 +792,12 @@ jianbosun Jie Luo Jilles Oldenbeuving Jim Alateras +Jim Galasyn Jim Minter Jim Perrin Jimmy Cuadra Jimmy Puckett +Jimmy Song jimmyxian Jinsoo Park Jiri Popelka @@ -797,15 +827,19 @@ John Costa John Feminella John Gardiner Myers John Gossman +John Harris John Howard (VM) +John Laswell +John Maguire John Mulhausen John OBrien III John Starks John Stephens John Tims +John V. Martinez John Warwick John Willis -johnharris85 +Jon Johnson Jon Wedaman Jonas Pfenniger Jonathan A. 
Sternberg @@ -820,6 +854,7 @@ Jonathan Pares Jonathan Rudenberg Jonathan Stoppani Jonh Wendell +Joni Sar Joost Cassee Jordan Jordan Arentsen @@ -829,6 +864,7 @@ Jose Diaz-Gonzalez Joseph Anthony Pasquale Holsten Joseph Hager Joseph Kern +Joseph Rothrock Josh Josh Bodah Josh Chorlton @@ -847,6 +883,8 @@ Julien Barbier Julien Bisconti Julien Bordellier Julien Dubois +Julien Kassar +Julien Maitrehenry Julien Pervillé Julio Montes Jun-Ru Chang @@ -859,8 +897,9 @@ Justin Simonelis Justin Terry Justyn Temme Jyrki Puttonen -Jérôme Petazzoni +Jérôme Petazzoni Jörg Thalheim +K. Heller Kai Blin Kai Qiang Wu(Kennan) Kamil Domański @@ -872,6 +911,7 @@ Kareem Khazem kargakis Karl Grzeszczak Karol Duleba +Karthik Nayak Katie McLaughlin Kato Kazuyoshi Katrina Owen @@ -892,6 +932,7 @@ Kent Johnson Kevin "qwazerty" Houdebert Kevin Burke Kevin Clark +Kevin Feyrer Kevin J. Lynagh Kevin Jing Qiu Kevin Kern @@ -915,6 +956,7 @@ knappe Kohei Tsuruta Koichi Shiraishi Konrad Kleine +Konstantin Gribov Konstantin L Konstantin Pelykh Krasi Georgiev @@ -961,7 +1003,7 @@ Liam Macgillavry Liana Lo Liang Mingqiang Liang-Chi Hsieh -liaoqingwei +Liao Qingwei Lily Guo limsy Lin Lu @@ -994,24 +1036,24 @@ Luiz Svoboda Lukas Waslowski lukaspustina Lukasz Zajaczkowski -lukemarsden +Luke Marsden Lyn Lynda O'Leary Lénaïc Huard Ma Müller Ma Shimiao Mabin +Madhan Raj Mookkandy Madhav Puri Madhu Venugopal -Mageee <21521230.zju.edu.cn> +Mageee Mahesh Tiyyagura malnick Malte Janduda -manchoz Manfred Touron Manfred Zabarauskas +Manjunath A Kumatagi Mansi Nahar -mansinahar Manuel Meurer Manuel Woelker mapk0y @@ -1037,6 +1079,7 @@ Mark McKinstry Mark Milstein Mark Parker Mark West +Markan Patel Marko Mikulicic Marko Tibold Markus Fix @@ -1108,6 +1151,7 @@ Michael Käufl Michael Neale Michael Prokop Michael Scharf +Michael Spetsiotis Michael Stapelberg Michael Steinert Michael Thies @@ -1126,6 +1170,7 @@ Miguel Morales Mihai Borobocea Mihuleacc Sergiu Mike Brown +Mike Casas Mike Chelen Mike Danese Mike Dillon @@ 
-1202,8 +1247,10 @@ Nicolas Goy Nicolas Kaiser Nicolás Hock Isaza Nigel Poulton +Nik Nyby +Nikhil Chawla NikolaMandic -nikolas +Nikolas Garofil Nikolay Milovanov Nirmal Mehta Nishant Totla @@ -1269,8 +1316,8 @@ Peeyush Gupta Peggy Li Pei Su Penghan Wang +Per Weijnitz perhapszzy@sina.com -pestophagous Peter Bourgon Peter Braden Peter Choi @@ -1278,6 +1325,7 @@ Peter Dave Hello Peter Edge Peter Ericson Peter Esbensen +Peter Jaffe Peter Malmgren Peter Salvatore Peter Volpe @@ -1287,9 +1335,11 @@ Phil Phil Estes Phil Spitler Philip Monroe +Philipp Gillé Philipp Wahala Philipp Weissensteiner Phillip Alexander +phineas pidster Piergiuliano Bossi Pierre @@ -1303,8 +1353,10 @@ Porjo Poul Kjeldager Sørensen Pradeep Chhetri Prasanna Gautam +Pratik Karki Prayag Verma Przemek Hejman +Pure White pysqz qhuang Qiang Huang @@ -1332,10 +1384,12 @@ Recursive Madman Reficul Regan McCooey Remi Rampin +Remy Suen Renato Riccieri Santos Zannon resouer rgstephens Rhys Hiltner +Ricardo N Feliciano Rich Moyse Rich Seymour Richard @@ -1400,6 +1454,7 @@ Ryan Aslett Ryan Belgrave Ryan Detzel Ryan Fowler +Ryan Liu Ryan McLaughlin Ryan O'Donnell Ryan Seto @@ -1407,6 +1462,7 @@ Ryan Thomas Ryan Trauntvein Ryan Wallner Ryan Zhang +ryancooper7 RyanDeng Rémy Greinhofer s. rannou @@ -1439,7 +1495,6 @@ Satnam Singh satoru Satoshi Amemiya Satoshi Tagomori -scaleoutsean Scott Bessler Scott Collier Scott Johnston @@ -1448,6 +1503,7 @@ Scott Walls sdreyesg Sean Christopherson Sean Cronin +Sean Lee Sean McIntyre Sean OMeara Sean P. 
Kane @@ -1489,6 +1545,7 @@ Silas Sewell Silvan Jegen Simei He Simon Eskildsen +Simon Ferquel Simon Leinen Simon Taranto Sindhu S @@ -1517,6 +1574,7 @@ Stephen Crosby Stephen Day Stephen Drake Stephen Rust +Steve Desmond Steve Dougherty Steve Durrheimer Steve Francia @@ -1531,6 +1589,7 @@ Steven Taylor Subhajit Ghosh Sujith Haridasan Sun Gengze <690388648@qq.com> +Sunny Gogoi Suryakumar Sudar Sven Dowideit Swapnil Daingade @@ -1539,6 +1598,7 @@ Sylvain Bellemare Sébastien Sébastien Luttringer Sébastien Stormacq +Tabakhase Tadej Janež TAGOMORI Satoshi tang0th @@ -1638,6 +1698,7 @@ Tristan Carel Troy Denton Tyler Brock Tzu-Jung Lee +uhayate Ulysse Carion unknown vagrant @@ -1660,13 +1721,14 @@ VinayRaghavanKS Vincent Batts Vincent Bernat Vincent Bernat -Vincent Demeester +Vincent Demeester Vincent Giersch Vincent Mayers Vincent Woo Vinod Kulkarni Vishal Doshi Vishnu Kannan +Vitaly Ostrosablin Vitor Monteiro Vivek Agarwal Vivek Dasgupta @@ -1682,6 +1744,7 @@ waitingkuo Walter Leibbrandt Walter Stanish WANG Chao +Wang Jie Wang Long Wang Ping Wang Xing @@ -1689,6 +1752,7 @@ Wang Yuexiao Ward Vandewege WarheadsSE Wayne Chang +Wayne Song Wei Wu Wei-Ting Kuo weiyan @@ -1732,8 +1796,8 @@ Yahya YAMADA Tsuyoshi Yan Feng Yang Bai -yangshukui Yanqiang Miao +Yao Zaiyong Yasunori Mahata Yestin Sun Yi EungJun @@ -1746,10 +1810,11 @@ Yongzhi Pan yorkie You-Sheng Yang (楊有勝) Youcef YEKHLEF +Yu Changchun +Yu Chengxia Yu Peng Yuan Sun -yuchangchun -yuchengxia +Yuanhong Peng Yunxiang Huang Yurii Rashkovskii yuzou @@ -1785,4 +1850,5 @@ Zunayed Ali Álvaro Lázaro Átila Camurça Alves 尹吉峰 +徐俊杰 搏通 diff --git a/fn/vendor/github.com/docker/docker/CHANGELOG.md b/fn/vendor/github.com/docker/docker/CHANGELOG.md index 18668ce54..bbedc491e 100644 --- a/fn/vendor/github.com/docker/docker/CHANGELOG.md +++ b/fn/vendor/github.com/docker/docker/CHANGELOG.md @@ -190,7 +190,7 @@ be found. 
* Update runc to 54296cf40ad8143b62dbcaa1d90e520a2136ddfe [#31666](https://github.com/docker/docker/pull/31666) * Ignore cgroup2 mountpoints [opencontainers/runc#1266](https://github.com/opencontainers/runc/pull/1266) * Update containerd to 4ab9917febca54791c5f071a9d1f404867857fcc [#31662](https://github.com/docker/docker/pull/31662) [#31852](https://github.com/docker/docker/pull/31852) - * Register healtcheck service before calling restore() [docker/containerd#609](https://github.com/docker/containerd/pull/609) + * Register healthcheck service before calling restore() [docker/containerd#609](https://github.com/docker/containerd/pull/609) * Fix `docker exec` not working after unattended upgrades that reload apparmor profiles [#31773](https://github.com/docker/docker/pull/31773) * Fix unmounting layer without merge dir with Overlay2 [#31069](https://github.com/docker/docker/pull/31069) * Do not ignore "volume in use" errors when force-delete [#31450](https://github.com/docker/docker/pull/31450) @@ -1087,12 +1087,12 @@ installing docker, please make sure to update them accordingly. 
+ Add security options to `docker info` output [#21172](https://github.com/docker/docker/pull/21172) [#23520](https://github.com/docker/docker/pull/23520) + Add insecure registries to `docker info` output [#20410](https://github.com/docker/docker/pull/20410) + Extend Docker authorization with TLS user information [#21556](https://github.com/docker/docker/pull/21556) -+ devicemapper: expose Mininum Thin Pool Free Space through `docker info` [#21945](https://github.com/docker/docker/pull/21945) ++ devicemapper: expose Minimum Thin Pool Free Space through `docker info` [#21945](https://github.com/docker/docker/pull/21945) * API now returns a JSON object when an error occurs making it more consistent [#22880](https://github.com/docker/docker/pull/22880) - Prevent `docker run -i --restart` from hanging on exit [#22777](https://github.com/docker/docker/pull/22777) - Fix API/CLI discrepancy on hostname validation [#21641](https://github.com/docker/docker/pull/21641) - Fix discrepancy in the format of sizes in `stats` from HumanSize to BytesSize [#21773](https://github.com/docker/docker/pull/21773) -- authz: when request is denied return forbbiden exit code (403) [#22448](https://github.com/docker/docker/pull/22448) +- authz: when request is denied return forbidden exit code (403) [#22448](https://github.com/docker/docker/pull/22448) - Windows: fix tty-related displaying issues [#23878](https://github.com/docker/docker/pull/23878) ### Runtime @@ -1887,7 +1887,7 @@ by another client (#15489) #### Remote API -- Fix unmarshalling of Command and Entrypoint +- Fix unmarshaling of Command and Entrypoint - Set limit for minimum client version supported - Validate port specification - Return proper errors when attach/reattach fail @@ -2572,7 +2572,7 @@ With the ongoing changes to the networking and execution subsystems of docker te - Fix ADD caching issue with . 
prefixed path - Fix docker build on devicemapper by reverting sparse file tar option - Fix issue with file caching and prevent wrong cache hit -* Use same error handling while unmarshalling CMD and ENTRYPOINT +* Use same error handling while unmarshaling CMD and ENTRYPOINT #### Documentation diff --git a/fn/vendor/github.com/docker/docker/CONTRIBUTING.md b/fn/vendor/github.com/docker/docker/CONTRIBUTING.md index abf6c45a3..917214cd1 100644 --- a/fn/vendor/github.com/docker/docker/CONTRIBUTING.md +++ b/fn/vendor/github.com/docker/docker/CONTRIBUTING.md @@ -39,7 +39,7 @@ A great way to contribute to the project is to send a detailed report when you encounter an issue. We always appreciate a well-written, thorough bug report, and will thank you for it! -Check that [our issue database](https://github.com/docker/docker/issues) +Check that [our issue database](https://github.com/moby/moby/issues) doesn't already include that problem or suggestion before submitting an issue. If you find a match, you can use the "subscribe" button to get notified on updates. Do *not* leave random "+1" or "I have this too" comments, as they @@ -66,7 +66,7 @@ This section gives the experienced contributor some tips and guidelines. Not sure if that typo is worth a pull request? Found a bug and know how to fix it? Do it! We will appreciate it. Any significant improvement should be -documented as [a GitHub issue](https://github.com/docker/docker/issues) before +documented as [a GitHub issue](https://github.com/moby/moby/issues) before anybody starts working on it. We are always thrilled to receive pull requests. 
We do our best to process them diff --git a/fn/vendor/github.com/docker/docker/Dockerfile b/fn/vendor/github.com/docker/docker/Dockerfile index c52476c63..afc7f5f63 100644 --- a/fn/vendor/github.com/docker/docker/Dockerfile +++ b/fn/vendor/github.com/docker/docker/Dockerfile @@ -9,7 +9,7 @@ # docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash # # # Run the test suite: -# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# docker run -e DOCKER_GITCOMMIT=foo --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py # # # Publish a release: # docker run --privileged \ @@ -29,11 +29,6 @@ FROM debian:jessie ARG APT_MIRROR=deb.debian.org RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list -# Add zfs ppa -COPY keys/launchpad-ppa-zfs.asc /go/src/github.com/docker/docker/keys/ -RUN apt-key add /go/src/github.com/docker/docker/keys/launchpad-ppa-zfs.asc -RUN echo deb http://ppa.launchpad.net/zfs-native/stable/ubuntu trusty main > /etc/apt/sources.list.d/zfs.list - # Packaged dependencies RUN apt-get update && apt-get install -y \ apparmor \ @@ -45,7 +40,6 @@ RUN apt-get update && apt-get install -y \ bsdmainutils \ btrfs-tools \ build-essential \ - clang \ cmake \ createrepo \ curl \ @@ -57,13 +51,11 @@ RUN apt-get update && apt-get install -y \ less \ libapparmor-dev \ libcap-dev \ - libltdl-dev \ libnl-3-dev \ libprotobuf-c0-dev \ libprotobuf-dev \ libsystemd-journal-dev \ libtool \ - libzfs-dev \ mercurial \ net-tools \ pkg-config \ @@ -74,7 +66,6 @@ RUN apt-get update && apt-get install -y \ python-pip \ python-websocket \ tar \ - ubuntu-zfs \ vim \ vim-common \ xfsprogs \ @@ -97,17 +88,6 @@ RUN cd /usr/local/lvm2 \ && make install_device-mapper # See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL -# Configure the container for OSX cross compilation -ENV OSX_SDK MacOSX10.11.sdk -ENV OSX_CROSS_COMMIT a9317c18a3a457ca0a657f08cc4d0d43c6cf8953 
-RUN set -x \ - && export OSXCROSS_PATH="/osxcross" \ - && git clone https://github.com/tpoechtrager/osxcross.git $OSXCROSS_PATH \ - && ( cd $OSXCROSS_PATH && git checkout -q $OSX_CROSS_COMMIT) \ - && curl -sSL https://s3.dockerproject.org/darwin/v2/${OSX_SDK}.tar.xz -o "${OSXCROSS_PATH}/tarballs/${OSX_SDK}.tar.xz" \ - && UNATTENDED=yes OSX_VERSION_MIN=10.6 ${OSXCROSS_PATH}/build.sh -ENV PATH /osxcross/target/bin:$PATH - # Install seccomp: the version shipped upstream is too old ENV SECCOMP_VERSION 2.3.2 RUN set -x \ @@ -127,21 +107,13 @@ RUN set -x \ # IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines # will need updating, to avoid errors. Ping #docker-maintainers on IRC # with a heads-up. -ENV GO_VERSION 1.7.5 +ENV GO_VERSION 1.8.3 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \ | tar -xzC /usr/local ENV PATH /go/bin:/usr/local/go/bin:$PATH ENV GOPATH /go -# Compile Go for cross compilation -ENV DOCKER_CROSSPLATFORMS \ - linux/386 linux/arm \ - darwin/amd64 \ - freebsd/amd64 freebsd/386 freebsd/arm \ - windows/amd64 windows/386 \ - solaris/amd64 - # Dependency for golint ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ @@ -193,7 +165,7 @@ RUN set -x \ && rm -rf "$GOPATH" # Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT 4a08d04aef0595322e1b5ac7c52f28a931da85a5 +ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef # To run integration tests docker-pycreds is required. 
# Before running the integration tests conftest.py is # loaded which results in loads auth.py that @@ -222,30 +194,31 @@ RUN useradd --create-home --gid docker unprivilegeduser VOLUME /var/lib/docker WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV DOCKER_BUILDTAGS apparmor seccomp selinux # Let us use a .bashrc file RUN ln -sfv $PWD/.bashrc ~/.bashrc # Add integration helps to bashrc RUN echo "source $PWD/hack/make/.integration-test-helpers" >> /etc/bash.bashrc -# Register Docker's bash completion. -RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker - # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ - buildpack-deps:jessie@sha256:25785f89240fbcdd8a74bdaf30dd5599a9523882c6dfc567f2e9ef7cf6f79db6 \ - busybox:latest@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 \ - debian:jessie@sha256:f968f10b4b523737e253a97eac59b0d1420b5c19b69928d35801a6373ffe330e \ - hello-world:latest@sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7 -# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + buildpack-deps:jessie@sha256:85b379ec16065e4fe4127eb1c5fb1bcc03c559bd36dbb2e22ff496de55925fa6 \ + busybox:latest@sha256:32f093055929dbc23dec4d03e09dfe971f5973a9ca5cf059cbfb644c206aa83f \ + debian:jessie@sha256:72f784399fd2719b4cb4e16ef8e369a39dc67f53d978cd3e2e7bf4e502c7b793 \ + hello-world:latest@sha256:c5515758d4c5e1e838e9cd307f6c6a0d620b5e07e6f927b07d05f6d12a1ac8d7 +# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) -# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Install tomlv, vndr, runc, containerd, tini, docker-proxy dockercli # 
Please edit hack/dockerfile/install-binaries.sh to update them. COPY hack/dockerfile/binaries-commits /tmp/binaries-commits COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh -RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy bindata +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli +ENV PATH=/usr/local/cli:$PATH + +# Activate bash completion if mounted with DOCKER_BASH_COMPLETION_PATH +RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] diff --git a/fn/vendor/github.com/docker/docker/Dockerfile.aarch64 b/fn/vendor/github.com/docker/docker/Dockerfile.aarch64 index a8ddf9c85..7a8f5f793 100644 --- a/fn/vendor/github.com/docker/docker/Dockerfile.aarch64 +++ b/fn/vendor/github.com/docker/docker/Dockerfile.aarch64 @@ -37,7 +37,6 @@ RUN apt-get update && apt-get install -y \ libapparmor-dev \ libc6-dev \ libcap-dev \ - libltdl-dev \ libsystemd-dev \ libyaml-dev \ mercurial \ @@ -94,11 +93,11 @@ RUN set -x \ && rm -rf "$SECCOMP_PATH" # Install Go -# We don't have official binary golang 1.7.5 tarballs for ARM64, eigher for Go or +# We don't have official binary golang 1.7.5 tarballs for ARM64, either for Go or # bootstrap, so we use golang-go (1.6) as bootstrap to build Go from source code. # We don't use the official ARMv6 released binaries as a GOROOT_BOOTSTRAP, because # not all ARM64 platforms support 32-bit mode. 32-bit mode is optional for ARMv8. 
-ENV GO_VERSION 1.7.5 +ENV GO_VERSION 1.8.3 RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ && cd /usr/src/go/src \ && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash @@ -142,7 +141,7 @@ RUN set -x \ && rm -rf "$GOPATH" # Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT 4a08d04aef0595322e1b5ac7c52f28a931da85a5 +ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef # Before running the integration tests conftest.py is # loaded which results in loads auth.py that # imports the docker-pycreds module. @@ -171,7 +170,7 @@ RUN useradd --create-home --gid docker unprivilegeduser VOLUME /var/lib/docker WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV DOCKER_BUILDTAGS apparmor seccomp selinux # Let us use a .bashrc file RUN ln -sfv $PWD/.bashrc ~/.bashrc @@ -182,17 +181,18 @@ RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ - aarch64/buildpack-deps:jessie@sha256:6aa1d6910791b7ac78265fd0798e5abd6cb3f27ae992f6f960f6c303ec9535f2 \ - aarch64/busybox:latest@sha256:b23a6a37cf269dff6e46d2473b6e227afa42b037e6d23435f1d2bc40fc8c2828 \ - aarch64/debian:jessie@sha256:4be74a41a7c70ebe887b634b11ffe516cf4fcd56864a54941e56bb49883c3170 \ - aarch64/hello-world:latest@sha256:65a4a158587b307bb02db4de41b836addb0c35175bdc801367b1ac1ddeb9afda -# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + aarch64/buildpack-deps:jessie@sha256:107f4a96837ed89c493fc205cd28508ed0b6b680b4bf3e514e9f0fa0f6667b77 \ + 
aarch64/busybox:latest@sha256:5a06b8b2fdf22dd1f4085c6c3efd23ee99af01b2d668d286bc4be6d8baa10efb \ + aarch64/debian:jessie@sha256:e6f90b568631705bd5cb27490977378ba762792b38d47c91c4da7a539f63079a \ + aarch64/hello-world:latest@sha256:bd1722550b97668b23ede297abf824d4855f4d9f600dab7b4db1a963dae7ec9e +# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) # Install tomlv, vndr, runc, containerd, tini, docker-proxy # Please edit hack/dockerfile/install-binaries.sh to update them. COPY hack/dockerfile/binaries-commits /tmp/binaries-commits COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh -RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli +ENV PATH=/usr/local/cli:$PATH # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] diff --git a/fn/vendor/github.com/docker/docker/Dockerfile.armhf b/fn/vendor/github.com/docker/docker/Dockerfile.armhf index d91ba9fe9..6103c5a3a 100644 --- a/fn/vendor/github.com/docker/docker/Dockerfile.armhf +++ b/fn/vendor/github.com/docker/docker/Dockerfile.armhf @@ -39,7 +39,6 @@ RUN apt-get update && apt-get install -y \ net-tools \ libapparmor-dev \ libcap-dev \ - libltdl-dev \ libsystemd-journal-dev \ libtool \ mercurial \ @@ -71,7 +70,7 @@ RUN cd /usr/local/lvm2 \ # See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL # Install Go -ENV GO_VERSION 1.7.5 +ENV GO_VERSION 1.8.3 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" \ | tar -xzC /usr/local ENV PATH /go/bin:/usr/local/go/bin:$PATH @@ -137,7 +136,7 @@ RUN set -x \ && rm -rf "$GOPATH" # Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef RUN git clone 
https://github.com/docker/docker-py.git /docker-py \ && cd /docker-py \ && git checkout -q $DOCKER_PY_COMMIT \ @@ -152,7 +151,7 @@ RUN useradd --create-home --gid docker unprivilegeduser VOLUME /var/lib/docker WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV DOCKER_BUILDTAGS apparmor seccomp selinux # Let us use a .bashrc file RUN ln -sfv $PWD/.bashrc ~/.bashrc @@ -163,17 +162,18 @@ RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ - armhf/buildpack-deps:jessie@sha256:ca6cce8e5bf5c952129889b5cc15cd6aa8d995d77e55e3749bbaadae50e476cb \ - armhf/busybox:latest@sha256:d98a7343ac750ffe387e3d514f8521ba69846c216778919b01414b8617cfb3d4 \ - armhf/debian:jessie@sha256:4a2187483f04a84f9830910fe3581d69b3c985cc045d9f01d8e2f3795b28107b \ - armhf/hello-world:latest@sha256:161dcecea0225975b2ad5f768058212c1e0d39e8211098666ffa1ac74cfb7791 -# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + armhf/buildpack-deps:jessie@sha256:eb2dad77ef53e88d94c3c83862d315c806ea1ca49b6e74f4db362381365ce489 \ + armhf/busybox:latest@sha256:016a1e149d2acc2a3789a160dfa60ce870794eea27ad5e96f7a101970e5e1689 \ + armhf/debian:jessie@sha256:ac59fa18b28d0ef751eabb5ba4c4b5a9063f99398bae2f70495aa8ed6139b577 \ + armhf/hello-world:latest@sha256:9701edc932223a66e49dd6c894a11db8c2cf4eccd1414f1ec105a623bf16b426 +# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) # Install tomlv, vndr, runc, containerd, tini, docker-proxy # Please edit hack/dockerfile/install-binaries.sh to update them. 
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh -RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli +ENV PATH=/usr/local/cli:$PATH ENTRYPOINT ["hack/dind"] diff --git a/fn/vendor/github.com/docker/docker/Dockerfile.ppc64le b/fn/vendor/github.com/docker/docker/Dockerfile.ppc64le index c079e6c00..e64153800 100644 --- a/fn/vendor/github.com/docker/docker/Dockerfile.ppc64le +++ b/fn/vendor/github.com/docker/docker/Dockerfile.ppc64le @@ -40,7 +40,6 @@ RUN apt-get update && apt-get install -y \ net-tools \ libapparmor-dev \ libcap-dev \ - libltdl-dev \ libsystemd-journal-dev \ libtool \ mercurial \ @@ -95,7 +94,7 @@ RUN set -x \ # Install Go # NOTE: official ppc64le go binaries weren't available until go 1.6.4 and 1.7.4 -ENV GO_VERSION 1.7.5 +ENV GO_VERSION 1.8.3 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" \ | tar -xzC /usr/local @@ -143,7 +142,7 @@ RUN set -x \ && rm -rf "$GOPATH" # Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef RUN git clone https://github.com/docker/docker-py.git /docker-py \ && cd /docker-py \ && git checkout -q $DOCKER_PY_COMMIT \ @@ -158,7 +157,7 @@ RUN useradd --create-home --gid docker unprivilegeduser VOLUME /var/lib/docker WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV DOCKER_BUILDTAGS apparmor seccomp selinux # Let us use a .bashrc file RUN ln -sfv $PWD/.bashrc ~/.bashrc @@ -169,17 +168,18 @@ RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ RUN 
./contrib/download-frozen-image-v2.sh /docker-frozen-images \ - ppc64le/buildpack-deps:jessie@sha256:902bfe4ef1389f94d143d64516dd50a2de75bca2e66d4a44b1d73f63ddf05dda \ - ppc64le/busybox:latest@sha256:38bb82085248d5a3c24bd7a5dc146f2f2c191e189da0441f1c2ca560e3fc6f1b \ - ppc64le/debian:jessie@sha256:412845f51b6ab662afba71bc7a716e20fdb9b84f185d180d4c7504f8a75c4f91 \ - ppc64le/hello-world:latest@sha256:186a40a9a02ca26df0b6c8acdfb8ac2f3ae6678996a838f977e57fac9d963974 -# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + ppc64le/buildpack-deps:jessie@sha256:1a2f2d2cc8738f14b336aeffc3503b5c9dedf9e1f26c7313cb4999534ad4716f \ + ppc64le/busybox:latest@sha256:54f34c83adfab20cf0e630d879e210f07b0062cd6caaf16346a61396d50e7584 \ + ppc64le/debian:jessie@sha256:ea8c5b105e3790f075145b40e4be1e4488c9f33f55e6cc45182047b80a68f892 \ + ppc64le/hello-world:latest@sha256:7d57adf137665f748956c86089320710b66d08584db3500ed98f4bb3da637c2d +# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) # Install tomlv, vndr, runc, containerd, tini, docker-proxy # Please edit hack/dockerfile/install-binaries.sh to update them. 
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh -RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli +ENV PATH=/usr/local/cli:$PATH # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] diff --git a/fn/vendor/github.com/docker/docker/Dockerfile.s390x b/fn/vendor/github.com/docker/docker/Dockerfile.s390x index a47e4463f..c69da3c96 100644 --- a/fn/vendor/github.com/docker/docker/Dockerfile.s390x +++ b/fn/vendor/github.com/docker/docker/Dockerfile.s390x @@ -36,7 +36,6 @@ RUN apt-get update && apt-get install -y \ net-tools \ libapparmor-dev \ libcap-dev \ - libltdl-dev \ libsystemd-journal-dev \ libtool \ mercurial \ @@ -88,7 +87,7 @@ RUN cd /usr/local/lvm2 \ && make install_device-mapper # See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL -ENV GO_VERSION 1.7.5 +ENV GO_VERSION 1.8.3 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" \ | tar -xzC /usr/local @@ -136,7 +135,7 @@ RUN set -x \ && rm -rf "$GOPATH" # Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef RUN git clone https://github.com/docker/docker-py.git /docker-py \ && cd /docker-py \ && git checkout -q $DOCKER_PY_COMMIT \ @@ -162,17 +161,18 @@ RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ - s390x/buildpack-deps:jessie@sha256:4d1381224acaca6c4bfe3604de3af6972083a8558a99672cb6989c7541780099 \ - 
s390x/busybox:latest@sha256:dd61522c983884a66ed72d60301925889028c6d2d5e0220a8fe1d9b4c6a4f01b \ - s390x/debian:jessie@sha256:b74c863400909eff3c5e196cac9bfd1f6333ce47aae6a38398d87d5875da170a \ - s390x/hello-world:latest@sha256:780d80b3a7677c3788c0d5cd9168281320c8d4a6d9183892d8ee5cdd610f5699 -# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + s390x/buildpack-deps:jessie@sha256:552dec28146e4d2591fc0309aebdbac9e4fb1f335d90c70a14bbf72fb8bb1be5 \ + s390x/busybox:latest@sha256:e32f40c39ca596a4317392bd32809bb188c4ae5864ea827c3219c75c50069964 \ + s390x/debian:jessie@sha256:6994e3ffa5a1dabea09d536f350b3ed2715292cb469417c42a82b70fcbff7d32 \ + s390x/hello-world:latest@sha256:602db500fee63934292260e65c0c528128ad1c1c7c6497f95bbbac7d4d5312f1 +# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) # Install tomlv, vndr, runc, containerd, tini, docker-proxy # Please edit hack/dockerfile/install-binaries.sh to update them. COPY hack/dockerfile/binaries-commits /tmp/binaries-commits COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh -RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli +ENV PATH=/usr/local/cli:$PATH # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] diff --git a/fn/vendor/github.com/docker/docker/Dockerfile.simple b/fn/vendor/github.com/docker/docker/Dockerfile.simple index 248f88de3..4edc08f9e 100644 --- a/fn/vendor/github.com/docker/docker/Dockerfile.simple +++ b/fn/vendor/github.com/docker/docker/Dockerfile.simple @@ -53,7 +53,7 @@ RUN set -x \ # IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines # will need updating, to avoid errors. Ping #docker-maintainers on IRC # with a heads-up. 
-ENV GO_VERSION 1.7.5 +ENV GO_VERSION 1.8.3 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \ | tar -xzC /usr/local ENV PATH /go/bin:/usr/local/go/bin:$PATH @@ -64,7 +64,8 @@ ENV CGO_LDFLAGS -L/lib # Please edit hack/dockerfile/install-binaries.sh to update them. COPY hack/dockerfile/binaries-commits /tmp/binaries-commits COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh -RUN /tmp/install-binaries.sh runc containerd tini proxy +RUN /tmp/install-binaries.sh runc containerd tini proxy dockercli +ENV PATH=/usr/local/cli:$PATH ENV AUTO_GOPATH 1 WORKDIR /usr/src/docker diff --git a/fn/vendor/github.com/docker/docker/Dockerfile.solaris b/fn/vendor/github.com/docker/docker/Dockerfile.solaris index bb342e5e6..4198b138b 100644 --- a/fn/vendor/github.com/docker/docker/Dockerfile.solaris +++ b/fn/vendor/github.com/docker/docker/Dockerfile.solaris @@ -15,6 +15,5 @@ RUN pkg install --accept \ developer/gcc-* ENV GOPATH /go/:/usr/lib/gocode/1.5/ -ENV DOCKER_CROSSPLATFORMS solaris/amd64 WORKDIR /go/src/github.com/docker/docker COPY . /go/src/github.com/docker/docker diff --git a/fn/vendor/github.com/docker/docker/Dockerfile.windows b/fn/vendor/github.com/docker/docker/Dockerfile.windows index fae1b8192..8f8ee609c 100644 --- a/fn/vendor/github.com/docker/docker/Dockerfile.windows +++ b/fn/vendor/github.com/docker/docker/Dockerfile.windows @@ -161,7 +161,7 @@ SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPref # Environment variable notes: # - GO_VERSION must be consistent with 'Dockerfile' used by Linux. # - FROM_DOCKERFILE is used for detection of building within a container. 
-ENV GO_VERSION=1.7.5 ` +ENV GO_VERSION=1.8.3 ` GIT_VERSION=2.11.1 ` GOPATH=C:\go ` FROM_DOCKERFILE=1 diff --git a/fn/vendor/github.com/docker/docker/MAINTAINERS b/fn/vendor/github.com/docker/docker/MAINTAINERS index fe2ef0edc..dc4485da1 100644 --- a/fn/vendor/github.com/docker/docker/MAINTAINERS +++ b/fn/vendor/github.com/docker/docker/MAINTAINERS @@ -312,7 +312,7 @@ [people.icecrime] Name = "Arnaud Porterie" - Email = "arnaud@docker.com" + Email = "icecrime@gmail.com" GitHub = "icecrime" [people.jamtur01] diff --git a/fn/vendor/github.com/docker/docker/Makefile b/fn/vendor/github.com/docker/docker/Makefile index 78be642b2..0d99606cc 100644 --- a/fn/vendor/github.com/docker/docker/Makefile +++ b/fn/vendor/github.com/docker/docker/Makefile @@ -1,4 +1,4 @@ -.PHONY: all binary build cross deb help init-go-pkg-cache install manpages rpm run shell test test-docker-py test-integration-cli test-unit tgz validate win +.PHONY: all binary dynbinary build cross deb help init-go-pkg-cache install manpages rpm run shell test test-docker-py test-integration-cli test-unit tgz validate win # set the graph driver as the current graphdriver if not set DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //')) @@ -7,7 +7,7 @@ DOCKER_INCREMENTAL_BINARY := $(if $(DOCKER_INCREMENTAL_BINARY),$(DOCKER_INCREMEN export DOCKER_INCREMENTAL_BINARY # get OS/Arch of docker engine -DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH:-$$DOCKER_CLIENT_OSARCH}') +DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH}') DOCKERFILE := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKERFILE}') DOCKER_GITCOMMIT := $(shell git rev-parse --short HEAD || echo unsupported) @@ -17,13 +17,15 @@ export DOCKER_GITCOMMIT # to allow things like `make KEEPBUNDLE=1 binary` easily # `project/PACKAGERS.md` have 
some limited documentation of some of these DOCKER_ENVS := \ + -e DOCKER_CROSSPLATFORMS \ -e BUILD_APT_MIRROR \ -e BUILDFLAGS \ -e KEEPBUNDLE \ -e DOCKER_BUILD_ARGS \ -e DOCKER_BUILD_GOGC \ -e DOCKER_BUILD_PKGS \ - -e DOCKER_CROSSPLATFORMS \ + -e DOCKER_BASH_COMPLETION_PATH \ + -e DOCKER_CLI_PATH \ -e DOCKER_DEBUG \ -e DOCKER_EXPERIMENTAL \ -e DOCKER_GITCOMMIT \ @@ -63,7 +65,9 @@ PKGCACHE_MAP := gopath:/go/pkg goroot-linux_amd64:/usr/local/go/pkg/linux_amd64 PKGCACHE_VOLROOT := dockerdev-go-pkg-cache PKGCACHE_VOL := $(if $(PKGCACHE_DIR),$(CURDIR)/$(PKGCACHE_DIR)/,$(PKGCACHE_VOLROOT)-) DOCKER_MOUNT_PKGCACHE := $(if $(DOCKER_INCREMENTAL_BINARY),$(shell echo $(PKGCACHE_MAP) | sed -E 's@([^ ]*)@-v "$(PKGCACHE_VOL)\1"@g'),) -DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_PKGCACHE) +DOCKER_MOUNT_CLI := $(if $(DOCKER_CLI_PATH),-v $(shell dirname $(DOCKER_CLI_PATH)):/usr/local/cli,) +DOCKER_MOUNT_BASH_COMPLETION := $(if $(DOCKER_BASH_COMPLETION_PATH),-v $(shell dirname $(DOCKER_BASH_COMPLETION_PATH)):/usr/local/completion/bash,) +DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_PKGCACHE) $(DOCKER_MOUNT_CLI) $(DOCKER_MOUNT_BASH_COMPLETION) GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g") @@ -79,6 +83,11 @@ SWAGGER_DOCS_PORT ?= 9000 INTEGRATION_CLI_MASTER_IMAGE := $(if $(INTEGRATION_CLI_MASTER_IMAGE), $(INTEGRATION_CLI_MASTER_IMAGE), integration-cli-master) INTEGRATION_CLI_WORKER_IMAGE := $(if $(INTEGRATION_CLI_WORKER_IMAGE), $(INTEGRATION_CLI_WORKER_IMAGE), integration-cli-worker) +define \n + + +endef + # if this session isn't interactive, then we don't want to allocate a # TTY, which would fail, but if it is interactive, we do want to attach # so that the user can send e.g. ^C through. 
@@ -97,7 +106,11 @@ all: build ## validate all checks, build linux binaries, run all tests\ncross bu binary: build ## build the linux binaries $(DOCKER_RUN_DOCKER) hack/make.sh binary +dynbinary: build ## build the linux dynbinaries + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary + build: bundles init-go-pkg-cache + $(warning The docker client CLI has moved to github.com/docker/cli. By default, it is built from the git sha specified in hack/dockerfile/binaries-commits. For a dev-test cycle involving the CLI, run:${\n} DOCKER_CLI_PATH=/host/path/to/cli/binary make shell ${\n} then change the cli and compile into a binary at the same location.${\n}) docker build ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" . bundles: @@ -126,12 +139,6 @@ init-go-pkg-cache: install: ## install the linux binaries KEEPBUNDLE=1 hack/make.sh install-binary -manpages: ## Generate man pages from go source and markdown - docker build ${DOCKER_BUILD_ARGS} -t docker-manpage-dev -f "man/$(DOCKERFILE)" ./man - docker run --rm \ - -v $(PWD):/go/src/github.com/docker/docker/ \ - docker-manpage-dev - rpm: build ## build the rpm packages $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-rpm @@ -141,9 +148,6 @@ run: build ## run the docker daemon in a container shell: build ## start a shell inside the build env $(DOCKER_RUN_DOCKER) bash -yaml-docs-gen: build ## generate documentation YAML files consumed by docs repo - $(DOCKER_RUN_DOCKER) sh -c 'hack/make.sh yaml-docs-generator && ( root=$$(pwd); cd bundles/latest/yaml-docs-generator; mkdir docs; ./yaml-docs-generator --root $${root} --target $$(pwd)/docs )' - test: build ## run the unit, integration and docker-py tests $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary cross test-unit test-integration-cli test-docker-py @@ -182,7 +186,7 @@ swagger-docs: ## preview the API documentation bfirsh/redoc:1.6.2 build-integration-cli-on-swarm: build ## build images and binary for running integration-cli on Swarm in parallel - 
@echo "Building hack/integration-cli-on-swarm" + @echo "Building hack/integration-cli-on-swarm (if build fails, please refer to hack/integration-cli-on-swarm/README.md)" go build -o ./hack/integration-cli-on-swarm/integration-cli-on-swarm ./hack/integration-cli-on-swarm/host @echo "Building $(INTEGRATION_CLI_MASTER_IMAGE)" docker build -t $(INTEGRATION_CLI_MASTER_IMAGE) hack/integration-cli-on-swarm/agent diff --git a/fn/vendor/github.com/docker/docker/README.md b/fn/vendor/github.com/docker/docker/README.md index 2e91559c5..f768ff844 100644 --- a/fn/vendor/github.com/docker/docker/README.md +++ b/fn/vendor/github.com/docker/docker/README.md @@ -1,270 +1,80 @@ -Docker: the container engine [![Release](https://img.shields.io/github/release/docker/docker.svg)](https://github.com/docker/docker/releases/latest) -============================ +### Docker users, see [Moby and Docker](https://mobyproject.org/#moby-and-docker) to clarify the relationship between the projects -Docker is an open source project to pack, ship and run any application -as a lightweight container. +### Docker maintainers and contributors, see [Transitioning to Moby](#transitioning-to-moby) for more details -Docker containers are both *hardware-agnostic* and *platform-agnostic*. -This means they can run anywhere, from your laptop to the largest -cloud compute instance and everything in between - and they don't require -you to use a particular language, framework or packaging system. That -makes them great building blocks for deploying and scaling web apps, -databases, and backend services without depending on a particular stack -or provider. +The Moby Project +================ -Docker began as an open-source implementation of the deployment engine which -powered [dotCloud](http://web.archive.org/web/20130530031104/https://www.dotcloud.com/), -a popular Platform-as-a-Service. 
It benefits directly from the experience -accumulated over several years of large-scale operation and support of hundreds -of thousands of applications and databases. +![Moby Project logo](docs/static_files/moby-project-logo.png "The Moby Project") -![Docker logo](docs/static_files/docker-logo-compressed.png "Docker") +Moby is an open-source project created by Docker to advance the software containerization movement. +It provides a “Lego set” of dozens of components, the framework for assembling them into custom container-based systems, and a place for all container enthusiasts to experiment and exchange ideas. -## Security Disclosure +# Moby -Security is very important to us. If you have any issue regarding security, -please disclose the information responsibly by sending an email to -security@docker.com and not by creating a GitHub issue. +## Overview -## Better than VMs +At the core of Moby is a framework to assemble specialized container systems. +It provides: -A common method for distributing applications and sandboxing their -execution is to use virtual machines, or VMs. Typical VM formats are -VMware's vmdk, Oracle VirtualBox's vdi, and Amazon EC2's ami. In theory -these formats should allow every developer to automatically package -their application into a "machine" for easy distribution and deployment. -In practice, that almost never happens, for a few reasons: +- A library of containerized components for all vital aspects of a container system: OS, container runtime, orchestration, infrastructure management, networking, storage, security, build, image distribution, etc. +- Tools to assemble the components into runnable artifacts for a variety of platforms and architectures: bare metal (both x86 and Arm); executables for Linux, Mac and Windows; VM images for popular cloud and virtualization providers. +- A set of reference assemblies which can be used as-is, modified, or used as inspiration to create your own. 
- * *Size*: VMs are very large which makes them impractical to store - and transfer. - * *Performance*: running VMs consumes significant CPU and memory, - which makes them impractical in many scenarios, for example local - development of multi-tier applications, and large-scale deployment - of cpu and memory-intensive applications on large numbers of - machines. - * *Portability*: competing VM environments don't play well with each - other. Although conversion tools do exist, they are limited and - add even more overhead. - * *Hardware-centric*: VMs were designed with machine operators in - mind, not software developers. As a result, they offer very - limited tooling for what developers need most: building, testing - and running their software. For example, VMs offer no facilities - for application versioning, monitoring, configuration, logging or - service discovery. +All Moby components are containers, so creating new components is as easy as building a new OCI-compatible container. -By contrast, Docker relies on a different sandboxing method known as -*containerization*. Unlike traditional virtualization, containerization -takes place at the kernel level. Most modern operating system kernels -now support the primitives necessary for containerization, including -Linux with [openvz](https://openvz.org), -[vserver](http://linux-vserver.org) and more recently -[lxc](https://linuxcontainers.org/), Solaris with -[zones](https://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc), -and FreeBSD with -[Jails](https://www.freebsd.org/doc/handbook/jails.html). +## Principles -Docker builds on top of these low-level primitives to offer developers a -portable format and runtime environment that solves all four problems. -Docker containers are small (and their transfer can be optimized with -layers), they have basically zero memory and cpu overhead, they are -completely portable, and are designed from the ground up with an -application-centric design. 
+Moby is an open project guided by strong principles, but modular, flexible and without too strong an opinion on user experience, so it is open to the community to help set its direction. +The guiding principles are: -Perhaps best of all, because Docker operates at the OS level, it can still be -run inside a VM! +- Batteries included but swappable: Moby includes enough components to build fully featured container system, but its modular architecture ensures that most of the components can be swapped by different implementations. +- Usable security: Moby will provide secure defaults without compromising usability. +- Container centric: Moby is built with containers, for running containers. -## Plays well with others +With Moby, you should be able to describe all the components of your distributed application, from the high-level configuration files down to the kernel you would like to use and build and deploy it easily. -Docker does not require you to buy into a particular programming -language, framework, packaging system, or configuration language. +Moby uses [containerd](https://github.com/containerd/containerd) as the default container runtime. -Is your application a Unix process? Does it use files, tcp connections, -environment variables, standard Unix streams and command-line arguments -as inputs and outputs? Then Docker can run it. +## Audience -Can your application's build be expressed as a sequence of such -commands? Then Docker can build it. +Moby is recommended for anyone who wants to assemble a container-based system. 
This includes: -## Escape dependency hell +- Hackers who want to customize or patch their Docker build +- System engineers or integrators building a container system +- Infrastructure providers looking to adapt existing container systems to their environment +- Container enthusiasts who want to experiment with the latest container tech +- Open-source developers looking to test their project in a variety of different systems +- Anyone curious about Docker internals and how it’s built -A common problem for developers is the difficulty of managing all -their application's dependencies in a simple and automated way. +Moby is NOT recommended for: -This is usually difficult for several reasons: +- Application developers looking for an easy way to run their applications in containers. We recommend Docker CE instead. +- Enterprise IT and development teams looking for a ready-to-use, commercially supported container platform. We recommend Docker EE instead. +- Anyone curious about containers and looking for an easy way to learn. We recommend the docker.com website instead. - * *Cross-platform dependencies*. Modern applications often depend on - a combination of system libraries and binaries, language-specific - packages, framework-specific modules, internal components - developed for another project, etc. These dependencies live in - different "worlds" and require different tools - these tools - typically don't work well with each other, requiring awkward - custom integrations. +# Transitioning to Moby - * *Conflicting dependencies*. Different applications may depend on - different versions of the same dependency. Packaging tools handle - these situations with various degrees of ease - but they all - handle them in different and incompatible ways, which again forces - the developer to do extra work. +Docker is transitioning all of its open source collaborations to the Moby project going forward. +During the transition, all open source activity should continue as usual. 
- * *Custom dependencies*. A developer may need to prepare a custom - version of their application's dependency. Some packaging systems - can handle custom versions of a dependency, others can't - and all - of them handle it differently. +We are proposing the following list of changes: +- splitting up the engine into more open components +- removing the docker UI, SDK etc to keep them in the Docker org +- clarifying that the project is not limited to the engine, but to the assembly of all the individual components of the Docker platform +- open-source new tools & components which we currently use to assemble the Docker product, but could benefit the community +- defining an open, community-centric governance inspired by the Fedora project (a very successful example of balancing the needs of the community with the constraints of the primary corporate sponsor) -Docker solves the problem of dependency hell by giving developers a simple -way to express *all* their application's dependencies in one place, while -streamlining the process of assembling them. If this makes you think of -[XKCD 927](https://xkcd.com/927/), don't worry. Docker doesn't -*replace* your favorite packaging systems. It simply orchestrates -their use in a simple and repeatable way. How does it do that? With -layers. +----- -Docker defines a build as running a sequence of Unix commands, one -after the other, in the same container. Build commands modify the -contents of the container (usually by installing new files on the -filesystem), the next command modifies it some more, etc. Since each -build command inherits the result of the previous commands, the -*order* in which the commands are executed expresses *dependencies*. 
- -Here's a typical Docker build process: - -```bash -FROM ubuntu:12.04 -RUN apt-get update && apt-get install -y python python-pip curl -RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv -RUN cd helloflask-master && pip install -r requirements.txt -``` - -Note that Docker doesn't care *how* dependencies are built - as long -as they can be built by running a Unix command in a container. - - -Getting started -=============== - -Docker can be installed either on your computer for building applications or -on servers for running them. To get started, [check out the installation -instructions in the -documentation](https://docs.docker.com/engine/installation/). - -Usage examples -============== - -Docker can be used to run short-lived commands, long-running daemons -(app servers, databases, etc.), interactive shell sessions, etc. - -You can find a [list of real-world -examples](https://docs.docker.com/engine/examples/) in the -documentation. - -Under the hood --------------- - -Under the hood, Docker is built on the following components: - -* The - [cgroups](https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt) - and - [namespaces](http://man7.org/linux/man-pages/man7/namespaces.7.html) - capabilities of the Linux kernel -* The [Go](https://golang.org) programming language -* The [Docker Image Specification](https://github.com/docker/docker/blob/master/image/spec/v1.md) -* The [Libcontainer Specification](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md) - -Contributing to Docker [![GoDoc](https://godoc.org/github.com/docker/docker?status.svg)](https://godoc.org/github.com/docker/docker) -====================== - -| **Master** (Linux) | **Experimental** (Linux) | **Windows** | **FreeBSD** | -|------------------|----------------------|---------|---------| -| [![Jenkins Build 
Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/) | [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/) | - -Want to hack on Docker? Awesome! We have [instructions to help you get -started contributing code or documentation](https://docs.docker.com/opensource/project/who-written-for/). - -These instructions are probably not perfect, please let us know if anything -feels wrong or incomplete. Better yet, submit a PR and improve them yourself. - -Getting the development builds -============================== - -Want to run Docker from a master build? You can download -master builds at [master.dockerproject.org](https://master.dockerproject.org). -They are updated with each commit merged into the master branch. - -Don't know how to use that super cool new feature in the master build? Check -out the master docs at -[docs.master.dockerproject.org](http://docs.master.dockerproject.org). - -How the project is run -====================== - -Docker is a very, very active project. If you want to learn more about how it is run, -or want to get more involved, the best place to start is [the project directory](https://github.com/docker/docker/tree/master/project). - -We are always open to suggestions on process improvements, and are always looking for more maintainers. - -### Talking to other Docker users and contributors - - - - - - - - - - - - - - - - - - - - - - - - -
Internet Relay Chat (IRC) -

- IRC is a direct line to our most knowledgeable Docker users; we have - both the #docker and #docker-dev group on - irc.freenode.net. - IRC is a rich chat protocol but it can overwhelm new users. You can search - our chat archives. -

- Read our IRC quickstart guide for an easy way to get started. -
Docker Community Forums - The Docker Engine - group is for users of the Docker Engine project. -
Google Groups - The docker-dev group is for contributors and other people - contributing to the Docker project. You can join this group without a - Google account by sending an email to docker-dev+subscribe@googlegroups.com. - You'll receive a join-request message; simply reply to the message to - confirm your subscription. -
Twitter - You can follow Docker's Twitter feed - to get updates on our products. You can also tweet us questions or just - share blogs or stories. -
Stack Overflow - Stack Overflow has thousands of Docker questions listed. We regularly - monitor Docker questions - and so do many other knowledgeable Docker users. -
- -### Legal +Legal +===== *Brought to you courtesy of our legal counsel. For more context, -please see the [NOTICE](https://github.com/docker/docker/blob/master/NOTICE) document in this repo.* +please see the [NOTICE](https://github.com/moby/moby/blob/master/NOTICE) document in this repo.* -Use and transfer of Docker may be subject to certain restrictions by the +Use and transfer of Moby may be subject to certain restrictions by the United States and other governments. It is your responsibility to ensure that your use and/or transfer does not @@ -275,30 +85,6 @@ For more information, please see https://www.bis.doc.gov Licensing ========= -Docker is licensed under the Apache License, Version 2.0. See -[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full +Moby is licensed under the Apache License, Version 2.0. See +[LICENSE](https://github.com/moby/moby/blob/master/LICENSE) for the full license text. - -Other Docker Related Projects -============================= -There are a number of projects under development that are based on Docker's -core technology. These projects expand the tooling built around the -Docker platform to broaden its application and utility. - -* [Docker Registry](https://github.com/docker/distribution): Registry -server for Docker (hosting/delivery of repositories and images) -* [Docker Machine](https://github.com/docker/machine): Machine management -for a container-centric world -* [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering -system -* [Docker Compose](https://github.com/docker/compose) (formerly Fig): -Define and run multi-container apps -* [Kitematic](https://github.com/docker/kitematic): The easiest way to use -Docker on Mac and Windows - -If you know of another project underway that should be listed here, please help -us keep this list up-to-date by submitting a PR. 
- -Awesome-Docker -============== -You can find more projects, tools and articles related to Docker on the [awesome-docker list](https://github.com/veggiemonk/awesome-docker). Add your project there. diff --git a/fn/vendor/github.com/docker/docker/VERSION b/fn/vendor/github.com/docker/docker/VERSION index d4fbcf138..2d736aaa1 100644 --- a/fn/vendor/github.com/docker/docker/VERSION +++ b/fn/vendor/github.com/docker/docker/VERSION @@ -1 +1 @@ -17.05.0-ce +17.06.0-dev diff --git a/fn/vendor/github.com/docker/docker/api/README.md b/fn/vendor/github.com/docker/docker/api/README.md index 8954ed017..bb8813252 100644 --- a/fn/vendor/github.com/docker/docker/api/README.md +++ b/fn/vendor/github.com/docker/docker/api/README.md @@ -14,8 +14,8 @@ It consists of various components in this repository: The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to: -1. To automatically generate documentation. -2. To automatically generate the Go server and client. (A work-in-progress.) +1. Automatically generate documentation. +2. Automatically generate the Go server and client. (A work-in-progress.) 3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc. ## Updating the API documentation diff --git a/fn/vendor/github.com/docker/docker/api/common.go b/fn/vendor/github.com/docker/docker/api/common.go index 499a8d084..859daf602 100644 --- a/fn/vendor/github.com/docker/docker/api/common.go +++ b/fn/vendor/github.com/docker/docker/api/common.go @@ -4,15 +4,9 @@ import ( "encoding/json" "encoding/pem" "fmt" - "mime" "os" "path/filepath" - "sort" - "strconv" - "strings" - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/system" "github.com/docker/libtrust" @@ -21,112 +15,17 @@ import ( // Common constants for daemon and client. 
const ( // DefaultVersion of Current REST API - DefaultVersion string = "1.29" + DefaultVersion string = "1.31" // NoBaseImageSpecifier is the symbol used by the FROM // command to specify that no base image is to be used. NoBaseImageSpecifier string = "scratch" ) -// byPortInfo is a temporary type used to sort types.Port by its fields -type byPortInfo []types.Port - -func (r byPortInfo) Len() int { return len(r) } -func (r byPortInfo) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r byPortInfo) Less(i, j int) bool { - if r[i].PrivatePort != r[j].PrivatePort { - return r[i].PrivatePort < r[j].PrivatePort - } - - if r[i].IP != r[j].IP { - return r[i].IP < r[j].IP - } - - if r[i].PublicPort != r[j].PublicPort { - return r[i].PublicPort < r[j].PublicPort - } - - return r[i].Type < r[j].Type -} - -// DisplayablePorts returns formatted string representing open ports of container -// e.g. "0.0.0.0:80->9090/tcp, 9988/tcp" -// it's used by command 'docker ps' -func DisplayablePorts(ports []types.Port) string { - type portGroup struct { - first uint16 - last uint16 - } - groupMap := make(map[string]*portGroup) - var result []string - var hostMappings []string - var groupMapKeys []string - sort.Sort(byPortInfo(ports)) - for _, port := range ports { - current := port.PrivatePort - portKey := port.Type - if port.IP != "" { - if port.PublicPort != current { - hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type)) - continue - } - portKey = fmt.Sprintf("%s/%s", port.IP, port.Type) - } - group := groupMap[portKey] - - if group == nil { - groupMap[portKey] = &portGroup{first: current, last: current} - // record order that groupMap keys are created - groupMapKeys = append(groupMapKeys, portKey) - continue - } - if current == (group.last + 1) { - group.last = current - continue - } - - result = append(result, formGroup(portKey, group.first, group.last)) - groupMap[portKey] = &portGroup{first: current, last: current} 
- } - for _, portKey := range groupMapKeys { - g := groupMap[portKey] - result = append(result, formGroup(portKey, g.first, g.last)) - } - result = append(result, hostMappings...) - return strings.Join(result, ", ") -} - -func formGroup(key string, start, last uint16) string { - parts := strings.Split(key, "/") - groupType := parts[0] - var ip string - if len(parts) > 1 { - ip = parts[0] - groupType = parts[1] - } - group := strconv.Itoa(int(start)) - if start != last { - group = fmt.Sprintf("%s-%d", group, last) - } - if ip != "" { - group = fmt.Sprintf("%s:%s->%s", ip, group, group) - } - return fmt.Sprintf("%s/%s", group, groupType) -} - -// MatchesContentType validates the content type against the expected one -func MatchesContentType(contentType, expectedType string) bool { - mimetype, _, err := mime.ParseMediaType(contentType) - if err != nil { - logrus.Errorf("Error parsing media type: %s error: %v", contentType, err) - } - return err == nil && mimetype == expectedType -} - // LoadOrCreateTrustKey attempts to load the libtrust key at the given path, // otherwise generates a new one func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { - err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700) + err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700, "") if err != nil { return nil, err } diff --git a/fn/vendor/github.com/docker/docker/api/common_test.go b/fn/vendor/github.com/docker/docker/api/common_test.go index 4d67206ff..f466616b0 100644 --- a/fn/vendor/github.com/docker/docker/api/common_test.go +++ b/fn/vendor/github.com/docker/docker/api/common_test.go @@ -6,272 +6,8 @@ import ( "testing" "os" - - "github.com/docker/docker/api/types" ) -type ports struct { - ports []types.Port - expected string -} - -// DisplayablePorts -func TestDisplayablePorts(t *testing.T) { - cases := []ports{ - { - []types.Port{ - { - PrivatePort: 9988, - Type: "tcp", - }, - }, - "9988/tcp"}, - { - []types.Port{ - { - PrivatePort: 9988, - Type: "udp", - 
}, - }, - "9988/udp", - }, - { - []types.Port{ - { - IP: "0.0.0.0", - PrivatePort: 9988, - Type: "tcp", - }, - }, - "0.0.0.0:0->9988/tcp", - }, - { - []types.Port{ - { - PrivatePort: 9988, - PublicPort: 8899, - Type: "tcp", - }, - }, - "9988/tcp", - }, - { - []types.Port{ - { - IP: "4.3.2.1", - PrivatePort: 9988, - PublicPort: 8899, - Type: "tcp", - }, - }, - "4.3.2.1:8899->9988/tcp", - }, - { - []types.Port{ - { - IP: "4.3.2.1", - PrivatePort: 9988, - PublicPort: 9988, - Type: "tcp", - }, - }, - "4.3.2.1:9988->9988/tcp", - }, - { - []types.Port{ - { - PrivatePort: 9988, - Type: "udp", - }, { - PrivatePort: 9988, - Type: "udp", - }, - }, - "9988/udp, 9988/udp", - }, - { - []types.Port{ - { - IP: "1.2.3.4", - PublicPort: 9998, - PrivatePort: 9998, - Type: "udp", - }, { - IP: "1.2.3.4", - PublicPort: 9999, - PrivatePort: 9999, - Type: "udp", - }, - }, - "1.2.3.4:9998-9999->9998-9999/udp", - }, - { - []types.Port{ - { - IP: "1.2.3.4", - PublicPort: 8887, - PrivatePort: 9998, - Type: "udp", - }, { - IP: "1.2.3.4", - PublicPort: 8888, - PrivatePort: 9999, - Type: "udp", - }, - }, - "1.2.3.4:8887->9998/udp, 1.2.3.4:8888->9999/udp", - }, - { - []types.Port{ - { - PrivatePort: 9998, - Type: "udp", - }, { - PrivatePort: 9999, - Type: "udp", - }, - }, - "9998-9999/udp", - }, - { - []types.Port{ - { - IP: "1.2.3.4", - PrivatePort: 6677, - PublicPort: 7766, - Type: "tcp", - }, { - PrivatePort: 9988, - PublicPort: 8899, - Type: "udp", - }, - }, - "9988/udp, 1.2.3.4:7766->6677/tcp", - }, - { - []types.Port{ - { - IP: "1.2.3.4", - PrivatePort: 9988, - PublicPort: 8899, - Type: "udp", - }, { - IP: "1.2.3.4", - PrivatePort: 9988, - PublicPort: 8899, - Type: "tcp", - }, { - IP: "4.3.2.1", - PrivatePort: 2233, - PublicPort: 3322, - Type: "tcp", - }, - }, - "4.3.2.1:3322->2233/tcp, 1.2.3.4:8899->9988/tcp, 1.2.3.4:8899->9988/udp", - }, - { - []types.Port{ - { - PrivatePort: 9988, - PublicPort: 8899, - Type: "udp", - }, { - IP: "1.2.3.4", - PrivatePort: 6677, - PublicPort: 7766, - Type: 
"tcp", - }, { - IP: "4.3.2.1", - PrivatePort: 2233, - PublicPort: 3322, - Type: "tcp", - }, - }, - "9988/udp, 4.3.2.1:3322->2233/tcp, 1.2.3.4:7766->6677/tcp", - }, - { - []types.Port{ - { - PrivatePort: 80, - Type: "tcp", - }, { - PrivatePort: 1024, - Type: "tcp", - }, { - PrivatePort: 80, - Type: "udp", - }, { - PrivatePort: 1024, - Type: "udp", - }, { - IP: "1.1.1.1", - PublicPort: 80, - PrivatePort: 1024, - Type: "tcp", - }, { - IP: "1.1.1.1", - PublicPort: 80, - PrivatePort: 1024, - Type: "udp", - }, { - IP: "1.1.1.1", - PublicPort: 1024, - PrivatePort: 80, - Type: "tcp", - }, { - IP: "1.1.1.1", - PublicPort: 1024, - PrivatePort: 80, - Type: "udp", - }, { - IP: "2.1.1.1", - PublicPort: 80, - PrivatePort: 1024, - Type: "tcp", - }, { - IP: "2.1.1.1", - PublicPort: 80, - PrivatePort: 1024, - Type: "udp", - }, { - IP: "2.1.1.1", - PublicPort: 1024, - PrivatePort: 80, - Type: "tcp", - }, { - IP: "2.1.1.1", - PublicPort: 1024, - PrivatePort: 80, - Type: "udp", - }, - }, - "80/tcp, 80/udp, 1024/tcp, 1024/udp, 1.1.1.1:1024->80/tcp, 1.1.1.1:1024->80/udp, 2.1.1.1:1024->80/tcp, 2.1.1.1:1024->80/udp, 1.1.1.1:80->1024/tcp, 1.1.1.1:80->1024/udp, 2.1.1.1:80->1024/tcp, 2.1.1.1:80->1024/udp", - }, - } - - for _, port := range cases { - actual := DisplayablePorts(port.ports) - if port.expected != actual { - t.Fatalf("Expected %s, got %s.", port.expected, actual) - } - } -} - -// MatchesContentType -func TestJsonContentType(t *testing.T) { - if !MatchesContentType("application/json", "application/json") { - t.Fail() - } - - if !MatchesContentType("application/json; charset=utf-8", "application/json") { - t.Fail() - } - - if MatchesContentType("dockerapplication/json", "application/json") { - t.Fail() - } -} - // LoadOrCreateTrustKey func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) { tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") diff --git a/fn/vendor/github.com/docker/docker/api/errors/errors_test.go 
b/fn/vendor/github.com/docker/docker/api/errors/errors_test.go new file mode 100644 index 000000000..1d6a596ac --- /dev/null +++ b/fn/vendor/github.com/docker/docker/api/errors/errors_test.go @@ -0,0 +1,64 @@ +package errors + +import ( + "fmt" + "github.com/stretchr/testify/assert" + "net/http" + "testing" +) + +func newError(errorname string) error { + + return fmt.Errorf("test%v", errorname) +} + +func TestErrors(t *testing.T) { + errmsg := newError("apiError") + err := apiError{ + error: errmsg, + statusCode: 0, + } + assert.Equal(t, err.HTTPErrorStatusCode(), err.statusCode) + + errmsg = newError("ErrorWithStatusCode") + errcode := 1 + serr := NewErrorWithStatusCode(errmsg, errcode) + apierr, ok := serr.(apiError) + if !ok { + t.Fatal("excepted err is apiError type") + } + assert.Equal(t, errcode, apierr.statusCode) + + errmsg = newError("NewBadRequestError") + baderr := NewBadRequestError(errmsg) + apierr, ok = baderr.(apiError) + if !ok { + t.Fatal("excepted err is apiError type") + } + assert.Equal(t, http.StatusBadRequest, apierr.statusCode) + + errmsg = newError("RequestForbiddenError") + ferr := NewRequestForbiddenError(errmsg) + apierr, ok = ferr.(apiError) + if !ok { + t.Fatal("excepted err is apiError type") + } + assert.Equal(t, http.StatusForbidden, apierr.statusCode) + + errmsg = newError("RequestNotFoundError") + nerr := NewRequestNotFoundError(errmsg) + apierr, ok = nerr.(apiError) + if !ok { + t.Fatal("excepted err is apiError type") + } + assert.Equal(t, http.StatusNotFound, apierr.statusCode) + + errmsg = newError("RequestConflictError") + cerr := NewRequestConflictError(errmsg) + apierr, ok = cerr.(apiError) + if !ok { + t.Fatal("excepted err is apiError type") + } + assert.Equal(t, http.StatusConflict, apierr.statusCode) + +} diff --git a/fn/vendor/github.com/docker/docker/api/server/backend/build/backend.go b/fn/vendor/github.com/docker/docker/api/server/backend/build/backend.go new file mode 100644 index 000000000..b5a758c38 --- /dev/null 
+++ b/fn/vendor/github.com/docker/docker/api/server/backend/build/backend.go @@ -0,0 +1,90 @@ +package build + +import ( + "fmt" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/fscache" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// ImageComponent provides an interface for working with images +type ImageComponent interface { + SquashImage(from string, to string) (string, error) + TagImageWithReference(image.ID, string, reference.Named) error +} + +// Builder defines interface for running a build +type Builder interface { + Build(context.Context, backend.BuildConfig) (*builder.Result, error) +} + +// Backend provides build functionality to the API router +type Backend struct { + builder Builder + fsCache *fscache.FSCache + imageComponent ImageComponent +} + +// NewBackend creates a new build backend from components +func NewBackend(components ImageComponent, builder Builder, fsCache *fscache.FSCache) (*Backend, error) { + return &Backend{imageComponent: components, builder: builder, fsCache: fsCache}, nil +} + +// Build builds an image from a Source +func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string, error) { + options := config.Options + tagger, err := NewTagger(b.imageComponent, config.ProgressWriter.StdoutFormatter, options.Tags) + if err != nil { + return "", err + } + + build, err := b.builder.Build(ctx, config) + if err != nil { + return "", err + } + + var imageID = build.ImageID + if options.Squash { + if imageID, err = squashBuild(build, b.imageComponent); err != nil { + return "", err + } + if config.ProgressWriter.AuxFormatter != nil { + if err = config.ProgressWriter.AuxFormatter.Emit(types.BuildResult{ID: imageID}); err != nil { + return "", err + } + } + } + + stdout := 
config.ProgressWriter.StdoutFormatter + fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID)) + err = tagger.TagImages(image.ID(imageID)) + return imageID, err +} + +// PruneCache removes all cached build sources +func (b *Backend) PruneCache(ctx context.Context) (*types.BuildCachePruneReport, error) { + size, err := b.fsCache.Prune() + if err != nil { + return nil, errors.Wrap(err, "failed to prune build cache") + } + return &types.BuildCachePruneReport{SpaceReclaimed: size}, nil +} + +func squashBuild(build *builder.Result, imageComponent ImageComponent) (string, error) { + var fromID string + if build.FromImage != nil { + fromID = build.FromImage.ImageID() + } + imageID, err := imageComponent.SquashImage(build.ImageID, fromID) + if err != nil { + return "", errors.Wrap(err, "error squashing image") + } + return imageID, nil +} diff --git a/fn/vendor/github.com/docker/docker/api/server/backend/build/tag.go b/fn/vendor/github.com/docker/docker/api/server/backend/build/tag.go new file mode 100644 index 000000000..7bd5dcdeb --- /dev/null +++ b/fn/vendor/github.com/docker/docker/api/server/backend/build/tag.go @@ -0,0 +1,84 @@ +package build + +import ( + "fmt" + "io" + "runtime" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" +) + +// Tagger is responsible for tagging an image created by a builder +type Tagger struct { + imageComponent ImageComponent + stdout io.Writer + repoAndTags []reference.Named +} + +// NewTagger returns a new Tagger for tagging the images of a build. +// If any of the names are invalid tags an error is returned. 
+func NewTagger(backend ImageComponent, stdout io.Writer, names []string) (*Tagger, error) { + reposAndTags, err := sanitizeRepoAndTags(names) + if err != nil { + return nil, err + } + return &Tagger{ + imageComponent: backend, + stdout: stdout, + repoAndTags: reposAndTags, + }, nil +} + +// TagImages creates image tags for the imageID +func (bt *Tagger) TagImages(imageID image.ID) error { + for _, rt := range bt.repoAndTags { + // TODO @jhowardmsft LCOW support. Will need revisiting. + platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + if err := bt.imageComponent.TagImageWithReference(imageID, platform, rt); err != nil { + return err + } + fmt.Fprintf(bt.stdout, "Successfully tagged %s\n", reference.FamiliarString(rt)) + } + return nil +} + +// sanitizeRepoAndTags parses the raw "t" parameter received from the client +// to a slice of repoAndTag. +// It also validates each repoName and tag. +func sanitizeRepoAndTags(names []string) ([]reference.Named, error) { + var ( + repoAndTags []reference.Named + // This map is used for deduplicating the "-t" parameter. 
+ uniqNames = make(map[string]struct{}) + ) + for _, repo := range names { + if repo == "" { + continue + } + + ref, err := reference.ParseNormalizedNamed(repo) + if err != nil { + return nil, err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return nil, errors.New("build tag cannot contain a digest") + } + + ref = reference.TagNameOnly(ref) + + nameWithTag := ref.String() + + if _, exists := uniqNames[nameWithTag]; !exists { + uniqNames[nameWithTag] = struct{}{} + repoAndTags = append(repoAndTags, ref) + } + } + return repoAndTags, nil +} diff --git a/fn/vendor/github.com/docker/docker/api/server/httputils/errors.go b/fn/vendor/github.com/docker/docker/api/server/httputils/errors.go index 40ff35149..82da21c2a 100644 --- a/fn/vendor/github.com/docker/docker/api/server/httputils/errors.go +++ b/fn/vendor/github.com/docker/docker/api/server/httputils/errors.go @@ -9,6 +9,7 @@ import ( "github.com/docker/docker/api/types/versions" "github.com/gorilla/mux" "google.golang.org/grpc" + "google.golang.org/grpc/codes" ) // httpStatusError is an interface @@ -44,11 +45,17 @@ func GetHTTPErrorStatusCode(err error) int { case inputValidationError: statusCode = http.StatusBadRequest default: + statusCode = statusCodeFromGRPCError(err) + if statusCode != http.StatusInternalServerError { + return statusCode + } + // FIXME: this is brittle and should not be necessary, but we still need to identify if // there are errors falling back into this logic. // If we need to differentiate between different possible error types, // we should create appropriate error types that implement the httpStatusError interface. 
errStr := strings.ToLower(errMsg) + for _, status := range []struct { keyword string code int @@ -66,6 +73,7 @@ func GetHTTPErrorStatusCode(err error) int { {"this node", http.StatusServiceUnavailable}, {"needs to be unlocked", http.StatusServiceUnavailable}, {"certificates have expired", http.StatusServiceUnavailable}, + {"repository does not exist", http.StatusNotFound}, } { if strings.Contains(errStr, status.keyword) { statusCode = status.code @@ -102,3 +110,36 @@ func MakeErrorHandler(err error) http.HandlerFunc { } } } + +// statusCodeFromGRPCError returns status code according to gRPC error +func statusCodeFromGRPCError(err error) int { + switch grpc.Code(err) { + case codes.InvalidArgument: // code 3 + return http.StatusBadRequest + case codes.NotFound: // code 5 + return http.StatusNotFound + case codes.AlreadyExists: // code 6 + return http.StatusConflict + case codes.PermissionDenied: // code 7 + return http.StatusForbidden + case codes.FailedPrecondition: // code 9 + return http.StatusBadRequest + case codes.Unauthenticated: // code 16 + return http.StatusUnauthorized + case codes.OutOfRange: // code 11 + return http.StatusBadRequest + case codes.Unimplemented: // code 12 + return http.StatusNotImplemented + case codes.Unavailable: // code 14 + return http.StatusServiceUnavailable + default: + // codes.Canceled(1) + // codes.Unknown(2) + // codes.DeadlineExceeded(4) + // codes.ResourceExhausted(8) + // codes.Aborted(10) + // codes.Internal(13) + // codes.DataLoss(15) + return http.StatusInternalServerError + } +} diff --git a/fn/vendor/github.com/docker/docker/api/server/httputils/httputils.go b/fn/vendor/github.com/docker/docker/api/server/httputils/httputils.go index 2c2e8452f..92cb67c56 100644 --- a/fn/vendor/github.com/docker/docker/api/server/httputils/httputils.go +++ b/fn/vendor/github.com/docker/docker/api/server/httputils/httputils.go @@ -3,12 +3,12 @@ package httputils import ( "fmt" "io" + "mime" "net/http" "strings" + 
"github.com/Sirupsen/logrus" "golang.org/x/net/context" - - "github.com/docker/docker/api" ) // APIVersionKey is the client's requested API version. @@ -55,7 +55,7 @@ func CheckForJSON(r *http.Request) error { } // Otherwise it better be json - if api.MatchesContentType(ct, "application/json") { + if matchesContentType(ct, "application/json") { return nil } return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct) @@ -86,3 +86,12 @@ func VersionFromContext(ctx context.Context) string { return "" } + +// matchesContentType validates the content type against the expected one +func matchesContentType(contentType, expectedType string) bool { + mimetype, _, err := mime.ParseMediaType(contentType) + if err != nil { + logrus.Errorf("Error parsing media type: %s error: %v", contentType, err) + } + return err == nil && mimetype == expectedType +} diff --git a/fn/vendor/github.com/docker/docker/api/server/httputils/httputils_test.go b/fn/vendor/github.com/docker/docker/api/server/httputils/httputils_test.go new file mode 100644 index 000000000..d551b9d98 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/api/server/httputils/httputils_test.go @@ -0,0 +1,18 @@ +package httputils + +import "testing" + +// matchesContentType +func TestJsonContentType(t *testing.T) { + if !matchesContentType("application/json", "application/json") { + t.Fail() + } + + if !matchesContentType("application/json; charset=utf-8", "application/json") { + t.Fail() + } + + if matchesContentType("dockerapplication/json", "application/json") { + t.Fail() + } +} diff --git a/fn/vendor/github.com/docker/docker/api/server/httputils/httputils_write_json.go b/fn/vendor/github.com/docker/docker/api/server/httputils/httputils_write_json.go index 4787cc3c3..562c127e8 100644 --- a/fn/vendor/github.com/docker/docker/api/server/httputils/httputils_write_json.go +++ b/fn/vendor/github.com/docker/docker/api/server/httputils/httputils_write_json.go @@ -1,5 +1,3 @@ -// +build go1.7 - package 
httputils import ( diff --git a/fn/vendor/github.com/docker/docker/api/server/httputils/httputils_write_json_go16.go b/fn/vendor/github.com/docker/docker/api/server/httputils/httputils_write_json_go16.go deleted file mode 100644 index bdc698173..000000000 --- a/fn/vendor/github.com/docker/docker/api/server/httputils/httputils_write_json_go16.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build go1.6,!go1.7 - -package httputils - -import ( - "encoding/json" - "net/http" -) - -// WriteJSON writes the value v to the http response stream as json with standard json encoding. -func WriteJSON(w http.ResponseWriter, code int, v interface{}) error { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(code) - enc := json.NewEncoder(w) - return enc.Encode(v) -} diff --git a/fn/vendor/github.com/docker/docker/api/server/httputils/write_log_stream.go b/fn/vendor/github.com/docker/docker/api/server/httputils/write_log_stream.go index 5793a99ff..891e5f02b 100644 --- a/fn/vendor/github.com/docker/docker/api/server/httputils/write_log_stream.go +++ b/fn/vendor/github.com/docker/docker/api/server/httputils/write_log_stream.go @@ -3,6 +3,7 @@ package httputils import ( "fmt" "io" + "net/url" "sort" "strings" @@ -85,6 +86,7 @@ func (s byKey) Swap(i, j int) { func stringAttrs(a backend.LogAttributes) string { var ss byKey for k, v := range a { + k, v := url.QueryEscape(k), url.QueryEscape(v) ss = append(ss, k+"="+v) } sort.Sort(ss) diff --git a/fn/vendor/github.com/docker/docker/api/server/middleware/debug.go b/fn/vendor/github.com/docker/docker/api/server/middleware/debug.go index 8c8567669..a9a94e7f3 100644 --- a/fn/vendor/github.com/docker/docker/api/server/middleware/debug.go +++ b/fn/vendor/github.com/docker/docker/api/server/middleware/debug.go @@ -41,7 +41,7 @@ func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWri var postForm map[string]interface{} if err := json.Unmarshal(b, &postForm); err == nil { - maskSecretKeys(postForm) + 
maskSecretKeys(postForm, r.RequestURI) formStr, errMarshal := json.Marshal(postForm) if errMarshal == nil { logrus.Debugf("form data: %s", string(formStr)) @@ -54,23 +54,41 @@ func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWri } } -func maskSecretKeys(inp interface{}) { +func maskSecretKeys(inp interface{}, path string) { + // Remove any query string from the path + idx := strings.Index(path, "?") + if idx != -1 { + path = path[:idx] + } + // Remove trailing / characters + path = strings.TrimRight(path, "/") + if arr, ok := inp.([]interface{}); ok { for _, f := range arr { - maskSecretKeys(f) + maskSecretKeys(f, path) } return } + if form, ok := inp.(map[string]interface{}); ok { loop0: for k, v := range form { - for _, m := range []string{"password", "secret", "jointoken", "unlockkey"} { + for _, m := range []string{"password", "secret", "jointoken", "unlockkey", "signingcakey"} { if strings.EqualFold(m, k) { form[k] = "*****" continue loop0 } } - maskSecretKeys(v) + maskSecretKeys(v, path) + } + + // Route-specific redactions + if strings.HasSuffix(path, "/secrets/create") { + for k := range form { + if k == "Data" { + form[k] = "*****" + } + } } } } diff --git a/fn/vendor/github.com/docker/docker/api/server/middleware/debug_test.go b/fn/vendor/github.com/docker/docker/api/server/middleware/debug_test.go new file mode 100644 index 000000000..87ecafd14 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/api/server/middleware/debug_test.go @@ -0,0 +1,58 @@ +package middleware + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMaskSecretKeys(t *testing.T) { + tests := []struct { + path string + input map[string]interface{} + expected map[string]interface{} + }{ + { + path: "/v1.30/secrets/create", + input: map[string]interface{}{"Data": "foo", "Name": "name", "Labels": map[string]interface{}{}}, + expected: map[string]interface{}{"Data": "*****", "Name": "name", "Labels": map[string]interface{}{}}, + }, + 
{ + path: "/v1.30/secrets/create//", + input: map[string]interface{}{"Data": "foo", "Name": "name", "Labels": map[string]interface{}{}}, + expected: map[string]interface{}{"Data": "*****", "Name": "name", "Labels": map[string]interface{}{}}, + }, + + { + path: "/secrets/create?key=val", + input: map[string]interface{}{"Data": "foo", "Name": "name", "Labels": map[string]interface{}{}}, + expected: map[string]interface{}{"Data": "*****", "Name": "name", "Labels": map[string]interface{}{}}, + }, + { + path: "/v1.30/some/other/path", + input: map[string]interface{}{ + "password": "pass", + "other": map[string]interface{}{ + "secret": "secret", + "jointoken": "jointoken", + "unlockkey": "unlockkey", + "signingcakey": "signingcakey", + }, + }, + expected: map[string]interface{}{ + "password": "*****", + "other": map[string]interface{}{ + "secret": "*****", + "jointoken": "*****", + "unlockkey": "*****", + "signingcakey": "*****", + }, + }, + }, + } + + for _, testcase := range tests { + maskSecretKeys(testcase.input, testcase.path) + assert.Equal(t, testcase.expected, testcase.input) + } +} diff --git a/fn/vendor/github.com/docker/docker/api/server/router/build/backend.go b/fn/vendor/github.com/docker/docker/api/server/router/build/backend.go index 751999e63..defffd3ef 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/build/backend.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/build/backend.go @@ -1,8 +1,6 @@ package build import ( - "io" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" "golang.org/x/net/context" @@ -10,11 +8,14 @@ import ( // Backend abstracts an image builder whose only purpose is to build an image referenced by an imageID. type Backend interface { - // BuildFromContext builds a Docker image referenced by an imageID string. - // - // Note: Tagging an image should not be done by a Builder, it should instead be done - // by the caller. 
- // + // Build a Docker image returning the id of the image // TODO: make this return a reference instead of string - BuildFromContext(ctx context.Context, src io.ReadCloser, remote string, buildOptions *types.ImageBuildOptions, pg backend.ProgressWriter) (string, error) + Build(context.Context, backend.BuildConfig) (string, error) + + // Prune build cache + PruneCache(context.Context) (*types.BuildCachePruneReport, error) +} + +type experimentalProvider interface { + HasExperimental() bool } diff --git a/fn/vendor/github.com/docker/docker/api/server/router/build/build.go b/fn/vendor/github.com/docker/docker/api/server/router/build/build.go index 959498e0f..78f5ae2f2 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/build/build.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/build/build.go @@ -5,14 +5,13 @@ import "github.com/docker/docker/api/server/router" // buildRouter is a router to talk with the build controller type buildRouter struct { backend Backend + daemon experimentalProvider routes []router.Route } // NewRouter initializes a new build router -func NewRouter(b Backend) router.Router { - r := &buildRouter{ - backend: b, - } +func NewRouter(b Backend, d experimentalProvider) router.Router { + r := &buildRouter{backend: b, daemon: d} r.initRoutes() return r } @@ -24,6 +23,7 @@ func (r *buildRouter) Routes() []router.Route { func (r *buildRouter) initRoutes() { r.routes = []router.Route{ - router.Cancellable(router.NewPostRoute("/build", r.postBuild)), + router.NewPostRoute("/build", r.postBuild, router.WithCancel), + router.NewPostRoute("/build/prune", r.postPrune, router.WithCancel), } } diff --git a/fn/vendor/github.com/docker/docker/api/server/router/build/build_routes.go b/fn/vendor/github.com/docker/docker/api/server/router/build/build_routes.go index ba86d80fb..baa1da303 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/build/build_routes.go +++ 
b/fn/vendor/github.com/docker/docker/api/server/router/build/build_routes.go @@ -13,6 +13,7 @@ import ( "sync" "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" @@ -21,7 +22,8 @@ import ( "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/go-units" + units "github.com/docker/go-units" + "github.com/pkg/errors" "golang.org/x/net/context" ) @@ -57,6 +59,7 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui options.SecurityOpt = r.Form["securityopt"] options.Squash = httputils.BoolValue(r, "squash") options.Target = r.FormValue("target") + options.RemoteContext = r.FormValue("remote") if r.Form.Get("shmsize") != "" { shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64) @@ -74,7 +77,7 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui } if runtime.GOOS != "windows" && options.SecurityOpt != nil { - return nil, fmt.Errorf("the daemon on this platform does not support --security-opt to build") + return nil, fmt.Errorf("The daemon on this platform does not support setting security options on build") } var buildUlimits = []*units.Ulimit{} @@ -86,9 +89,6 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui options.Ulimits = buildUlimits } - var buildArgs = map[string]*string{} - buildArgsJSON := r.FormValue("buildargs") - // Note that there are two ways a --build-arg might appear in the // json of the query param: // "foo":"bar" @@ -101,34 +101,128 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui // the fact they mentioned it, we need to pass that along to the builder // so that it can print a warning about "foo" being unused if there is // no "ARG foo" in the Dockerfile. 
+ buildArgsJSON := r.FormValue("buildargs") if buildArgsJSON != "" { + var buildArgs = map[string]*string{} if err := json.Unmarshal([]byte(buildArgsJSON), &buildArgs); err != nil { return nil, err } options.BuildArgs = buildArgs } - var labels = map[string]string{} labelsJSON := r.FormValue("labels") if labelsJSON != "" { + var labels = map[string]string{} if err := json.Unmarshal([]byte(labelsJSON), &labels); err != nil { return nil, err } options.Labels = labels } - var cacheFrom = []string{} cacheFromJSON := r.FormValue("cachefrom") if cacheFromJSON != "" { + var cacheFrom = []string{} if err := json.Unmarshal([]byte(cacheFromJSON), &cacheFrom); err != nil { return nil, err } options.CacheFrom = cacheFrom } + options.SessionID = r.FormValue("session") return options, nil } +func (br *buildRouter) postPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + report, err := br.backend.PruneCache(ctx) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, report) +} + +func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var ( + notVerboseBuffer = bytes.NewBuffer(nil) + version = httputils.VersionFromContext(ctx) + ) + + w.Header().Set("Content-Type", "application/json") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + errf := func(err error) error { + if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 { + output.Write(notVerboseBuffer.Bytes()) + } + // Do not write the error in the http output if it's still empty. + // This prevents from writing a 200(OK) when there is an internal error. 
+ if !output.Flushed() { + return err + } + _, err = w.Write(streamformatter.FormatError(err)) + if err != nil { + logrus.Warnf("could not write error response: %v", err) + } + return nil + } + + buildOptions, err := newImageBuildOptions(ctx, r) + if err != nil { + return errf(err) + } + buildOptions.AuthConfigs = getAuthConfigs(r.Header) + + if buildOptions.Squash && !br.daemon.HasExperimental() { + return apierrors.NewBadRequestError( + errors.New("squash is only supported with experimental mode")) + } + + out := io.Writer(output) + if buildOptions.SuppressOutput { + out = notVerboseBuffer + } + + // Currently, only used if context is from a remote url. + // Look at code in DetectContextFromRemoteURL for more information. + createProgressReader := func(in io.ReadCloser) io.ReadCloser { + progressOutput := streamformatter.NewJSONProgressOutput(out, true) + return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", buildOptions.RemoteContext) + } + + wantAux := versions.GreaterThanOrEqualTo(version, "1.30") + + imgID, err := br.backend.Build(ctx, backend.BuildConfig{ + Source: r.Body, + Options: buildOptions, + ProgressWriter: buildProgressWriter(out, wantAux, createProgressReader), + }) + if err != nil { + return errf(err) + } + + // Everything worked so if -q was provided the output from the daemon + // should be just the image ID and we'll print that to stdout. 
+ if buildOptions.SuppressOutput { + fmt.Fprintln(streamformatter.NewStdoutWriter(output), imgID) + } + return nil +} + +func getAuthConfigs(header http.Header) map[string]types.AuthConfig { + authConfigs := map[string]types.AuthConfig{} + authConfigsEncoded := header.Get("X-Registry-Config") + + if authConfigsEncoded == "" { + return authConfigs + } + + authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded)) + // Pulling an image does not error when no auth is provided so to remain + // consistent with the existing api decode errors are ignored + json.NewDecoder(authConfigsJSON).Decode(&authConfigs) + return authConfigs +} + type syncWriter struct { w io.Writer mu sync.Mutex @@ -141,87 +235,19 @@ func (s *syncWriter) Write(b []byte) (count int, err error) { return } -func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var ( - authConfigs = map[string]types.AuthConfig{} - authConfigsEncoded = r.Header.Get("X-Registry-Config") - notVerboseBuffer = bytes.NewBuffer(nil) - ) - - if authConfigsEncoded != "" { - authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded)) - if err := json.NewDecoder(authConfigsJSON).Decode(&authConfigs); err != nil { - // for a pull it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting - // to be empty. - } - } - - w.Header().Set("Content-Type", "application/json") - - output := ioutils.NewWriteFlusher(w) - defer output.Close() - sf := streamformatter.NewJSONStreamFormatter() - errf := func(err error) error { - if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 { - output.Write(notVerboseBuffer.Bytes()) - } - // Do not write the error in the http output if it's still empty. - // This prevents from writing a 200(OK) when there is an internal error. 
- if !output.Flushed() { - return err - } - _, err = w.Write(sf.FormatError(err)) - if err != nil { - logrus.Warnf("could not write error response: %v", err) - } - return nil - } - - buildOptions, err := newImageBuildOptions(ctx, r) - if err != nil { - return errf(err) - } - buildOptions.AuthConfigs = authConfigs - - remoteURL := r.FormValue("remote") - - // Currently, only used if context is from a remote url. - // Look at code in DetectContextFromRemoteURL for more information. - createProgressReader := func(in io.ReadCloser) io.ReadCloser { - progressOutput := sf.NewProgressOutput(output, true) - if buildOptions.SuppressOutput { - progressOutput = sf.NewProgressOutput(notVerboseBuffer, true) - } - return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", remoteURL) - } - - out := io.Writer(output) - if buildOptions.SuppressOutput { - out = notVerboseBuffer - } +func buildProgressWriter(out io.Writer, wantAux bool, createProgressReader func(io.ReadCloser) io.ReadCloser) backend.ProgressWriter { out = &syncWriter{w: out} - stdout := &streamformatter.StdoutFormatter{Writer: out, StreamFormatter: sf} - stderr := &streamformatter.StderrFormatter{Writer: out, StreamFormatter: sf} - pg := backend.ProgressWriter{ + var aux *streamformatter.AuxFormatter + if wantAux { + aux = &streamformatter.AuxFormatter{Writer: out} + } + + return backend.ProgressWriter{ Output: out, - StdoutFormatter: stdout, - StderrFormatter: stderr, + StdoutFormatter: streamformatter.NewStdoutWriter(out), + StderrFormatter: streamformatter.NewStderrWriter(out), + AuxFormatter: aux, ProgressReaderFunc: createProgressReader, } - - imgID, err := br.backend.BuildFromContext(ctx, r.Body, remoteURL, buildOptions, pg) - if err != nil { - return errf(err) - } - - // Everything worked so if -q was provided the output from the daemon - // should be just the image ID and we'll print that to stdout. 
- if buildOptions.SuppressOutput { - stdout := &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf} - fmt.Fprintf(stdout, "%s\n", string(imgID)) - } - - return nil } diff --git a/fn/vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint.go b/fn/vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint.go index c1e93926f..b16971823 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint.go @@ -29,8 +29,8 @@ func (r *checkpointRouter) Routes() []router.Route { func (r *checkpointRouter) initRoutes() { r.routes = []router.Route{ - router.Experimental(router.NewGetRoute("/containers/{name:.*}/checkpoints", r.getContainerCheckpoints)), - router.Experimental(router.NewPostRoute("/containers/{name:.*}/checkpoints", r.postContainerCheckpoint)), - router.Experimental(router.NewDeleteRoute("/containers/{name}/checkpoints/{checkpoint}", r.deleteContainerCheckpoint)), + router.NewGetRoute("/containers/{name:.*}/checkpoints", r.getContainerCheckpoints, router.Experimental), + router.NewPostRoute("/containers/{name:.*}/checkpoints", r.postContainerCheckpoint, router.Experimental), + router.NewDeleteRoute("/containers/{name}/checkpoints/{checkpoint}", r.deleteContainerCheckpoint, router.Experimental), } } diff --git a/fn/vendor/github.com/docker/docker/api/server/router/container/backend.go b/fn/vendor/github.com/docker/docker/api/server/router/container/backend.go index ce0ee8c9d..d51ed8177 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/container/backend.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/container/backend.go @@ -2,7 +2,6 @@ package container import ( "io" - "time" "golang.org/x/net/context" @@ -10,6 +9,7 @@ import ( "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" + containerpkg 
"github.com/docker/docker/container" "github.com/docker/docker/pkg/archive" ) @@ -27,7 +27,7 @@ type copyBackend interface { ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) ContainerCopy(name string, res string) (io.ReadCloser, error) ContainerExport(name string, out io.Writer) error - ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error + ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) } @@ -44,7 +44,7 @@ type stateBackend interface { ContainerStop(name string, seconds *int) error ContainerUnpause(name string) error ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) - ContainerWait(name string, timeout time.Duration) (int, error) + ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) } // monitorBackend includes functions to implement to provide containers monitoring functionality. @@ -65,7 +65,7 @@ type attachBackend interface { // systemBackend includes functions to implement to provide system wide containers functionality type systemBackend interface { - ContainersPrune(pruneFilters filters.Args) (*types.ContainersPruneReport, error) + ContainersPrune(ctx context.Context, pruneFilters filters.Args) (*types.ContainersPruneReport, error) } // Backend is all the methods that need to be implemented to provide container specific functionality. 
diff --git a/fn/vendor/github.com/docker/docker/api/server/router/container/container.go b/fn/vendor/github.com/docker/docker/api/server/router/container/container.go index bbed7e994..24c3224ee 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/container/container.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/container/container.go @@ -46,8 +46,8 @@ func (r *containerRouter) initRoutes() { router.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges), router.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName), router.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop), - router.Cancellable(router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs)), - router.Cancellable(router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats)), + router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs, router.WithCancel), + router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats, router.WithCancel), router.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach), router.NewGetRoute("/exec/{id:.*}/json", r.getExecByID), router.NewGetRoute("/containers/{name:.*}/archive", r.getContainersArchive), @@ -59,7 +59,7 @@ func (r *containerRouter) initRoutes() { router.NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart), router.NewPostRoute("/containers/{name:.*}/start", r.postContainersStart), router.NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop), - router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait), + router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait, router.WithCancel), router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize), router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach), router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), // Deprecated since 1.8, Errors out since 1.12 @@ -68,7 +68,7 @@ func (r 
*containerRouter) initRoutes() { router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize), router.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename), router.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate), - router.NewPostRoute("/containers/prune", r.postContainersPrune), + router.NewPostRoute("/containers/prune", r.postContainersPrune, router.WithCancel), // PUT router.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive), // DELETE diff --git a/fn/vendor/github.com/docker/docker/api/server/router/container/container_routes.go b/fn/vendor/github.com/docker/docker/api/server/router/container/container_routes.go index bd151ab27..96b1010e1 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/container/container_routes.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/container/container_routes.go @@ -7,7 +7,6 @@ import ( "net/http" "strconv" "syscall" - "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/api" @@ -17,6 +16,7 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/versions" + containerpkg "github.com/docker/docker/container" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/signal" "golang.org/x/net/context" @@ -102,7 +102,7 @@ func (s *containerRouter) getContainersLogs(ctx context.Context, w http.Response } // doesn't matter what version the client is on, we're using this internally only - // also do we need size? i'm thinkin no we don't + // also do we need size? 
i'm thinking no we don't raw, err := s.backend.ContainerInspect(containerName, false, api.DefaultVersion) if err != nil { return err @@ -284,13 +284,48 @@ func (s *containerRouter) postContainersUnpause(ctx context.Context, w http.Resp } func (s *containerRouter) postContainersWait(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - status, err := s.backend.ContainerWait(vars["name"], -1*time.Second) + // Behavior changed in version 1.30 to handle wait condition and to + // return headers immediately. + version := httputils.VersionFromContext(ctx) + legacyBehavior := versions.LessThan(version, "1.30") + + // The wait condition defaults to "not-running". + waitCondition := containerpkg.WaitConditionNotRunning + if !legacyBehavior { + if err := httputils.ParseForm(r); err != nil { + return err + } + switch container.WaitCondition(r.Form.Get("condition")) { + case container.WaitConditionNextExit: + waitCondition = containerpkg.WaitConditionNextExit + case container.WaitConditionRemoved: + waitCondition = containerpkg.WaitConditionRemoved + } + } + + // Note: the context should get canceled if the client closes the + // connection since this handler has been wrapped by the + // router.WithCancel() wrapper. + waitC, err := s.backend.ContainerWait(ctx, vars["name"], waitCondition) if err != nil { return err } - return httputils.WriteJSON(w, http.StatusOK, &container.ContainerWaitOKBody{ - StatusCode: int64(status), + w.Header().Set("Content-Type", "application/json") + + if !legacyBehavior { + // Write response header immediately. + w.WriteHeader(http.StatusOK) + if flusher, ok := w.(http.Flusher); ok { + flusher.Flush() + } + } + + // Block on the result of the wait operation. 
+ status := <-waitC + + return json.NewEncoder(w).Encode(&container.ContainerWaitOKBody{ + StatusCode: int64(status.ExitCode()), }) } @@ -565,7 +600,7 @@ func (s *containerRouter) postContainersPrune(ctx context.Context, w http.Respon return err } - pruneReport, err := s.backend.ContainersPrune(pruneFilters) + pruneReport, err := s.backend.ContainersPrune(ctx, pruneFilters) if err != nil { return err } diff --git a/fn/vendor/github.com/docker/docker/api/server/router/container/copy.go b/fn/vendor/github.com/docker/docker/api/server/router/container/copy.go index 72d8c4a67..5cfe8d7ba 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/container/copy.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/container/copy.go @@ -112,5 +112,7 @@ func (s *containerRouter) putContainersArchive(ctx context.Context, w http.Respo } noOverwriteDirNonDir := httputils.BoolValue(r, "noOverwriteDirNonDir") - return s.backend.ContainerExtractToDir(v.Name, v.Path, noOverwriteDirNonDir, r.Body) + copyUIDGID := httputils.BoolValue(r, "copyUIDGID") + + return s.backend.ContainerExtractToDir(v.Name, v.Path, copyUIDGID, noOverwriteDirNonDir, r.Body) } diff --git a/fn/vendor/github.com/docker/docker/api/server/router/distribution/backend.go b/fn/vendor/github.com/docker/docker/api/server/router/distribution/backend.go new file mode 100644 index 000000000..fc3a80e59 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/api/server/router/distribution/backend.go @@ -0,0 +1,14 @@ +package distribution + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// Backend is all the methods that need to be implemented +// to provide image specific functionality. 
+type Backend interface { + GetRepository(context.Context, reference.Named, *types.AuthConfig) (distribution.Repository, bool, error) +} diff --git a/fn/vendor/github.com/docker/docker/api/server/router/distribution/distribution.go b/fn/vendor/github.com/docker/docker/api/server/router/distribution/distribution.go new file mode 100644 index 000000000..c1fb7bc1e --- /dev/null +++ b/fn/vendor/github.com/docker/docker/api/server/router/distribution/distribution.go @@ -0,0 +1,31 @@ +package distribution + +import "github.com/docker/docker/api/server/router" + +// distributionRouter is a router to talk with the registry +type distributionRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new distribution router +func NewRouter(backend Backend) router.Router { + r := &distributionRouter{ + backend: backend, + } + r.initRoutes() + return r +} + +// Routes returns the available routes +func (r *distributionRouter) Routes() []router.Route { + return r.routes +} + +// initRoutes initializes the routes in the distribution router +func (r *distributionRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/distribution/{name:.*}/json", r.getDistributionInfo), + } +} diff --git a/fn/vendor/github.com/docker/docker/api/server/router/distribution/distribution_routes.go b/fn/vendor/github.com/docker/docker/api/server/router/distribution/distribution_routes.go new file mode 100644 index 000000000..cc9e66a16 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/api/server/router/distribution/distribution_routes.go @@ -0,0 +1,138 @@ +package distribution + +import ( + "encoding/base64" + "encoding/json" + "net/http" + "strings" + + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/server/httputils" + 
"github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/json") + + var ( + config = &types.AuthConfig{} + authEncoded = r.Header.Get("X-Registry-Auth") + distributionInspect registrytypes.DistributionInspect + ) + + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(&config); err != nil { + // for a search it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + config = &types.AuthConfig{} + } + } + + image := vars["name"] + + ref, err := reference.ParseAnyReference(image) + if err != nil { + return err + } + namedRef, ok := ref.(reference.Named) + if !ok { + if _, ok := ref.(reference.Digested); ok { + // full image ID + return errors.Errorf("no manifest found for full image ID") + } + return errors.Errorf("unknown image reference format: %s", image) + } + + distrepo, _, err := s.backend.GetRepository(ctx, namedRef, config) + if err != nil { + return err + } + blobsrvc := distrepo.Blobs(ctx) + + if canonicalRef, ok := namedRef.(reference.Canonical); !ok { + namedRef = reference.TagNameOnly(namedRef) + + taggedRef, ok := namedRef.(reference.NamedTagged) + if !ok { + return errors.Errorf("image reference not tagged: %s", image) + } + + descriptor, err := distrepo.Tags(ctx).Get(ctx, taggedRef.Tag()) + if err != nil { + return err + } + distributionInspect.Descriptor = v1.Descriptor{ + MediaType: descriptor.MediaType, + Digest: descriptor.Digest, + Size: descriptor.Size, + } + } else { + // 
TODO(nishanttotla): Once manifests can be looked up as a blob, the + // descriptor should be set using blobsrvc.Stat(ctx, canonicalRef.Digest()) + // instead of having to manually fill in the fields + distributionInspect.Descriptor.Digest = canonicalRef.Digest() + } + + // we have a digest, so we can retrieve the manifest + mnfstsrvc, err := distrepo.Manifests(ctx) + if err != nil { + return err + } + mnfst, err := mnfstsrvc.Get(ctx, distributionInspect.Descriptor.Digest) + if err != nil { + return err + } + + mediaType, payload, err := mnfst.Payload() + if err != nil { + return err + } + // update MediaType because registry might return something incorrect + distributionInspect.Descriptor.MediaType = mediaType + if distributionInspect.Descriptor.Size == 0 { + distributionInspect.Descriptor.Size = int64(len(payload)) + } + + // retrieve platform information depending on the type of manifest + switch mnfstObj := mnfst.(type) { + case *manifestlist.DeserializedManifestList: + for _, m := range mnfstObj.Manifests { + distributionInspect.Platforms = append(distributionInspect.Platforms, v1.Platform{ + Architecture: m.Platform.Architecture, + OS: m.Platform.OS, + OSVersion: m.Platform.OSVersion, + OSFeatures: m.Platform.OSFeatures, + Variant: m.Platform.Variant, + }) + } + case *schema2.DeserializedManifest: + configJSON, err := blobsrvc.Get(ctx, mnfstObj.Config.Digest) + var platform v1.Platform + if err == nil { + err := json.Unmarshal(configJSON, &platform) + if err == nil && (platform.OS != "" || platform.Architecture != "") { + distributionInspect.Platforms = append(distributionInspect.Platforms, platform) + } + } + case *schema1.SignedManifest: + platform := v1.Platform{ + Architecture: mnfstObj.Architecture, + OS: "linux", + } + distributionInspect.Platforms = append(distributionInspect.Platforms, platform) + } + + return httputils.WriteJSON(w, http.StatusOK, distributionInspect) +} diff --git a/fn/vendor/github.com/docker/docker/api/server/router/experimental.go 
b/fn/vendor/github.com/docker/docker/api/server/router/experimental.go index 51385c255..ac31f0487 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/experimental.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/experimental.go @@ -11,7 +11,7 @@ import ( ) var ( - errExperimentalFeature = errors.New("This experimental feature is disabled by default. Start the Docker daemon with --experimental in order to enable it.") + errExperimentalFeature = errors.New("This experimental feature is disabled by default. Start the Docker daemon in experimental mode in order to enable it.") ) // ExperimentalRoute defines an experimental API route that can be enabled or disabled. diff --git a/fn/vendor/github.com/docker/docker/api/server/router/image/backend.go b/fn/vendor/github.com/docker/docker/api/server/router/image/backend.go index e4bac6f13..9a588a71a 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/image/backend.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/image/backend.go @@ -30,17 +30,17 @@ type imageBackend interface { Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) LookupImage(name string) (*types.ImageInspect, error) TagImage(imageName, repository, tag string) error - ImagesPrune(pruneFilters filters.Args) (*types.ImagesPruneReport, error) + ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) } type importExportBackend interface { LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error - ImportImage(src string, repository, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error + ImportImage(src string, repository, platform string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error ExportImage(names []string, outStream io.Writer) error } type registryBackend interface { - PullImage(ctx context.Context, image, tag string, 
metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + PullImage(ctx context.Context, image, tag, platform string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error) } diff --git a/fn/vendor/github.com/docker/docker/api/server/router/image/image.go b/fn/vendor/github.com/docker/docker/api/server/router/image/image.go index 54a4d5148..6c233d900 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/image/image.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/image/image.go @@ -40,10 +40,10 @@ func (r *imageRouter) initRoutes() { // POST router.NewPostRoute("/commit", r.postCommit), router.NewPostRoute("/images/load", r.postImagesLoad), - router.Cancellable(router.NewPostRoute("/images/create", r.postImagesCreate)), - router.Cancellable(router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush)), + router.NewPostRoute("/images/create", r.postImagesCreate, router.WithCancel), + router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush, router.WithCancel), router.NewPostRoute("/images/{name:.*}/tag", r.postImagesTag), - router.NewPostRoute("/images/prune", r.postImagesPrune), + router.NewPostRoute("/images/prune", r.postImagesPrune, router.WithCancel), // DELETE router.NewDeleteRoute("/images/{name:.*}", r.deleteImages), } diff --git a/fn/vendor/github.com/docker/docker/api/server/router/image/image_routes.go b/fn/vendor/github.com/docker/docker/api/server/router/image/image_routes.go index 7dbcf5e21..9b99a585f 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/image/image_routes.go +++ 
b/fn/vendor/github.com/docker/docker/api/server/router/image/image_routes.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "net/http" + "runtime" "strconv" "strings" @@ -17,6 +18,7 @@ import ( "github.com/docker/docker/api/types/versions" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/system" "github.com/docker/docker/registry" "golang.org/x/net/context" ) @@ -85,6 +87,41 @@ func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrite ) defer output.Close() + // TODO @jhowardmsft LCOW Support: Eventually we will need an API change + // so that platform comes from (for example) r.Form.Get("platform"). For + // the initial implementation, we assume that the platform is the + // runtime OS of the host. It will also need a validation function such + // as below which should be called after getting it from the API. + // + // Ensures the requested platform is valid and normalized + //func validatePlatform(req string) (string, error) { + // req = strings.ToLower(req) + // if req == "" { + // req = runtime.GOOS // default to host platform + // } + // valid := []string{runtime.GOOS} + // + // if system.LCOWSupported() { + // valid = append(valid, "linux") + // } + // + // for _, item := range valid { + // if req == item { + // return req, nil + // } + // } + // return "", fmt.Errorf("invalid platform requested: %s", req) + //} + // + // And in the call-site: + // if platform, err = validatePlatform(platform); err != nil { + // return err + // } + platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + w.Header().Set("Content-Type", "application/json") if image != "" { //pull @@ -106,20 +143,19 @@ func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrite } } - err = s.backend.PullImage(ctx, image, tag, metaHeaders, authConfig, output) + err = s.backend.PullImage(ctx, image, tag, platform, metaHeaders, authConfig, output) } else { //import src := 
r.Form.Get("fromSrc") // 'err' MUST NOT be defined within this block, we need any error // generated from the download to be available to the output // stream processing below - err = s.backend.ImportImage(src, repo, tag, message, r.Body, output, r.Form["changes"]) + err = s.backend.ImportImage(src, repo, platform, tag, message, r.Body, output, r.Form["changes"]) } if err != nil { if !output.Flushed() { return err } - sf := streamformatter.NewJSONStreamFormatter() - output.Write(sf.FormatError(err)) + output.Write(streamformatter.FormatError(err)) } return nil @@ -164,8 +200,7 @@ func (s *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter, if !output.Flushed() { return err } - sf := streamformatter.NewJSONStreamFormatter() - output.Write(sf.FormatError(err)) + output.Write(streamformatter.FormatError(err)) } return nil } @@ -190,8 +225,7 @@ func (s *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r if !output.Flushed() { return err } - sf := streamformatter.NewJSONStreamFormatter() - output.Write(sf.FormatError(err)) + output.Write(streamformatter.FormatError(err)) } return nil } @@ -207,7 +241,7 @@ func (s *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, output := ioutils.NewWriteFlusher(w) defer output.Close() if err := s.backend.LoadImage(r.Body, output, quiet); err != nil { - output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err)) + output.Write(streamformatter.FormatError(err)) } return nil } @@ -336,7 +370,7 @@ func (s *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWriter return err } - pruneReport, err := s.backend.ImagesPrune(pruneFilters) + pruneReport, err := s.backend.ImagesPrune(ctx, pruneFilters) if err != nil { return err } diff --git a/fn/vendor/github.com/docker/docker/api/server/router/local.go b/fn/vendor/github.com/docker/docker/api/server/router/local.go index 7cb2a5a2f..ba70f3413 100644 --- 
a/fn/vendor/github.com/docker/docker/api/server/router/local.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/local.go @@ -7,6 +7,10 @@ import ( "golang.org/x/net/context" ) +// RouteWrapper wraps a route with extra functionality. +// It is passed in when creating a new route. +type RouteWrapper func(r Route) Route + // localRoute defines an individual API route to connect // with the docker daemon. It implements Route. type localRoute struct { @@ -31,38 +35,42 @@ func (l localRoute) Path() string { } // NewRoute initializes a new local route for the router. -func NewRoute(method, path string, handler httputils.APIFunc) Route { - return localRoute{method, path, handler} +func NewRoute(method, path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { + var r Route = localRoute{method, path, handler} + for _, o := range opts { + r = o(r) + } + return r } // NewGetRoute initializes a new route with the http method GET. -func NewGetRoute(path string, handler httputils.APIFunc) Route { - return NewRoute("GET", path, handler) +func NewGetRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { + return NewRoute("GET", path, handler, opts...) } // NewPostRoute initializes a new route with the http method POST. -func NewPostRoute(path string, handler httputils.APIFunc) Route { - return NewRoute("POST", path, handler) +func NewPostRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { + return NewRoute("POST", path, handler, opts...) } // NewPutRoute initializes a new route with the http method PUT. -func NewPutRoute(path string, handler httputils.APIFunc) Route { - return NewRoute("PUT", path, handler) +func NewPutRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { + return NewRoute("PUT", path, handler, opts...) } // NewDeleteRoute initializes a new route with the http method DELETE. 
-func NewDeleteRoute(path string, handler httputils.APIFunc) Route { - return NewRoute("DELETE", path, handler) +func NewDeleteRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { + return NewRoute("DELETE", path, handler, opts...) } // NewOptionsRoute initializes a new route with the http method OPTIONS. -func NewOptionsRoute(path string, handler httputils.APIFunc) Route { - return NewRoute("OPTIONS", path, handler) +func NewOptionsRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { + return NewRoute("OPTIONS", path, handler, opts...) } // NewHeadRoute initializes a new route with the http method HEAD. -func NewHeadRoute(path string, handler httputils.APIFunc) Route { - return NewRoute("HEAD", path, handler) +func NewHeadRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { + return NewRoute("HEAD", path, handler, opts...) } func cancellableHandler(h httputils.APIFunc) httputils.APIFunc { @@ -85,9 +93,9 @@ func cancellableHandler(h httputils.APIFunc) httputils.APIFunc { } } -// Cancellable makes new route which embeds http.CloseNotifier feature to +// WithCancel makes new route which embeds http.CloseNotifier feature to // context.Context of handler. 
-func Cancellable(r Route) Route { +func WithCancel(r Route) Route { return localRoute{ method: r.Method(), path: r.Path(), diff --git a/fn/vendor/github.com/docker/docker/api/server/router/network/backend.go b/fn/vendor/github.com/docker/docker/api/server/router/network/backend.go index 000ace6d6..a32a0b9c0 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/network/backend.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/network/backend.go @@ -1,6 +1,8 @@ package network import ( + "golang.org/x/net/context" + "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/network" @@ -16,5 +18,5 @@ type Backend interface { ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error DeleteNetwork(name string) error - NetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error) + NetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error) } diff --git a/fn/vendor/github.com/docker/docker/api/server/router/network/network.go b/fn/vendor/github.com/docker/docker/api/server/router/network/network.go index bc613a0ed..eaf52aa2a 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/network/network.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/network/network.go @@ -37,7 +37,7 @@ func (r *networkRouter) initRoutes() { router.NewPostRoute("/networks/create", r.postNetworkCreate), router.NewPostRoute("/networks/{id:.*}/connect", r.postNetworkConnect), router.NewPostRoute("/networks/{id:.*}/disconnect", r.postNetworkDisconnect), - router.NewPostRoute("/networks/prune", r.postNetworksPrune), + router.NewPostRoute("/networks/prune", r.postNetworksPrune, router.WithCancel), // DELETE router.NewDeleteRoute("/networks/{id:.*}", r.deleteNetwork), } diff --git 
a/fn/vendor/github.com/docker/docker/api/server/router/network/network_routes.go b/fn/vendor/github.com/docker/docker/api/server/router/network/network_routes.go index e23c463ae..6f2041e35 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/network/network_routes.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/network/network_routes.go @@ -98,6 +98,14 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r return errors.NewBadRequestError(err) } } + scope := r.URL.Query().Get("scope") + + isMatchingScope := func(scope, term string) bool { + if term != "" { + return scope == term + } + return true + } // In case multiple networks have duplicate names, return error. // TODO (yongtang): should we wrap with version here for backward compatibility? @@ -112,15 +120,15 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r nw := n.backend.GetNetworks() for _, network := range nw { - if network.ID() == term { + if network.ID() == term && isMatchingScope(network.Info().Scope(), scope) { return httputils.WriteJSON(w, http.StatusOK, *n.buildDetailedNetworkResources(network, verbose)) } - if network.Name() == term { + if network.Name() == term && isMatchingScope(network.Info().Scope(), scope) { // No need to check the ID collision here as we are still in // local scope and the network ID is unique in this scope. listByFullName[network.ID()] = *n.buildDetailedNetworkResources(network, verbose) } - if strings.HasPrefix(network.ID(), term) { + if strings.HasPrefix(network.ID(), term) && isMatchingScope(network.Info().Scope(), scope) { // No need to check the ID collision here as we are still in // local scope and the network ID is unique in this scope. 
listByPartialID[network.ID()] = *n.buildDetailedNetworkResources(network, verbose) @@ -129,10 +137,10 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r nr, _ := n.cluster.GetNetworks() for _, network := range nr { - if network.ID == term { + if network.ID == term && isMatchingScope(network.Scope, scope) { return httputils.WriteJSON(w, http.StatusOK, network) } - if network.Name == term { + if network.Name == term && isMatchingScope(network.Scope, scope) { // Check the ID collision as we are in swarm scope here, and // the map (of the listByFullName) may have already had a // network with the same ID (from local scope previously) @@ -140,7 +148,7 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r listByFullName[network.ID] = network } } - if strings.HasPrefix(network.ID, term) { + if strings.HasPrefix(network.ID, term) && isMatchingScope(network.Scope, scope) { // Check the ID collision as we are in swarm scope here, and // the map (of the listByPartialID) may have already had a // network with the same ID (from local scope previously) @@ -283,13 +291,6 @@ func (n *networkRouter) buildNetworkResource(nw libnetwork.Network) *types.Netwo r.ID = nw.ID() r.Created = info.Created() r.Scope = info.Scope() - if n.cluster.IsManager() { - if _, err := n.cluster.GetNetwork(nw.ID()); err == nil { - r.Scope = "swarm" - } - } else if info.Dynamic() { - r.Scope = "swarm" - } r.Driver = nw.Type() r.EnableIPv6 = info.IPv6Enabled() r.Internal = info.Internal() @@ -299,6 +300,11 @@ func (n *networkRouter) buildNetworkResource(nw libnetwork.Network) *types.Netwo r.Containers = make(map[string]types.EndpointResource) buildIpamResources(r, info) r.Labels = info.Labels() + r.ConfigOnly = info.ConfigOnly() + + if cn := info.ConfigFrom(); cn != "" { + r.ConfigFrom = network.ConfigReference{Network: cn} + } peers := info.Peers() if len(peers) != 0 { @@ -412,6 +418,9 @@ func buildIpamResources(r *types.NetworkResource, nwInfo 
libnetwork.NetworkInfo) if !hasIpv6Conf { for _, ip6Info := range ipv6Info { + if ip6Info.IPAMData.Pool == nil { + continue + } iData := network.IPAMConfig{} iData.Subnet = ip6Info.IPAMData.Pool.String() iData.Gateway = ip6Info.IPAMData.Gateway.String() @@ -455,7 +464,7 @@ func (n *networkRouter) postNetworksPrune(ctx context.Context, w http.ResponseWr return err } - pruneReport, err := n.backend.NetworksPrune(pruneFilters) + pruneReport, err := n.backend.NetworksPrune(ctx, pruneFilters) if err != nil { return err } diff --git a/fn/vendor/github.com/docker/docker/api/server/router/plugin/backend.go b/fn/vendor/github.com/docker/docker/api/server/router/plugin/backend.go index a5f3c9790..1b60501fc 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/plugin/backend.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/plugin/backend.go @@ -7,6 +7,7 @@ import ( "github.com/docker/distribution/reference" enginetypes "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/plugin" "golang.org/x/net/context" ) @@ -19,7 +20,7 @@ type Backend interface { Remove(name string, config *enginetypes.PluginRmConfig) error Set(name string, args []string) error Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error) - Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error + Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, outStream io.Writer) error Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders 
http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *enginetypes.PluginCreateOptions) error diff --git a/fn/vendor/github.com/docker/docker/api/server/router/plugin/plugin.go b/fn/vendor/github.com/docker/docker/api/server/router/plugin/plugin.go index e4ea9e23b..22819e27a 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/plugin/plugin.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/plugin/plugin.go @@ -30,9 +30,9 @@ func (r *pluginRouter) initRoutes() { router.NewDeleteRoute("/plugins/{name:.*}", r.removePlugin), router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin), // PATCH? router.NewPostRoute("/plugins/{name:.*}/disable", r.disablePlugin), - router.Cancellable(router.NewPostRoute("/plugins/pull", r.pullPlugin)), - router.Cancellable(router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin)), - router.Cancellable(router.NewPostRoute("/plugins/{name:.*}/upgrade", r.upgradePlugin)), + router.NewPostRoute("/plugins/pull", r.pullPlugin, router.WithCancel), + router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin, router.WithCancel), + router.NewPostRoute("/plugins/{name:.*}/upgrade", r.upgradePlugin, router.WithCancel), router.NewPostRoute("/plugins/{name:.*}/set", r.setPlugin), router.NewPostRoute("/plugins/create", r.createPlugin), } diff --git a/fn/vendor/github.com/docker/docker/api/server/router/plugin/plugin_routes.go b/fn/vendor/github.com/docker/docker/api/server/router/plugin/plugin_routes.go index 0d743a4a9..79e3cf5de 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/plugin/plugin_routes.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/plugin/plugin_routes.go @@ -121,7 +121,7 @@ func (pr *pluginRouter) upgradePlugin(ctx context.Context, w http.ResponseWriter if !output.Flushed() { return err } - 
output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err)) + output.Write(streamformatter.FormatError(err)) } return nil @@ -160,7 +160,7 @@ func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r if !output.Flushed() { return err } - output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err)) + output.Write(streamformatter.FormatError(err)) } return nil @@ -268,7 +268,7 @@ func (pr *pluginRouter) pushPlugin(ctx context.Context, w http.ResponseWriter, r if !output.Flushed() { return err } - output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err)) + output.Write(streamformatter.FormatError(err)) } return nil } diff --git a/fn/vendor/github.com/docker/docker/api/server/router/session/backend.go b/fn/vendor/github.com/docker/docker/api/server/router/session/backend.go new file mode 100644 index 000000000..ad4cc1bc5 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/api/server/router/session/backend.go @@ -0,0 +1,12 @@ +package session + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// Backend abstracts an session receiver from an http request. 
+type Backend interface { + HandleHTTPRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error +} diff --git a/fn/vendor/github.com/docker/docker/api/server/router/session/session.go b/fn/vendor/github.com/docker/docker/api/server/router/session/session.go new file mode 100644 index 000000000..977a9c42c --- /dev/null +++ b/fn/vendor/github.com/docker/docker/api/server/router/session/session.go @@ -0,0 +1,29 @@ +package session + +import "github.com/docker/docker/api/server/router" + +// sessionRouter is a router to talk with the session controller +type sessionRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new session router +func NewRouter(b Backend) router.Router { + r := &sessionRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routers to the session controller +func (r *sessionRouter) Routes() []router.Route { + return r.routes +} + +func (r *sessionRouter) initRoutes() { + r.routes = []router.Route{ + router.Experimental(router.NewPostRoute("/session", r.startSession)), + } +} diff --git a/fn/vendor/github.com/docker/docker/api/server/router/session/session_routes.go b/fn/vendor/github.com/docker/docker/api/server/router/session/session_routes.go new file mode 100644 index 000000000..ef9753c6e --- /dev/null +++ b/fn/vendor/github.com/docker/docker/api/server/router/session/session_routes.go @@ -0,0 +1,16 @@ +package session + +import ( + "net/http" + + apierrors "github.com/docker/docker/api/errors" + "golang.org/x/net/context" +) + +func (sr *sessionRouter) startSession(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + err := sr.backend.HandleHTTPRequest(ctx, w, r) + if err != nil { + return apierrors.NewBadRequestError(err) + } + return nil +} diff --git a/fn/vendor/github.com/docker/docker/api/server/router/swarm/backend.go b/fn/vendor/github.com/docker/docker/api/server/router/swarm/backend.go index 
3a5da97d2..3b7933d7b 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/swarm/backend.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/swarm/backend.go @@ -16,21 +16,32 @@ type Backend interface { Update(uint64, types.Spec, types.UpdateFlags) error GetUnlockKey() (string, error) UnlockSwarm(req types.UnlockRequest) error + GetServices(basictypes.ServiceListOptions) ([]types.Service, error) GetService(idOrName string, insertDefaults bool) (types.Service, error) - CreateService(types.ServiceSpec, string) (*basictypes.ServiceCreateResponse, error) - UpdateService(string, uint64, types.ServiceSpec, basictypes.ServiceUpdateOptions) (*basictypes.ServiceUpdateResponse, error) + CreateService(types.ServiceSpec, string, bool) (*basictypes.ServiceCreateResponse, error) + UpdateService(string, uint64, types.ServiceSpec, basictypes.ServiceUpdateOptions, bool) (*basictypes.ServiceUpdateResponse, error) RemoveService(string) error + ServiceLogs(context.Context, *backend.LogSelector, *basictypes.ContainerLogsOptions) (<-chan *backend.LogMessage, error) + GetNodes(basictypes.NodeListOptions) ([]types.Node, error) GetNode(string) (types.Node, error) UpdateNode(string, uint64, types.NodeSpec) error RemoveNode(string, bool) error + GetTasks(basictypes.TaskListOptions) ([]types.Task, error) GetTask(string) (types.Task, error) + GetSecrets(opts basictypes.SecretListOptions) ([]types.Secret, error) CreateSecret(s types.SecretSpec) (string, error) RemoveSecret(idOrName string) error GetSecret(id string) (types.Secret, error) UpdateSecret(idOrName string, version uint64, spec types.SecretSpec) error + + GetConfigs(opts basictypes.ConfigListOptions) ([]types.Config, error) + CreateConfig(s types.ConfigSpec) (string, error) + RemoveConfig(id string) error + GetConfig(id string) (types.Config, error) + UpdateConfig(idOrName string, version uint64, spec types.ConfigSpec) error } diff --git a/fn/vendor/github.com/docker/docker/api/server/router/swarm/cluster.go 
b/fn/vendor/github.com/docker/docker/api/server/router/swarm/cluster.go index cda2eb7bd..2529250b0 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/swarm/cluster.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/swarm/cluster.go @@ -31,23 +31,33 @@ func (sr *swarmRouter) initRoutes() { router.NewGetRoute("/swarm/unlockkey", sr.getUnlockKey), router.NewPostRoute("/swarm/update", sr.updateCluster), router.NewPostRoute("/swarm/unlock", sr.unlockCluster), + router.NewGetRoute("/services", sr.getServices), router.NewGetRoute("/services/{id}", sr.getService), router.NewPostRoute("/services/create", sr.createService), router.NewPostRoute("/services/{id}/update", sr.updateService), router.NewDeleteRoute("/services/{id}", sr.removeService), - router.Cancellable(router.NewGetRoute("/services/{id}/logs", sr.getServiceLogs)), + router.NewGetRoute("/services/{id}/logs", sr.getServiceLogs, router.WithCancel), + router.NewGetRoute("/nodes", sr.getNodes), router.NewGetRoute("/nodes/{id}", sr.getNode), router.NewDeleteRoute("/nodes/{id}", sr.removeNode), router.NewPostRoute("/nodes/{id}/update", sr.updateNode), + router.NewGetRoute("/tasks", sr.getTasks), router.NewGetRoute("/tasks/{id}", sr.getTask), - router.Cancellable(router.NewGetRoute("/tasks/{id}/logs", sr.getTaskLogs)), + router.NewGetRoute("/tasks/{id}/logs", sr.getTaskLogs, router.WithCancel), + router.NewGetRoute("/secrets", sr.getSecrets), router.NewPostRoute("/secrets/create", sr.createSecret), router.NewDeleteRoute("/secrets/{id}", sr.removeSecret), router.NewGetRoute("/secrets/{id}", sr.getSecret), router.NewPostRoute("/secrets/{id}/update", sr.updateSecret), + + router.NewGetRoute("/configs", sr.getConfigs), + router.NewPostRoute("/configs/create", sr.createConfig), + router.NewDeleteRoute("/configs/{id}", sr.removeConfig), + router.NewGetRoute("/configs/{id}", sr.getConfig), + router.NewPostRoute("/configs/{id}/update", sr.updateConfig), } } diff --git 
a/fn/vendor/github.com/docker/docker/api/server/router/swarm/cluster_routes.go b/fn/vendor/github.com/docker/docker/api/server/router/swarm/cluster_routes.go index 4c60b6b6e..91461da76 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/swarm/cluster_routes.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/swarm/cluster_routes.go @@ -13,6 +13,7 @@ import ( "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/filters" types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/versions" "golang.org/x/net/context" ) @@ -178,8 +179,13 @@ func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter, // Get returns "" if the header does not exist encodedAuth := r.Header.Get("X-Registry-Auth") + cliVersion := r.Header.Get("version") + queryRegistry := false + if cliVersion != "" && versions.LessThan(cliVersion, "1.30") { + queryRegistry = true + } - resp, err := sr.backend.CreateService(service, encodedAuth) + resp, err := sr.backend.CreateService(service, encodedAuth, queryRegistry) if err != nil { logrus.Errorf("Error creating service %s: %v", service.Name, err) return err @@ -207,8 +213,13 @@ func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter, flags.EncodedRegistryAuth = r.Header.Get("X-Registry-Auth") flags.RegistryAuthFrom = r.URL.Query().Get("registryAuthFrom") flags.Rollback = r.URL.Query().Get("rollback") + cliVersion := r.Header.Get("version") + queryRegistry := false + if cliVersion != "" && versions.LessThan(cliVersion, "1.30") { + queryRegistry = true + } - resp, err := sr.backend.UpdateService(vars["id"], version, service, flags) + resp, err := sr.backend.UpdateService(vars["id"], version, service, flags, queryRegistry) if err != nil { logrus.Errorf("Error updating service %s: %v", vars["id"], err) return err @@ -408,3 +419,74 @@ func (sr *swarmRouter) updateSecret(ctx context.Context, w http.ResponseWriter, return nil } + +func 
(sr *swarmRouter) getConfigs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + configs, err := sr.backend.GetConfigs(basictypes.ConfigListOptions{Filters: filters}) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, configs) +} + +func (sr *swarmRouter) createConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var config types.ConfigSpec + if err := json.NewDecoder(r.Body).Decode(&config); err != nil { + return err + } + + id, err := sr.backend.CreateConfig(config) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &basictypes.ConfigCreateResponse{ + ID: id, + }) +} + +func (sr *swarmRouter) removeConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := sr.backend.RemoveConfig(vars["id"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (sr *swarmRouter) getConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + config, err := sr.backend.GetConfig(vars["id"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, config) +} + +func (sr *swarmRouter) updateConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var config types.ConfigSpec + if err := json.NewDecoder(r.Body).Decode(&config); err != nil { + return errors.NewBadRequestError(err) + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + return errors.NewBadRequestError(fmt.Errorf("invalid config version")) + } + + id := vars["id"] + if err := sr.backend.UpdateConfig(id, version, config); err != 
nil { + return err + } + + return nil +} diff --git a/fn/vendor/github.com/docker/docker/api/server/router/swarm/helpers.go b/fn/vendor/github.com/docker/docker/api/server/router/swarm/helpers.go index ea692ea36..7d2944208 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/swarm/helpers.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/swarm/helpers.go @@ -44,7 +44,7 @@ func (sr *swarmRouter) swarmLogs(ctx context.Context, w http.ResponseWriter, r * // maybe should return some context with this error? return err } - tty = s.Spec.TaskTemplate.ContainerSpec.TTY || tty + tty = (s.Spec.TaskTemplate.ContainerSpec != nil && s.Spec.TaskTemplate.ContainerSpec.TTY) || tty } for _, task := range selector.Tasks { t, err := sr.backend.GetTask(task) diff --git a/fn/vendor/github.com/docker/docker/api/server/router/system/backend.go b/fn/vendor/github.com/docker/docker/api/server/router/system/backend.go index 6946c4e2d..da1de380d 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/system/backend.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/system/backend.go @@ -14,7 +14,7 @@ import ( type Backend interface { SystemInfo() (*types.Info, error) SystemVersion() types.Version - SystemDiskUsage() (*types.DiskUsage, error) + SystemDiskUsage(ctx context.Context) (*types.DiskUsage, error) SubscribeToEvents(since, until time.Time, ef filters.Args) ([]events.Message, chan interface{}) UnsubscribeFromEvents(chan interface{}) AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) diff --git a/fn/vendor/github.com/docker/docker/api/server/router/system/system.go b/fn/vendor/github.com/docker/docker/api/server/router/system/system.go index 44231d964..a64631e8a 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/system/system.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/system/system.go @@ -2,6 +2,7 @@ package system import ( 
"github.com/docker/docker/api/server/router" + "github.com/docker/docker/builder/fscache" "github.com/docker/docker/daemon/cluster" ) @@ -11,22 +12,24 @@ type systemRouter struct { backend Backend cluster *cluster.Cluster routes []router.Route + builder *fscache.FSCache } // NewRouter initializes a new system router -func NewRouter(b Backend, c *cluster.Cluster) router.Router { +func NewRouter(b Backend, c *cluster.Cluster, fscache *fscache.FSCache) router.Router { r := &systemRouter{ backend: b, cluster: c, + builder: fscache, } r.routes = []router.Route{ router.NewOptionsRoute("/{anyroute:.*}", optionsHandler), router.NewGetRoute("/_ping", pingHandler), - router.Cancellable(router.NewGetRoute("/events", r.getEvents)), + router.NewGetRoute("/events", r.getEvents, router.WithCancel), router.NewGetRoute("/info", r.getInfo), router.NewGetRoute("/version", r.getVersion), - router.NewGetRoute("/system/df", r.getDiskUsage), + router.NewGetRoute("/system/df", r.getDiskUsage, router.WithCancel), router.NewPostRoute("/auth", r.postAuth), } diff --git a/fn/vendor/github.com/docker/docker/api/server/router/system/system_routes.go b/fn/vendor/github.com/docker/docker/api/server/router/system/system_routes.go index bb7853927..30fb000e1 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/system/system_routes.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/system/system_routes.go @@ -17,6 +17,7 @@ import ( timetypes "github.com/docker/docker/api/types/time" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/pkg/ioutils" + pkgerrors "github.com/pkg/errors" "golang.org/x/net/context" ) @@ -71,10 +72,15 @@ func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r } func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - du, err := s.backend.SystemDiskUsage() + du, err := s.backend.SystemDiskUsage(ctx) if err != nil { return err } + 
builderSize, err := s.builder.DiskUsage() + if err != nil { + return pkgerrors.Wrap(err, "error getting build cache usage") + } + du.BuilderSize = builderSize return httputils.WriteJSON(w, http.StatusOK, du) } diff --git a/fn/vendor/github.com/docker/docker/api/server/router/volume/backend.go b/fn/vendor/github.com/docker/docker/api/server/router/volume/backend.go index 180c06e5d..b97cb9478 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/volume/backend.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/volume/backend.go @@ -1,6 +1,8 @@ package volume import ( + "golang.org/x/net/context" + // TODO return types need to be refactored into pkg "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" @@ -13,5 +15,5 @@ type Backend interface { VolumeInspect(name string) (*types.Volume, error) VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) VolumeRm(name string, force bool) error - VolumesPrune(pruneFilters filters.Args) (*types.VolumesPruneReport, error) + VolumesPrune(ctx context.Context, pruneFilters filters.Args) (*types.VolumesPruneReport, error) } diff --git a/fn/vendor/github.com/docker/docker/api/server/router/volume/volume.go b/fn/vendor/github.com/docker/docker/api/server/router/volume/volume.go index 4e9f972a6..b24c8fee5 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/volume/volume.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/volume/volume.go @@ -29,7 +29,7 @@ func (r *volumeRouter) initRoutes() { router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), // POST router.NewPostRoute("/volumes/create", r.postVolumesCreate), - router.NewPostRoute("/volumes/prune", r.postVolumesPrune), + router.NewPostRoute("/volumes/prune", r.postVolumesPrune, router.WithCancel), // DELETE router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), } diff --git a/fn/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go 
b/fn/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go index 21d12d283..f0f490119 100644 --- a/fn/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go +++ b/fn/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go @@ -77,7 +77,7 @@ func (v *volumeRouter) postVolumesPrune(ctx context.Context, w http.ResponseWrit return err } - pruneReport, err := v.backend.VolumesPrune(pruneFilters) + pruneReport, err := v.backend.VolumesPrune(ctx, pruneFilters) if err != nil { return err } diff --git a/fn/vendor/github.com/docker/docker/api/server/server.go b/fn/vendor/github.com/docker/docker/api/server/server.go index 2e5f9632d..d40201911 100644 --- a/fn/vendor/github.com/docker/docker/api/server/server.go +++ b/fn/vendor/github.com/docker/docker/api/server/server.go @@ -92,13 +92,12 @@ func (s *Server) serveAPI() error { }(srv) } - for i := 0; i < len(s.servers); i++ { + for range s.servers { err := <-chErrors if err != nil { return err } } - return nil } diff --git a/fn/vendor/github.com/docker/docker/api/swagger.yaml b/fn/vendor/github.com/docker/docker/api/swagger.yaml index 36927b5a0..5803266b0 100644 --- a/fn/vendor/github.com/docker/docker/api/swagger.yaml +++ b/fn/vendor/github.com/docker/docker/api/swagger.yaml @@ -19,10 +19,10 @@ produces: consumes: - "application/json" - "text/plain" -basePath: "/v1.29" +basePath: "/v1.31" info: title: "Docker Engine API" - version: "1.29" + version: "1.31" x-logo: url: "https://docs.docker.com/images/logo-docker-main.png" description: | @@ -44,7 +44,7 @@ info: The API is usually changed in each release of Docker, so API calls are versioned to ensure that clients don't break. - For Docker Engine 17.05, the API version is 1.29. To lock to this version, you prefix the URL with `/v1.29`. For example, calling `/info` is the same as calling `/v1.29/info`. + For Docker Engine 17.06, the API version is 1.30. To lock to this version, you prefix the URL with `/v1.30`. 
For example, calling `/info` is the same as calling `/v1.30/info`. Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine. @@ -52,10 +52,12 @@ info: The API uses an open schema model, which means server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer Docker daemons. - This documentation is for version 1.29 of the API, which was introduced with Docker 17.05. Use this table to find documentation for previous versions of the API: + This documentation is for version 1.31 of the API. Use this table to find documentation for previous versions of the API: Docker version | API version | Changes ----------------|-------------|--------- + 17.06.x | [1.30](https://docs.docker.com/engine/api/v1.30/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-30-api-changes) + 17.05.x | [1.29](https://docs.docker.com/engine/api/v1.29/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-29-api-changes) 17.04.x | [1.28](https://docs.docker.com/engine/api/v1.28/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-28-api-changes) 17.03.1 | [1.27](https://docs.docker.com/engine/api/v1.27/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-27-api-changes) 1.13.1 & 17.03.0 | [1.26](https://docs.docker.com/engine/api/v1.26/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-26-api-changes) @@ -230,6 +232,7 @@ definitions: type: "string" Source: description: "Mount source (e.g. a volume name, a host path)." + type: "string" Type: description: | The mount type. 
Available types: @@ -307,10 +310,12 @@ definitions: Name: type: "string" description: | + - Empty string means not to restart - `always` Always restart - `unless-stopped` Restart always except when the user has manually stopped the container - `on-failure` Restart only when the container exit code is non-zero enum: + - "" - "always" - "unless-stopped" - "on-failure" @@ -498,16 +503,16 @@ definitions: items: type: "string" Interval: - description: "The time to wait between checks in nanoseconds. It should be 0 or not less than 1000000000(1s). 0 means inherit." + description: "The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit." type: "integer" Timeout: - description: "The time to wait before considering the check to have hung. It should be 0 or not less than 1000000000(1s). 0 means inherit." + description: "The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit." type: "integer" Retries: description: "The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit." type: "integer" StartPeriod: - description: "Start period for the container to initialize before starting health-retries countdown in nanoseconds. 0 means inherit." + description: "Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit." type: "integer" HostConfig: @@ -944,6 +949,12 @@ definitions: type: "string" BaseLayer: type: "string" + Metadata: + type: "object" + properties: + LastTagTime: + type: "string" + format: "dateTime" ImageSummary: type: "object" @@ -1044,6 +1055,10 @@ definitions: type: "string" description: "Mount path of the volume on the host." x-nullable: false + CreatedAt: + type: "string" + format: "dateTime" + description: "Time volume was created." 
Status: type: "object" description: | @@ -1097,6 +1112,7 @@ definitions: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Scope: "local" + CreatedAt: "2016-06-07T20:31:11.853781916Z" Network: type: "object" @@ -1192,6 +1208,8 @@ definitions: NetworkContainer: type: "object" properties: + Name: + type: "string" EndpointID: type: "string" MacAddress: @@ -1619,7 +1637,7 @@ definitions: may not be applied if the version number has changed from the last read. In other words, if two update requests specify the same base version, only one of the requests can succeed. As a result, two separate update requests that happen at the same time will not - unintentially overwrite each other. + unintentionally overwrite each other. type: "object" properties: Index: @@ -1710,6 +1728,8 @@ definitions: type: "string" Name: type: "string" + TLSInfo: + $ref: "#/definitions/SwarmSpec" example: ID: "24ifsmvkjbyhk" Version: @@ -1750,6 +1770,47 @@ definitions: Leader: true Reachability: "reachable" Addr: "172.17.0.2:2377" + TLSInfo: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + TLSInfo: + description: "Information about the issuer of leaf TLS certificates and the trusted root CA certificate" + type: "object" + properties: + 
TrustRoot: + description: "The root CA certificate(s) that are used to validate leaf TLS certificates" + type: "string" + CertIssuerSubject: + description: "The base64-url-safe-encoded raw subject bytes of the issuer" + type: "string" + CertIssuerPublicKey: + description: "The base64-url-safe-encoded raw public key bytes of the issuer" + type: "string" + example: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" SwarmSpec: description: "User modifiable swarm configuration." type: "object" @@ -1834,6 +1895,17 @@ definitions: type: "object" additionalProperties: type: "string" + CACert: + description: "The root CA certificate (in PEM format) this external CA uses to issue TLS certificates (assumed to be to the current swarm root CA certificate if not provided)." + type: "string" + SigningCACert: + description: "The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format." + type: "string" + SigningCAKey: + description: "The desired signing CA key for all swarm node TLS leaf certificates, in PEM format." 
+ type: "string" + ForceRotate: + description: "An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified in `SigningCACert` and `SigningCAKey`" EncryptionConfig: description: "Parameters related to encryption-at-rest." type: "object" @@ -1894,15 +1966,48 @@ definitions: format: "dateTime" Spec: $ref: "#/definitions/SwarmSpec" + TLSInfo: + $ref: "#/definitions/TLSInfo" + RootRotationInProgress: + description: "Whether there is currently a root CA rotation in progress for the swarm" + type: "boolean" TaskSpec: description: "User modifiable task configuration." type: "object" properties: + PluginSpec: + type: "object" + description: "Invalid when specified with `ContainerSpec`." + properties: + Name: + description: "The name or 'alias' to use for the plugin." + type: "string" + Remote: + description: "The plugin image reference to use." + type: "string" + Disabled: + description: "Disable the plugin once scheduled." + type: "boolean" + PluginPrivilege: + type: "array" + items: + description: "Describes a permission accepted by the user upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" ContainerSpec: type: "object" + description: "Invalid when specified with `PluginSpec`." properties: Image: - description: "The image name to use for the container." + description: "The image name to use for the container" type: "string" Labels: description: "User-defined key/value data." @@ -2093,9 +2198,22 @@ definitions: SpreadDescriptor: description: "label descriptor, such as engine.labels.az" type: "string" + Platforms: + description: "An array of supported platforms." + type: "array" + items: + type: "object" + properties: + Architecture: + type: "string" + OS: + type: "string" ForceUpdate: description: "A counter that triggers an update even if no relevant parameters have been changed." 
type: "integer" + Runtime: + description: "Runtime is the type of runtime specified for the task executor." + type: "string" Networks: type: "array" items: @@ -2615,6 +2733,11 @@ paths: /containers/json: get: summary: "List containers" + description: | + Returns a list of containers. For details on the format, see [the inspect endpoint](#operation/ContainerInspect). + + Note that it uses a different, smaller representation of a container than inspecting a single container. For example, + the list of linked containers is not propagated . operationId: "ContainerList" produces: - "application/json" @@ -3026,10 +3149,22 @@ paths: type: "object" properties: Status: - description: "The status of the container. For example, `running` or `exited`." + description: | + The status of the container. For example, `"running"` or `"exited"`. type: "string" + enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] Running: - description: "Whether this container is running." + description: | + Whether this container is running. + + Note that a running container can be _paused_. The `Running` and `Paused` + booleans are not mutually exclusive: + + When pausing a container (on Linux), the cgroups freezer is used to suspend + all processes in the container. Freezing the process requires the process to + be running. As a result, paused containers are both `Running` _and_ `Paused`. + + Use the `Status` field instead to determine if a container's state is "running". type: "boolean" Paused: description: "Whether this container is paused." @@ -4189,6 +4324,11 @@ paths: required: true description: "ID or name of the container" type: "string" + - name: "condition" + in: "query" + description: "Wait until a container state reaches the given condition, either 'not-running' (default), 'next-exit', or 'removed'." 
+ type: "string" + default: "not-running" tags: ["Container"] /containers/{id}: delete: @@ -4392,6 +4532,7 @@ paths: Available filters: - `until=` Prune containers created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune containers with (or without, in case `label!=...` is used) the specified labels. type: "string" responses: 200: @@ -4638,6 +4779,27 @@ paths: schema: $ref: "#/definitions/ErrorResponse" tags: ["Image"] + /build/prune: + post: + summary: "Delete builder cache" + produces: + - "application/json" + operationId: "BuildPrune" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] /images/create: post: summary: "Create an image" @@ -5082,6 +5244,7 @@ paths: unused *and* untagged images. When set to `false` (or `0`), all unused images are pruned. - `until=` Prune images created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune images with (or without, in case `label!=...` is used) the specified labels. type: "string" responses: 200: @@ -5199,6 +5362,10 @@ paths: type: "array" items: type: "string" + Log: + type: "array" + items: + type: "string" ExperimentalBuild: type: "boolean" HttpProxy: @@ -6185,6 +6352,7 @@ paths: Filters to process on the prune list, encoded as JSON (a `map[string][]string`). 
Available filters: + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. type: "string" responses: 200: @@ -6209,6 +6377,11 @@ paths: /networks: get: summary: "List networks" + description: | + Returns a list of networks. For details on the format, see [the network inspect endpoint](#operation/NetworkInspect). + + Note that it uses a different, smaller representation of a network than inspecting a single network. For example, + the list of containers attached to the network is not propagated in API versions 1.28 and up. operationId: "NetworkList" produces: - "application/json" @@ -6235,12 +6408,6 @@ paths: Config: - Subnet: "172.17.0.0/16" - Containers: - 39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867: - EndpointID: "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda" - MacAddress: "02:42:ac:11:00:02" - IPv4Address: "172.17.0.2/16" - IPv6Address: "" Options: com.docker.network.bridge.default_bridge: "true" com.docker.network.bridge.enable_icc: "true" @@ -6310,6 +6477,10 @@ paths: description: "Network not found" schema: $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" @@ -6321,6 +6492,10 @@ paths: description: "Detailed inspect output for troubleshooting" type: "boolean" default: false + - name: "scope" + in: "query" + description: "Filter the network by scope (swarm, global, or local)" + type: "string" tags: ["Network"] delete: @@ -6329,6 +6504,10 @@ paths: responses: 204: description: "No error" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" 404: description: "no such network" schema: @@ -6553,6 +6732,7 @@ paths: Available filters: - `until=` Prune networks created before this timestamp. 
The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune networks with (or without, in case `label!=...` is used) the specified labels. type: "string" responses: 200: @@ -7095,6 +7275,10 @@ paths: responses: 200: description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 404: description: "no such node" schema: @@ -7171,6 +7355,21 @@ paths: UpdatedAt: "2016-08-15T16:32:09.623207604Z" Version: Index: 51 + RootRotationInProgress: false + TLSInfo: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" 404: description: "no such swarm" schema: @@ -7223,6 +7422,16 @@ paths: AdvertiseAddr: description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." 
type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: ``), for example, `192.168.1.1`, + or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` + is used. + + The `DataPathAddr` specifies the address that global scope network drivers will publish towards other + nodes in order to reach the containers running on this node. Using this parameter it is possible to + separate the container data traffic from the management traffic of the cluster. + type: "string" ForceNewCluster: description: "Force creation of a new swarm." type: "boolean" @@ -7271,6 +7480,17 @@ paths: type: "string" AdvertiseAddr: description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: ``), for example, `192.168.1.1`, + or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` + is used. + + The `DataPathAddr` specifies the address that global scope network drivers will publish towards other + nodes in order to reach the containers running on this node. Using this parameter it is possible to + separate the container data traffic from the management traffic of the cluster. + type: "string" RemoteAddrs: description: "Addresses of manager nodes already participating in the swarm." 
@@ -7515,6 +7735,15 @@ paths: Nameservers: ["8.8.8.8"] Search: ["example.org"] Options: ["timeout:3"] + Secrets: + - + File: + Name: "www.example.org.key" + UID: "33" + GID: "33" + Mode: 384 + SecretID: "fpjqlhnwb19zds35k8wn80lq9" + SecretName: "example_org_domain_key" LogDriver: Name: "json-file" Options: @@ -8145,7 +8374,8 @@ paths: description: "no error" schema: $ref: "#/definitions/Secret" - example: + examples: + application/json: ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 @@ -8206,6 +8436,10 @@ paths: responses: 200: description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 404: description: "no such secret" schema: @@ -8236,3 +8470,133 @@ paths: format: "int64" required: true tags: ["Secret"] + /distribution/{name}/json: + get: + summary: "Get image information from the registry" + description: "Return image digest and platform information by contacting the registry." + operationId: "DistributionInspect" + produces: + - "application/json" + responses: + 200: + description: "descriptor and platform information" + schema: + type: "object" + x-go-name: DistributionInspect + required: [Descriptor, Platforms] + properties: + Descriptor: + type: "object" + description: "A descriptor struct containing digest, media type, and size" + properties: + MediaType: + type: "string" + Size: + type: "integer" + format: "int64" + Digest: + type: "string" + URLs: + type: "array" + items: + type: "string" + Platforms: + type: "array" + description: "An array containing all platforms supported by the image" + items: + type: "object" + properties: + Architecture: + type: "string" + OS: + type: "string" + OSVersion: + type: "string" + OSFeatures: + type: "array" + items: + type: "string" + Variant: + type: "string" + Features: + type: "array" + items: + type: "string" + examples: + application/json: + Descriptor: + MediaType: "application/vnd.docker.distribution.manifest.v2+json" + Digest: 
"sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + Size: 3987495 + URLs: + - "" + Platforms: + - Architecture: "amd64" + OS: "linux" + OSVersion: "" + OSFeatures: + - "" + Variant: "" + Features: + - "" + 401: + description: "Failed authentication or no image found" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Distribution"] + /session: + post: + summary: "Initialize interactive session" + description: | + Start a new interactive session with a server. Session allows server to call back to the client for advanced capabilities. + + > **Note**: This endpoint is *experimental* and only available if the daemon is started with experimental + > features enabled. The specifications for this endpoint may still change in a future version of the API. + + ### Hijacking + + This endpoint hijacks the HTTP connection to HTTP2 transport that allows the client to expose gPRC services on that connection. 
+ + For example, the client sends this request to upgrade the connection: + + ``` + POST /session HTTP/1.1 + Upgrade: h2c + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Connection: Upgrade + Upgrade: h2c + ``` + operationId: "Session" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hijacking successful" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Session (experimental)"] diff --git a/fn/vendor/github.com/docker/docker/api/types/backend/backend.go b/fn/vendor/github.com/docker/docker/api/types/backend/backend.go index 83efae300..368ad7b5a 100644 --- a/fn/vendor/github.com/docker/docker/api/types/backend/backend.go +++ b/fn/vendor/github.com/docker/docker/api/types/backend/backend.go @@ -6,7 +6,7 @@ import ( "time" "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/api/types/container" ) // ContainerAttachConfig holds the streams to use when connecting to a container to view logs. @@ -98,13 +98,7 @@ type ExecProcessConfig struct { type ContainerCommitConfig struct { types.ContainerCommitConfig Changes []string -} - -// ProgressWriter is an interface -// to transport progress streams. 
-type ProgressWriter struct { - Output io.Writer - StdoutFormatter *streamformatter.StdoutFormatter - StderrFormatter *streamformatter.StderrFormatter - ProgressReaderFunc func(io.ReadCloser) io.ReadCloser + // TODO: ContainerConfig is only used by the dockerfile Builder, so remove it + // once the Builder has been updated to use a different interface + ContainerConfig *container.Config } diff --git a/fn/vendor/github.com/docker/docker/api/types/backend/build.go b/fn/vendor/github.com/docker/docker/api/types/backend/build.go new file mode 100644 index 000000000..300d35896 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/api/types/backend/build.go @@ -0,0 +1,44 @@ +package backend + +import ( + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/streamformatter" +) + +// PullOption defines different modes for accessing images +type PullOption int + +const ( + // PullOptionNoPull only returns local images + PullOptionNoPull PullOption = iota + // PullOptionForcePull always tries to pull a ref from the registry first + PullOptionForcePull + // PullOptionPreferLocal uses local image if it exists, otherwise pulls + PullOptionPreferLocal +) + +// ProgressWriter is a data object to transport progress streams to the client +type ProgressWriter struct { + Output io.Writer + StdoutFormatter io.Writer + StderrFormatter io.Writer + AuxFormatter *streamformatter.AuxFormatter + ProgressReaderFunc func(io.ReadCloser) io.ReadCloser +} + +// BuildConfig is the configuration used by a BuildManager to start a build +type BuildConfig struct { + Source io.ReadCloser + ProgressWriter ProgressWriter + Options *types.ImageBuildOptions +} + +// GetImageAndLayerOptions are the options supported by GetImageAndReleasableLayer +type GetImageAndLayerOptions struct { + PullOption PullOption + AuthConfig map[string]types.AuthConfig + Output io.Writer + Platform string +} diff --git a/fn/vendor/github.com/docker/docker/api/types/client.go 
b/fn/vendor/github.com/docker/docker/api/types/client.go index d7bc55011..18a1263f1 100644 --- a/fn/vendor/github.com/docker/docker/api/types/client.go +++ b/fn/vendor/github.com/docker/docker/api/types/client.go @@ -7,7 +7,7 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" - "github.com/docker/go-units" + units "github.com/docker/go-units" ) // CheckpointCreateOptions holds parameters to create a checkpoint from a container @@ -97,6 +97,7 @@ type ContainerStartOptions struct { // about files to copy into a container type CopyToContainerOptions struct { AllowOverwriteDirWithFile bool + CopyUIDGID bool } // EventsOptions holds parameters to filter events with. @@ -177,6 +178,11 @@ type ImageBuildOptions struct { SecurityOpt []string ExtraHosts []string // List of extra hosts Target string + SessionID string + + // TODO @jhowardmsft LCOW Support: This will require extending to include + // `Platform string`, but is ommited for now as it's hard-coded temporarily + // to avoid API changes. } // ImageBuildResponse holds information @@ -275,6 +281,12 @@ type ServiceCreateOptions struct { // // This field follows the format of the X-Registry-Auth header. EncodedRegistryAuth string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool } // ServiceCreateResponse contains the information returned to a client @@ -314,6 +326,12 @@ type ServiceUpdateOptions struct { // The valid values are "previous" and "none". An empty value is the // same as "none". Rollback string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. 
A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool } // ServiceListOptions holds parameters to list services with. diff --git a/fn/vendor/github.com/docker/docker/api/types/configs.go b/fn/vendor/github.com/docker/docker/api/types/configs.go index 20c19f213..e4d2ce6e3 100644 --- a/fn/vendor/github.com/docker/docker/api/types/configs.go +++ b/fn/vendor/github.com/docker/docker/api/types/configs.go @@ -16,6 +16,7 @@ type ContainerCreateConfig struct { HostConfig *container.HostConfig NetworkingConfig *network.NetworkingConfig AdjustCPUShares bool + Platform string } // ContainerRmConfig holds arguments for the container remove diff --git a/fn/vendor/github.com/docker/docker/api/types/container/config.go b/fn/vendor/github.com/docker/docker/api/types/container/config.go index 02e1b87a7..55a03fc98 100644 --- a/fn/vendor/github.com/docker/docker/api/types/container/config.go +++ b/fn/vendor/github.com/docker/docker/api/types/container/config.go @@ -7,6 +7,12 @@ import ( "github.com/docker/go-connections/nat" ) +// MinimumDuration puts a minimum on user configured duration. +// This is to prevent API error on time unit. For example, API may +// set 3 as healthcheck interval with intention of 3 seconds, but +// Docker interprets it as 3 nanoseconds. +const MinimumDuration = 1 * time.Millisecond + // HealthConfig holds configuration settings for the HEALTHCHECK feature. type HealthConfig struct { // Test is the test to perform to check that the container is healthy. 
diff --git a/fn/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/fn/vendor/github.com/docker/docker/api/types/container/waitcondition.go new file mode 100644 index 000000000..64820fe35 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/api/types/container/waitcondition.go @@ -0,0 +1,22 @@ +package container + +// WaitCondition is a type used to specify a container state for which +// to wait. +type WaitCondition string + +// Possible WaitCondition Values. +// +// WaitConditionNotRunning (default) is used to wait for any of the non-running +// states: "created", "exited", "dead", "removing", or "removed". +// +// WaitConditionNextExit is used to wait for the next time the state changes +// to a non-running state. If the state is currently "created" or "exited", +// this would cause Wait() to block until either the container runs and exits +// or is removed. +// +// WaitConditionRemoved is used to wait for the container to be removed. +const ( + WaitConditionNotRunning WaitCondition = "not-running" + WaitConditionNextExit WaitCondition = "next-exit" + WaitConditionRemoved WaitCondition = "removed" +) diff --git a/fn/vendor/github.com/docker/docker/api/types/events/events.go b/fn/vendor/github.com/docker/docker/api/types/events/events.go index 7129a65ac..5f5f54034 100644 --- a/fn/vendor/github.com/docker/docker/api/types/events/events.go +++ b/fn/vendor/github.com/docker/docker/api/types/events/events.go @@ -13,6 +13,12 @@ const ( PluginEventType = "plugin" // VolumeEventType is the event type that volumes generate VolumeEventType = "volume" + // ServiceEventType is the event type that services generate + ServiceEventType = "service" + // NodeEventType is the event type that nodes generate + NodeEventType = "node" + // SecretEventType is the event type that secrets generate + SecretEventType = "secret" ) // Actor describes something that generates events, @@ -36,6 +42,8 @@ type Message struct { Type string Action string Actor Actor + // 
Engine events are local scope. Cluster events are swarm scope. + Scope string `json:"scope,omitempty"` Time int64 `json:"time,omitempty"` TimeNano int64 `json:"timeNano,omitempty"` diff --git a/fn/vendor/github.com/docker/docker/api/types/filters/parse_test.go b/fn/vendor/github.com/docker/docker/api/types/filters/parse_test.go index 8198f89d7..ccd1684a0 100644 --- a/fn/vendor/github.com/docker/docker/api/types/filters/parse_test.go +++ b/fn/vendor/github.com/docker/docker/api/types/filters/parse_test.go @@ -90,15 +90,15 @@ func TestFromParam(t *testing.T) { `{"key": "value"}`, } valid := map[*Args][]string{ - &Args{fields: map[string]map[string]bool{"key": {"value": true}}}: { + {fields: map[string]map[string]bool{"key": {"value": true}}}: { `{"key": ["value"]}`, `{"key": {"value": true}}`, }, - &Args{fields: map[string]map[string]bool{"key": {"value1": true, "value2": true}}}: { + {fields: map[string]map[string]bool{"key": {"value1": true, "value2": true}}}: { `{"key": ["value1", "value2"]}`, `{"key": {"value1": true, "value2": true}}`, }, - &Args{fields: map[string]map[string]bool{"key1": {"value1": true}, "key2": {"value2": true}}}: { + {fields: map[string]map[string]bool{"key1": {"value1": true}, "key2": {"value2": true}}}: { `{"key1": ["value1"], "key2": ["value2"]}`, `{"key1": {"value1": true}, "key2": {"value2": true}}`, }, @@ -172,14 +172,14 @@ func TestArgsMatchKVList(t *testing.T) { } matches := map[*Args]string{ - &Args{}: "field", - &Args{map[string]map[string]bool{ - "created": map[string]bool{"today": true}, - "labels": map[string]bool{"key1": true}}, + {}: "field", + {map[string]map[string]bool{ + "created": {"today": true}, + "labels": {"key1": true}}, }: "labels", - &Args{map[string]map[string]bool{ - "created": map[string]bool{"today": true}, - "labels": map[string]bool{"key1=value1": true}}, + {map[string]map[string]bool{ + "created": {"today": true}, + "labels": {"key1=value1": true}}, }: "labels", } @@ -190,16 +190,16 @@ func 
TestArgsMatchKVList(t *testing.T) { } differs := map[*Args]string{ - &Args{map[string]map[string]bool{ - "created": map[string]bool{"today": true}}, + {map[string]map[string]bool{ + "created": {"today": true}}, }: "created", - &Args{map[string]map[string]bool{ - "created": map[string]bool{"today": true}, - "labels": map[string]bool{"key4": true}}, + {map[string]map[string]bool{ + "created": {"today": true}, + "labels": {"key4": true}}, }: "labels", - &Args{map[string]map[string]bool{ - "created": map[string]bool{"today": true}, - "labels": map[string]bool{"key1=value3": true}}, + {map[string]map[string]bool{ + "created": {"today": true}, + "labels": {"key1=value3": true}}, }: "labels", } @@ -214,21 +214,21 @@ func TestArgsMatch(t *testing.T) { source := "today" matches := map[*Args]string{ - &Args{}: "field", - &Args{map[string]map[string]bool{ - "created": map[string]bool{"today": true}}, + {}: "field", + {map[string]map[string]bool{ + "created": {"today": true}}, }: "today", - &Args{map[string]map[string]bool{ - "created": map[string]bool{"to*": true}}, + {map[string]map[string]bool{ + "created": {"to*": true}}, }: "created", - &Args{map[string]map[string]bool{ - "created": map[string]bool{"to(.*)": true}}, + {map[string]map[string]bool{ + "created": {"to(.*)": true}}, }: "created", - &Args{map[string]map[string]bool{ - "created": map[string]bool{"tod": true}}, + {map[string]map[string]bool{ + "created": {"tod": true}}, }: "created", - &Args{map[string]map[string]bool{ - "created": map[string]bool{"anyting": true, "to*": true}}, + {map[string]map[string]bool{ + "created": {"anything": true, "to*": true}}, }: "created", } @@ -239,21 +239,21 @@ func TestArgsMatch(t *testing.T) { } differs := map[*Args]string{ - &Args{map[string]map[string]bool{ - "created": map[string]bool{"tomorrow": true}}, + {map[string]map[string]bool{ + "created": {"tomorrow": true}}, }: "created", - &Args{map[string]map[string]bool{ - "created": map[string]bool{"to(day": true}}, + 
{map[string]map[string]bool{ + "created": {"to(day": true}}, }: "created", - &Args{map[string]map[string]bool{ - "created": map[string]bool{"tom(.*)": true}}, + {map[string]map[string]bool{ + "created": {"tom(.*)": true}}, }: "created", - &Args{map[string]map[string]bool{ - "created": map[string]bool{"tom": true}}, + {map[string]map[string]bool{ + "created": {"tom": true}}, }: "created", - &Args{map[string]map[string]bool{ - "created": map[string]bool{"today1": true}, - "labels": map[string]bool{"today": true}}, + {map[string]map[string]bool{ + "created": {"today1": true}, + "labels": {"today": true}}, }: "created", } diff --git a/fn/vendor/github.com/docker/docker/api/types/network/network.go b/fn/vendor/github.com/docker/docker/api/types/network/network.go index 8d15ed21b..7c7dbacc8 100644 --- a/fn/vendor/github.com/docker/docker/api/types/network/network.go +++ b/fn/vendor/github.com/docker/docker/api/types/network/network.go @@ -58,6 +58,7 @@ type EndpointSettings struct { GlobalIPv6Address string GlobalIPv6PrefixLen int MacAddress string + DriverOpts map[string]string } // Task carries the information about one backend task @@ -100,3 +101,8 @@ func (es *EndpointSettings) Copy() *EndpointSettings { type NetworkingConfig struct { EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network } + +// ConfigReference specifies the source which provides a network's configuration +type ConfigReference struct { + Network string +} diff --git a/fn/vendor/github.com/docker/docker/api/types/registry/registry.go b/fn/vendor/github.com/docker/docker/api/types/registry/registry.go index 28fafab90..b98a943a1 100644 --- a/fn/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/fn/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -3,13 +3,17 @@ package registry import ( "encoding/json" "net" + + "github.com/opencontainers/image-spec/specs-go/v1" ) // ServiceConfig stores daemon registry services configuration. 
type ServiceConfig struct { - InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` - IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` - Mirrors []string + AllowNondistributableArtifactsCIDRs []*NetIPNet + AllowNondistributableArtifactsHostnames []string + InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string } // NetIPNet is the net.IPNet type, which can be marshalled and @@ -102,3 +106,14 @@ type SearchResults struct { // Results is a slice containing the actual results for the search Results []SearchResult `json:"results"` } + +// DistributionInspect describes the result obtained from contacting the +// registry to retrieve image metadata +type DistributionInspect struct { + // Descriptor contains information about the manifest, including + // the content addressable digest + Descriptor v1.Descriptor + // Platforms contains the list of platforms supported by the image, + // obtained by parsing the manifest + Platforms []v1.Platform +} diff --git a/fn/vendor/github.com/docker/docker/api/types/swarm/common.go b/fn/vendor/github.com/docker/docker/api/types/swarm/common.go index dc76a146b..54af82b31 100644 --- a/fn/vendor/github.com/docker/docker/api/types/swarm/common.go +++ b/fn/vendor/github.com/docker/docker/api/types/swarm/common.go @@ -25,3 +25,16 @@ type Driver struct { Name string `json:",omitempty"` Options map[string]string `json:",omitempty"` } + +// TLSInfo represents the TLS information about what CA certificate is trusted, +// and who the issuer for a TLS certificate is +type TLSInfo struct { + // TrustRoot is the trusted CA root certificate in PEM format + TrustRoot string `json:",omitempty"` + + // CertIssuer is the raw subject bytes of the issuer + CertIssuerSubject []byte `json:",omitempty"` + + // CertIssuerPublicKey is the raw public key bytes of the issuer + CertIssuerPublicKey []byte `json:",omitempty"` +} diff --git 
a/fn/vendor/github.com/docker/docker/api/types/swarm/config.go b/fn/vendor/github.com/docker/docker/api/types/swarm/config.go new file mode 100644 index 000000000..0fb021ce9 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/api/types/swarm/config.go @@ -0,0 +1,31 @@ +package swarm + +import "os" + +// Config represents a config. +type Config struct { + ID string + Meta + Spec ConfigSpec +} + +// ConfigSpec represents a config specification from a config in swarm +type ConfigSpec struct { + Annotations + Data []byte `json:",omitempty"` +} + +// ConfigReferenceFileTarget is a file target in a config reference +type ConfigReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// ConfigReference is a reference to a config in swarm +type ConfigReference struct { + File *ConfigReferenceFileTarget + ConfigID string + ConfigName string +} diff --git a/fn/vendor/github.com/docker/docker/api/types/swarm/container.go b/fn/vendor/github.com/docker/docker/api/types/swarm/container.go index 135f7cbbf..6f8b45f6b 100644 --- a/fn/vendor/github.com/docker/docker/api/types/swarm/container.go +++ b/fn/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -68,4 +68,5 @@ type ContainerSpec struct { Hosts []string `json:",omitempty"` DNSConfig *DNSConfig `json:",omitempty"` Secrets []*SecretReference `json:",omitempty"` + Configs []*ConfigReference `json:",omitempty"` } diff --git a/fn/vendor/github.com/docker/docker/api/types/swarm/network.go b/fn/vendor/github.com/docker/docker/api/types/swarm/network.go index 693f85cce..97c484e14 100644 --- a/fn/vendor/github.com/docker/docker/api/types/swarm/network.go +++ b/fn/vendor/github.com/docker/docker/api/types/swarm/network.go @@ -1,5 +1,9 @@ package swarm +import ( + "github.com/docker/docker/api/types/network" +) + // Endpoint represents an endpoint. 
type Endpoint struct { Spec EndpointSpec `json:",omitempty"` @@ -78,18 +82,21 @@ type Network struct { // NetworkSpec represents the spec of a network. type NetworkSpec struct { Annotations - DriverConfiguration *Driver `json:",omitempty"` - IPv6Enabled bool `json:",omitempty"` - Internal bool `json:",omitempty"` - Attachable bool `json:",omitempty"` - Ingress bool `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` + DriverConfiguration *Driver `json:",omitempty"` + IPv6Enabled bool `json:",omitempty"` + Internal bool `json:",omitempty"` + Attachable bool `json:",omitempty"` + Ingress bool `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` + ConfigFrom *network.ConfigReference `json:",omitempty"` + Scope string `json:",omitempty"` } // NetworkAttachmentConfig represents the configuration of a network attachment. type NetworkAttachmentConfig struct { - Target string `json:",omitempty"` - Aliases []string `json:",omitempty"` + Target string `json:",omitempty"` + Aliases []string `json:",omitempty"` + DriverOpts map[string]string `json:",omitempty"` } // NetworkAttachment represents a network attachment. diff --git a/fn/vendor/github.com/docker/docker/api/types/swarm/node.go b/fn/vendor/github.com/docker/docker/api/types/swarm/node.go index 379e17a77..28c6851e9 100644 --- a/fn/vendor/github.com/docker/docker/api/types/swarm/node.go +++ b/fn/vendor/github.com/docker/docker/api/types/swarm/node.go @@ -52,6 +52,7 @@ type NodeDescription struct { Platform Platform `json:",omitempty"` Resources Resources `json:",omitempty"` Engine EngineDescription `json:",omitempty"` + TLSInfo TLSInfo `json:",omitempty"` } // Platform represents the platform (Arch/OS). 
diff --git a/fn/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/fn/vendor/github.com/docker/docker/api/types/swarm/runtime.go new file mode 100644 index 000000000..c4c731dc8 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/api/types/swarm/runtime.go @@ -0,0 +1,19 @@ +package swarm + +// RuntimeType is the type of runtime used for the TaskSpec +type RuntimeType string + +// RuntimeURL is the proto type url +type RuntimeURL string + +const ( + // RuntimeContainer is the container based runtime + RuntimeContainer RuntimeType = "container" + // RuntimePlugin is the plugin based runtime + RuntimePlugin RuntimeType = "plugin" + + // RuntimeURLContainer is the proto url for the container type + RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" + // RuntimeURLPlugin is the proto url for the plugin type + RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" +) diff --git a/fn/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/fn/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go new file mode 100644 index 000000000..47ae234ef --- /dev/null +++ b/fn/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto + +package runtime diff --git a/fn/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/fn/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go new file mode 100644 index 000000000..1fdc9b043 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go @@ -0,0 +1,712 @@ +// Code generated by protoc-gen-gogo. +// source: plugin.proto +// DO NOT EDIT! + +/* + Package runtime is a generated protocol buffer package. 
+ + It is generated from these files: + plugin.proto + + It has these top-level messages: + PluginSpec + PluginPrivilege +*/ +package runtime + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +type PluginSpec struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` + Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` + Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` +} + +func (m *PluginSpec) Reset() { *m = PluginSpec{} } +func (m *PluginSpec) String() string { return proto.CompactTextString(m) } +func (*PluginSpec) ProtoMessage() {} +func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } + +func (m *PluginSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginSpec) GetRemote() string { + if m != nil { + return m.Remote + } + return "" +} + +func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { + if m != nil { + return m.Privileges + } + return nil +} + +func (m *PluginSpec) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. 
+type PluginPrivilege struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` +} + +func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } +func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } +func (*PluginPrivilege) ProtoMessage() {} +func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } + +func (m *PluginPrivilege) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginPrivilege) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PluginPrivilege) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*PluginSpec)(nil), "PluginSpec") + proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") +} +func (m *PluginSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Remote) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) + i += copy(dAtA[i:], m.Remote) + } + if len(m.Privileges) > 0 { + for _, msg := range m.Privileges { + dAtA[i] = 0x1a + i++ + i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Disabled { + dAtA[i] = 0x20 + i++ + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PluginPrivilege) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Description) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PluginSpec) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Remote) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Privileges) > 0 { + for _, e := range m.Privileges { + l = e.Size() + n += 1 + l + sovPlugin(uint64(l)) + } + } + if m.Disabled { + n += 2 + } + return n +} + +func (m *PluginPrivilege) Size() 
(n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + return n +} + +func sovPlugin(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozPlugin(x uint64) (n int) { + return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PluginSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Remote = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Privileges = append(m.Privileges, &PluginPrivilege{}) + if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Disabled = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPlugin(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthPlugin + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= 
l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPlugin(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } + +var fileDescriptorPlugin = []byte{ + // 196 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d, + 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x6a, 0x63, 0xe4, 0xe2, 0x0a, 0x00, 0x0b, + 0x04, 0x17, 0xa4, 0x26, 0x0b, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, + 0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0x62, 0x5c, 0x6c, 0x45, 0xa9, 0xb9, 0xf9, 0x25, 0xa9, 0x12, + 0x4c, 0x60, 0x51, 0x28, 0x4f, 0xc8, 0x80, 0x8b, 0xab, 0xa0, 0x28, 0xb3, 0x2c, 0x33, 0x27, 0x35, + 0x3d, 0xb5, 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x40, 0x0f, 0x62, 0x58, 0x00, 0x4c, + 0x22, 0x08, 0x49, 0x8d, 0x90, 0x14, 0x17, 0x47, 0x4a, 0x66, 0x71, 0x62, 0x52, 0x4e, 0x6a, 0x8a, + 0x04, 0x8b, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x9c, 0xaf, 0x14, 0xcb, 0xc5, 0x8f, 0xa6, 0x15, 0xab, + 0x63, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0, + 0x2e, 0x42, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x88, 0x33, + 0x08, 0xc2, 0x71, 0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, + 0x18, 0x93, 0xd8, 0xc0, 0x9e, 0x37, 0x06, 0x04, 0x00, 0x00, 
0xff, 0xff, 0xb8, 0x84, 0xad, 0x79, + 0x0c, 0x01, 0x00, 0x00, +} diff --git a/fn/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/fn/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto new file mode 100644 index 000000000..06eb7ba65 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +message PluginSpec { + string name = 1; + string remote = 2; + repeated PluginPrivilege privileges = 3; + bool disabled = 4; +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +message PluginPrivilege { + string name = 1; + string description = 2; + repeated string value = 3; +} diff --git a/fn/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/fn/vendor/github.com/docker/docker/api/types/swarm/swarm.go index c51327475..b65fa86da 100644 --- a/fn/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ b/fn/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -2,12 +2,14 @@ package swarm import "time" -// ClusterInfo represents info about the cluster for outputing in "info" +// ClusterInfo represents info about the cluster for outputting in "info" // it contains the same information as "Swarm", but without the JoinTokens type ClusterInfo struct { ID string Meta - Spec Spec + Spec Spec + TLSInfo TLSInfo + RootRotationInProgress bool } // Swarm represents a swarm. @@ -107,6 +109,16 @@ type CAConfig struct { // ExternalCAs is a list of CAs to which a manager node will make // certificate signing requests for node certificates. ExternalCAs []*ExternalCA `json:",omitempty"` + + // SigningCACert and SigningCAKey specify the desired signing root CA and + // root CA key for the swarm. When inspecting the cluster, the key will + // be redacted. 
+	SigningCACert string `json:",omitempty"`
+	SigningCAKey  string `json:",omitempty"`
+
+	// If this value changes, and there is no specified signing cert and key,
+	// then the swarm is forced to generate a new root certificate and key.
+	ForceRotate uint64 `json:",omitempty"`
 }
 
 // ExternalCAProtocol represents type of external CA.
@@ -126,12 +138,17 @@ type ExternalCA struct {
 	// Options is a set of additional key/value pairs whose interpretation
 	// depends on the specified CA type.
 	Options map[string]string `json:",omitempty"`
+
+	// CACert specifies which root CA is used by this external CA. This certificate must
+	// be in PEM format.
+	CACert string
 }
 
 // InitRequest is the request used to init a swarm.
 type InitRequest struct {
 	ListenAddr       string
 	AdvertiseAddr    string
+	DataPathAddr     string
 	ForceNewCluster  bool
 	Spec             Spec
 	AutoLockManagers bool
@@ -142,6 +159,7 @@ type InitRequest struct {
 type JoinRequest struct {
 	ListenAddr    string
 	AdvertiseAddr string
+	DataPathAddr  string
 	RemoteAddrs   []string
 	JoinToken     string // accept by secret
 	Availability  NodeAvailability
diff --git a/fn/vendor/github.com/docker/docker/api/types/swarm/task.go b/fn/vendor/github.com/docker/docker/api/types/swarm/task.go
index 1769b6082..1712c06cf 100644
--- a/fn/vendor/github.com/docker/docker/api/types/swarm/task.go
+++ b/fn/vendor/github.com/docker/docker/api/types/swarm/task.go
@@ -1,6 +1,10 @@
 package swarm
 
-import "time"
+import (
+	"time"
+
+	"github.com/docker/docker/api/types/swarm/runtime"
+)
 
 // TaskState represents the state of a task.
 type TaskState string
@@ -51,7 +55,11 @@ type Task struct {
 
 // TaskSpec represents the spec of a task.
 type TaskSpec struct {
-	ContainerSpec ContainerSpec `json:",omitempty"`
+	// ContainerSpec and PluginSpec are mutually exclusive.
+ // PluginSpec will only be used when the `Runtime` field is set to `plugin` + ContainerSpec *ContainerSpec `json:",omitempty"` + PluginSpec *runtime.PluginSpec `json:",omitempty"` + Resources *ResourceRequirements `json:",omitempty"` RestartPolicy *RestartPolicy `json:",omitempty"` Placement *Placement `json:",omitempty"` @@ -65,6 +73,8 @@ type TaskSpec struct { // ForceUpdate is a counter that triggers an update even if no relevant // parameters have been changed. ForceUpdate uint64 + + Runtime RuntimeType `json:",omitempty"` } // Resources represents resources (CPU/Memory). @@ -83,6 +93,11 @@ type ResourceRequirements struct { type Placement struct { Constraints []string `json:",omitempty"` Preferences []PlacementPreference `json:",omitempty"` + + // Platforms stores all the platforms that the image can run on. + // This field is used in the platform filter for scheduling. If empty, + // then the platform filter is off, meaning there are no scheduling restrictions. + Platforms []Platform `json:",omitempty"` } // PlacementPreference provides a way to make the scheduler aware of factors diff --git a/fn/vendor/github.com/docker/docker/api/types/types.go b/fn/vendor/github.com/docker/docker/api/types/types.go index bbaf2c553..c96df2733 100644 --- a/fn/vendor/github.com/docker/docker/api/types/types.go +++ b/fn/vendor/github.com/docker/docker/api/types/types.go @@ -45,6 +45,12 @@ type ImageInspect struct { VirtualSize int64 GraphDriver GraphDriverData RootFS RootFS + Metadata ImageMetadata +} + +// ImageMetadata contains engine-local data about the image +type ImageMetadata struct { + LastTagTime time.Time `json:",omitempty"` } // Container contains response of Engine API: @@ -238,6 +244,8 @@ type PluginsInfo struct { Network []string // List of Authorization plugins registered Authorization []string + // List of Log plugins registered + Log []string } // ExecStartCheck is a temp struct used by execStart @@ -275,7 +283,7 @@ type Health struct { // ContainerState 
stores container's running state // it's part of ContainerJSONBase and will return by "inspect" command type ContainerState struct { - Status string + Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" Running bool Paused bool Restarting bool @@ -318,6 +326,7 @@ type ContainerJSONBase struct { Name string RestartCount int Driver string + Platform string MountLabel string ProcessLabel string AppArmorProfile string @@ -394,13 +403,15 @@ type NetworkResource struct { Name string // Name is the requested name of the network ID string `json:"Id"` // ID uniquely identifies a network on a single machine Created time.Time // Created is the time the network created - Scope string // Scope describes the level at which the network exists (e.g. `global` for cluster-wide or `local` for machine level) + Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 IPAM network.IPAM // IPAM is the network's IP Address Management Internal bool // Internal represents if the network is used internal only Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. + ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. + ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. 
Containers map[string]EndpointResource // Containers contains endpoints belonging to the network Options map[string]string // Options holds the network specific options to use for when creating the network Labels map[string]string // Labels holds metadata specific to the network being created @@ -428,11 +439,14 @@ type NetworkCreate struct { // which has the same name but it is not guaranteed to catch all name collisions. CheckDuplicate bool Driver string + Scope string EnableIPv6 bool IPAM *network.IPAM Internal bool Attachable bool Ingress bool + ConfigOnly bool + ConfigFrom *network.ConfigReference Options map[string]string Labels map[string]string } @@ -461,6 +475,12 @@ type NetworkDisconnect struct { Force bool } +// NetworkInspectOptions holds parameters to inspect network +type NetworkInspectOptions struct { + Scope string + Verbose bool +} + // Checkpoint represents the details of a checkpoint type Checkpoint struct { Name string // Name is the name of the checkpoint @@ -475,10 +495,11 @@ type Runtime struct { // DiskUsage contains response of Engine API: // GET "/system/df" type DiskUsage struct { - LayersSize int64 - Images []*ImageSummary - Containers []*Container - Volumes []*Volume + LayersSize int64 + Images []*ImageSummary + Containers []*Container + Volumes []*Volume + BuilderSize int64 } // ContainersPruneReport contains the response for Engine API: @@ -502,6 +523,12 @@ type ImagesPruneReport struct { SpaceReclaimed uint64 } +// BuildCachePruneReport contains the response for Engine API: +// POST "/build/prune" +type BuildCachePruneReport struct { + SpaceReclaimed uint64 +} + // NetworksPruneReport contains the response for Engine API: // POST "/networks/prune" type NetworksPruneReport struct { @@ -520,6 +547,18 @@ type SecretListOptions struct { Filters filters.Args } +// ConfigCreateResponse contains the information returned to a client +// on the creation of a new config. 
+type ConfigCreateResponse struct { + // ID is the id of the created config. + ID string +} + +// ConfigListOptions holds parameters to list configs +type ConfigListOptions struct { + Filters filters.Args +} + // PushResult contains the tag, manifest digest, and manifest size from the // push. It's used to signal this information to the trust code in the client // so it can sign the manifest if necessary. @@ -528,3 +567,8 @@ type PushResult struct { Digest string Size int } + +// BuildResult contains the image id of a successful build +type BuildResult struct { + ID string +} diff --git a/fn/vendor/github.com/docker/docker/api/types/volume.go b/fn/vendor/github.com/docker/docker/api/types/volume.go index da4f8ebd9..a69b0cfb1 100644 --- a/fn/vendor/github.com/docker/docker/api/types/volume.go +++ b/fn/vendor/github.com/docker/docker/api/types/volume.go @@ -7,6 +7,9 @@ package types // swagger:model Volume type Volume struct { + // Time volume was created. + CreatedAt string `json:"CreatedAt,omitempty"` + // Name of the volume driver used by the volume. // Required: true Driver string `json:"Driver"` diff --git a/fn/vendor/github.com/docker/docker/builder/builder.go b/fn/vendor/github.com/docker/docker/builder/builder.go index f7a4f91f2..e480601d4 100644 --- a/fn/vendor/github.com/docker/docker/builder/builder.go +++ b/fn/vendor/github.com/docker/docker/builder/builder.go @@ -6,14 +6,12 @@ package builder import ( "io" - "os" - "time" - "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" - "github.com/docker/docker/image" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/layer" "golang.org/x/net/context" ) @@ -22,145 +20,65 @@ const ( DefaultDockerfileName string = "Dockerfile" ) -// Context represents a file system tree. 
-type Context interface { +// Source defines a location that can be used as a source for the ADD/COPY +// instructions in the builder. +type Source interface { + // Root returns root path for accessing source + Root() string // Close allows to signal that the filesystem tree won't be used anymore. // For Context implementations using a temporary directory, it is recommended to // delete the temporary directory in Close(). Close() error - // Stat returns an entry corresponding to path if any. - // It is recommended to return an error if path was not found. - // If path is a symlink it also returns the path to the target file. - Stat(path string) (string, FileInfo, error) - // Open opens path from the context and returns a readable stream of it. - Open(path string) (io.ReadCloser, error) - // Walk walks the tree of the context with the function passed to it. - Walk(root string, walkFn WalkFunc) error -} - -// WalkFunc is the type of the function called for each file or directory visited by Context.Walk(). -type WalkFunc func(path string, fi FileInfo, err error) error - -// ModifiableContext represents a modifiable Context. -// TODO: remove this interface once we can get rid of Remove() -type ModifiableContext interface { - Context - // Remove deletes the entry specified by `path`. - // It is usual for directory entries to delete all its subentries. - Remove(path string) error -} - -// FileInfo extends os.FileInfo to allow retrieving an absolute path to the file. -// TODO: remove this interface once pkg/archive exposes a walk function that Context can use. -type FileInfo interface { - os.FileInfo - Path() string -} - -// PathFileInfo is a convenience struct that implements the FileInfo interface. -type PathFileInfo struct { - os.FileInfo - // FilePath holds the absolute path to the file. - FilePath string - // FileName holds the basename for the file. - FileName string -} - -// Path returns the absolute path to the file. 
-func (fi PathFileInfo) Path() string { - return fi.FilePath -} - -// Name returns the basename of the file. -func (fi PathFileInfo) Name() string { - if fi.FileName != "" { - return fi.FileName - } - return fi.FileInfo.Name() -} - -// Hashed defines an extra method intended for implementations of os.FileInfo. -type Hashed interface { - // Hash returns the hash of a file. - Hash() string - SetHash(string) -} - -// HashedFileInfo is a convenient struct that augments FileInfo with a field. -type HashedFileInfo struct { - FileInfo - // FileHash represents the hash of a file. - FileHash string -} - -// Hash returns the hash of a file. -func (fi HashedFileInfo) Hash() string { - return fi.FileHash -} - -// SetHash sets the hash of a file. -func (fi *HashedFileInfo) SetHash(h string) { - fi.FileHash = h + // Hash returns a checksum for a file + Hash(path string) (string, error) } // Backend abstracts calls to a Docker Daemon. type Backend interface { - // TODO: use digest reference instead of name + ImageBackend + ExecBackend - // GetImageOnBuild looks up a Docker image referenced by `name`. - GetImageOnBuild(name string) (Image, error) - // TagImageWithReference tags an image with newTag - TagImageWithReference(image.ID, reference.Named) error - // PullOnBuild tells Docker to pull image referenced by `name`. - PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (Image, error) + // Commit creates a new Docker image from an existing Docker container. 
+ Commit(string, *backend.ContainerCommitConfig) (string, error) + // ContainerCreateWorkdir creates the workdir + ContainerCreateWorkdir(containerID string) error + + CreateImage(config []byte, parent string, platform string) (Image, error) + + ImageCacheBuilder +} + +// ImageBackend are the interface methods required from an image component +type ImageBackend interface { + GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (Image, ReleaseableLayer, error) +} + +// ExecBackend contains the interface methods required for executing containers +type ExecBackend interface { // ContainerAttachRaw attaches to container. - ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error + ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error // ContainerCreate creates a new Docker container and returns potential warnings ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) // ContainerRm removes a container specified by `id`. ContainerRm(name string, config *types.ContainerRmConfig) error - // Commit creates a new Docker image from an existing Docker container. - Commit(string, *backend.ContainerCommitConfig) (string, error) // ContainerKill stops the container execution abruptly. ContainerKill(containerID string, sig uint64) error // ContainerStart starts a new container ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error // ContainerWait stops processing until the given container is stopped. 
- ContainerWait(containerID string, timeout time.Duration) (int, error) - // ContainerUpdateCmdOnBuild updates container.Path and container.Args - ContainerUpdateCmdOnBuild(containerID string, cmd []string) error - // ContainerCreateWorkdir creates the workdir - ContainerCreateWorkdir(containerID string) error - - // ContainerCopy copies/extracts a source FileInfo to a destination path inside a container - // specified by a container object. - // TODO: make an Extract method instead of passing `decompress` - // TODO: do not pass a FileInfo, instead refactor the archive package to export a Walk function that can be used - // with Context.Walk - // ContainerCopy(name string, res string) (io.ReadCloser, error) - // TODO: use copyBackend api - CopyOnBuild(containerID string, destPath string, src FileInfo, decompress bool) error - - // HasExperimental checks if the backend supports experimental features - HasExperimental() bool - - // SquashImage squashes the fs layers from the provided image down to the specified `to` image - SquashImage(from string, to string) (string, error) - - // MountImage returns mounted path with rootfs of an image. - MountImage(name string) (string, func() error, error) + ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) } -// Image represents a Docker image used by the builder. -type Image interface { - ImageID() string - RunConfig() *container.Config +// Result is the output produced by a Builder +type Result struct { + ImageID string + FromImage Image } // ImageCacheBuilder represents a generator for stateful image cache. type ImageCacheBuilder interface { // MakeImageCache creates a stateful image cache. - MakeImageCache(cacheFrom []string) ImageCache + MakeImageCache(cacheFrom []string, platform string) ImageCache } // ImageCache abstracts an image cache. @@ -170,3 +88,18 @@ type ImageCache interface { // and runconfig equals `cfg`. 
A cache miss is expected to return an empty ID and a nil error. GetCache(parentID string, cfg *container.Config) (imageID string, err error) } + +// Image represents a Docker image used by the builder. +type Image interface { + ImageID() string + RunConfig() *container.Config + MarshalJSON() ([]byte, error) +} + +// ReleaseableLayer is an image layer that can be mounted and released +type ReleaseableLayer interface { + Release() error + Mount() (string, error) + Commit(platform string) (ReleaseableLayer, error) + DiffID() layer.DiffID +} diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/bflag.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/bflag.go index 34b3399ff..d84966162 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/bflag.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/bflag.go @@ -37,6 +37,13 @@ func NewBFlags() *BFlags { } } +// NewBFlagsWithArgs returns the new BFlags struct with Args set to args +func NewBFlagsWithArgs(args []string) *BFlags { + flags := NewBFlags() + flags.Args = args + return flags +} + // AddBool adds a bool flag to BFlags // Note, any error will be generated when Parse() is called (see Parse). func (bf *BFlags) AddBool(name string, def bool) *Flag { diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/buildargs.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/buildargs.go index 4fde2e911..e0daf9a77 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/buildargs.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/buildargs.go @@ -1,5 +1,12 @@ package dockerfile +import ( + "fmt" + "io" + + "github.com/docker/docker/runconfig/opts" +) + // builtinAllowedBuildArgs is list of built-in allowed build args // these args are considered transparent and are excluded from the image history. 
// Filtering from history is implemented in dispatchers.go @@ -35,16 +42,20 @@ func newBuildArgs(argsFromOptions map[string]*string) *buildArgs { } } -// UnreferencedOptionArgs returns the list of args that were set from options but -// were never referenced from the Dockerfile -func (b *buildArgs) UnreferencedOptionArgs() []string { +// WarnOnUnusedBuildArgs checks if there are any leftover build-args that were +// passed but not consumed during build. Print a warning, if there are any. +func (b *buildArgs) WarnOnUnusedBuildArgs(out io.Writer) { leftoverArgs := []string{} for arg := range b.argsFromOptions { - if _, ok := b.referencedArgs[arg]; !ok { + _, isReferenced := b.referencedArgs[arg] + _, isBuiltin := builtinAllowedBuildArgs[arg] + if !isBuiltin && !isReferenced { leftoverArgs = append(leftoverArgs, arg) } } - return leftoverArgs + if len(leftoverArgs) > 0 { + fmt.Fprintf(out, "[Warning] One or more build-args %v were not consumed\n", leftoverArgs) + } } // ResetAllowed clears the list of args that are allowed to be used by a @@ -64,13 +75,13 @@ func (b *buildArgs) AddArg(key string, value *string) { b.referencedArgs[key] = struct{}{} } -// IsUnreferencedBuiltin checks if the key is a built-in arg, or if it has been -// referenced by the Dockerfile. Returns true if the arg is a builtin that has -// not been referenced in the Dockerfile. -func (b *buildArgs) IsUnreferencedBuiltin(key string) bool { +// IsReferencedOrNotBuiltin checks if the key is a built-in arg, or if it has been +// referenced by the Dockerfile. Returns true if the arg is not a builtin or +// if the builtin has been referenced in the Dockerfile. 
+func (b *buildArgs) IsReferencedOrNotBuiltin(key string) bool { _, isBuiltin := builtinAllowedBuildArgs[key] _, isAllowed := b.allowedBuildArgs[key] - return isBuiltin && !isAllowed + return isAllowed || !isBuiltin } // GetAllAllowed returns a mapping with all the allowed args @@ -96,6 +107,19 @@ func (b *buildArgs) getAllFromMapping(source map[string]*string) map[string]stri return m } +// FilterAllowed returns all allowed args without the filtered args +func (b *buildArgs) FilterAllowed(filter []string) []string { + envs := []string{} + configEnv := opts.ConvertKVStringsToMap(filter) + + for key, val := range b.GetAllAllowed() { + if _, ok := configEnv[key]; !ok { + envs = append(envs, fmt.Sprintf("%s=%s", key, val)) + } + } + return envs +} + func (b *buildArgs) getBuildArg(key string, mapping map[string]*string) (string, bool) { defaultValue, exists := mapping[key] // Return override from options if one is defined diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/buildargs_test.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/buildargs_test.go index 03df19b2a..241bc8447 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/buildargs_test.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/buildargs_test.go @@ -1,8 +1,10 @@ package dockerfile import ( - "github.com/docker/docker/pkg/testutil/assert" + "bytes" "testing" + + "github.com/stretchr/testify/assert" ) func strPtr(source string) *string { @@ -18,7 +20,7 @@ func TestGetAllAllowed(t *testing.T) { }) buildArgs.AddMetaArg("ArgFromMeta", strPtr("frommeta1")) - buildArgs.AddMetaArg("ArgFromMetaOverriden", strPtr("frommeta2")) + buildArgs.AddMetaArg("ArgFromMetaOverridden", strPtr("frommeta2")) buildArgs.AddMetaArg("ArgFromMetaNotUsed", strPtr("frommeta3")) buildArgs.AddArg("ArgOverriddenByOptions", strPtr("fromdockerfile2")) @@ -26,7 +28,7 @@ func TestGetAllAllowed(t *testing.T) { buildArgs.AddArg("ArgNoDefaultInDockerfile", nil) 
buildArgs.AddArg("ArgNoDefaultInDockerfileFromOptions", nil) buildArgs.AddArg("ArgFromMeta", nil) - buildArgs.AddArg("ArgFromMetaOverriden", strPtr("fromdockerfile3")) + buildArgs.AddArg("ArgFromMetaOverridden", strPtr("fromdockerfile3")) all := buildArgs.GetAllAllowed() expected := map[string]string{ @@ -35,9 +37,9 @@ func TestGetAllAllowed(t *testing.T) { "ArgWithDefaultInDockerfile": "fromdockerfile1", "ArgNoDefaultInDockerfileFromOptions": "fromopt3", "ArgFromMeta": "frommeta1", - "ArgFromMetaOverriden": "fromdockerfile3", + "ArgFromMetaOverridden": "fromdockerfile3", } - assert.DeepEqual(t, all, expected) + assert.Equal(t, expected, all) } func TestGetAllMeta(t *testing.T) { @@ -59,5 +61,40 @@ func TestGetAllMeta(t *testing.T) { "ArgOverriddenByOptions": "fromopt2", "ArgNoDefaultInMetaFromOptions": "fromopt3", } - assert.DeepEqual(t, all, expected) + assert.Equal(t, expected, all) +} + +func TestWarnOnUnusedBuildArgs(t *testing.T) { + buildArgs := newBuildArgs(map[string]*string{ + "ThisArgIsUsed": strPtr("fromopt1"), + "ThisArgIsNotUsed": strPtr("fromopt2"), + "HTTPS_PROXY": strPtr("referenced builtin"), + "HTTP_PROXY": strPtr("unreferenced builtin"), + }) + buildArgs.AddArg("ThisArgIsUsed", nil) + buildArgs.AddArg("HTTPS_PROXY", nil) + + buffer := new(bytes.Buffer) + buildArgs.WarnOnUnusedBuildArgs(buffer) + out := buffer.String() + assert.NotContains(t, out, "ThisArgIsUsed") + assert.NotContains(t, out, "HTTPS_PROXY") + assert.NotContains(t, out, "HTTP_PROXY") + assert.Contains(t, out, "ThisArgIsNotUsed") +} + +func TestIsUnreferencedBuiltin(t *testing.T) { + buildArgs := newBuildArgs(map[string]*string{ + "ThisArgIsUsed": strPtr("fromopt1"), + "ThisArgIsNotUsed": strPtr("fromopt2"), + "HTTPS_PROXY": strPtr("referenced builtin"), + "HTTP_PROXY": strPtr("unreferenced builtin"), + }) + buildArgs.AddArg("ThisArgIsUsed", nil) + buildArgs.AddArg("HTTPS_PROXY", nil) + + assert.True(t, buildArgs.IsReferencedOrNotBuiltin("ThisArgIsUsed")) + assert.True(t, 
buildArgs.IsReferencedOrNotBuiltin("ThisArgIsNotUsed")) + assert.True(t, buildArgs.IsReferencedOrNotBuiltin("HTTPS_PROXY")) + assert.False(t, buildArgs.IsReferencedOrNotBuiltin("HTTP_PROXY")) } diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/builder.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/builder.go index b62e7e220..fb1786225 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/builder.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/builder.go @@ -2,26 +2,32 @@ package dockerfile import ( "bytes" - "errors" "fmt" "io" "io/ioutil" - "os" + "runtime" "strings" + "time" "github.com/Sirupsen/logrus" - "github.com/docker/distribution/reference" - apierrors "github.com/docker/docker/api/errors" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" "github.com/docker/docker/builder" "github.com/docker/docker/builder/dockerfile/command" "github.com/docker/docker/builder/dockerfile/parser" - "github.com/docker/docker/image" + "github.com/docker/docker/builder/fscache" + "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/client/session" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/streamformatter" "github.com/docker/docker/pkg/stringid" - perrors "github.com/pkg/errors" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" "golang.org/x/net/context" + "golang.org/x/sync/syncmap" ) var validCommitCommands = map[string]bool{ @@ -37,7 +43,126 @@ var validCommitCommands = map[string]bool{ "workdir": true, } -var defaultLogConfig = container.LogConfig{Type: "none"} +// SessionGetter is object used to get access to a session by uuid +type SessionGetter interface { + Get(ctx context.Context, uuid string) (session.Caller, error) +} + +// BuildManager is shared across all Builder objects +type 
BuildManager struct { + archiver *archive.Archiver + backend builder.Backend + pathCache pathCache // TODO: make this persistent + sg SessionGetter + fsCache *fscache.FSCache +} + +// NewBuildManager creates a BuildManager +func NewBuildManager(b builder.Backend, sg SessionGetter, fsCache *fscache.FSCache, idMappings *idtools.IDMappings) (*BuildManager, error) { + bm := &BuildManager{ + backend: b, + pathCache: &syncmap.Map{}, + sg: sg, + archiver: chrootarchive.NewArchiver(idMappings), + fsCache: fsCache, + } + if err := fsCache.RegisterTransport(remotecontext.ClientSessionRemote, NewClientSessionTransport()); err != nil { + return nil, err + } + return bm, nil +} + +// Build starts a new build from a BuildConfig +func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) (*builder.Result, error) { + buildsTriggered.Inc() + if config.Options.Dockerfile == "" { + config.Options.Dockerfile = builder.DefaultDockerfileName + } + + source, dockerfile, err := remotecontext.Detect(config) + if err != nil { + return nil, err + } + defer func() { + if source != nil { + if err := source.Close(); err != nil { + logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err) + } + } + }() + + // TODO @jhowardmsft LCOW support - this will require rework to allow both linux and Windows simultaneously. + // This is an interim solution to hardcode to linux if LCOW is turned on. 
+ if dockerfile.Platform == "" { + dockerfile.Platform = runtime.GOOS + if dockerfile.Platform == "windows" && system.LCOWSupported() { + dockerfile.Platform = "linux" + } + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if src, err := bm.initializeClientSession(ctx, cancel, config.Options); err != nil { + return nil, err + } else if src != nil { + source = src + } + + builderOptions := builderOptions{ + Options: config.Options, + ProgressWriter: config.ProgressWriter, + Backend: bm.backend, + PathCache: bm.pathCache, + Archiver: bm.archiver, + Platform: dockerfile.Platform, + } + + return newBuilder(ctx, builderOptions).build(source, dockerfile) +} + +func (bm *BuildManager) initializeClientSession(ctx context.Context, cancel func(), options *types.ImageBuildOptions) (builder.Source, error) { + if options.SessionID == "" || bm.sg == nil { + return nil, nil + } + logrus.Debug("client is session enabled") + + ctx, cancelCtx := context.WithTimeout(ctx, sessionConnectTimeout) + defer cancelCtx() + + c, err := bm.sg.Get(ctx, options.SessionID) + if err != nil { + return nil, err + } + go func() { + <-c.Context().Done() + cancel() + }() + if options.RemoteContext == remotecontext.ClientSessionRemote { + st := time.Now() + csi, err := NewClientSessionSourceIdentifier(ctx, bm.sg, options.SessionID) + if err != nil { + return nil, err + } + src, err := bm.fsCache.SyncFrom(ctx, csi) + if err != nil { + return nil, err + } + logrus.Debugf("sync-time: %v", time.Since(st)) + return src, nil + } + return nil, nil +} + +// builderOptions are the dependencies required by the builder +type builderOptions struct { + Options *types.ImageBuildOptions + Backend builder.Backend + ProgressWriter backend.ProgressWriter + PathCache pathCache + Archiver *archive.Archiver + Platform string +} // Builder is a Dockerfile builder // It implements the builder.Backend interface. 
@@ -46,244 +171,161 @@ type Builder struct { Stdout io.Writer Stderr io.Writer + Aux *streamformatter.AuxFormatter Output io.Writer docker builder.Backend - context builder.Context clientCtx context.Context - cancel context.CancelFunc - runConfig *container.Config // runconfig for cmd, run, entrypoint etc. - flags *BFlags - tmpContainers map[string]struct{} - image string // imageID - imageContexts *imageContexts // helper for storing contexts from builds - noBaseImage bool // A flag to track the use of `scratch` as the base image - maintainer string - cmdSet bool - disableCommit bool - cacheBusted bool - buildArgs *buildArgs - directive parser.Directive + archiver *archive.Archiver + buildStages *buildStages + disableCommit bool + buildArgs *buildArgs + imageSources *imageSources + pathCache pathCache + containerManager *containerManager + imageProber ImageProber - // TODO: remove once docker.Commit can receive a tag - id string - - imageCache builder.ImageCache - from builder.Image + // TODO @jhowardmft LCOW Support. This will be moved to options at a later + // stage, however that cannot be done now as it affects the public API + // if it were. + platform string } -// BuildManager implements builder.Backend and is shared across all Builder objects. -type BuildManager struct { - backend builder.Backend - pathCache *pathCache // TODO: make this persistent -} - -// NewBuildManager creates a BuildManager. -func NewBuildManager(b builder.Backend) (bm *BuildManager) { - return &BuildManager{backend: b, pathCache: &pathCache{}} -} - -// BuildFromContext builds a new image from a given context. 
-func (bm *BuildManager) BuildFromContext(ctx context.Context, src io.ReadCloser, remote string, buildOptions *types.ImageBuildOptions, pg backend.ProgressWriter) (string, error) { - if buildOptions.Squash && !bm.backend.HasExperimental() { - return "", apierrors.NewBadRequestError(errors.New("squash is only supported with experimental mode")) - } - buildContext, dockerfileName, err := builder.DetectContextFromRemoteURL(src, remote, pg.ProgressReaderFunc) - if err != nil { - return "", err - } - defer func() { - if err := buildContext.Close(); err != nil { - logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err) - } - }() - - if len(dockerfileName) > 0 { - buildOptions.Dockerfile = dockerfileName - } - b, err := NewBuilder(ctx, buildOptions, bm.backend, builder.DockerIgnoreContext{ModifiableContext: buildContext}) - if err != nil { - return "", err - } - b.imageContexts.cache = bm.pathCache - return b.build(pg.StdoutFormatter, pg.StderrFormatter, pg.Output) -} - -// NewBuilder creates a new Dockerfile builder from an optional dockerfile and a Config. -// If dockerfile is nil, the Dockerfile specified by Config.DockerfileName, -// will be read from the Context passed to Build(). -func NewBuilder(clientCtx context.Context, config *types.ImageBuildOptions, backend builder.Backend, buildContext builder.Context) (b *Builder, err error) { +// newBuilder creates a new Dockerfile builder from an optional dockerfile and a Options. +// TODO @jhowardmsft LCOW support: Eventually platform can be moved into the builder +// options, however, that would be an API change as it shares types.ImageBuildOptions. 
+func newBuilder(clientCtx context.Context, options builderOptions) *Builder { + config := options.Options if config == nil { config = new(types.ImageBuildOptions) } - ctx, cancel := context.WithCancel(clientCtx) - b = &Builder{ - clientCtx: ctx, - cancel: cancel, - options: config, - Stdout: os.Stdout, - Stderr: os.Stderr, - docker: backend, - context: buildContext, - runConfig: new(container.Config), - tmpContainers: map[string]struct{}{}, - id: stringid.GenerateNonCryptoID(), - buildArgs: newBuildArgs(config.BuildArgs), - directive: parser.Directive{ - EscapeSeen: false, - LookingForDirectives: true, - }, - } - b.imageContexts = &imageContexts{b: b} - parser.SetEscapeToken(parser.DefaultEscapeToken, &b.directive) // Assume the default token for escape - return b, nil + // @jhowardmsft LCOW Support. For the time being, this is interim. Eventually + // will be moved to types.ImageBuildOptions, but it can't for now as that would + // be an API change. + if options.Platform == "" { + options.Platform = runtime.GOOS + } + if options.Platform == "windows" && system.LCOWSupported() { + options.Platform = "linux" + } + + b := &Builder{ + clientCtx: clientCtx, + options: config, + Stdout: options.ProgressWriter.StdoutFormatter, + Stderr: options.ProgressWriter.StderrFormatter, + Aux: options.ProgressWriter.AuxFormatter, + Output: options.ProgressWriter.Output, + docker: options.Backend, + archiver: options.Archiver, + buildArgs: newBuildArgs(config.BuildArgs), + buildStages: newBuildStages(), + imageSources: newImageSources(clientCtx, options), + pathCache: options.PathCache, + imageProber: newImageProber(options.Backend, config.CacheFrom, options.Platform, config.NoCache), + containerManager: newContainerManager(options.Backend), + platform: options.Platform, + } + + return b } -func (b *Builder) resetImageCache() { - if icb, ok := b.docker.(builder.ImageCacheBuilder); ok { - b.imageCache = icb.MakeImageCache(b.options.CacheFrom) +// Build runs the Dockerfile builder by 
parsing the Dockerfile and executing +// the instructions from the file. +func (b *Builder) build(source builder.Source, dockerfile *parser.Result) (*builder.Result, error) { + defer b.imageSources.Unmount() + + addNodesForLabelOption(dockerfile.AST, b.options.Labels) + + if err := checkDispatchDockerfile(dockerfile.AST); err != nil { + buildsFailed.WithValues(metricsDockerfileSyntaxError).Inc() + return nil, err } - b.noBaseImage = false - b.cacheBusted = false -} -// sanitizeRepoAndTags parses the raw "t" parameter received from the client -// to a slice of repoAndTag. -// It also validates each repoName and tag. -func sanitizeRepoAndTags(names []string) ([]reference.Named, error) { - var ( - repoAndTags []reference.Named - // This map is used for deduplicating the "-t" parameter. - uniqNames = make(map[string]struct{}) - ) - for _, repo := range names { - if repo == "" { - continue - } - - ref, err := reference.ParseNormalizedNamed(repo) - if err != nil { - return nil, err - } - - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return nil, errors.New("build tag cannot contain a digest") - } - - ref = reference.TagNameOnly(ref) - - nameWithTag := ref.String() - - if _, exists := uniqNames[nameWithTag]; !exists { - uniqNames[nameWithTag] = struct{}{} - repoAndTags = append(repoAndTags, ref) - } - } - return repoAndTags, nil -} - -// build runs the Dockerfile builder from a context and a docker object that allows to make calls -// to Docker. -// -// This will (barring errors): -// -// * read the dockerfile from context -// * parse the dockerfile if not already parsed -// * walk the AST and execute it by dispatching to handlers. If Remove -// or ForceRemove is set, additional cleanup around containers happens after -// processing. -// * Tag image, if applicable. -// * Print a happy message and return the image ID. 
-// -func (b *Builder) build(stdout io.Writer, stderr io.Writer, out io.Writer) (string, error) { - defer b.imageContexts.unmount() - - b.Stdout = stdout - b.Stderr = stderr - b.Output = out - - dockerfile, err := b.readDockerfile() + dispatchState, err := b.dispatchDockerfileWithCancellation(dockerfile, source) if err != nil { - return "", err + return nil, err } - repoAndTags, err := sanitizeRepoAndTags(b.options.Tags) - if err != nil { - return "", err + if b.options.Target != "" && !dispatchState.isCurrentStage(b.options.Target) { + buildsFailed.WithValues(metricsBuildTargetNotReachableError).Inc() + return nil, errors.Errorf("failed to reach build target %s in Dockerfile", b.options.Target) } - addNodesForLabelOption(dockerfile, b.options.Labels) + dockerfile.PrintWarnings(b.Stderr) + b.buildArgs.WarnOnUnusedBuildArgs(b.Stderr) - var shortImgID string - total := len(dockerfile.Children) - for _, n := range dockerfile.Children { - if err := b.checkDispatch(n, false); err != nil { - return "", perrors.Wrapf(err, "Dockerfile parse error line %d", n.StartLine) - } + if dispatchState.imageID == "" { + buildsFailed.WithValues(metricsDockerfileEmptyError).Inc() + return nil, errors.New("No image was generated. 
Is your Dockerfile empty?") } + return &builder.Result{ImageID: dispatchState.imageID, FromImage: dispatchState.baseImage}, nil +} - for i, n := range dockerfile.Children { +func emitImageID(aux *streamformatter.AuxFormatter, state *dispatchState) error { + if aux == nil || state.imageID == "" { + return nil + } + return aux.Emit(types.BuildResult{ID: state.imageID}) +} + +func (b *Builder) dispatchDockerfileWithCancellation(dockerfile *parser.Result, source builder.Source) (*dispatchState, error) { + shlex := NewShellLex(dockerfile.EscapeToken) + state := newDispatchState() + total := len(dockerfile.AST.Children) + var err error + for i, n := range dockerfile.AST.Children { select { case <-b.clientCtx.Done(): logrus.Debug("Builder: build cancelled!") fmt.Fprint(b.Stdout, "Build cancelled") - return "", errors.New("Build cancelled") + buildsFailed.WithValues(metricsBuildCanceled).Inc() + return nil, errors.New("Build cancelled") default: // Not cancelled yet, keep going... } - if command.From == n.Value && b.imageContexts.isCurrentTarget(b.options.Target) { + // If this is a FROM and we have a previous image then + // emit an aux message for that image since it is the + // end of the previous stage + if n.Value == command.From { + if err := emitImageID(b.Aux, state); err != nil { + return nil, err + } + } + + if n.Value == command.From && state.isCurrentStage(b.options.Target) { break } - if err := b.dispatch(i, total, n); err != nil { + opts := dispatchOptions{ + state: state, + stepMsg: formatStep(i, total), + node: n, + shlex: shlex, + source: source, + } + if state, err = b.dispatch(opts); err != nil { if b.options.ForceRemove { - b.clearTmp() + b.containerManager.RemoveAll(b.Stdout) } - return "", err + return nil, err } - shortImgID = stringid.TruncateID(b.image) - fmt.Fprintf(b.Stdout, " ---> %s\n", shortImgID) + fmt.Fprintf(b.Stdout, " ---> %s\n", stringid.TruncateID(state.imageID)) if b.options.Remove { - b.clearTmp() + 
b.containerManager.RemoveAll(b.Stdout) } } - if b.options.Target != "" && !b.imageContexts.isCurrentTarget(b.options.Target) { - return "", perrors.Errorf("failed to reach build target %s in Dockerfile", b.options.Target) + // Emit a final aux message for the final image + if err := emitImageID(b.Aux, state); err != nil { + return nil, err } - b.warnOnUnusedBuildArgs() - - if b.image == "" { - return "", errors.New("No image was generated. Is your Dockerfile empty?") - } - - if b.options.Squash { - var fromID string - if b.from != nil { - fromID = b.from.ImageID() - } - b.image, err = b.docker.SquashImage(b.image, fromID) - if err != nil { - return "", perrors.Wrap(err, "error squashing image") - } - } - - fmt.Fprintf(b.Stdout, "Successfully built %s\n", shortImgID) - - imageID := image.ID(b.image) - for _, rt := range repoAndTags { - if err := b.docker.TagImageWithReference(imageID, rt); err != nil { - return "", err - } - fmt.Fprintf(b.Stdout, "Successfully tagged %s\n", reference.FamiliarString(rt)) - } - - return b.image, nil + return state, nil } func addNodesForLabelOption(dockerfile *parser.Node, labels map[string]string) { @@ -295,25 +337,6 @@ func addNodesForLabelOption(dockerfile *parser.Node, labels map[string]string) { dockerfile.Children = append(dockerfile.Children, node) } -// check if there are any leftover build-args that were passed but not -// consumed during build. Print a warning, if there are any. -func (b *Builder) warnOnUnusedBuildArgs() { - leftoverArgs := b.buildArgs.UnreferencedOptionArgs() - if len(leftoverArgs) > 0 { - fmt.Fprintf(b.Stderr, "[Warning] One or more build-args %v were not consumed\n", leftoverArgs) - } -} - -// hasFromImage returns true if the builder has processed a `FROM ` line -func (b *Builder) hasFromImage() bool { - return b.image != "" || b.noBaseImage -} - -// Cancel cancels an ongoing Dockerfile build. 
-func (b *Builder) Cancel() { - b.cancel() -} - // BuildFromConfig builds directly from `changes`, treating it as if it were the contents of a Dockerfile // It will: // - Call parse.Parse() to get an AST root for the concatenated Dockerfile entries. @@ -324,40 +347,74 @@ func (b *Builder) Cancel() { // // TODO: Remove? func BuildFromConfig(config *container.Config, changes []string) (*container.Config, error) { - b, err := NewBuilder(context.Background(), nil, nil, nil) + if len(changes) == 0 { + return config, nil + } + + b := newBuilder(context.Background(), builderOptions{ + Options: &types.ImageBuildOptions{NoCache: true}, + }) + + dockerfile, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n"))) if err != nil { return nil, err } - ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n")), &b.directive) - if err != nil { - return nil, err + // TODO @jhowardmsft LCOW support. For now, if LCOW enabled, switch to linux. + // Also explicitly set the platform. Ultimately this will be in the builder + // options, but we can't do that yet as it would change the API. 
+ if dockerfile.Platform == "" { + dockerfile.Platform = runtime.GOOS } + if dockerfile.Platform == "windows" && system.LCOWSupported() { + dockerfile.Platform = "linux" + } + b.platform = dockerfile.Platform // ensure that the commands are valid - for _, n := range ast.Children { + for _, n := range dockerfile.AST.Children { if !validCommitCommands[n.Value] { return nil, fmt.Errorf("%s is not a valid change command", n.Value) } } - b.runConfig = config b.Stdout = ioutil.Discard b.Stderr = ioutil.Discard b.disableCommit = true - total := len(ast.Children) - for _, n := range ast.Children { - if err := b.checkDispatch(n, false); err != nil { - return nil, err + if err := checkDispatchDockerfile(dockerfile.AST); err != nil { + return nil, err + } + dispatchState := newDispatchState() + dispatchState.runConfig = config + return dispatchFromDockerfile(b, dockerfile, dispatchState, nil) +} + +func checkDispatchDockerfile(dockerfile *parser.Node) error { + for _, n := range dockerfile.Children { + if err := checkDispatch(n); err != nil { + return errors.Wrapf(err, "Dockerfile parse error line %d", n.StartLine) } } + return nil +} + +func dispatchFromDockerfile(b *Builder, result *parser.Result, dispatchState *dispatchState, source builder.Source) (*container.Config, error) { + shlex := NewShellLex(result.EscapeToken) + ast := result.AST + total := len(ast.Children) for i, n := range ast.Children { - if err := b.dispatch(i, total, n); err != nil { + opts := dispatchOptions{ + state: dispatchState, + stepMsg: formatStep(i, total), + node: n, + shlex: shlex, + source: source, + } + if _, err := b.dispatch(opts); err != nil { return nil, err } } - - return b.runConfig, nil + return dispatchState.runConfig, nil } diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/builder_test.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/builder_test.go index 9a5a17ef4..5fedca0fd 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/builder_test.go 
+++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/builder_test.go @@ -5,15 +5,13 @@ import ( "testing" "github.com/docker/docker/builder/dockerfile/parser" - "github.com/docker/docker/pkg/testutil/assert" + "github.com/stretchr/testify/assert" ) func TestAddNodesForLabelOption(t *testing.T) { dockerfile := "FROM scratch" - d := parser.Directive{} - parser.SetEscapeToken(parser.DefaultEscapeToken, &d) - nodes, err := parser.Parse(strings.NewReader(dockerfile), &d) - assert.NilError(t, err) + result, err := parser.Parse(strings.NewReader(dockerfile)) + assert.NoError(t, err) labels := map[string]string{ "org.e": "cli-e", @@ -22,14 +20,15 @@ func TestAddNodesForLabelOption(t *testing.T) { "org.b": "cli-b", "org.a": "cli-a", } + nodes := result.AST addNodesForLabelOption(nodes, labels) expected := []string{ "FROM scratch", `LABEL "org.a"='cli-a' "org.b"='cli-b' "org.c"='cli-c' "org.d"='cli-d' "org.e"='cli-e'`, } - assert.Equal(t, len(nodes.Children), 2) + assert.Len(t, nodes.Children, 2) for i, v := range nodes.Children { - assert.Equal(t, v.Original, expected[i]) + assert.Equal(t, expected[i], v.Original) } } diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go index 76a7ce74f..5ea63da82 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go @@ -2,4 +2,6 @@ package dockerfile -var defaultShell = []string{"/bin/sh", "-c"} +func defaultShellForPlatform(platform string) []string { + return []string{"/bin/sh", "-c"} +} diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go index 37e9fbcf4..7bfef3238 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go +++ 
b/fn/vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go @@ -1,3 +1,8 @@ package dockerfile -var defaultShell = []string{"cmd", "/S", "/C"} +func defaultShellForPlatform(platform string) []string { + if platform == "linux" { + return []string{"/bin/sh", "-c"} + } + return []string{"cmd", "/S", "/C"} +} diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/clientsession.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/clientsession.go new file mode 100644 index 000000000..a7709ce51 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/clientsession.go @@ -0,0 +1,77 @@ +package dockerfile + +import ( + "time" + + "github.com/docker/docker/builder/fscache" + "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/client/session" + "github.com/docker/docker/client/session/filesync" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +const sessionConnectTimeout = 5 * time.Second + +// ClientSessionTransport is a transport for copying files from docker client +// to the daemon. +type ClientSessionTransport struct{} + +// NewClientSessionTransport returns new ClientSessionTransport instance +func NewClientSessionTransport() *ClientSessionTransport { + return &ClientSessionTransport{} +} + +// Copy data from a remote to a destination directory. 
+func (cst *ClientSessionTransport) Copy(ctx context.Context, id fscache.RemoteIdentifier, dest string, cu filesync.CacheUpdater) error { + csi, ok := id.(*ClientSessionSourceIdentifier) + if !ok { + return errors.New("invalid identifier for client session") + } + + return filesync.FSSync(ctx, csi.caller, filesync.FSSendRequestOpt{ + IncludePatterns: csi.includePatterns, + DestDir: dest, + CacheUpdater: cu, + }) +} + +// ClientSessionSourceIdentifier is an identifier that can be used for requesting +// files from remote client +type ClientSessionSourceIdentifier struct { + includePatterns []string + caller session.Caller + sharedKey string + uuid string +} + +// NewClientSessionSourceIdentifier returns new ClientSessionSourceIdentifier instance +func NewClientSessionSourceIdentifier(ctx context.Context, sg SessionGetter, uuid string) (*ClientSessionSourceIdentifier, error) { + csi := &ClientSessionSourceIdentifier{ + uuid: uuid, + } + caller, err := sg.Get(ctx, uuid) + if err != nil { + return nil, errors.Wrapf(err, "failed to get session for %s", uuid) + } + + csi.caller = caller + return csi, nil +} + +// Transport returns transport identifier for remote identifier +func (csi *ClientSessionSourceIdentifier) Transport() string { + return remotecontext.ClientSessionRemote +} + +// SharedKey returns shared key for remote identifier. Shared key is used +// for finding the base for a repeated transfer. +func (csi *ClientSessionSourceIdentifier) SharedKey() string { + return csi.caller.SharedKey() +} + +// Key returns unique key for remote identifier. Requests with same key return +// same data. 
+func (csi *ClientSessionSourceIdentifier) Key() string { + return csi.uuid +} diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/containerbackend.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/containerbackend.go new file mode 100644 index 000000000..7b241f3d3 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/containerbackend.go @@ -0,0 +1,144 @@ +package dockerfile + +import ( + "fmt" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +type containerManager struct { + tmpContainers map[string]struct{} + backend builder.ExecBackend +} + +// newContainerManager creates a new container backend +func newContainerManager(docker builder.ExecBackend) *containerManager { + return &containerManager{ + backend: docker, + tmpContainers: make(map[string]struct{}), + } +} + +// Create a container +func (c *containerManager) Create(runConfig *container.Config, hostConfig *container.HostConfig, platform string) (container.ContainerCreateCreatedBody, error) { + container, err := c.backend.ContainerCreate(types.ContainerCreateConfig{ + Config: runConfig, + HostConfig: hostConfig, + Platform: platform, + }) + if err != nil { + return container, err + } + c.tmpContainers[container.ID] = struct{}{} + return container, nil +} + +var errCancelled = errors.New("build cancelled") + +// Run a container by ID +func (c *containerManager) Run(ctx context.Context, cID string, stdout, stderr io.Writer) (err error) { + attached := make(chan struct{}) + errCh := make(chan error) + go func() { + errCh <- c.backend.ContainerAttachRaw(cID, nil, stdout, stderr, true, attached) + }() + select { + case err := <-errCh: + return err + case <-attached: + } + + finished := make(chan 
struct{}) + cancelErrCh := make(chan error, 1) + go func() { + select { + case <-ctx.Done(): + logrus.Debugln("Build cancelled, killing and removing container:", cID) + c.backend.ContainerKill(cID, 0) + c.removeContainer(cID, stdout) + cancelErrCh <- errCancelled + case <-finished: + cancelErrCh <- nil + } + }() + + if err := c.backend.ContainerStart(cID, nil, "", ""); err != nil { + close(finished) + logCancellationError(cancelErrCh, "error from ContainerStart: "+err.Error()) + return err + } + + // Block on reading output from container, stop on err or chan closed + if err := <-errCh; err != nil { + close(finished) + logCancellationError(cancelErrCh, "error from errCh: "+err.Error()) + return err + } + + waitC, err := c.backend.ContainerWait(ctx, cID, containerpkg.WaitConditionNotRunning) + if err != nil { + close(finished) + logCancellationError(cancelErrCh, fmt.Sprintf("unable to begin ContainerWait: %s", err)) + return err + } + + if status := <-waitC; status.ExitCode() != 0 { + close(finished) + logCancellationError(cancelErrCh, + fmt.Sprintf("a non-zero code from ContainerWait: %d", status.ExitCode())) + return &statusCodeError{code: status.ExitCode(), err: err} + } + + close(finished) + return <-cancelErrCh +} + +func logCancellationError(cancelErrCh chan error, msg string) { + if cancelErr := <-cancelErrCh; cancelErr != nil { + logrus.Debugf("Build cancelled (%v): ", cancelErr, msg) + } +} + +type statusCodeError struct { + code int + err error +} + +func (e *statusCodeError) Error() string { + return e.err.Error() +} + +func (e *statusCodeError) StatusCode() int { + return e.code +} + +func (c *containerManager) removeContainer(containerID string, stdout io.Writer) error { + rmConfig := &types.ContainerRmConfig{ + ForceRemove: true, + RemoveVolume: true, + } + if err := c.backend.ContainerRm(containerID, rmConfig); err != nil { + fmt.Fprintf(stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(containerID), err) + return err + } + 
return nil +} + +// RemoveAll containers managed by this container manager +func (c *containerManager) RemoveAll(stdout io.Writer) { + for containerID := range c.tmpContainers { + if err := c.removeContainer(containerID, stdout); err != nil { + return + } + delete(c.tmpContainers, containerID) + fmt.Fprintf(stdout, "Removing intermediate container %s\n", stringid.TruncateID(containerID)) + } +} diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/copy.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/copy.go new file mode 100644 index 000000000..c7db943f5 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/copy.go @@ -0,0 +1,444 @@ +package dockerfile + +import ( + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/urlutil" + "github.com/pkg/errors" +) + +type pathCache interface { + Load(key interface{}) (value interface{}, ok bool) + Store(key, value interface{}) +} + +// copyInfo is a data object which stores the metadata about each source file in +// a copyInstruction +type copyInfo struct { + root string + path string + hash string + noDecompress bool +} + +func (c copyInfo) fullPath() (string, error) { + return symlink.FollowSymlinkInScope(filepath.Join(c.root, c.path), c.root) +} + +func newCopyInfoFromSource(source builder.Source, path string, hash string) copyInfo { + return copyInfo{root: source.Root(), path: path, hash: hash} +} + +func newCopyInfos(copyInfos ...copyInfo) []copyInfo { + return copyInfos +} + +// copyInstruction is a fully parsed COPY or ADD 
command that is passed to +// Builder.performCopy to copy files into the image filesystem +type copyInstruction struct { + cmdName string + infos []copyInfo + dest string + allowLocalDecompression bool +} + +// copier reads a raw COPY or ADD command, fetches remote sources using a downloader, +// and creates a copyInstruction +type copier struct { + imageSource *imageMount + source builder.Source + pathCache pathCache + download sourceDownloader + tmpPaths []string +} + +func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, imageSource *imageMount) copier { + return copier{ + source: req.source, + pathCache: req.builder.pathCache, + download: download, + imageSource: imageSource, + } +} + +func (o *copier) createCopyInstruction(args []string, cmdName string) (copyInstruction, error) { + inst := copyInstruction{cmdName: cmdName} + last := len(args) - 1 + + // Work in daemon-specific filepath semantics + inst.dest = filepath.FromSlash(args[last]) + + infos, err := o.getCopyInfosForSourcePaths(args[0:last]) + if err != nil { + return inst, errors.Wrapf(err, "%s failed", cmdName) + } + if len(infos) > 1 && !strings.HasSuffix(inst.dest, string(os.PathSeparator)) { + return inst, errors.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) + } + inst.infos = infos + return inst, nil +} + +// getCopyInfosForSourcePaths iterates over the source files and calculate the info +// needed to copy (e.g. hash value if cached) +func (o *copier) getCopyInfosForSourcePaths(sources []string) ([]copyInfo, error) { + var infos []copyInfo + for _, orig := range sources { + subinfos, err := o.getCopyInfoForSourcePath(orig) + if err != nil { + return nil, err + } + infos = append(infos, subinfos...) 
+ } + + if len(infos) == 0 { + return nil, errors.New("no source files were specified") + } + return infos, nil +} + +func (o *copier) getCopyInfoForSourcePath(orig string) ([]copyInfo, error) { + if !urlutil.IsURL(orig) { + return o.calcCopyInfo(orig, true) + } + remote, path, err := o.download(orig) + if err != nil { + return nil, err + } + o.tmpPaths = append(o.tmpPaths, remote.Root()) + + hash, err := remote.Hash(path) + ci := newCopyInfoFromSource(remote, path, hash) + ci.noDecompress = true // data from http shouldn't be extracted even on ADD + return newCopyInfos(ci), err +} + +// Cleanup removes any temporary directories created as part of downloading +// remote files. +func (o *copier) Cleanup() { + for _, path := range o.tmpPaths { + os.RemoveAll(path) + } + o.tmpPaths = []string{} +} + +// TODO: allowWildcards can probably be removed by refactoring this function further. +func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo, error) { + imageSource := o.imageSource + if err := validateCopySourcePath(imageSource, origPath); err != nil { + return nil, err + } + + // Work in daemon-specific OS filepath semantics + origPath = filepath.FromSlash(origPath) + origPath = strings.TrimPrefix(origPath, string(os.PathSeparator)) + origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) + + // TODO: do this when creating copier. Requires validateCopySourcePath + // (and other below) to be aware of the difference sources. Why is it only + // done on image Source? 
+ if imageSource != nil { + var err error + o.source, err = imageSource.Source() + if err != nil { + return nil, errors.Wrapf(err, "failed to copy from %s", imageSource.ImageID()) + } + } + + if o.source == nil { + return nil, errors.Errorf("missing build context") + } + + // Deal with wildcards + if allowWildcards && containsWildcards(origPath) { + return o.copyWithWildcards(origPath) + } + + if imageSource != nil && imageSource.ImageID() != "" { + // return a cached copy if one exists + if h, ok := o.pathCache.Load(imageSource.ImageID() + origPath); ok { + return newCopyInfos(newCopyInfoFromSource(o.source, origPath, h.(string))), nil + } + } + + // Deal with the single file case + copyInfo, err := copyInfoForFile(o.source, origPath) + switch { + case err != nil: + return nil, err + case copyInfo.hash != "": + o.storeInPathCache(imageSource, origPath, copyInfo.hash) + return newCopyInfos(copyInfo), err + } + + // TODO: remove, handle dirs in Hash() + subfiles, err := walkSource(o.source, origPath) + if err != nil { + return nil, err + } + + hash := hashStringSlice("dir", subfiles) + o.storeInPathCache(imageSource, origPath, hash) + return newCopyInfos(newCopyInfoFromSource(o.source, origPath, hash)), nil +} + +func (o *copier) storeInPathCache(im *imageMount, path string, hash string) { + if im != nil { + o.pathCache.Store(im.ImageID()+path, hash) + } +} + +func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) { + var copyInfos []copyInfo + if err := filepath.Walk(o.source.Root(), func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + rel, err := remotecontext.Rel(o.source.Root(), path) + if err != nil { + return err + } + + if rel == "." 
{ + return nil + } + if match, _ := filepath.Match(origPath, rel); !match { + return nil + } + + // Note we set allowWildcards to false in case the name has + // a * in it + subInfos, err := o.calcCopyInfo(rel, false) + if err != nil { + return err + } + copyInfos = append(copyInfos, subInfos...) + return nil + }); err != nil { + return nil, err + } + return copyInfos, nil +} + +func copyInfoForFile(source builder.Source, path string) (copyInfo, error) { + fi, err := remotecontext.StatAt(source, path) + if err != nil { + return copyInfo{}, err + } + + if fi.IsDir() { + return copyInfo{}, nil + } + hash, err := source.Hash(path) + if err != nil { + return copyInfo{}, err + } + return newCopyInfoFromSource(source, path, "file:"+hash), nil +} + +// TODO: dedupe with copyWithWildcards() +func walkSource(source builder.Source, origPath string) ([]string, error) { + fp, err := remotecontext.FullPath(source, origPath) + if err != nil { + return nil, err + } + // Must be a dir + var subfiles []string + err = filepath.Walk(fp, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + rel, err := remotecontext.Rel(source.Root(), path) + if err != nil { + return err + } + if rel == "." 
{ + return nil + } + hash, err := source.Hash(rel) + if err != nil { + return nil + } + // we already checked handleHash above + subfiles = append(subfiles, hash) + return nil + }) + if err != nil { + return nil, err + } + + sort.Strings(subfiles) + return subfiles, nil +} + +type sourceDownloader func(string) (builder.Source, string, error) + +func newRemoteSourceDownloader(output, stdout io.Writer) sourceDownloader { + return func(url string) (builder.Source, string, error) { + return downloadSource(output, stdout, url) + } +} + +func errOnSourceDownload(_ string) (builder.Source, string, error) { + return nil, "", errors.New("source can't be a URL for COPY") +} + +func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote builder.Source, p string, err error) { + u, err := url.Parse(srcURL) + if err != nil { + return + } + filename := filepath.Base(filepath.FromSlash(u.Path)) // Ensure in platform semantics + if filename == "" { + err = errors.Errorf("cannot determine filename from url: %s", u) + return + } + + resp, err := remotecontext.GetWithStatusError(srcURL) + if err != nil { + return + } + + // Prepare file in a tmp dir + tmpDir, err := ioutils.TempDir("", "docker-remote") + if err != nil { + return + } + defer func() { + if err != nil { + os.RemoveAll(tmpDir) + } + }() + tmpFileName := filepath.Join(tmpDir, filename) + tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return + } + + progressOutput := streamformatter.NewJSONProgressOutput(output, true) + progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading") + // Download and dump result to tmp file + // TODO: add filehash directly + if _, err = io.Copy(tmpFile, progressReader); err != nil { + tmpFile.Close() + return + } + // TODO: how important is this random blank line to the output? 
+ fmt.Fprintln(stdout) + + // Set the mtime to the Last-Modified header value if present + // Otherwise just remove atime and mtime + mTime := time.Time{} + + lastMod := resp.Header.Get("Last-Modified") + if lastMod != "" { + // If we can't parse it then just let it default to 'zero' + // otherwise use the parsed time value + if parsedMTime, err := http.ParseTime(lastMod); err == nil { + mTime = parsedMTime + } + } + + tmpFile.Close() + + if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil { + return + } + + lc, err := remotecontext.NewLazySource(tmpDir) + return lc, filename, err +} + +type copyFileOptions struct { + decompress bool + archiver *archive.Archiver +} + +func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions) error { + srcPath, err := source.fullPath() + if err != nil { + return err + } + destPath, err := dest.fullPath() + if err != nil { + return err + } + + archiver := options.archiver + + src, err := os.Stat(srcPath) + if err != nil { + return errors.Wrapf(err, "source path not found") + } + if src.IsDir() { + return copyDirectory(archiver, srcPath, destPath) + } + if options.decompress && archive.IsArchivePath(srcPath) && !source.noDecompress { + return archiver.UntarPath(srcPath, destPath) + } + + destExistsAsDir, err := isExistingDirectory(destPath) + if err != nil { + return err + } + // dest.path must be used because destPath has already been cleaned of any + // trailing slash + if endsInSlash(dest.path) || destExistsAsDir { + // source.path must be used to get the correct filename when the source + // is a symlink + destPath = filepath.Join(destPath, filepath.Base(source.path)) + } + return copyFile(archiver, srcPath, destPath) +} + +func copyDirectory(archiver *archive.Archiver, source, dest string) error { + if err := archiver.CopyWithTar(source, dest); err != nil { + return errors.Wrapf(err, "failed to copy directory") + } + return fixPermissions(source, dest, archiver.IDMappings.RootPair()) +} + +func 
copyFile(archiver *archive.Archiver, source, dest string) error { + rootIDs := archiver.IDMappings.RootPair() + + if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest), 0755, rootIDs); err != nil { + return errors.Wrapf(err, "failed to create new directory") + } + if err := archiver.CopyFileWithTar(source, dest); err != nil { + return errors.Wrapf(err, "failed to copy file") + } + return fixPermissions(source, dest, rootIDs) +} + +func endsInSlash(path string) bool { + return strings.HasSuffix(path, string(os.PathSeparator)) +} + +// isExistingDirectory returns true if the path exists and is a directory +func isExistingDirectory(path string) (bool, error) { + destStat, err := os.Stat(path) + switch { + case os.IsNotExist(err): + return false, nil + case err != nil: + return false, err + } + return destStat.IsDir(), nil +} diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/copy_test.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/copy_test.go new file mode 100644 index 000000000..aee225b5f --- /dev/null +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/copy_test.go @@ -0,0 +1,45 @@ +package dockerfile + +import ( + "testing" + + "github.com/docker/docker/pkg/testutil/tempfile" + "github.com/stretchr/testify/assert" +) + +func TestIsExistingDirectory(t *testing.T) { + tmpfile := tempfile.NewTempFile(t, "file-exists-test", "something") + defer tmpfile.Remove() + tmpdir := tempfile.NewTempDir(t, "dir-exists-test") + defer tmpdir.Remove() + + var testcases = []struct { + doc string + path string + expected bool + }{ + { + doc: "directory exists", + path: tmpdir.Path, + expected: true, + }, + { + doc: "path doesn't exist", + path: "/bogus/path/does/not/exist", + expected: false, + }, + { + doc: "file exists", + path: tmpfile.Name(), + expected: false, + }, + } + + for _, testcase := range testcases { + result, err := isExistingDirectory(testcase.path) + if !assert.NoError(t, err) { + continue + } + assert.Equal(t, 
testcase.expected, result, testcase.doc) + } +} diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/copy_unix.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/copy_unix.go new file mode 100644 index 000000000..326d95bb3 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/copy_unix.go @@ -0,0 +1,36 @@ +// +build !windows + +package dockerfile + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/pkg/idtools" +) + +func fixPermissions(source, destination string, rootIDs idtools.IDPair) error { + skipChownRoot, err := isExistingDirectory(destination) + if err != nil { + return err + } + + // We Walk on the source rather than on the destination because we don't + // want to change permissions on things we haven't created or modified. + return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { + // Do not alter the walk root iff. it existed before, as it doesn't fall under + // the domain of "things we should chown". + if skipChownRoot && source == fullpath { + return nil + } + + // Path is prefixed by source: substitute with destination instead. 
+ cleaned, err := filepath.Rel(source, fullpath) + if err != nil { + return err + } + + fullpath = filepath.Join(destination, cleaned) + return os.Lchown(fullpath, rootIDs.UID, rootIDs.GID) + }) +} diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/copy_windows.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/copy_windows.go new file mode 100644 index 000000000..78f5b0945 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/copy_windows.go @@ -0,0 +1,8 @@ +package dockerfile + +import "github.com/docker/docker/pkg/idtools" + +func fixPermissions(source, destination string, rootIDs idtools.IDPair) error { + // chown is not supported on Windows + return nil +} diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go index ad6aff67b..1f7424124 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go @@ -8,6 +8,7 @@ package dockerfile // package. import ( + "bytes" "fmt" "regexp" "runtime" @@ -18,11 +19,14 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/api" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/strslice" "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" "github.com/docker/go-connections/nat" "github.com/pkg/errors" ) @@ -32,127 +36,104 @@ import ( // Sets the environment variable foo to bar, also makes interpolation // in the dockerfile available from the next statement on via ${foo}. 
// -func env(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) == 0 { +func env(req dispatchRequest) error { + if len(req.args) == 0 { return errAtLeastOneArgument("ENV") } - if len(args)%2 != 0 { + if len(req.args)%2 != 0 { // should never get here, but just in case return errTooManyArguments("ENV") } - if err := b.flags.Parse(); err != nil { + if err := req.flags.Parse(); err != nil { return err } - // TODO/FIXME/NOT USED - // Just here to show how to use the builder flags stuff within the - // context of a builder command. Will remove once we actually add - // a builder command to something! - /* - flBool1 := b.flags.AddBool("bool1", false) - flStr1 := b.flags.AddString("str1", "HI") + runConfig := req.state.runConfig + commitMessage := bytes.NewBufferString("ENV") - if err := b.flags.Parse(); err != nil { - return err - } - - fmt.Printf("Bool1:%v\n", flBool1) - fmt.Printf("Str1:%v\n", flStr1) - */ - - commitStr := "ENV" - - for j := 0; j < len(args); j++ { - // name ==> args[j] - // value ==> args[j+1] - - if len(args[j]) == 0 { + for j := 0; j < len(req.args); j += 2 { + if len(req.args[j]) == 0 { return errBlankCommandNames("ENV") } - newVar := args[j] + "=" + args[j+1] + "" - commitStr += " " + newVar + name := req.args[j] + value := req.args[j+1] + newVar := name + "=" + value + commitMessage.WriteString(" " + newVar) gotOne := false - for i, envVar := range b.runConfig.Env { + for i, envVar := range runConfig.Env { envParts := strings.SplitN(envVar, "=", 2) compareFrom := envParts[0] - compareTo := args[j] - if runtime.GOOS == "windows" { - // Case insensitive environment variables on Windows - compareFrom = strings.ToUpper(compareFrom) - compareTo = strings.ToUpper(compareTo) - } - if compareFrom == compareTo { - b.runConfig.Env[i] = newVar + if equalEnvKeys(compareFrom, name) { + runConfig.Env[i] = newVar gotOne = true break } } if !gotOne { - b.runConfig.Env = append(b.runConfig.Env, newVar) + runConfig.Env = 
append(runConfig.Env, newVar) } - j++ } - return b.commit("", b.runConfig.Cmd, commitStr) + return req.builder.commit(req.state, commitMessage.String()) } // MAINTAINER some text // // Sets the maintainer metadata. -func maintainer(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 1 { +func maintainer(req dispatchRequest) error { + if len(req.args) != 1 { return errExactlyOneArgument("MAINTAINER") } - if err := b.flags.Parse(); err != nil { + if err := req.flags.Parse(); err != nil { return err } - b.maintainer = args[0] - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer)) + maintainer := req.args[0] + req.state.maintainer = maintainer + return req.builder.commit(req.state, "MAINTAINER "+maintainer) } // LABEL some json data describing the image // // Sets the Label variable foo to bar, // -func label(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) == 0 { +func label(req dispatchRequest) error { + if len(req.args) == 0 { return errAtLeastOneArgument("LABEL") } - if len(args)%2 != 0 { + if len(req.args)%2 != 0 { // should never get here, but just in case return errTooManyArguments("LABEL") } - if err := b.flags.Parse(); err != nil { + if err := req.flags.Parse(); err != nil { return err } commitStr := "LABEL" + runConfig := req.state.runConfig - if b.runConfig.Labels == nil { - b.runConfig.Labels = map[string]string{} + if runConfig.Labels == nil { + runConfig.Labels = map[string]string{} } - for j := 0; j < len(args); j++ { - // name ==> args[j] - // value ==> args[j+1] - - if len(args[j]) == 0 { + for j := 0; j < len(req.args); j++ { + name := req.args[j] + if name == "" { return errBlankCommandNames("LABEL") } - newVar := args[j] + "=" + args[j+1] + "" - commitStr += " " + newVar + value := req.args[j+1] + commitStr += " " + name + "=" + value - b.runConfig.Labels[args[j]] = args[j+1] + runConfig.Labels[name] = value j++ } - return 
b.commit("", b.runConfig.Cmd, commitStr) + return req.builder.commit(req.state, commitStr) } // ADD foo /path @@ -160,108 +141,201 @@ func label(b *Builder, args []string, attributes map[string]bool, original strin // Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling // exist here. If you do not wish to have this automatic handling, use COPY. // -func add(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) < 2 { +func add(req dispatchRequest) error { + if len(req.args) < 2 { return errAtLeastTwoArguments("ADD") } - if err := b.flags.Parse(); err != nil { + if err := req.flags.Parse(); err != nil { return err } - return b.runContextCommand(args, true, true, "ADD", nil) + downloader := newRemoteSourceDownloader(req.builder.Output, req.builder.Stdout) + copier := copierFromDispatchRequest(req, downloader, nil) + defer copier.Cleanup() + copyInstruction, err := copier.createCopyInstruction(req.args, "ADD") + if err != nil { + return err + } + copyInstruction.allowLocalDecompression = true + + return req.builder.performCopy(req.state, copyInstruction) } // COPY foo /path // // Same as 'ADD' but without the tar and remote url handling. 
// -func dispatchCopy(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) < 2 { +func dispatchCopy(req dispatchRequest) error { + if len(req.args) < 2 { return errAtLeastTwoArguments("COPY") } - flFrom := b.flags.AddString("from", "") - - if err := b.flags.Parse(); err != nil { + flFrom := req.flags.AddString("from", "") + if err := req.flags.Parse(); err != nil { return err } - var im *imageMount - if flFrom.IsUsed() { - var err error - im, err = b.imageContexts.get(flFrom.Value) - if err != nil { - return err - } + im, err := req.builder.getImageMount(flFrom) + if err != nil { + return errors.Wrapf(err, "invalid from flag value %s", flFrom.Value) } - return b.runContextCommand(args, false, false, "COPY", im) -} - -// FROM imagename -// -// This sets the image the dockerfile will build on top of. -// -func from(b *Builder, args []string, attributes map[string]bool, original string) error { - ctxName := "" - if len(args) == 3 && strings.EqualFold(args[1], "as") { - ctxName = strings.ToLower(args[2]) - if ok, _ := regexp.MatchString("^[a-z][a-z0-9-_\\.]*$", ctxName); !ok { - return errors.Errorf("invalid name for build stage: %q, name can't start with a number or contain symbols", ctxName) - } - } else if len(args) != 1 { - return errExactlyOneArgument("FROM") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - substituionArgs := []string{} - for key, value := range b.buildArgs.GetAllMeta() { - substituionArgs = append(substituionArgs, key+"="+value) - } - - name, err := ProcessWord(args[0], substituionArgs, b.directive.EscapeToken) + copier := copierFromDispatchRequest(req, errOnSourceDownload, im) + defer copier.Cleanup() + copyInstruction, err := copier.createCopyInstruction(req.args, "COPY") if err != nil { return err } - var image builder.Image + return req.builder.performCopy(req.state, copyInstruction) +} - b.resetImageCache() - if _, err := b.imageContexts.new(ctxName, true); err != nil { +func (b 
*Builder) getImageMount(fromFlag *Flag) (*imageMount, error) { + if !fromFlag.IsUsed() { + // TODO: this could return the source in the default case as well? + return nil, nil + } + + var localOnly bool + imageRefOrID := fromFlag.Value + stage, err := b.buildStages.get(fromFlag.Value) + if err != nil { + return nil, err + } + if stage != nil { + imageRefOrID = stage.ImageID() + localOnly = true + } + return b.imageSources.Get(imageRefOrID, localOnly) +} + +// FROM imagename[:tag | @digest] [AS build-stage-name] +// +func from(req dispatchRequest) error { + stageName, err := parseBuildStageName(req.args) + if err != nil { return err } - if im, ok := b.imageContexts.byName[name]; ok { - if len(im.ImageID()) > 0 { - image = im + if err := req.flags.Parse(); err != nil { + return err + } + + req.builder.imageProber.Reset() + image, err := req.builder.getFromImage(req.shlex, req.args[0]) + if err != nil { + return err + } + if err := req.builder.buildStages.add(stageName, image); err != nil { + return err + } + req.state.beginStage(stageName, image) + req.builder.buildArgs.ResetAllowed() + if image.ImageID() == "" { + // Typically this means they used "FROM scratch" + return nil + } + + return processOnBuild(req) +} + +func parseBuildStageName(args []string) (string, error) { + stageName := "" + switch { + case len(args) == 3 && strings.EqualFold(args[1], "as"): + stageName = strings.ToLower(args[2]) + if ok, _ := regexp.MatchString("^[a-z][a-z0-9-_\\.]*$", stageName); !ok { + return "", errors.Errorf("invalid name for build stage: %q, name can't start with a number or contain symbols", stageName) } - } else { - // Windows cannot support a container with no base image. 
- if name == api.NoBaseImageSpecifier { - if runtime.GOOS == "windows" { - return errors.New("Windows does not support FROM scratch") + case len(args) != 1: + return "", errors.New("FROM requires either one or three arguments") + } + + return stageName, nil +} + +// scratchImage is used as a token for the empty base image. +var scratchImage builder.Image = &image.Image{} + +func (b *Builder) getFromImage(shlex *ShellLex, name string) (builder.Image, error) { + substitutionArgs := []string{} + for key, value := range b.buildArgs.GetAllMeta() { + substitutionArgs = append(substitutionArgs, key+"="+value) + } + + name, err := shlex.ProcessWord(name, substitutionArgs) + if err != nil { + return nil, err + } + + var localOnly bool + if stage, ok := b.buildStages.getByName(name); ok { + name = stage.ImageID() + localOnly = true + } + + // Windows cannot support a container with no base image unless it is LCOW. + if name == api.NoBaseImageSpecifier { + if runtime.GOOS == "windows" { + if b.platform == "windows" || (b.platform != "windows" && !system.LCOWSupported()) { + return nil, errors.New("Windows does not support FROM scratch") } - b.image = "" - b.noBaseImage = true - } else { - var err error - image, err = pullOrGetImage(b, name) - if err != nil { + } + return scratchImage, nil + } + imageMount, err := b.imageSources.Get(name, localOnly) + if err != nil { + return nil, err + } + return imageMount.Image(), nil +} + +func processOnBuild(req dispatchRequest) error { + dispatchState := req.state + // Process ONBUILD triggers if they exist + if nTriggers := len(dispatchState.runConfig.OnBuild); nTriggers != 0 { + word := "trigger" + if nTriggers > 1 { + word = "triggers" + } + fmt.Fprintf(req.builder.Stderr, "# Executing %d build %s...\n", nTriggers, word) + } + + // Copy the ONBUILD triggers, and remove them from the config, since the config will be committed. 
+ onBuildTriggers := dispatchState.runConfig.OnBuild + dispatchState.runConfig.OnBuild = []string{} + + // Reset stdin settings as all build actions run without stdin + dispatchState.runConfig.OpenStdin = false + dispatchState.runConfig.StdinOnce = false + + // parse the ONBUILD triggers by invoking the parser + for _, step := range onBuildTriggers { + dockerfile, err := parser.Parse(strings.NewReader(step)) + if err != nil { + return err + } + + for _, n := range dockerfile.AST.Children { + if err := checkDispatch(n); err != nil { return err } + + upperCasedCmd := strings.ToUpper(n.Value) + switch upperCasedCmd { + case "ONBUILD": + return errors.New("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return errors.Errorf("%s isn't allowed as an ONBUILD trigger", upperCasedCmd) + } + } + + if _, err := dispatchFromDockerfile(req.builder, dockerfile, dispatchState, req.source); err != nil { + return err } } - if image != nil { - b.imageContexts.update(image.ImageID(), image.RunConfig()) - } - b.from = image - - b.buildArgs.ResetAllowed() - return b.processImageFrom(image) + return nil } // ONBUILD RUN echo yo @@ -273,16 +347,16 @@ func from(b *Builder, args []string, attributes map[string]bool, original string // special cases. search for 'OnBuild' in internals.go for additional special // cases. 
// -func onbuild(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) == 0 { +func onbuild(req dispatchRequest) error { + if len(req.args) == 0 { return errAtLeastOneArgument("ONBUILD") } - if err := b.flags.Parse(); err != nil { + if err := req.flags.Parse(); err != nil { return err } - triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0])) + triggerInstruction := strings.ToUpper(strings.TrimSpace(req.args[0])) switch triggerInstruction { case "ONBUILD": return errors.New("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") @@ -290,29 +364,30 @@ func onbuild(b *Builder, args []string, attributes map[string]bool, original str return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) } - original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "") - - b.runConfig.OnBuild = append(b.runConfig.OnBuild, original) - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ONBUILD %s", original)) + runConfig := req.state.runConfig + original := regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(req.original, "") + runConfig.OnBuild = append(runConfig.OnBuild, original) + return req.builder.commit(req.state, "ONBUILD "+original) } // WORKDIR /tmp // // Set the working directory for future RUN/CMD/etc statements. // -func workdir(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 1 { +func workdir(req dispatchRequest) error { + if len(req.args) != 1 { return errExactlyOneArgument("WORKDIR") } - err := b.flags.Parse() + err := req.flags.Parse() if err != nil { return err } + runConfig := req.state.runConfig // This is from the Dockerfile and will not necessarily be in platform // specific semantics, hence ensure it is converted. 
- b.runConfig.WorkingDir, err = normaliseWorkdir(b.runConfig.WorkingDir, args[0]) + runConfig.WorkingDir, err = normaliseWorkdir(runConfig.WorkingDir, req.args[0]) if err != nil { return err } @@ -321,39 +396,23 @@ func workdir(b *Builder, args []string, attributes map[string]bool, original str // This avoids having an unnecessary expensive mount/unmount calls // (on Windows in particular) during each container create. // Prior to 1.13, the mkdir was deferred and not executed at this step. - if b.disableCommit { + if req.builder.disableCommit { // Don't call back into the daemon if we're going through docker commit --change "WORKDIR /foo". // We've already updated the runConfig and that's enough. return nil } - b.runConfig.Image = b.image - cmd := b.runConfig.Cmd - comment := "WORKDIR " + b.runConfig.WorkingDir - // reset the command for cache detection - b.runConfig.Cmd = strslice.StrSlice(append(getShell(b.runConfig), "#(nop) "+comment)) - defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) - - if hit, err := b.probeCache(); err != nil { - return err - } else if hit { - return nil - } - - container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{ - Config: b.runConfig, - // Set a log config to override any default value set on the daemon - HostConfig: &container.HostConfig{LogConfig: defaultLogConfig}, - }) - if err != nil { + comment := "WORKDIR " + runConfig.WorkingDir + runConfigWithCommentCmd := copyRunConfig(runConfig, withCmdCommentString(comment, req.builder.platform)) + containerID, err := req.builder.probeAndCreate(req.state, runConfigWithCommentCmd) + if err != nil || containerID == "" { return err } - b.tmpContainers[container.ID] = struct{}{} - if err := b.docker.ContainerCreateWorkdir(container.ID); err != nil { + if err := req.builder.docker.ContainerCreateWorkdir(containerID); err != nil { return err } - return b.commit(container.ID, cmd, comment) + return req.builder.commitContainer(req.state, containerID, 
runConfigWithCommentCmd) } // RUN some command yo @@ -362,96 +421,85 @@ func workdir(b *Builder, args []string, attributes map[string]bool, original str // the current SHELL which defaults to 'sh -c' under linux or 'cmd /S /C' under // Windows, in the event there is only one argument The difference in processing: // -// RUN echo hi # sh -c echo hi (Linux) +// RUN echo hi # sh -c echo hi (Linux and LCOW) // RUN echo hi # cmd /S /C echo hi (Windows) // RUN [ "echo", "hi" ] # echo hi // -func run(b *Builder, args []string, attributes map[string]bool, original string) error { - if !b.hasFromImage() { +func run(req dispatchRequest) error { + if !req.state.hasFromImage() { return errors.New("Please provide a source image with `from` prior to run") } - if err := b.flags.Parse(); err != nil { + if err := req.flags.Parse(); err != nil { return err } - args = handleJSONArgs(args, attributes) - - if !attributes["json"] { - args = append(getShell(b.runConfig), args...) + stateRunConfig := req.state.runConfig + args := handleJSONArgs(req.args, req.attributes) + if !req.attributes["json"] { + args = append(getShell(stateRunConfig, req.builder.platform), args...) } - config := &container.Config{ - Cmd: strslice.StrSlice(args), - Image: b.image, + cmdFromArgs := strslice.StrSlice(args) + buildArgs := req.builder.buildArgs.FilterAllowed(stateRunConfig.Env) + + saveCmd := cmdFromArgs + if len(buildArgs) > 0 { + saveCmd = prependEnvOnCmd(req.builder.buildArgs, buildArgs, cmdFromArgs) } - // stash the cmd - cmd := b.runConfig.Cmd - if len(b.runConfig.Entrypoint) == 0 && len(b.runConfig.Cmd) == 0 { - b.runConfig.Cmd = config.Cmd - } - - // stash the config environment - env := b.runConfig.Env - - defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) - defer func(env []string) { b.runConfig.Env = env }(env) - - cmdBuildEnv := b.buildArgsWithoutConfigEnv() - - // derive the command to use for probeCache() and to commit in this container. 
- // Note that we only do this if there are any build-time env vars. Also, we - // use the special argument "|#" at the start of the args array. This will - // avoid conflicts with any RUN command since commands can not - // start with | (vertical bar). The "#" (number of build envs) is there to - // help ensure proper cache matches. We don't want a RUN command - // that starts with "foo=abc" to be considered part of a build-time env var. - saveCmd := config.Cmd - if len(cmdBuildEnv) > 0 { - saveCmd = prependEnvOnCmd(b.buildArgs, cmdBuildEnv, saveCmd) - } - - b.runConfig.Cmd = saveCmd - hit, err := b.probeCache() - if err != nil { + runConfigForCacheProbe := copyRunConfig(stateRunConfig, + withCmd(saveCmd), + withEntrypointOverride(saveCmd, nil)) + hit, err := req.builder.probeCache(req.state, runConfigForCacheProbe) + if err != nil || hit { return err } - if hit { - return nil - } - // set Cmd manually, this is special case only for Dockerfiles - b.runConfig.Cmd = config.Cmd - // set build-time environment for 'run'. - b.runConfig.Env = append(b.runConfig.Env, cmdBuildEnv...) 
+ runConfig := copyRunConfig(stateRunConfig, + withCmd(cmdFromArgs), + withEnv(append(stateRunConfig.Env, buildArgs...)), + withEntrypointOverride(saveCmd, strslice.StrSlice{""})) + // set config as already being escaped, this prevents double escaping on windows - b.runConfig.ArgsEscaped = true + runConfig.ArgsEscaped = true - logrus.Debugf("[BUILDER] Command to be executed: %v", b.runConfig.Cmd) - - cID, err := b.create() + logrus.Debugf("[BUILDER] Command to be executed: %v", runConfig.Cmd) + cID, err := req.builder.create(runConfig) if err != nil { return err } - - if err := b.run(cID); err != nil { + if err := req.builder.containerManager.Run(req.builder.clientCtx, cID, req.builder.Stdout, req.builder.Stderr); err != nil { + if err, ok := err.(*statusCodeError); ok { + // TODO: change error type, because jsonmessage.JSONError assumes HTTP + return &jsonmessage.JSONError{ + Message: fmt.Sprintf( + "The command '%s' returned a non-zero code: %d", + strings.Join(runConfig.Cmd, " "), err.StatusCode()), + Code: err.StatusCode(), + } + } return err } - // revert to original config environment and set the command string to - // have the build-time env vars in it (if any) so that future cache look-ups - // properly match it. - b.runConfig.Env = env - - b.runConfig.Cmd = saveCmd - return b.commit(cID, cmd, "run") + return req.builder.commitContainer(req.state, cID, runConfigForCacheProbe) } +// Derive the command to use for probeCache() and to commit in this container. +// Note that we only do this if there are any build-time env vars. Also, we +// use the special argument "|#" at the start of the args array. This will +// avoid conflicts with any RUN command since commands can not +// start with | (vertical bar). The "#" (number of build envs) is there to +// help ensure proper cache matches. We don't want a RUN command +// that starts with "foo=abc" to be considered part of a build-time env var. 
+// +// remove any unreferenced built-in args from the environment variables. +// These args are transparent so resulting image should be the same regardless +// of the value. func prependEnvOnCmd(buildArgs *buildArgs, buildArgVars []string, cmd strslice.StrSlice) strslice.StrSlice { var tmpBuildEnv []string for _, env := range buildArgVars { key := strings.SplitN(env, "=", 2)[0] - if !buildArgs.IsUnreferencedBuiltin(key) { + if buildArgs.IsReferencedOrNotBuiltin(key) { tmpBuildEnv = append(tmpBuildEnv, env) } } @@ -466,34 +514,34 @@ func prependEnvOnCmd(buildArgs *buildArgs, buildArgVars []string, cmd strslice.S // Set the default command to run in the container (which may be empty). // Argument handling is the same as RUN. // -func cmd(b *Builder, args []string, attributes map[string]bool, original string) error { - if err := b.flags.Parse(); err != nil { +func cmd(req dispatchRequest) error { + if err := req.flags.Parse(); err != nil { return err } - cmdSlice := handleJSONArgs(args, attributes) - - if !attributes["json"] { - cmdSlice = append(getShell(b.runConfig), cmdSlice...) + runConfig := req.state.runConfig + cmdSlice := handleJSONArgs(req.args, req.attributes) + if !req.attributes["json"] { + cmdSlice = append(getShell(runConfig, req.builder.platform), cmdSlice...) } - b.runConfig.Cmd = strslice.StrSlice(cmdSlice) + runConfig.Cmd = strslice.StrSlice(cmdSlice) // set config as already being escaped, this prevents double escaping on windows - b.runConfig.ArgsEscaped = true + runConfig.ArgsEscaped = true - if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil { + if err := req.builder.commit(req.state, fmt.Sprintf("CMD %q", cmdSlice)); err != nil { return err } - if len(args) != 0 { - b.cmdSet = true + if len(req.args) != 0 { + req.state.cmdSet = true } return nil } // parseOptInterval(flag) is the duration of flag.Value, or 0 if -// empty. An error is reported if the value is given and less than 1 second. +// empty. 
An error is reported if the value is given and less than minimum duration. func parseOptInterval(f *Flag) (time.Duration, error) { s := f.Value if s == "" { @@ -503,8 +551,8 @@ func parseOptInterval(f *Flag) (time.Duration, error) { if err != nil { return 0, err } - if d < time.Duration(time.Second) { - return 0, fmt.Errorf("Interval %#v cannot be less than 1 second", f.name) + if d < time.Duration(container.MinimumDuration) { + return 0, fmt.Errorf("Interval %#v cannot be less than %s", f.name, container.MinimumDuration) } return d, nil } @@ -514,47 +562,48 @@ func parseOptInterval(f *Flag) (time.Duration, error) { // Set the default healthcheck command to run in the container (which may be empty). // Argument handling is the same as RUN. // -func healthcheck(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) == 0 { +func healthcheck(req dispatchRequest) error { + if len(req.args) == 0 { return errAtLeastOneArgument("HEALTHCHECK") } - typ := strings.ToUpper(args[0]) - args = args[1:] + runConfig := req.state.runConfig + typ := strings.ToUpper(req.args[0]) + args := req.args[1:] if typ == "NONE" { if len(args) != 0 { return errors.New("HEALTHCHECK NONE takes no arguments") } test := strslice.StrSlice{typ} - b.runConfig.Healthcheck = &container.HealthConfig{ + runConfig.Healthcheck = &container.HealthConfig{ Test: test, } } else { - if b.runConfig.Healthcheck != nil { - oldCmd := b.runConfig.Healthcheck.Test + if runConfig.Healthcheck != nil { + oldCmd := runConfig.Healthcheck.Test if len(oldCmd) > 0 && oldCmd[0] != "NONE" { - fmt.Fprintf(b.Stdout, "Note: overriding previous HEALTHCHECK: %v\n", oldCmd) + fmt.Fprintf(req.builder.Stdout, "Note: overriding previous HEALTHCHECK: %v\n", oldCmd) } } healthcheck := container.HealthConfig{} - flInterval := b.flags.AddString("interval", "") - flTimeout := b.flags.AddString("timeout", "") - flStartPeriod := b.flags.AddString("start-period", "") - flRetries := 
b.flags.AddString("retries", "") + flInterval := req.flags.AddString("interval", "") + flTimeout := req.flags.AddString("timeout", "") + flStartPeriod := req.flags.AddString("start-period", "") + flRetries := req.flags.AddString("retries", "") - if err := b.flags.Parse(); err != nil { + if err := req.flags.Parse(); err != nil { return err } switch typ { case "CMD": - cmdSlice := handleJSONArgs(args, attributes) + cmdSlice := handleJSONArgs(args, req.attributes) if len(cmdSlice) == 0 { return errors.New("Missing command after HEALTHCHECK CMD") } - if !attributes["json"] { + if !req.attributes["json"] { typ = "CMD-SHELL" } @@ -594,10 +643,10 @@ func healthcheck(b *Builder, args []string, attributes map[string]bool, original healthcheck.Retries = 0 } - b.runConfig.Healthcheck = &healthcheck + runConfig.Healthcheck = &healthcheck } - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("HEALTHCHECK %q", b.runConfig.Healthcheck)) + return req.builder.commit(req.state, fmt.Sprintf("HEALTHCHECK %q", runConfig.Healthcheck)) } // ENTRYPOINT /usr/sbin/nginx @@ -605,59 +654,57 @@ func healthcheck(b *Builder, args []string, attributes map[string]bool, original // Set the entrypoint to /usr/sbin/nginx. Will accept the CMD as the arguments // to /usr/sbin/nginx. Uses the default shell if not in JSON format. // -// Handles command processing similar to CMD and RUN, only b.runConfig.Entrypoint -// is initialized at NewBuilder time instead of through argument parsing. +// Handles command processing similar to CMD and RUN, only req.runConfig.Entrypoint +// is initialized at newBuilder time instead of through argument parsing. 
// -func entrypoint(b *Builder, args []string, attributes map[string]bool, original string) error { - if err := b.flags.Parse(); err != nil { +func entrypoint(req dispatchRequest) error { + if err := req.flags.Parse(); err != nil { return err } - parsed := handleJSONArgs(args, attributes) + runConfig := req.state.runConfig + parsed := handleJSONArgs(req.args, req.attributes) switch { - case attributes["json"]: + case req.attributes["json"]: // ENTRYPOINT ["echo", "hi"] - b.runConfig.Entrypoint = strslice.StrSlice(parsed) + runConfig.Entrypoint = strslice.StrSlice(parsed) case len(parsed) == 0: // ENTRYPOINT [] - b.runConfig.Entrypoint = nil + runConfig.Entrypoint = nil default: // ENTRYPOINT echo hi - b.runConfig.Entrypoint = strslice.StrSlice(append(getShell(b.runConfig), parsed[0])) + runConfig.Entrypoint = strslice.StrSlice(append(getShell(runConfig, req.builder.platform), parsed[0])) } // when setting the entrypoint if a CMD was not explicitly set then // set the command to nil - if !b.cmdSet { - b.runConfig.Cmd = nil + if !req.state.cmdSet { + runConfig.Cmd = nil } - if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.runConfig.Entrypoint)); err != nil { - return err - } - - return nil + return req.builder.commit(req.state, fmt.Sprintf("ENTRYPOINT %q", runConfig.Entrypoint)) } // EXPOSE 6667/tcp 7000/tcp // // Expose ports for links and port mappings. This all ends up in -// b.runConfig.ExposedPorts for runconfig. +// req.runConfig.ExposedPorts for runconfig. 
// -func expose(b *Builder, args []string, attributes map[string]bool, original string) error { - portsTab := args +func expose(req dispatchRequest) error { + portsTab := req.args - if len(args) == 0 { + if len(req.args) == 0 { return errAtLeastOneArgument("EXPOSE") } - if err := b.flags.Parse(); err != nil { + if err := req.flags.Parse(); err != nil { return err } - if b.runConfig.ExposedPorts == nil { - b.runConfig.ExposedPorts = make(nat.PortSet) + runConfig := req.state.runConfig + if runConfig.ExposedPorts == nil { + runConfig.ExposedPorts = make(nat.PortSet) } ports, _, err := nat.ParsePortSpecs(portsTab) @@ -671,14 +718,14 @@ func expose(b *Builder, args []string, attributes map[string]bool, original stri portList := make([]string, len(ports)) var i int for port := range ports { - if _, exists := b.runConfig.ExposedPorts[port]; !exists { - b.runConfig.ExposedPorts[port] = struct{}{} + if _, exists := runConfig.ExposedPorts[port]; !exists { + runConfig.ExposedPorts[port] = struct{}{} } portList[i] = string(port) i++ } sort.Strings(portList) - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " "))) + return req.builder.commit(req.state, "EXPOSE "+strings.Join(portList, " ")) } // USER foo @@ -686,64 +733,62 @@ func expose(b *Builder, args []string, attributes map[string]bool, original stri // Set the user to 'foo' for future commands and when running the // ENTRYPOINT/CMD at container run time. 
// -func user(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 1 { +func user(req dispatchRequest) error { + if len(req.args) != 1 { return errExactlyOneArgument("USER") } - if err := b.flags.Parse(); err != nil { + if err := req.flags.Parse(); err != nil { return err } - b.runConfig.User = args[0] - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("USER %v", args)) + req.state.runConfig.User = req.args[0] + return req.builder.commit(req.state, fmt.Sprintf("USER %v", req.args)) } // VOLUME /foo // // Expose the volume /foo for use. Will also accept the JSON array form. // -func volume(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) == 0 { +func volume(req dispatchRequest) error { + if len(req.args) == 0 { return errAtLeastOneArgument("VOLUME") } - if err := b.flags.Parse(); err != nil { + if err := req.flags.Parse(); err != nil { return err } - if b.runConfig.Volumes == nil { - b.runConfig.Volumes = map[string]struct{}{} + runConfig := req.state.runConfig + if runConfig.Volumes == nil { + runConfig.Volumes = map[string]struct{}{} } - for _, v := range args { + for _, v := range req.args { v = strings.TrimSpace(v) if v == "" { return errors.New("VOLUME specified can not be an empty string") } - b.runConfig.Volumes[v] = struct{}{} + runConfig.Volumes[v] = struct{}{} } - if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil { - return err - } - return nil + return req.builder.commit(req.state, fmt.Sprintf("VOLUME %v", req.args)) } // STOPSIGNAL signal // // Set the signal that will be used to kill the container. 
-func stopSignal(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 1 { +func stopSignal(req dispatchRequest) error { + if len(req.args) != 1 { return errExactlyOneArgument("STOPSIGNAL") } - sig := args[0] + sig := req.args[0] _, err := signal.ParseSignal(sig) if err != nil { return err } - b.runConfig.StopSignal = sig - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("STOPSIGNAL %v", args)) + req.state.runConfig.StopSignal = sig + return req.builder.commit(req.state, fmt.Sprintf("STOPSIGNAL %v", req.args)) } // ARG name[=value] @@ -751,8 +796,8 @@ func stopSignal(b *Builder, args []string, attributes map[string]bool, original // Adds the variable foo to the trusted list of variables that can be passed // to builder using the --build-arg flag for expansion/substitution or passing to 'run'. // Dockerfile author may optionally set a default value of this variable. -func arg(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 1 { +func arg(req dispatchRequest) error { + if len(req.args) != 1 { return errExactlyOneArgument("ARG") } @@ -762,7 +807,7 @@ func arg(b *Builder, args []string, attributes map[string]bool, original string) hasDefault bool ) - arg := args[0] + arg := req.args[0] // 'arg' can just be a name or name-value pair. Note that this is different // from 'env' that handles the split of name and value at the parser level. 
// The reason for doing it differently for 'arg' is that we support just @@ -786,36 +831,36 @@ func arg(b *Builder, args []string, attributes map[string]bool, original string) if hasDefault { value = &newValue } - b.buildArgs.AddArg(name, value) + req.builder.buildArgs.AddArg(name, value) // Arg before FROM doesn't add a layer - if !b.hasFromImage() { - b.buildArgs.AddMetaArg(name, value) + if !req.state.hasFromImage() { + req.builder.buildArgs.AddMetaArg(name, value) return nil } - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ARG %s", arg)) + return req.builder.commit(req.state, "ARG "+arg) } // SHELL powershell -command // // Set the non-default shell to use. -func shell(b *Builder, args []string, attributes map[string]bool, original string) error { - if err := b.flags.Parse(); err != nil { +func shell(req dispatchRequest) error { + if err := req.flags.Parse(); err != nil { return err } - shellSlice := handleJSONArgs(args, attributes) + shellSlice := handleJSONArgs(req.args, req.attributes) switch { case len(shellSlice) == 0: // SHELL [] return errAtLeastOneArgument("SHELL") - case attributes["json"]: + case req.attributes["json"]: // SHELL ["powershell", "-command"] - b.runConfig.Shell = strslice.StrSlice(shellSlice) + req.state.runConfig.Shell = strslice.StrSlice(shellSlice) default: // SHELL powershell -command - not JSON - return errNotJSON("SHELL", original) + return errNotJSON("SHELL", req.original) } - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("SHELL %v", shellSlice)) + return req.builder.commit(req.state, fmt.Sprintf("SHELL %v", shellSlice)) } func errAtLeastOneArgument(command string) error { @@ -837,42 +882,3 @@ func errBlankCommandNames(command string) error { func errTooManyArguments(command string) error { return fmt.Errorf("Bad input to %s, too many arguments", command) } - -// getShell is a helper function which gets the right shell for prefixing the -// shell-form of RUN, ENTRYPOINT and CMD instructions -func getShell(c 
*container.Config) []string { - if 0 == len(c.Shell) { - return append([]string{}, defaultShell[:]...) - } - return append([]string{}, c.Shell[:]...) -} - -// mountByRef creates an imageMount from a reference. pulling the image if needed. -func mountByRef(b *Builder, name string) (*imageMount, error) { - image, err := pullOrGetImage(b, name) - if err != nil { - return nil, err - } - im, err := b.imageContexts.new("", false) - if err != nil { - return nil, err - } - im.id = image.ImageID() - return im, nil -} - -func pullOrGetImage(b *Builder, name string) (builder.Image, error) { - var image builder.Image - if !b.options.PullParent { - image, _ = b.docker.GetImageOnBuild(name) - // TODO: shouldn't we error out if error is different from "not found" ? - } - if image == nil { - var err error - image, err = b.docker.PullOnBuild(b.clientCtx, name, b.options.AuthConfigs, b.Output) - if err != nil { - return nil, err - } - } - return image, nil -} diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_test.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_test.go index 77506d712..b3672fce1 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_test.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_test.go @@ -3,15 +3,22 @@ package dockerfile import ( "fmt" "runtime" - "strings" "testing" + "bytes" + "context" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/strslice" "github.com/docker/docker/builder" - "github.com/docker/docker/pkg/testutil/assert" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/testutil" "github.com/docker/go-connections/nat" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) type commandWithFunction struct { @@ -19,155 +26,153 @@ type 
commandWithFunction struct { function func(args []string) error } +func withArgs(f dispatcher) func([]string) error { + return func(args []string) error { + return f(dispatchRequest{args: args}) + } +} + +func withBuilderAndArgs(builder *Builder, f dispatcher) func([]string) error { + return func(args []string) error { + return f(defaultDispatchReq(builder, args...)) + } +} + +func defaultDispatchReq(builder *Builder, args ...string) dispatchRequest { + return dispatchRequest{ + builder: builder, + args: args, + flags: NewBFlags(), + shlex: NewShellLex(parser.DefaultEscapeToken), + state: &dispatchState{runConfig: &container.Config{}}, + } +} + +func newBuilderWithMockBackend() *Builder { + mockBackend := &MockBackend{} + ctx := context.Background() + b := &Builder{ + options: &types.ImageBuildOptions{}, + docker: mockBackend, + buildArgs: newBuildArgs(make(map[string]*string)), + Stdout: new(bytes.Buffer), + clientCtx: ctx, + disableCommit: true, + imageSources: newImageSources(ctx, builderOptions{ + Options: &types.ImageBuildOptions{}, + Backend: mockBackend, + }), + buildStages: newBuildStages(), + imageProber: newImageProber(mockBackend, nil, runtime.GOOS, false), + containerManager: newContainerManager(mockBackend), + } + return b +} + func TestCommandsExactlyOneArgument(t *testing.T) { commands := []commandWithFunction{ - {"MAINTAINER", func(args []string) error { return maintainer(nil, args, nil, "") }}, - {"FROM", func(args []string) error { return from(nil, args, nil, "") }}, - {"WORKDIR", func(args []string) error { return workdir(nil, args, nil, "") }}, - {"USER", func(args []string) error { return user(nil, args, nil, "") }}, - {"STOPSIGNAL", func(args []string) error { return stopSignal(nil, args, nil, "") }}} + {"MAINTAINER", withArgs(maintainer)}, + {"WORKDIR", withArgs(workdir)}, + {"USER", withArgs(user)}, + {"STOPSIGNAL", withArgs(stopSignal)}, + } for _, command := range commands { err := command.function([]string{}) - - if err == nil { - 
t.Fatalf("Error should be present for %s command", command.name) - } - - expectedError := errExactlyOneArgument(command.name) - - if err.Error() != expectedError.Error() { - t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError) - } + assert.EqualError(t, err, errExactlyOneArgument(command.name).Error()) } } func TestCommandsAtLeastOneArgument(t *testing.T) { commands := []commandWithFunction{ - {"ENV", func(args []string) error { return env(nil, args, nil, "") }}, - {"LABEL", func(args []string) error { return label(nil, args, nil, "") }}, - {"ONBUILD", func(args []string) error { return onbuild(nil, args, nil, "") }}, - {"HEALTHCHECK", func(args []string) error { return healthcheck(nil, args, nil, "") }}, - {"EXPOSE", func(args []string) error { return expose(nil, args, nil, "") }}, - {"VOLUME", func(args []string) error { return volume(nil, args, nil, "") }}} + {"ENV", withArgs(env)}, + {"LABEL", withArgs(label)}, + {"ONBUILD", withArgs(onbuild)}, + {"HEALTHCHECK", withArgs(healthcheck)}, + {"EXPOSE", withArgs(expose)}, + {"VOLUME", withArgs(volume)}, + } for _, command := range commands { err := command.function([]string{}) - - if err == nil { - t.Fatalf("Error should be present for %s command", command.name) - } - - expectedError := errAtLeastOneArgument(command.name) - - if err.Error() != expectedError.Error() { - t.Fatalf("Wrong error message for %s. Got: %s. 
Should be: %s", command.name, err.Error(), expectedError) - } + assert.EqualError(t, err, errAtLeastOneArgument(command.name).Error()) } } func TestCommandsAtLeastTwoArguments(t *testing.T) { commands := []commandWithFunction{ - {"ADD", func(args []string) error { return add(nil, args, nil, "") }}, - {"COPY", func(args []string) error { return dispatchCopy(nil, args, nil, "") }}} + {"ADD", withArgs(add)}, + {"COPY", withArgs(dispatchCopy)}} for _, command := range commands { err := command.function([]string{"arg1"}) - - if err == nil { - t.Fatalf("Error should be present for %s command", command.name) - } - - expectedError := errAtLeastTwoArguments(command.name) - - if err.Error() != expectedError.Error() { - t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError) - } + assert.EqualError(t, err, errAtLeastTwoArguments(command.name).Error()) } } func TestCommandsTooManyArguments(t *testing.T) { commands := []commandWithFunction{ - {"ENV", func(args []string) error { return env(nil, args, nil, "") }}, - {"LABEL", func(args []string) error { return label(nil, args, nil, "") }}} + {"ENV", withArgs(env)}, + {"LABEL", withArgs(label)}} for _, command := range commands { err := command.function([]string{"arg1", "arg2", "arg3"}) - - if err == nil { - t.Fatalf("Error should be present for %s command", command.name) - } - - expectedError := errTooManyArguments(command.name) - - if err.Error() != expectedError.Error() { - t.Fatalf("Wrong error message for %s. Got: %s. 
Should be: %s", command.name, err.Error(), expectedError) - } + assert.EqualError(t, err, errTooManyArguments(command.name).Error()) } } -func TestCommandseBlankNames(t *testing.T) { - bflags := &BFlags{} - config := &container.Config{} - - b := &Builder{flags: bflags, runConfig: config, disableCommit: true} - +func TestCommandsBlankNames(t *testing.T) { + builder := newBuilderWithMockBackend() commands := []commandWithFunction{ - {"ENV", func(args []string) error { return env(b, args, nil, "") }}, - {"LABEL", func(args []string) error { return label(b, args, nil, "") }}, + {"ENV", withBuilderAndArgs(builder, env)}, + {"LABEL", withBuilderAndArgs(builder, label)}, } for _, command := range commands { err := command.function([]string{"", ""}) - - if err == nil { - t.Fatalf("Error should be present for %s command", command.name) - } - - expectedError := errBlankCommandNames(command.name) - - if err.Error() != expectedError.Error() { - t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError) - } + assert.EqualError(t, err, errBlankCommandNames(command.name).Error()) } } func TestEnv2Variables(t *testing.T) { - variables := []string{"var1", "val1", "var2", "val2"} + b := newBuilderWithMockBackend() - bflags := &BFlags{} - config := &container.Config{} + args := []string{"var1", "val1", "var2", "val2"} + req := defaultDispatchReq(b, args...) 
+ err := env(req) + require.NoError(t, err) - b := &Builder{flags: bflags, runConfig: config, disableCommit: true} - - if err := env(b, variables, nil, ""); err != nil { - t.Fatalf("Error when executing env: %s", err.Error()) + expected := []string{ + fmt.Sprintf("%s=%s", args[0], args[1]), + fmt.Sprintf("%s=%s", args[2], args[3]), } + assert.Equal(t, expected, req.state.runConfig.Env) +} - expectedVar1 := fmt.Sprintf("%s=%s", variables[0], variables[1]) - expectedVar2 := fmt.Sprintf("%s=%s", variables[2], variables[3]) +func TestEnvValueWithExistingRunConfigEnv(t *testing.T) { + b := newBuilderWithMockBackend() - if b.runConfig.Env[0] != expectedVar1 { - t.Fatalf("Wrong env output for first variable. Got: %s. Should be: %s", b.runConfig.Env[0], expectedVar1) - } - - if b.runConfig.Env[1] != expectedVar2 { - t.Fatalf("Wrong env output for second variable. Got: %s, Should be: %s", b.runConfig.Env[1], expectedVar2) + args := []string{"var1", "val1"} + req := defaultDispatchReq(b, args...) + req.state.runConfig.Env = []string{"var1=old", "var2=fromenv"} + err := env(req) + require.NoError(t, err) + + expected := []string{ + fmt.Sprintf("%s=%s", args[0], args[1]), + "var2=fromenv", } + assert.Equal(t, expected, req.state.runConfig.Env) } func TestMaintainer(t *testing.T) { maintainerEntry := "Some Maintainer " - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} - - if err := maintainer(b, []string{maintainerEntry}, nil, ""); err != nil { - t.Fatalf("Error when executing maintainer: %s", err.Error()) - } - - if b.maintainer != maintainerEntry { - t.Fatalf("Maintainer in builder should be set to %s. 
Got: %s", maintainerEntry, b.maintainer) - } + b := newBuilderWithMockBackend() + req := defaultDispatchReq(b, maintainerEntry) + err := maintainer(req) + require.NoError(t, err) + assert.Equal(t, maintainerEntry, req.state.maintainer) } func TestLabel(t *testing.T) { @@ -175,83 +180,87 @@ func TestLabel(t *testing.T) { labelValue := "value" labelEntry := []string{labelName, labelValue} + b := newBuilderWithMockBackend() + req := defaultDispatchReq(b, labelEntry...) + err := label(req) + require.NoError(t, err) - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} - - if err := label(b, labelEntry, nil, ""); err != nil { - t.Fatalf("Error when executing label: %s", err.Error()) - } - - if val, ok := b.runConfig.Labels[labelName]; ok { - if val != labelValue { - t.Fatalf("Label %s should have value %s, had %s instead", labelName, labelValue, val) - } - } else { - t.Fatalf("Label %s should be present but it is not", labelName) - } -} - -func newBuilderWithMockBackend() *Builder { - b := &Builder{ - flags: &BFlags{}, - runConfig: &container.Config{}, - options: &types.ImageBuildOptions{}, - docker: &MockBackend{}, - buildArgs: newBuildArgs(make(map[string]*string)), - } - b.imageContexts = &imageContexts{b: b} - return b + require.Contains(t, req.state.runConfig.Labels, labelName) + assert.Equal(t, req.state.runConfig.Labels[labelName], labelValue) } func TestFromScratch(t *testing.T) { b := newBuilderWithMockBackend() + req := defaultDispatchReq(b, "scratch") + err := from(req) - err := from(b, []string{"scratch"}, nil, "") - - if runtime.GOOS == "windows" { - assert.Error(t, err, "Windows does not support FROM scratch") + if runtime.GOOS == "windows" && !system.LCOWSupported() { + assert.EqualError(t, err, "Windows does not support FROM scratch") return } - assert.NilError(t, err) - assert.Equal(t, b.image, "") - assert.Equal(t, b.noBaseImage, true) + require.NoError(t, err) + assert.True(t, req.state.hasFromImage()) + 
assert.Equal(t, "", req.state.imageID) + // Windows does not set the default path. TODO @jhowardmsft LCOW support. This will need revisiting as we get further into the implementation + expected := "PATH=" + system.DefaultPathEnv(runtime.GOOS) + if runtime.GOOS == "windows" { + expected = "" + } + assert.Equal(t, []string{expected}, req.state.runConfig.Env) } func TestFromWithArg(t *testing.T) { tag, expected := ":sometag", "expectedthisid" - getImage := func(name string) (builder.Image, error) { - assert.Equal(t, name, "alpine"+tag) - return &mockImage{id: "expectedthisid"}, nil + getImage := func(name string) (builder.Image, builder.ReleaseableLayer, error) { + assert.Equal(t, "alpine"+tag, name) + return &mockImage{id: "expectedthisid"}, nil, nil } b := newBuilderWithMockBackend() - b.docker.(*MockBackend).getImageOnBuildFunc = getImage + b.docker.(*MockBackend).getImageFunc = getImage - assert.NilError(t, arg(b, []string{"THETAG=" + tag}, nil, "")) - err := from(b, []string{"alpine${THETAG}"}, nil, "") + require.NoError(t, arg(defaultDispatchReq(b, "THETAG="+tag))) + req := defaultDispatchReq(b, "alpine${THETAG}") + err := from(req) - assert.NilError(t, err) - assert.Equal(t, b.image, expected) - assert.Equal(t, b.from.ImageID(), expected) - assert.Equal(t, len(b.buildArgs.GetAllAllowed()), 0) - assert.Equal(t, len(b.buildArgs.GetAllMeta()), 1) + require.NoError(t, err) + assert.Equal(t, expected, req.state.imageID) + assert.Equal(t, expected, req.state.baseImage.ImageID()) + assert.Len(t, b.buildArgs.GetAllAllowed(), 0) + assert.Len(t, b.buildArgs.GetAllMeta(), 1) } func TestFromWithUndefinedArg(t *testing.T) { tag, expected := "sometag", "expectedthisid" - getImage := func(name string) (builder.Image, error) { - assert.Equal(t, name, "alpine") - return &mockImage{id: "expectedthisid"}, nil + getImage := func(name string) (builder.Image, builder.ReleaseableLayer, error) { + assert.Equal(t, "alpine", name) + return &mockImage{id: "expectedthisid"}, nil, nil } b 
:= newBuilderWithMockBackend() - b.docker.(*MockBackend).getImageOnBuildFunc = getImage + b.docker.(*MockBackend).getImageFunc = getImage b.options.BuildArgs = map[string]*string{"THETAG": &tag} - err := from(b, []string{"alpine${THETAG}"}, nil, "") - assert.NilError(t, err) - assert.Equal(t, b.image, expected) + req := defaultDispatchReq(b, "alpine${THETAG}") + err := from(req) + require.NoError(t, err) + assert.Equal(t, expected, req.state.imageID) +} + +func TestFromMultiStageWithScratchNamedStage(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Windows does not support scratch") + } + b := newBuilderWithMockBackend() + req := defaultDispatchReq(b, "scratch", "AS", "base") + + require.NoError(t, from(req)) + assert.True(t, req.state.hasFromImage()) + + req.args = []string{"base"} + require.NoError(t, from(req)) + assert.True(t, req.state.hasFromImage()) } func TestOnbuildIllegalTriggers(t *testing.T) { @@ -261,237 +270,147 @@ func TestOnbuildIllegalTriggers(t *testing.T) { {"FROM", "FROM isn't allowed as an ONBUILD trigger"}} for _, trigger := range triggers { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + b := newBuilderWithMockBackend() - err := onbuild(b, []string{trigger.command}, nil, "") - - if err == nil { - t.Fatal("Error should not be nil") - } - - if !strings.Contains(err.Error(), trigger.expectedError) { - t.Fatalf("Error message not correct. Should be: %s, got: %s", trigger.expectedError, err.Error()) - } + err := onbuild(defaultDispatchReq(b, trigger.command)) + testutil.ErrorContains(t, err, trigger.expectedError) } } func TestOnbuild(t *testing.T) { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + b := newBuilderWithMockBackend() - err := onbuild(b, []string{"ADD", ".", "/app/src"}, nil, "ONBUILD ADD . /app/src") + req := defaultDispatchReq(b, "ADD", ".", "/app/src") + req.original = "ONBUILD ADD . 
/app/src" + req.state.runConfig = &container.Config{} - if err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) - } - - expectedOnbuild := "ADD . /app/src" - - if b.runConfig.OnBuild[0] != expectedOnbuild { - t.Fatalf("Wrong ONBUILD command. Expected: %s, got: %s", expectedOnbuild, b.runConfig.OnBuild[0]) - } + err := onbuild(req) + require.NoError(t, err) + assert.Equal(t, "ADD . /app/src", req.state.runConfig.OnBuild[0]) } func TestWorkdir(t *testing.T) { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} - + b := newBuilderWithMockBackend() workingDir := "/app" - if runtime.GOOS == "windows" { workingDir = "C:\app" } - err := workdir(b, []string{workingDir}, nil, "") - - if err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) - } - - if b.runConfig.WorkingDir != workingDir { - t.Fatalf("WorkingDir should be set to %s, got %s", workingDir, b.runConfig.WorkingDir) - } - + req := defaultDispatchReq(b, workingDir) + err := workdir(req) + require.NoError(t, err) + assert.Equal(t, workingDir, req.state.runConfig.WorkingDir) } func TestCmd(t *testing.T) { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} - + b := newBuilderWithMockBackend() command := "./executable" - err := cmd(b, []string{command}, nil, "") - - if err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) - } + req := defaultDispatchReq(b, command) + err := cmd(req) + require.NoError(t, err) var expectedCommand strslice.StrSlice - if runtime.GOOS == "windows" { expectedCommand = strslice.StrSlice(append([]string{"cmd"}, "/S", "/C", command)) } else { expectedCommand = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", command)) } - if !compareStrSlice(b.runConfig.Cmd, expectedCommand) { - t.Fatalf("Command should be set to %s, got %s", command, b.runConfig.Cmd) - } - - if !b.cmdSet { - t.Fatal("Command should be marked as set") - } -} - -func compareStrSlice(slice1, slice2 
strslice.StrSlice) bool { - if len(slice1) != len(slice2) { - return false - } - - for i := range slice1 { - if slice1[i] != slice2[i] { - return false - } - } - - return true + assert.Equal(t, expectedCommand, req.state.runConfig.Cmd) + assert.True(t, req.state.cmdSet) } func TestHealthcheckNone(t *testing.T) { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + b := newBuilderWithMockBackend() - if err := healthcheck(b, []string{"NONE"}, nil, ""); err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) - } + req := defaultDispatchReq(b, "NONE") + err := healthcheck(req) + require.NoError(t, err) - if b.runConfig.Healthcheck == nil { - t.Fatal("Healthcheck should be set, got nil") - } - - expectedTest := strslice.StrSlice(append([]string{"NONE"})) - - if !compareStrSlice(expectedTest, b.runConfig.Healthcheck.Test) { - t.Fatalf("Command should be set to %s, got %s", expectedTest, b.runConfig.Healthcheck.Test) - } + require.NotNil(t, req.state.runConfig.Healthcheck) + assert.Equal(t, []string{"NONE"}, req.state.runConfig.Healthcheck.Test) } func TestHealthcheckCmd(t *testing.T) { - b := &Builder{flags: &BFlags{flags: make(map[string]*Flag)}, runConfig: &container.Config{}, disableCommit: true} + b := newBuilderWithMockBackend() - if err := healthcheck(b, []string{"CMD", "curl", "-f", "http://localhost/", "||", "exit", "1"}, nil, ""); err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) - } + args := []string{"CMD", "curl", "-f", "http://localhost/", "||", "exit", "1"} + req := defaultDispatchReq(b, args...) 
+ err := healthcheck(req) + require.NoError(t, err) - if b.runConfig.Healthcheck == nil { - t.Fatal("Healthcheck should be set, got nil") - } - - expectedTest := strslice.StrSlice(append([]string{"CMD-SHELL"}, "curl -f http://localhost/ || exit 1")) - - if !compareStrSlice(expectedTest, b.runConfig.Healthcheck.Test) { - t.Fatalf("Command should be set to %s, got %s", expectedTest, b.runConfig.Healthcheck.Test) - } + require.NotNil(t, req.state.runConfig.Healthcheck) + expectedTest := []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"} + assert.Equal(t, expectedTest, req.state.runConfig.Healthcheck.Test) } func TestEntrypoint(t *testing.T) { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} - + b := newBuilderWithMockBackend() entrypointCmd := "/usr/sbin/nginx" - if err := entrypoint(b, []string{entrypointCmd}, nil, ""); err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) - } - - if b.runConfig.Entrypoint == nil { - t.Fatal("Entrypoint should be set") - } + req := defaultDispatchReq(b, entrypointCmd) + err := entrypoint(req) + require.NoError(t, err) + require.NotNil(t, req.state.runConfig.Entrypoint) var expectedEntrypoint strslice.StrSlice - if runtime.GOOS == "windows" { expectedEntrypoint = strslice.StrSlice(append([]string{"cmd"}, "/S", "/C", entrypointCmd)) } else { expectedEntrypoint = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", entrypointCmd)) } - - if !compareStrSlice(expectedEntrypoint, b.runConfig.Entrypoint) { - t.Fatalf("Entrypoint command should be set to %s, got %s", expectedEntrypoint, b.runConfig.Entrypoint) - } + assert.Equal(t, expectedEntrypoint, req.state.runConfig.Entrypoint) } func TestExpose(t *testing.T) { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + b := newBuilderWithMockBackend() exposedPort := "80" + req := defaultDispatchReq(b, exposedPort) + err := expose(req) + require.NoError(t, err) - if err := expose(b, 
[]string{exposedPort}, nil, ""); err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) - } - - if b.runConfig.ExposedPorts == nil { - t.Fatal("ExposedPorts should be set") - } - - if len(b.runConfig.ExposedPorts) != 1 { - t.Fatalf("ExposedPorts should contain only 1 element. Got %s", b.runConfig.ExposedPorts) - } + require.NotNil(t, req.state.runConfig.ExposedPorts) + require.Len(t, req.state.runConfig.ExposedPorts, 1) portsMapping, err := nat.ParsePortSpec(exposedPort) - - if err != nil { - t.Fatalf("Error when parsing port spec: %s", err.Error()) - } - - if _, ok := b.runConfig.ExposedPorts[portsMapping[0].Port]; !ok { - t.Fatalf("Port %s should be present. Got %s", exposedPort, b.runConfig.ExposedPorts) - } + require.NoError(t, err) + assert.Contains(t, req.state.runConfig.ExposedPorts, portsMapping[0].Port) } func TestUser(t *testing.T) { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} - + b := newBuilderWithMockBackend() userCommand := "foo" - if err := user(b, []string{userCommand}, nil, ""); err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) - } - - if b.runConfig.User != userCommand { - t.Fatalf("User should be set to %s, got %s", userCommand, b.runConfig.User) - } + req := defaultDispatchReq(b, userCommand) + err := user(req) + require.NoError(t, err) + assert.Equal(t, userCommand, req.state.runConfig.User) } func TestVolume(t *testing.T) { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + b := newBuilderWithMockBackend() exposedVolume := "/foo" - if err := volume(b, []string{exposedVolume}, nil, ""); err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) - } + req := defaultDispatchReq(b, exposedVolume) + err := volume(req) + require.NoError(t, err) - if b.runConfig.Volumes == nil { - t.Fatal("Volumes should be set") - } - - if len(b.runConfig.Volumes) != 1 { - t.Fatalf("Volumes should contain only 1 element. 
Got %s", b.runConfig.Volumes) - } - - if _, ok := b.runConfig.Volumes[exposedVolume]; !ok { - t.Fatalf("Volume %s should be present. Got %s", exposedVolume, b.runConfig.Volumes) - } + require.NotNil(t, req.state.runConfig.Volumes) + assert.Len(t, req.state.runConfig.Volumes, 1) + assert.Contains(t, req.state.runConfig.Volumes, exposedVolume) } func TestStopSignal(t *testing.T) { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} - + b := newBuilderWithMockBackend() signal := "SIGKILL" - if err := stopSignal(b, []string{signal}, nil, ""); err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) - } - - if b.runConfig.StopSignal != signal { - t.Fatalf("StopSignal should be set to %s, got %s", signal, b.runConfig.StopSignal) - } + req := defaultDispatchReq(b, signal) + err := stopSignal(req) + require.NoError(t, err) + assert.Equal(t, signal, req.state.runConfig.StopSignal) } func TestArg(t *testing.T) { @@ -501,33 +420,106 @@ func TestArg(t *testing.T) { argVal := "bar" argDef := fmt.Sprintf("%s=%s", argName, argVal) - err := arg(b, []string{argDef}, nil, "") - assert.NilError(t, err) + err := arg(defaultDispatchReq(b, argDef)) + require.NoError(t, err) expected := map[string]string{argName: argVal} - allowed := b.buildArgs.GetAllAllowed() - assert.DeepEqual(t, allowed, expected) + assert.Equal(t, expected, b.buildArgs.GetAllAllowed()) } func TestShell(t *testing.T) { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + b := newBuilderWithMockBackend() shellCmd := "powershell" + req := defaultDispatchReq(b, shellCmd) + req.attributes = map[string]bool{"json": true} - attrs := make(map[string]bool) - attrs["json"] = true - - if err := shell(b, []string{shellCmd}, attrs, ""); err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) - } - - if b.runConfig.Shell == nil { - t.Fatal("Shell should be set") - } + err := shell(req) + require.NoError(t, err) expectedShell := 
strslice.StrSlice([]string{shellCmd}) - - if !compareStrSlice(expectedShell, b.runConfig.Shell) { - t.Fatalf("Shell should be set to %s, got %s", expectedShell, b.runConfig.Shell) - } + assert.Equal(t, expectedShell, req.state.runConfig.Shell) +} + +func TestParseOptInterval(t *testing.T) { + flInterval := &Flag{ + name: "interval", + flagType: stringType, + Value: "50ns", + } + _, err := parseOptInterval(flInterval) + testutil.ErrorContains(t, err, "cannot be less than 1ms") + + flInterval.Value = "1ms" + _, err = parseOptInterval(flInterval) + require.NoError(t, err) +} + +func TestPrependEnvOnCmd(t *testing.T) { + buildArgs := newBuildArgs(nil) + buildArgs.AddArg("NO_PROXY", nil) + + args := []string{"sorted=nope", "args=not", "http_proxy=foo", "NO_PROXY=YA"} + cmd := []string{"foo", "bar"} + cmdWithEnv := prependEnvOnCmd(buildArgs, args, cmd) + expected := strslice.StrSlice([]string{ + "|3", "NO_PROXY=YA", "args=not", "sorted=nope", "foo", "bar"}) + assert.Equal(t, expected, cmdWithEnv) +} + +func TestRunWithBuildArgs(t *testing.T) { + b := newBuilderWithMockBackend() + b.buildArgs.argsFromOptions["HTTP_PROXY"] = strPtr("FOO") + b.disableCommit = false + + runConfig := &container.Config{} + origCmd := strslice.StrSlice([]string{"cmd", "in", "from", "image"}) + cmdWithShell := strslice.StrSlice(append(getShell(runConfig, runtime.GOOS), "echo foo")) + envVars := []string{"|1", "one=two"} + cachedCmd := strslice.StrSlice(append(envVars, cmdWithShell...)) + + imageCache := &mockImageCache{ + getCacheFunc: func(parentID string, cfg *container.Config) (string, error) { + // Check the runConfig.Cmd sent to probeCache() + assert.Equal(t, cachedCmd, cfg.Cmd) + assert.Equal(t, strslice.StrSlice(nil), cfg.Entrypoint) + return "", nil + }, + } + + mockBackend := b.docker.(*MockBackend) + mockBackend.makeImageCacheFunc = func(_ []string, _ string) builder.ImageCache { + return imageCache + } + b.imageProber = newImageProber(mockBackend, nil, runtime.GOOS, false) + 
mockBackend.getImageFunc = func(_ string) (builder.Image, builder.ReleaseableLayer, error) { + return &mockImage{ + id: "abcdef", + config: &container.Config{Cmd: origCmd}, + }, nil, nil + } + mockBackend.containerCreateFunc = func(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) { + // Check the runConfig.Cmd sent to create() + assert.Equal(t, cmdWithShell, config.Config.Cmd) + assert.Contains(t, config.Config.Env, "one=two") + assert.Equal(t, strslice.StrSlice{""}, config.Config.Entrypoint) + return container.ContainerCreateCreatedBody{ID: "12345"}, nil + } + mockBackend.commitFunc = func(cID string, cfg *backend.ContainerCommitConfig) (string, error) { + // Check the runConfig.Cmd sent to commit() + assert.Equal(t, origCmd, cfg.Config.Cmd) + assert.Equal(t, cachedCmd, cfg.ContainerConfig.Cmd) + assert.Equal(t, strslice.StrSlice(nil), cfg.Config.Entrypoint) + return "", nil + } + + req := defaultDispatchReq(b, "abcdef") + require.NoError(t, from(req)) + b.buildArgs.AddArg("one", strPtr("two")) + + req.args = []string{"echo foo"} + require.NoError(t, run(req)) + + // Check that runConfig.Cmd has not been modified by run + assert.Equal(t, origCmd, req.state.runConfig.Cmd) } diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go index 29eb2fb00..62ee371df 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go @@ -26,3 +26,9 @@ func normaliseWorkdir(current string, requested string) (string, error) { func errNotJSON(command, _ string) error { return fmt.Errorf("%s requires the arguments to be in JSON form", command) } + +// equalEnvKeys compare two strings and returns true if they are equal. On +// Windows this comparison is case insensitive. 
+func equalEnvKeys(from, to string) bool { + return from == to +} diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go index 471232f3c..71f7c9288 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go @@ -85,3 +85,9 @@ func errNotJSON(command, original string) error { } return fmt.Errorf("%s requires the arguments to be in JSON form%s", command, extra) } + +// equalEnvKeys compare two strings and returns true if they are equal. On +// Windows this comparison is case insensitive. +func equalEnvKeys(from, to string) bool { + return strings.ToUpper(from) == strings.ToUpper(to) +} diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/envVarTest b/fn/vendor/github.com/docker/docker/builder/dockerfile/envVarTest index 067dca9a5..946b27859 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/envVarTest +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/envVarTest @@ -1,18 +1,21 @@ A|hello | hello A|he'll'o | hello -A|he'llo | hello +A|he'llo | error A|he\'llo | he'llo -A|he\\'llo | he\llo +A|he\\'llo | error A|abc\tdef | abctdef A|"abc\tdef" | abc\tdef +A|"abc\\tdef" | abc\tdef A|'abc\tdef' | abc\tdef A|hello\ | hello A|hello\\ | hello\ -A|"hello | hello -A|"hello\" | hello" +A|"hello | error +A|"hello\" | error A|"hel'lo" | hel'lo -A|'hello | hello +A|'hello | error A|'hello\' | hello\ +A|'hello\there' | hello\there +A|'hello\\there' | hello\\there A|"''" | '' A|$. | $. A|$1 | @@ -24,6 +27,8 @@ W|he$pwd. | he/home. 
A|he$PWD | he/home A|he\$PWD | he$PWD A|he\\$PWD | he\/home +A|"he\$PWD" | he$PWD +A|"he\\$PWD" | he\/home A|he\${} | he${} A|he\${}xx | he${}xx A|he${} | he @@ -60,18 +65,18 @@ A|he${XXX:-\$PWD:}xx | he$PWD:xx A|he${XXX:-\${PWD}z}xx | he${PWDz}xx A|안녕하세요 | 안녕하세요 A|안'녕'하세요 | 안녕하세요 -A|안'녕하세요 | 안녕하세요 +A|안'녕하세요 | error A|안녕\'하세요 | 안녕'하세요 -A|안\\'녕하세요 | 안\녕하세요 +A|안\\'녕하세요 | error A|안녕\t하세요 | 안녕t하세요 A|"안녕\t하세요" | 안녕\t하세요 -A|'안녕\t하세요 | 안녕\t하세요 +A|'안녕\t하세요 | error A|안녕하세요\ | 안녕하세요 A|안녕하세요\\ | 안녕하세요\ -A|"안녕하세요 | 안녕하세요 -A|"안녕하세요\" | 안녕하세요" +A|"안녕하세요 | error +A|"안녕하세요\" | error A|"안녕'하세요" | 안녕'하세요 -A|'안녕하세요 | 안녕하세요 +A|'안녕하세요 | error A|'안녕하세요\' | 안녕하세요\ A|안녕$1x | 안녕x A|안녕$.x | 안녕$.x diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go index d29a18c7b..ba4315940 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go @@ -2,7 +2,7 @@ // // It incorporates a dispatch table based on the parser.Node values (see the // parser package for more information) that are yielded from the parser itself. -// Calling NewBuilder with the BuildOpts struct can be used to customize the +// Calling newBuilder with the BuildOpts struct can be used to customize the // experience for execution purposes only. Parsing is controlled in the parser // package, and this division of responsibility should be respected. 
// @@ -20,13 +20,18 @@ package dockerfile import ( - "errors" + "bytes" "fmt" + "runtime" "strings" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" "github.com/docker/docker/builder/dockerfile/command" "github.com/docker/docker/builder/dockerfile/parser" - runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/runconfig/opts" + "github.com/pkg/errors" ) // Environment variable interpolation will happen on these statements only. @@ -56,10 +61,36 @@ var allowWordExpansion = map[string]bool{ command.Expose: true, } -var evaluateTable map[string]func(*Builder, []string, map[string]bool, string) error +type dispatchRequest struct { + builder *Builder // TODO: replace this with a smaller interface + args []string + attributes map[string]bool + flags *BFlags + original string + shlex *ShellLex + state *dispatchState + source builder.Source +} + +func newDispatchRequestFromOptions(options dispatchOptions, builder *Builder, args []string) dispatchRequest { + return dispatchRequest{ + builder: builder, + args: args, + attributes: options.node.Attributes, + original: options.node.Original, + flags: NewBFlagsWithArgs(options.node.Flags), + shlex: options.shlex, + state: options.state, + source: options.source, + } +} + +type dispatcher func(dispatchRequest) error + +var evaluateTable map[string]dispatcher func init() { - evaluateTable = map[string]func(*Builder, []string, map[string]bool, string) error{ + evaluateTable = map[string]dispatcher{ command.Add: add, command.Arg: arg, command.Cmd: cmd, @@ -81,6 +112,10 @@ func init() { } } +func formatStep(stepN int, stepTotal int) string { + return fmt.Sprintf("%d/%d", stepN+1, stepTotal) +} + // This method is the entrypoint to all statement handling routines. // // Almost all nodes will have this structure: @@ -95,106 +130,169 @@ func init() { // such as `RUN` in ONBUILD RUN foo. 
There is special case logic in here to // deal with that, at least until it becomes more of a general concern with new // features. -func (b *Builder) dispatch(stepN int, stepTotal int, ast *parser.Node) error { - cmd := ast.Value +func (b *Builder) dispatch(options dispatchOptions) (*dispatchState, error) { + node := options.node + cmd := node.Value upperCasedCmd := strings.ToUpper(cmd) // To ensure the user is given a decent error message if the platform // on which the daemon is running does not support a builder command. if err := platformSupports(strings.ToLower(cmd)); err != nil { - return err + buildsFailed.WithValues(metricsCommandNotSupportedError).Inc() + return nil, err } - attrs := ast.Attributes - original := ast.Original - flags := ast.Flags - strList := []string{} - msg := fmt.Sprintf("Step %d/%d : %s", stepN+1, stepTotal, upperCasedCmd) + msg := bytes.NewBufferString(fmt.Sprintf("Step %s : %s%s", + options.stepMsg, upperCasedCmd, formatFlags(node.Flags))) - if len(ast.Flags) > 0 { - msg += " " + strings.Join(ast.Flags, " ") - } - - if cmd == "onbuild" { - if ast.Next == nil { - return errors.New("ONBUILD requires at least one argument") + args := []string{} + ast := node + if cmd == command.Onbuild { + var err error + ast, args, err = handleOnBuildNode(node, msg) + if err != nil { + return nil, err } - ast = ast.Next.Children[0] - strList = append(strList, ast.Value) - msg += " " + ast.Value - - if len(ast.Flags) > 0 { - msg += " " + strings.Join(ast.Flags, " ") - } - } - msgList := initMsgList(ast) - // Append build args to runConfig environment variables - envs := append(b.runConfig.Env, b.buildArgsWithoutConfigEnv()...) + runConfigEnv := options.state.runConfig.Env + envs := append(runConfigEnv, b.buildArgs.FilterAllowed(runConfigEnv)...) 
+ processFunc := createProcessWordFunc(options.shlex, cmd, envs) + words, err := getDispatchArgsFromNode(ast, processFunc, msg) + if err != nil { + buildsFailed.WithValues(metricsErrorProcessingCommandsError).Inc() + return nil, err + } + args = append(args, words...) + fmt.Fprintln(b.Stdout, msg.String()) + + f, ok := evaluateTable[cmd] + if !ok { + buildsFailed.WithValues(metricsUnknownInstructionError).Inc() + return nil, fmt.Errorf("unknown instruction: %s", upperCasedCmd) + } + options.state.updateRunConfig() + err = f(newDispatchRequestFromOptions(options, b, args)) + return options.state, err +} + +type dispatchOptions struct { + state *dispatchState + stepMsg string + node *parser.Node + shlex *ShellLex + source builder.Source +} + +// dispatchState is a data object which is modified by dispatchers +type dispatchState struct { + runConfig *container.Config + maintainer string + cmdSet bool + imageID string + baseImage builder.Image + stageName string +} + +func newDispatchState() *dispatchState { + return &dispatchState{runConfig: &container.Config{}} +} + +func (s *dispatchState) updateRunConfig() { + s.runConfig.Image = s.imageID +} + +// hasFromImage returns true if the builder has processed a `FROM ` line +func (s *dispatchState) hasFromImage() bool { + return s.imageID != "" || (s.baseImage != nil && s.baseImage.ImageID() == "") +} + +func (s *dispatchState) isCurrentStage(target string) bool { + if target == "" { + return false + } + return strings.EqualFold(s.stageName, target) +} + +func (s *dispatchState) beginStage(stageName string, image builder.Image) { + s.stageName = stageName + s.imageID = image.ImageID() + + if image.RunConfig() != nil { + s.runConfig = image.RunConfig() + } else { + s.runConfig = &container.Config{} + } + s.baseImage = image + s.setDefaultPath() +} + +// Add the default PATH to runConfig.ENV if one exists for the platform and there +// is no PATH set. 
Note that Windows containers on Windows won't have one as it's set by HCS +func (s *dispatchState) setDefaultPath() { + // TODO @jhowardmsft LCOW Support - This will need revisiting later + platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + if system.DefaultPathEnv(platform) == "" { + return + } + envMap := opts.ConvertKVStringsToMap(s.runConfig.Env) + if _, ok := envMap["PATH"]; !ok { + s.runConfig.Env = append(s.runConfig.Env, "PATH="+system.DefaultPathEnv(platform)) + } +} + +func handleOnBuildNode(ast *parser.Node, msg *bytes.Buffer) (*parser.Node, []string, error) { + if ast.Next == nil { + return nil, nil, errors.New("ONBUILD requires at least one argument") + } + ast = ast.Next.Children[0] + msg.WriteString(" " + ast.Value + formatFlags(ast.Flags)) + return ast, []string{ast.Value}, nil +} + +func formatFlags(flags []string) string { + if len(flags) > 0 { + return " " + strings.Join(flags, " ") + } + return "" +} + +func getDispatchArgsFromNode(ast *parser.Node, processFunc processWordFunc, msg *bytes.Buffer) ([]string, error) { + args := []string{} for i := 0; ast.Next != nil; i++ { ast = ast.Next - words, err := b.evaluateEnv(cmd, ast.Value, envs) + words, err := processFunc(ast.Value) if err != nil { - return err + return nil, err } - strList = append(strList, words...) - msgList[i] = ast.Value + args = append(args, words...) + msg.WriteString(" " + ast.Value) } - - msg += " " + strings.Join(msgList, " ") - fmt.Fprintln(b.Stdout, msg) - - // XXX yes, we skip any cmds that are not valid; the parser should have - // picked these out already. 
- if f, ok := evaluateTable[cmd]; ok { - b.flags = NewBFlags() - b.flags.Args = flags - return f(b, strList, attrs, original) - } - - return fmt.Errorf("Unknown instruction: %s", upperCasedCmd) + return args, nil } -// count the number of nodes that we are going to traverse first -// allocation of those list a lot when they have a lot of arguments -func initMsgList(cursor *parser.Node) []string { - var n int - for ; cursor.Next != nil; n++ { - cursor = cursor.Next - } - return make([]string, n) -} +type processWordFunc func(string) ([]string, error) -func (b *Builder) evaluateEnv(cmd string, str string, envs []string) ([]string, error) { - if !replaceEnvAllowed[cmd] { - return []string{str}, nil - } - var processFunc func(string, []string, rune) ([]string, error) - if allowWordExpansion[cmd] { - processFunc = ProcessWords - } else { - processFunc = func(word string, envs []string, escape rune) ([]string, error) { - word, err := ProcessWord(word, envs, escape) +func createProcessWordFunc(shlex *ShellLex, cmd string, envs []string) processWordFunc { + switch { + case !replaceEnvAllowed[cmd]: + return func(word string) ([]string, error) { + return []string{word}, nil + } + case allowWordExpansion[cmd]: + return func(word string) ([]string, error) { + return shlex.ProcessWords(word, envs) + } + default: + return func(word string) ([]string, error) { + word, err := shlex.ProcessWord(word, envs) return []string{word}, err } } - return processFunc(str, envs, b.directive.EscapeToken) -} - -// buildArgsWithoutConfigEnv returns a list of key=value pairs for all the build -// args that are not overriden by runConfig environment variables. 
-func (b *Builder) buildArgsWithoutConfigEnv() []string { - envs := []string{} - configEnv := runconfigopts.ConvertKVStringsToMap(b.runConfig.Env) - - for key, val := range b.buildArgs.GetAllAllowed() { - if _, ok := configEnv[key]; !ok { - envs = append(envs, fmt.Sprintf("%s=%s", key, val)) - } - } - return envs } // checkDispatch does a simple check for syntax errors of the Dockerfile. @@ -202,8 +300,7 @@ func (b *Builder) buildArgsWithoutConfigEnv() []string { // arg, env, etc., this syntax check will not be complete and could not replace // the runtime check. Instead, this function is only a helper that allows // user to find out the obvious error in Dockerfile earlier on. -// onbuild bool: indicate if instruction XXX is part of `ONBUILD XXX` trigger -func (b *Builder) checkDispatch(ast *parser.Node, onbuild bool) error { +func checkDispatch(ast *parser.Node) error { cmd := ast.Value upperCasedCmd := strings.ToUpper(cmd) @@ -217,23 +314,14 @@ func (b *Builder) checkDispatch(ast *parser.Node, onbuild bool) error { // least one argument if upperCasedCmd == "ONBUILD" { if ast.Next == nil { + buildsFailed.WithValues(metricsMissingOnbuildArgumentsError).Inc() return errors.New("ONBUILD requires at least one argument") } } - // The instruction is part of ONBUILD trigger (not the instruction itself) - if onbuild { - switch upperCasedCmd { - case "ONBUILD": - return errors.New("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") - case "MAINTAINER", "FROM": - return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", upperCasedCmd) - } - } - if _, ok := evaluateTable[cmd]; ok { return nil } - - return fmt.Errorf("Unknown instruction: %s", upperCasedCmd) + buildsFailed.WithValues(metricsUnknownInstructionError).Inc() + return errors.Errorf("unknown instruction: %s", upperCasedCmd) } diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/evaluator_test.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/evaluator_test.go index 0d4952837..72d7ce10e 
100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/evaluator_test.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/evaluator_test.go @@ -7,8 +7,8 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" - "github.com/docker/docker/builder" "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/builder/remotecontext" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/reexec" ) @@ -105,13 +105,13 @@ func initDispatchTestCases() []dispatchTestCase { { name: "COPY wildcard no files", dockerfile: `COPY file*.txt /tmp/`, - expectedError: "No source files were specified", + expectedError: "COPY failed: no source files were specified", files: nil, }, { name: "COPY url", dockerfile: `COPY https://index.docker.io/robots.txt /`, - expectedError: "Source can't be a URL for COPY", + expectedError: "source can't be a URL for COPY", files: nil, }, { @@ -123,7 +123,7 @@ func initDispatchTestCases() []dispatchTestCase { { name: "Invalid instruction", dockerfile: `foo bar`, - expectedError: "Unknown instruction: FOO", + expectedError: "unknown instruction: FOO", files: nil, }} @@ -158,7 +158,7 @@ func executeTestCase(t *testing.T, testCase dispatchTestCase) { } }() - context, err := builder.MakeTarSumContext(tarStream) + context, err := remotecontext.FromArchive(tarStream) if err != nil { t.Fatalf("Error when creating tar context: %s", err) @@ -171,28 +171,33 @@ func executeTestCase(t *testing.T, testCase dispatchTestCase) { }() r := strings.NewReader(testCase.dockerfile) - d := parser.Directive{} - parser.SetEscapeToken(parser.DefaultEscapeToken, &d) - n, err := parser.Parse(r, &d) + result, err := parser.Parse(r) if err != nil { t.Fatalf("Error when parsing Dockerfile: %s", err) } - config := &container.Config{} options := &types.ImageBuildOptions{ BuildArgs: make(map[string]*string), } b := &Builder{ - runConfig: config, options: options, Stdout: ioutil.Discard, - 
context: context, buildArgs: newBuildArgs(options.BuildArgs), } - err = b.dispatch(0, len(n.Children), n.Children[0]) + shlex := NewShellLex(parser.DefaultEscapeToken) + n := result.AST + state := &dispatchState{runConfig: &container.Config{}} + opts := dispatchOptions{ + state: state, + stepMsg: formatStep(0, len(n.Children)), + node: n.Children[0], + shlex: shlex, + source: context, + } + state, err = b.dispatch(opts) if err == nil { t.Fatalf("No error when executing test %s", testCase.name) diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go index 1b92ced17..64b2572b8 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go @@ -3,85 +3,142 @@ package dockerfile import ( "strconv" "strings" - "sync" "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/backend" "github.com/docker/docker/builder" "github.com/docker/docker/builder/remotecontext" + dockerimage "github.com/docker/docker/image" "github.com/pkg/errors" + "golang.org/x/net/context" ) -// imageContexts is a helper for stacking up built image rootfs and reusing -// them as contexts -type imageContexts struct { - b *Builder - list []*imageMount - byName map[string]*imageMount - cache *pathCache - currentName string +type buildStage struct { + id string } -func (ic *imageContexts) new(name string, increment bool) (*imageMount, error) { - im := &imageMount{ic: ic} - if len(name) > 0 { - if ic.byName == nil { - ic.byName = make(map[string]*imageMount) +func newBuildStage(imageID string) *buildStage { + return &buildStage{id: imageID} +} + +func (b *buildStage) ImageID() string { + return b.id +} + +func (b *buildStage) update(imageID string) { + b.id = imageID +} + +// buildStages tracks each stage of a build so they can be retrieved by index +// 
or by name. +type buildStages struct { + sequence []*buildStage + byName map[string]*buildStage +} + +func newBuildStages() *buildStages { + return &buildStages{byName: make(map[string]*buildStage)} +} + +func (s *buildStages) getByName(name string) (*buildStage, bool) { + stage, ok := s.byName[strings.ToLower(name)] + return stage, ok +} + +func (s *buildStages) get(indexOrName string) (*buildStage, error) { + index, err := strconv.Atoi(indexOrName) + if err == nil { + if err := s.validateIndex(index); err != nil { + return nil, err } - if _, ok := ic.byName[name]; ok { - return nil, errors.Errorf("duplicate name %s", name) - } - ic.byName[name] = im + return s.sequence[index], nil } - if increment { - ic.list = append(ic.list, im) + if im, ok := s.byName[strings.ToLower(indexOrName)]; ok { + return im, nil } - ic.currentName = name - return im, nil + return nil, nil } -func (ic *imageContexts) update(imageID string, runConfig *container.Config) { - ic.list[len(ic.list)-1].id = imageID - ic.list[len(ic.list)-1].runConfig = runConfig -} - -func (ic *imageContexts) validate(i int) error { - if i < 0 || i >= len(ic.list)-1 { - var extraMsg string - if i == len(ic.list)-1 { - extraMsg = " refers current build block" +func (s *buildStages) validateIndex(i int) error { + if i < 0 || i >= len(s.sequence)-1 { + if i == len(s.sequence)-1 { + return errors.New("refers to current build stage") } - return errors.Errorf("invalid from flag value %d%s", i, extraMsg) + return errors.New("index out of bounds") } return nil } -func (ic *imageContexts) get(indexOrName string) (*imageMount, error) { - index, err := strconv.Atoi(indexOrName) - if err == nil { - if err := ic.validate(index); err != nil { - return nil, err +func (s *buildStages) add(name string, image builder.Image) error { + stage := newBuildStage(image.ImageID()) + name = strings.ToLower(name) + if len(name) > 0 { + if _, ok := s.byName[name]; ok { + return errors.Errorf("duplicate name %s", name) } - return 
ic.list[index], nil + s.byName[name] = stage } - if im, ok := ic.byName[strings.ToLower(indexOrName)]; ok { + s.sequence = append(s.sequence, stage) + return nil +} + +func (s *buildStages) update(imageID string) { + s.sequence[len(s.sequence)-1].update(imageID) +} + +type getAndMountFunc func(string, bool) (builder.Image, builder.ReleaseableLayer, error) + +// imageSources mounts images and provides a cache for mounted images. It tracks +// all images so they can be unmounted at the end of the build. +type imageSources struct { + byImageID map[string]*imageMount + mounts []*imageMount + getImage getAndMountFunc + cache pathCache // TODO: remove +} + +// TODO @jhowardmsft LCOW Support: Eventually, platform can be moved to options.Options.Platform, +// and removed from builderOptions, but that can't be done yet as it would affect the API. +func newImageSources(ctx context.Context, options builderOptions) *imageSources { + getAndMount := func(idOrRef string, localOnly bool) (builder.Image, builder.ReleaseableLayer, error) { + pullOption := backend.PullOptionNoPull + if !localOnly { + if options.Options.PullParent { + pullOption = backend.PullOptionForcePull + } else { + pullOption = backend.PullOptionPreferLocal + } + } + return options.Backend.GetImageAndReleasableLayer(ctx, idOrRef, backend.GetImageAndLayerOptions{ + PullOption: pullOption, + AuthConfig: options.Options.AuthConfigs, + Output: options.ProgressWriter.Output, + Platform: options.Platform, + }) + } + + return &imageSources{ + byImageID: make(map[string]*imageMount), + getImage: getAndMount, + } +} + +func (m *imageSources) Get(idOrRef string, localOnly bool) (*imageMount, error) { + if im, ok := m.byImageID[idOrRef]; ok { return im, nil } - im, err := mountByRef(ic.b, indexOrName) + + image, layer, err := m.getImage(idOrRef, localOnly) if err != nil { - return nil, errors.Wrapf(err, "invalid from flag value %s", indexOrName) + return nil, err } + im := newImageMount(image, layer) + m.Add(im) return im, 
nil } -func (ic *imageContexts) unmount() (retErr error) { - for _, im := range ic.list { - if err := im.unmount(); err != nil { - logrus.Error(err) - retErr = err - } - } - for _, im := range ic.byName { +func (m *imageSources) Unmount() (retErr error) { + for _, im := range m.mounts { if err := im.unmount(); err != nil { logrus.Error(err) retErr = err @@ -90,96 +147,65 @@ func (ic *imageContexts) unmount() (retErr error) { return } -func (ic *imageContexts) isCurrentTarget(target string) bool { - if target == "" { - return false +func (m *imageSources) Add(im *imageMount) { + switch im.image { + case nil: + im.image = &dockerimage.Image{} + default: + m.byImageID[im.image.ImageID()] = im } - return strings.EqualFold(ic.currentName, target) + m.mounts = append(m.mounts, im) } -func (ic *imageContexts) getCache(id, path string) (interface{}, bool) { - if ic.cache != nil { - if id == "" { - return nil, false - } - return ic.cache.get(id + path) - } - return nil, false -} - -func (ic *imageContexts) setCache(id, path string, v interface{}) { - if ic.cache != nil { - ic.cache.set(id+path, v) - } -} - -// imageMount is a reference for getting access to a buildcontext that is backed -// by an existing image +// imageMount is a reference to an image that can be used as a builder.Source type imageMount struct { - id string - ctx builder.Context - release func() error - ic *imageContexts - runConfig *container.Config + image builder.Image + source builder.Source + layer builder.ReleaseableLayer } -func (im *imageMount) context() (builder.Context, error) { - if im.ctx == nil { - if im.id == "" { - return nil, errors.Errorf("could not copy from empty context") +func newImageMount(image builder.Image, layer builder.ReleaseableLayer) *imageMount { + im := &imageMount{image: image, layer: layer} + return im +} + +func (im *imageMount) Source() (builder.Source, error) { + if im.source == nil { + if im.layer == nil { + return nil, errors.Errorf("empty context") } - p, release, 
err := im.ic.b.docker.MountImage(im.id) + mountPath, err := im.layer.Mount() if err != nil { - return nil, errors.Wrapf(err, "failed to mount %s", im.id) + return nil, errors.Wrapf(err, "failed to mount %s", im.image.ImageID()) } - ctx, err := remotecontext.NewLazyContext(p) + source, err := remotecontext.NewLazySource(mountPath) if err != nil { - return nil, errors.Wrapf(err, "failed to create lazycontext for %s", p) + return nil, errors.Wrapf(err, "failed to create lazycontext for %s", mountPath) } - im.release = release - im.ctx = ctx + im.source = source } - return im.ctx, nil + return im.source, nil } func (im *imageMount) unmount() error { - if im.release != nil { - if err := im.release(); err != nil { - return errors.Wrapf(err, "failed to unmount previous build image %s", im.id) - } - im.release = nil + if im.layer == nil { + return nil } + if err := im.layer.Release(); err != nil { + return errors.Wrapf(err, "failed to unmount previous build image %s", im.image.ImageID()) + } + im.layer = nil return nil } +func (im *imageMount) Image() builder.Image { + return im.image +} + +func (im *imageMount) Layer() builder.ReleaseableLayer { + return im.layer +} + func (im *imageMount) ImageID() string { - return im.id -} -func (im *imageMount) RunConfig() *container.Config { - return im.runConfig -} - -type pathCache struct { - mu sync.Mutex - items map[string]interface{} -} - -func (c *pathCache) set(k string, v interface{}) { - c.mu.Lock() - if c.items == nil { - c.items = make(map[string]interface{}) - } - c.items[k] = v - c.mu.Unlock() -} - -func (c *pathCache) get(k string) (interface{}, bool) { - c.mu.Lock() - if c.items == nil { - c.mu.Unlock() - return nil, false - } - v, ok := c.items[k] - c.mu.Unlock() - return v, ok + return im.image.ImageID() } diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/imageprobe.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/imageprobe.go new file mode 100644 index 000000000..3433612de --- /dev/null 
+++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/imageprobe.go @@ -0,0 +1,63 @@ +package dockerfile + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" +) + +// ImageProber exposes an Image cache to the Builder. It supports resetting a +// cache. +type ImageProber interface { + Reset() + Probe(parentID string, runConfig *container.Config) (string, error) +} + +type imageProber struct { + cache builder.ImageCache + reset func() builder.ImageCache + cacheBusted bool +} + +func newImageProber(cacheBuilder builder.ImageCacheBuilder, cacheFrom []string, platform string, noCache bool) ImageProber { + if noCache { + return &nopProber{} + } + + reset := func() builder.ImageCache { + return cacheBuilder.MakeImageCache(cacheFrom, platform) + } + return &imageProber{cache: reset(), reset: reset} +} + +func (c *imageProber) Reset() { + c.cache = c.reset() + c.cacheBusted = false +} + +// Probe checks if cache match can be found for current build instruction. 
+// It returns the cachedID if there is a hit, and the empty string on miss +func (c *imageProber) Probe(parentID string, runConfig *container.Config) (string, error) { + if c.cacheBusted { + return "", nil + } + cacheID, err := c.cache.GetCache(parentID, runConfig) + if err != nil { + return "", err + } + if len(cacheID) == 0 { + logrus.Debugf("[BUILDER] Cache miss: %s", runConfig.Cmd) + c.cacheBusted = true + return "", nil + } + logrus.Debugf("[BUILDER] Use cached version: %s", runConfig.Cmd) + return cacheID, nil +} + +type nopProber struct{} + +func (c *nopProber) Reset() {} + +func (c *nopProber) Probe(_ string, _ *container.Config) (string, error) { + return "", nil +} diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/internals.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/internals.go index eecd47fde..c0d6081d0 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/internals.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/internals.go @@ -7,74 +7,50 @@ import ( "crypto/sha256" "encoding/hex" "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "path/filepath" - "runtime" - "sort" "strings" - "time" - "github.com/Sirupsen/logrus" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/strslice" - "github.com/docker/docker/builder" - "github.com/docker/docker/builder/dockerfile/parser" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/httputils" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/image" "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/pkg/tarsum" - "github.com/docker/docker/pkg/urlutil" - "github.com/docker/docker/runconfig/opts" 
"github.com/pkg/errors" ) -func (b *Builder) commit(id string, autoCmd strslice.StrSlice, comment string) error { +func (b *Builder) commit(dispatchState *dispatchState, comment string) error { if b.disableCommit { return nil } - if !b.hasFromImage() { + if !dispatchState.hasFromImage() { return errors.New("Please provide a source image with `from` prior to commit") } - b.runConfig.Image = b.image - if id == "" { - cmd := b.runConfig.Cmd - b.runConfig.Cmd = strslice.StrSlice(append(getShell(b.runConfig), "#(nop) ", comment)) - defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) - - hit, err := b.probeCache() - if err != nil { - return err - } else if hit { - return nil - } - id, err = b.create() - if err != nil { - return err - } + runConfigWithCommentCmd := copyRunConfig(dispatchState.runConfig, withCmdComment(comment, b.platform)) + hit, err := b.probeCache(dispatchState, runConfigWithCommentCmd) + if err != nil || hit { + return err + } + id, err := b.create(runConfigWithCommentCmd) + if err != nil { + return err } - // Note: Actually copy the struct - autoConfig := *b.runConfig - autoConfig.Cmd = autoCmd + return b.commitContainer(dispatchState, id, runConfigWithCommentCmd) +} + +func (b *Builder) commitContainer(dispatchState *dispatchState, id string, containerConfig *container.Config) error { + if b.disableCommit { + return nil + } commitCfg := &backend.ContainerCommitConfig{ ContainerCommitConfig: types.ContainerCommitConfig{ - Author: b.maintainer, + Author: dispatchState.maintainer, Pause: true, - Config: &autoConfig, + // TODO: this should be done by Commit() + Config: copyRunConfig(dispatchState.runConfig), }, + ContainerConfig: containerConfig, } // Commit the container @@ -83,624 +59,242 @@ func (b *Builder) commit(id string, autoCmd strslice.StrSlice, comment string) e return err } - b.image = imageID - b.imageContexts.update(imageID, &autoConfig) + dispatchState.imageID = imageID + b.buildStages.update(imageID) return nil } -type 
copyInfo struct { - builder.FileInfo - decompress bool -} - -func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string, imageSource *imageMount) error { - if len(args) < 2 { - return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName) - } - - // Work in daemon-specific filepath semantics - dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest - - b.runConfig.Image = b.image - - var infos []copyInfo - - // Loop through each src file and calculate the info we need to - // do the copy (e.g. hash value if cached). Don't actually do - // the copy until we've looked at all src files - var err error - for _, orig := range args[0 : len(args)-1] { - var fi builder.FileInfo - if urlutil.IsURL(orig) { - if !allowRemote { - return fmt.Errorf("Source can't be a URL for %s", cmdName) - } - fi, err = b.download(orig) - if err != nil { - return err - } - defer os.RemoveAll(filepath.Dir(fi.Path())) - infos = append(infos, copyInfo{ - FileInfo: fi, - decompress: false, - }) - continue - } - // not a URL - subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true, imageSource) - if err != nil { - return err - } - - infos = append(infos, subInfos...) 
- } - - if len(infos) == 0 { - return errors.New("No source files were specified") - } - if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) { - return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) - } - - // For backwards compat, if there's just one info then use it as the - // cache look-up string, otherwise hash 'em all into one - var srcHash string - var origPaths string - - if len(infos) == 1 { - fi := infos[0].FileInfo - origPaths = fi.Name() - if hfi, ok := fi.(builder.Hashed); ok { - srcHash = hfi.Hash() - } - } else { - var hashs []string - var origs []string - for _, info := range infos { - fi := info.FileInfo - origs = append(origs, fi.Name()) - if hfi, ok := fi.(builder.Hashed); ok { - hashs = append(hashs, hfi.Hash()) - } - } - hasher := sha256.New() - hasher.Write([]byte(strings.Join(hashs, ","))) - srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil)) - origPaths = strings.Join(origs, " ") - } - - cmd := b.runConfig.Cmd - b.runConfig.Cmd = strslice.StrSlice(append(getShell(b.runConfig), fmt.Sprintf("#(nop) %s %s in %s ", cmdName, srcHash, dest))) - defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) - - if hit, err := b.probeCache(); err != nil { - return err - } else if hit { - return nil - } - - container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{ - Config: b.runConfig, - // Set a log config to override any default value set on the daemon - HostConfig: &container.HostConfig{LogConfig: defaultLogConfig}, - }) +func (b *Builder) exportImage(state *dispatchState, imageMount *imageMount, runConfig *container.Config) error { + newLayer, err := imageMount.Layer().Commit(b.platform) if err != nil { return err } - b.tmpContainers[container.ID] = struct{}{} - comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest) + // add an image mount without an image so the layer is properly unmounted + // if there is an error before 
we can add the full mount with image + b.imageSources.Add(newImageMount(nil, newLayer)) + parentImage, ok := imageMount.Image().(*image.Image) + if !ok { + return errors.Errorf("unexpected image type") + } + + newImage := image.NewChildImage(parentImage, image.ChildConfig{ + Author: state.maintainer, + ContainerConfig: runConfig, + DiffID: newLayer.DiffID(), + Config: copyRunConfig(state.runConfig), + }, parentImage.OS) + + // TODO: it seems strange to marshal this here instead of just passing in the + // image struct + config, err := newImage.MarshalJSON() + if err != nil { + return errors.Wrap(err, "failed to encode image config") + } + + exportedImage, err := b.docker.CreateImage(config, state.imageID, parentImage.OS) + if err != nil { + return errors.Wrapf(err, "failed to export image") + } + + state.imageID = exportedImage.ImageID() + b.imageSources.Add(newImageMount(exportedImage, newLayer)) + b.buildStages.update(state.imageID) + return nil +} + +func (b *Builder) performCopy(state *dispatchState, inst copyInstruction) error { + srcHash := getSourceHashFromInfos(inst.infos) + + // TODO: should this have been using origPaths instead of srcHash in the comment? 
+ runConfigWithCommentCmd := copyRunConfig( + state.runConfig, + withCmdCommentString(fmt.Sprintf("%s %s in %s ", inst.cmdName, srcHash, inst.dest), b.platform)) + hit, err := b.probeCache(state, runConfigWithCommentCmd) + if err != nil || hit { + return err + } + + imageMount, err := b.imageSources.Get(state.imageID, true) + if err != nil { + return errors.Wrapf(err, "failed to get destination image %q", state.imageID) + } + destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, imageMount) + if err != nil { + return err + } + + opts := copyFileOptions{ + decompress: inst.allowLocalDecompression, + archiver: b.archiver, + } + for _, info := range inst.infos { + if err := performCopyForInfo(destInfo, info, opts); err != nil { + return errors.Wrapf(err, "failed to copy files") + } + } + return b.exportImage(state, imageMount, runConfigWithCommentCmd) +} + +func createDestInfo(workingDir string, inst copyInstruction, imageMount *imageMount) (copyInfo, error) { // Twiddle the destination when it's a relative path - meaning, make it // relative to the WORKINGDIR - if dest, err = normaliseDest(cmdName, b.runConfig.WorkingDir, dest); err != nil { - return err + dest, err := normaliseDest(workingDir, inst.dest) + if err != nil { + return copyInfo{}, errors.Wrapf(err, "invalid %s", inst.cmdName) } + destMount, err := imageMount.Source() + if err != nil { + return copyInfo{}, errors.Wrapf(err, "failed to mount copy source") + } + + return newCopyInfoFromSource(destMount, dest, ""), nil +} + +// For backwards compat, if there's just one info then use it as the +// cache look-up string, otherwise hash 'em all into one +func getSourceHashFromInfos(infos []copyInfo) string { + if len(infos) == 1 { + return infos[0].hash + } + var hashs []string for _, info := range infos { - if err := b.docker.CopyOnBuild(container.ID, dest, info.FileInfo, info.decompress); err != nil { - return err - } + hashs = append(hashs, info.hash) } - - return b.commit(container.ID, cmd, 
comment) + return hashStringSlice("multi", hashs) } -func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) { - // get filename from URL - u, err := url.Parse(srcURL) - if err != nil { - return - } - path := filepath.FromSlash(u.Path) // Ensure in platform semantics - if strings.HasSuffix(path, string(os.PathSeparator)) { - path = path[:len(path)-1] - } - parts := strings.Split(path, string(os.PathSeparator)) - filename := parts[len(parts)-1] - if filename == "" { - err = fmt.Errorf("cannot determine filename from url: %s", u) - return - } - - // Initiate the download - resp, err := httputils.Download(srcURL) - if err != nil { - return - } - - // Prepare file in a tmp dir - tmpDir, err := ioutils.TempDir("", "docker-remote") - if err != nil { - return - } - defer func() { - if err != nil { - os.RemoveAll(tmpDir) - } - }() - tmpFileName := filepath.Join(tmpDir, filename) - tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if err != nil { - return - } - - stdoutFormatter := b.Stdout.(*streamformatter.StdoutFormatter) - progressOutput := stdoutFormatter.StreamFormatter.NewProgressOutput(stdoutFormatter.Writer, true) - progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading") - // Download and dump result to tmp file - if _, err = io.Copy(tmpFile, progressReader); err != nil { - tmpFile.Close() - return - } - fmt.Fprintln(b.Stdout) - // ignoring error because the file was already opened successfully - tmpFileSt, err := tmpFile.Stat() - if err != nil { - tmpFile.Close() - return - } - - // Set the mtime to the Last-Modified header value if present - // Otherwise just remove atime and mtime - mTime := time.Time{} - - lastMod := resp.Header.Get("Last-Modified") - if lastMod != "" { - // If we can't parse it then just let it default to 'zero' - // otherwise use the parsed time value - if parsedMTime, err := http.ParseTime(lastMod); err == nil { - mTime = parsedMTime - } 
- } - - tmpFile.Close() - - if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil { - return - } - - // Calc the checksum, even if we're using the cache - r, err := archive.Tar(tmpFileName, archive.Uncompressed) - if err != nil { - return - } - tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1) - if err != nil { - return - } - if _, err = io.Copy(ioutil.Discard, tarSum); err != nil { - return - } - hash := tarSum.Sum(nil) - r.Close() - return &builder.HashedFileInfo{FileInfo: builder.PathFileInfo{FileInfo: tmpFileSt, FilePath: tmpFileName}, FileHash: hash}, nil -} - -var windowsBlacklist = map[string]bool{ - "c:\\": true, - "c:\\windows": true, -} - -func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression, allowWildcards bool, imageSource *imageMount) ([]copyInfo, error) { - - // Work in daemon-specific OS filepath semantics - origPath = filepath.FromSlash(origPath) - // validate windows paths from other images - if imageSource != nil && runtime.GOOS == "windows" { - p := strings.ToLower(filepath.Clean(origPath)) - if !filepath.IsAbs(p) { - if filepath.VolumeName(p) != "" { - if p[len(p)-2:] == ":." { // case where clean returns weird c:. paths - p = p[:len(p)-1] - } - p += "\\" - } else { - p = filepath.Join("c:\\", p) - } - } - if _, blacklisted := windowsBlacklist[p]; blacklisted { - return nil, errors.New("copy from c:\\ or c:\\windows is not allowed on windows") - } - } - - if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 { - origPath = origPath[1:] - } - origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) - - context := b.context - var err error - if imageSource != nil { - context, err = imageSource.context() - if err != nil { - return nil, err - } - } - - if context == nil { - return nil, errors.Errorf("No context given. 
Impossible to use %s", cmdName) - } - - // Deal with wildcards - if allowWildcards && containsWildcards(origPath) { - var copyInfos []copyInfo - if err := context.Walk("", func(path string, info builder.FileInfo, err error) error { - if err != nil { - return err - } - if info.Name() == "" { - // Why are we doing this check? - return nil - } - if match, _ := filepath.Match(origPath, path); !match { - return nil - } - - // Note we set allowWildcards to false in case the name has - // a * in it - subInfos, err := b.calcCopyInfo(cmdName, path, allowLocalDecompression, false, imageSource) - if err != nil { - return err - } - copyInfos = append(copyInfos, subInfos...) - return nil - }); err != nil { - return nil, err - } - return copyInfos, nil - } - - // Must be a dir or a file - statPath, fi, err := context.Stat(origPath) - if err != nil { - return nil, err - } - - copyInfos := []copyInfo{{FileInfo: fi, decompress: allowLocalDecompression}} - - hfi, handleHash := fi.(builder.Hashed) - if !handleHash { - return copyInfos, nil - } - if imageSource != nil { - // fast-cache based on imageID - if h, ok := b.imageContexts.getCache(imageSource.id, origPath); ok { - hfi.SetHash(h.(string)) - return copyInfos, nil - } - } - - // Deal with the single file case - if !fi.IsDir() { - hfi.SetHash("file:" + hfi.Hash()) - return copyInfos, nil - } - // Must be a dir - var subfiles []string - err = context.Walk(statPath, func(path string, info builder.FileInfo, err error) error { - if err != nil { - return err - } - // we already checked handleHash above - subfiles = append(subfiles, info.(builder.Hashed).Hash()) - return nil - }) - if err != nil { - return nil, err - } - - sort.Strings(subfiles) +func hashStringSlice(prefix string, slice []string) string { hasher := sha256.New() - hasher.Write([]byte(strings.Join(subfiles, ","))) - hfi.SetHash("dir:" + hex.EncodeToString(hasher.Sum(nil))) - if imageSource != nil { - b.imageContexts.setCache(imageSource.id, origPath, hfi.Hash()) - } - 
- return copyInfos, nil + hasher.Write([]byte(strings.Join(slice, ","))) + return prefix + ":" + hex.EncodeToString(hasher.Sum(nil)) } -func (b *Builder) processImageFrom(img builder.Image) error { - if img != nil { - b.image = img.ImageID() +type runConfigModifier func(*container.Config) - if img.RunConfig() != nil { - b.runConfig = img.RunConfig() - } +func copyRunConfig(runConfig *container.Config, modifiers ...runConfigModifier) *container.Config { + copy := *runConfig + for _, modifier := range modifiers { + modifier(©) } - - // Check to see if we have a default PATH, note that windows won't - // have one as it's set by HCS - if system.DefaultPathEnv != "" { - // Convert the slice of strings that represent the current list - // of env vars into a map so we can see if PATH is already set. - // If it's not set then go ahead and give it our default value - configEnv := opts.ConvertKVStringsToMap(b.runConfig.Env) - if _, ok := configEnv["PATH"]; !ok { - b.runConfig.Env = append(b.runConfig.Env, - "PATH="+system.DefaultPathEnv) - } - } - - if img == nil { - // Typically this means they used "FROM scratch" - return nil - } - - // Process ONBUILD triggers if they exist - if nTriggers := len(b.runConfig.OnBuild); nTriggers != 0 { - word := "trigger" - if nTriggers > 1 { - word = "triggers" - } - fmt.Fprintf(b.Stderr, "# Executing %d build %s...\n", nTriggers, word) - } - - // Copy the ONBUILD triggers, and remove them from the config, since the config will be committed. 
- onBuildTriggers := b.runConfig.OnBuild - b.runConfig.OnBuild = []string{} - - // Reset stdin settings as all build actions run without stdin - b.runConfig.OpenStdin = false - b.runConfig.StdinOnce = false - - // parse the ONBUILD triggers by invoking the parser - for _, step := range onBuildTriggers { - ast, err := parser.Parse(strings.NewReader(step), &b.directive) - if err != nil { - return err - } - - total := len(ast.Children) - for _, n := range ast.Children { - if err := b.checkDispatch(n, true); err != nil { - return err - } - } - for i, n := range ast.Children { - if err := b.dispatch(i, total, n); err != nil { - return err - } - } - } - - return nil + return © } -// probeCache checks if cache match can be found for current build instruction. -// If an image is found, probeCache returns `(true, nil)`. -// If no image is found, it returns `(false, nil)`. -// If there is any error, it returns `(false, err)`. -func (b *Builder) probeCache() (bool, error) { - c := b.imageCache - if c == nil || b.options.NoCache || b.cacheBusted { - return false, nil +func withCmd(cmd []string) runConfigModifier { + return func(runConfig *container.Config) { + runConfig.Cmd = cmd } - cache, err := c.GetCache(b.image, b.runConfig) - if err != nil { +} + +// withCmdComment sets Cmd to a nop comment string. See withCmdCommentString for +// why there are two almost identical versions of this. +func withCmdComment(comment string, platform string) runConfigModifier { + return func(runConfig *container.Config) { + runConfig.Cmd = append(getShell(runConfig, platform), "#(nop) ", comment) + } +} + +// withCmdCommentString exists to maintain compatibility with older versions. +// A few instructions (workdir, copy, add) used a nop comment that is a single arg +// where as all the other instructions used a two arg comment string. This +// function implements the single arg version. 
+func withCmdCommentString(comment string, platform string) runConfigModifier { + return func(runConfig *container.Config) { + runConfig.Cmd = append(getShell(runConfig, platform), "#(nop) "+comment) + } +} + +func withEnv(env []string) runConfigModifier { + return func(runConfig *container.Config) { + runConfig.Env = env + } +} + +// withEntrypointOverride sets an entrypoint on runConfig if the command is +// not empty. The entrypoint is left unmodified if command is empty. +// +// The dockerfile RUN instruction expect to run without an entrypoint +// so the runConfig entrypoint needs to be modified accordingly. ContainerCreate +// will change a []string{""} entrypoint to nil, so we probe the cache with the +// nil entrypoint. +func withEntrypointOverride(cmd []string, entrypoint []string) runConfigModifier { + return func(runConfig *container.Config) { + if len(cmd) > 0 { + runConfig.Entrypoint = entrypoint + } + } +} + +// getShell is a helper function which gets the right shell for prefixing the +// shell-form of RUN, ENTRYPOINT and CMD instructions +func getShell(c *container.Config, platform string) []string { + if 0 == len(c.Shell) { + return append([]string{}, defaultShellForPlatform(platform)[:]...) + } + return append([]string{}, c.Shell[:]...) 
+} + +func (b *Builder) probeCache(dispatchState *dispatchState, runConfig *container.Config) (bool, error) { + cachedID, err := b.imageProber.Probe(dispatchState.imageID, runConfig) + if cachedID == "" || err != nil { return false, err } - if len(cache) == 0 { - logrus.Debugf("[BUILDER] Cache miss: %s", b.runConfig.Cmd) - b.cacheBusted = true - return false, nil - } - fmt.Fprint(b.Stdout, " ---> Using cache\n") - logrus.Debugf("[BUILDER] Use cached version: %s", b.runConfig.Cmd) - b.image = string(cache) - b.imageContexts.update(b.image, b.runConfig) + dispatchState.imageID = string(cachedID) + b.buildStages.update(dispatchState.imageID) return true, nil } -func (b *Builder) create() (string, error) { - if !b.hasFromImage() { - return "", errors.New("Please provide a source image with `from` prior to run") +var defaultLogConfig = container.LogConfig{Type: "none"} + +func (b *Builder) probeAndCreate(dispatchState *dispatchState, runConfig *container.Config) (string, error) { + if hit, err := b.probeCache(dispatchState, runConfig); err != nil || hit { + return "", err } - b.runConfig.Image = b.image + // Set a log config to override any default value set on the daemon + hostConfig := &container.HostConfig{LogConfig: defaultLogConfig} + container, err := b.containerManager.Create(runConfig, hostConfig, b.platform) + return container.ID, err +} - resources := container.Resources{ - CgroupParent: b.options.CgroupParent, - CPUShares: b.options.CPUShares, - CPUPeriod: b.options.CPUPeriod, - CPUQuota: b.options.CPUQuota, - CpusetCpus: b.options.CPUSetCPUs, - CpusetMems: b.options.CPUSetMems, - Memory: b.options.Memory, - MemorySwap: b.options.MemorySwap, - Ulimits: b.options.Ulimits, - } - - // TODO: why not embed a hostconfig in builder? 
- hostConfig := &container.HostConfig{ - SecurityOpt: b.options.SecurityOpt, - Isolation: b.options.Isolation, - ShmSize: b.options.ShmSize, - Resources: resources, - NetworkMode: container.NetworkMode(b.options.NetworkMode), - // Set a log config to override any default value set on the daemon - LogConfig: defaultLogConfig, - ExtraHosts: b.options.ExtraHosts, - } - - config := *b.runConfig - - // Create the container - c, err := b.docker.ContainerCreate(types.ContainerCreateConfig{ - Config: b.runConfig, - HostConfig: hostConfig, - }) +func (b *Builder) create(runConfig *container.Config) (string, error) { + hostConfig := hostConfigFromOptions(b.options) + container, err := b.containerManager.Create(runConfig, hostConfig, b.platform) if err != nil { return "", err } - for _, warning := range c.Warnings { + // TODO: could this be moved into containerManager.Create() ? + for _, warning := range container.Warnings { fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning) } - - b.tmpContainers[c.ID] = struct{}{} - fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(c.ID)) - - // override the entry point that may have been picked up from the base image - if err := b.docker.ContainerUpdateCmdOnBuild(c.ID, config.Cmd); err != nil { - return "", err - } - - return c.ID, nil + fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(container.ID)) + return container.ID, nil } -var errCancelled = errors.New("build cancelled") - -func (b *Builder) run(cID string) (err error) { - errCh := make(chan error) - go func() { - errCh <- b.docker.ContainerAttachRaw(cID, nil, b.Stdout, b.Stderr, true) - }() - - finished := make(chan struct{}) - cancelErrCh := make(chan error, 1) - go func() { - select { - case <-b.clientCtx.Done(): - logrus.Debugln("Build cancelled, killing and removing container:", cID) - b.docker.ContainerKill(cID, 0) - b.removeContainer(cID) - cancelErrCh <- errCancelled - case <-finished: - cancelErrCh <- nil - } - }() - - if err := 
b.docker.ContainerStart(cID, nil, "", ""); err != nil { - close(finished) - if cancelErr := <-cancelErrCh; cancelErr != nil { - logrus.Debugf("Build cancelled (%v) and got an error from ContainerStart: %v", - cancelErr, err) - } - return err +func hostConfigFromOptions(options *types.ImageBuildOptions) *container.HostConfig { + resources := container.Resources{ + CgroupParent: options.CgroupParent, + CPUShares: options.CPUShares, + CPUPeriod: options.CPUPeriod, + CPUQuota: options.CPUQuota, + CpusetCpus: options.CPUSetCPUs, + CpusetMems: options.CPUSetMems, + Memory: options.Memory, + MemorySwap: options.MemorySwap, + Ulimits: options.Ulimits, } - // Block on reading output from container, stop on err or chan closed - if err := <-errCh; err != nil { - close(finished) - if cancelErr := <-cancelErrCh; cancelErr != nil { - logrus.Debugf("Build cancelled (%v) and got an error from errCh: %v", - cancelErr, err) - } - return err - } - - if ret, _ := b.docker.ContainerWait(cID, -1); ret != 0 { - close(finished) - if cancelErr := <-cancelErrCh; cancelErr != nil { - logrus.Debugf("Build cancelled (%v) and got a non-zero code from ContainerWait: %d", - cancelErr, ret) - } - // TODO: change error type, because jsonmessage.JSONError assumes HTTP - return &jsonmessage.JSONError{ - Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", strings.Join(b.runConfig.Cmd, " "), ret), - Code: ret, - } - } - close(finished) - return <-cancelErrCh -} - -func (b *Builder) removeContainer(c string) error { - rmConfig := &types.ContainerRmConfig{ - ForceRemove: true, - RemoveVolume: true, - } - if err := b.docker.ContainerRm(c, rmConfig); err != nil { - fmt.Fprintf(b.Stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err) - return err - } - return nil -} - -func (b *Builder) clearTmp() { - for c := range b.tmpContainers { - if err := b.removeContainer(c); err != nil { - return - } - delete(b.tmpContainers, c) - fmt.Fprintf(b.Stdout, "Removing 
intermediate container %s\n", stringid.TruncateID(c)) + return &container.HostConfig{ + SecurityOpt: options.SecurityOpt, + Isolation: options.Isolation, + ShmSize: options.ShmSize, + Resources: resources, + NetworkMode: container.NetworkMode(options.NetworkMode), + // Set a log config to override any default value set on the daemon + LogConfig: defaultLogConfig, + ExtraHosts: options.ExtraHosts, } } - -// readDockerfile reads a Dockerfile from the current context. -func (b *Builder) readDockerfile() (*parser.Node, error) { - // If no -f was specified then look for 'Dockerfile'. If we can't find - // that then look for 'dockerfile'. If neither are found then default - // back to 'Dockerfile' and use that in the error message. - if b.options.Dockerfile == "" { - b.options.Dockerfile = builder.DefaultDockerfileName - if _, _, err := b.context.Stat(b.options.Dockerfile); os.IsNotExist(err) { - lowercase := strings.ToLower(b.options.Dockerfile) - if _, _, err := b.context.Stat(lowercase); err == nil { - b.options.Dockerfile = lowercase - } - } - } - - nodes, err := b.parseDockerfile() - if err != nil { - return nodes, err - } - - // After the Dockerfile has been parsed, we need to check the .dockerignore - // file for either "Dockerfile" or ".dockerignore", and if either are - // present then erase them from the build context. These files should never - // have been sent from the client but we did send them to make sure that - // we had the Dockerfile to actually parse, and then we also need the - // .dockerignore file to know whether either file should be removed. - // Note that this assumes the Dockerfile has been read into memory and - // is now safe to be removed. 
- if dockerIgnore, ok := b.context.(builder.DockerIgnoreContext); ok { - dockerIgnore.Process([]string{b.options.Dockerfile}) - } - return nodes, nil -} - -func (b *Builder) parseDockerfile() (*parser.Node, error) { - f, err := b.context.Open(b.options.Dockerfile) - if err != nil { - if os.IsNotExist(err) { - return nil, fmt.Errorf("Cannot locate specified Dockerfile: %s", b.options.Dockerfile) - } - return nil, err - } - defer f.Close() - if f, ok := f.(*os.File); ok { - // ignoring error because Open already succeeded - fi, err := f.Stat() - if err != nil { - return nil, fmt.Errorf("Unexpected error reading Dockerfile: %v", err) - } - if fi.Size() == 0 { - return nil, fmt.Errorf("The Dockerfile (%s) cannot be empty", b.options.Dockerfile) - } - } - return parser.Parse(f, &b.directive) -} diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/internals_test.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/internals_test.go index 56d5e4f18..8073cc671 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/internals_test.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/internals_test.go @@ -2,12 +2,17 @@ package dockerfile import ( "fmt" + "runtime" "testing" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/builder" + "github.com/docker/docker/builder/remotecontext" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/testutil/assert" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestEmptyDockerfile(t *testing.T) { @@ -16,7 +21,7 @@ func TestEmptyDockerfile(t *testing.T) { createTestTempFile(t, contextDir, builder.DefaultDockerfileName, "", 0777) - readAndCheckDockerfile(t, "emptyDockerfile", contextDir, "", "The Dockerfile (Dockerfile) cannot be empty") + readAndCheckDockerfile(t, "emptyDockerfile", contextDir, "", "the Dockerfile (Dockerfile) cannot be 
empty") } func TestSymlinkDockerfile(t *testing.T) { @@ -38,7 +43,7 @@ func TestDockerfileOutsideTheBuildContext(t *testing.T) { contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") defer cleanup() - expectedError := "Forbidden path outside the build context" + expectedError := "Forbidden path outside the build context: ../../Dockerfile ()" readAndCheckDockerfile(t, "DockerfileOutsideTheBuildContext", contextDir, "../../Dockerfile", expectedError) } @@ -54,7 +59,7 @@ func TestNonExistingDockerfile(t *testing.T) { func readAndCheckDockerfile(t *testing.T, testName, contextDir, dockerfilePath, expectedError string) { tarStream, err := archive.Tar(contextDir, archive.Uncompressed) - assert.NilError(t, err) + require.NoError(t, err) defer func() { if err = tarStream.Close(); err != nil { @@ -62,21 +67,65 @@ func readAndCheckDockerfile(t *testing.T, testName, contextDir, dockerfilePath, } }() - context, err := builder.MakeTarSumContext(tarStream) - assert.NilError(t, err) - - defer func() { - if err = context.Close(); err != nil { - t.Fatalf("Error when closing tar context: %s", err) - } - }() - - options := &types.ImageBuildOptions{ - Dockerfile: dockerfilePath, + if dockerfilePath == "" { // handled in BuildWithContext + dockerfilePath = builder.DefaultDockerfileName + } + + config := backend.BuildConfig{ + Options: &types.ImageBuildOptions{Dockerfile: dockerfilePath}, + Source: tarStream, + } + _, _, err = remotecontext.Detect(config) + assert.EqualError(t, err, expectedError) +} + +func TestCopyRunConfig(t *testing.T) { + defaultEnv := []string{"foo=1"} + defaultCmd := []string{"old"} + + var testcases = []struct { + doc string + modifiers []runConfigModifier + expected *container.Config + }{ + { + doc: "Set the command", + modifiers: []runConfigModifier{withCmd([]string{"new"})}, + expected: &container.Config{ + Cmd: []string{"new"}, + Env: defaultEnv, + }, + }, + { + doc: "Set the command to a comment", + modifiers: 
[]runConfigModifier{withCmdComment("comment", runtime.GOOS)}, + expected: &container.Config{ + Cmd: append(defaultShellForPlatform(runtime.GOOS), "#(nop) ", "comment"), + Env: defaultEnv, + }, + }, + { + doc: "Set the command and env", + modifiers: []runConfigModifier{ + withCmd([]string{"new"}), + withEnv([]string{"one", "two"}), + }, + expected: &container.Config{ + Cmd: []string{"new"}, + Env: []string{"one", "two"}, + }, + }, + } + + for _, testcase := range testcases { + runConfig := &container.Config{ + Cmd: defaultCmd, + Env: defaultEnv, + } + runConfigCopy := copyRunConfig(runConfig, testcase.modifiers...) + assert.Equal(t, testcase.expected, runConfigCopy, testcase.doc) + // Assert the original was not modified + assert.NotEqual(t, runConfig, runConfigCopy, testcase.doc) } - b := &Builder{options: options, context: context} - - _, err = b.readDockerfile() - assert.Error(t, err, expectedError) } diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/internals_unix.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/internals_unix.go index a8a47c358..f4784e1cc 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/internals_unix.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/internals_unix.go @@ -12,7 +12,7 @@ import ( // normaliseDest normalises the destination of a COPY/ADD command in a // platform semantically consistent way. 
-func normaliseDest(cmdName, workingDir, requested string) (string, error) { +func normaliseDest(workingDir, requested string) (string, error) { dest := filepath.FromSlash(requested) endsInSlash := strings.HasSuffix(requested, string(os.PathSeparator)) if !system.IsAbs(requested) { @@ -36,3 +36,7 @@ func containsWildcards(name string) bool { } return false } + +func validateCopySourcePath(imageSource *imageMount, origPath string) error { + return nil +} diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go index f60b11204..bb3285925 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go @@ -7,11 +7,12 @@ import ( "strings" "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" ) // normaliseDest normalises the destination of a COPY/ADD command in a // platform semantically consistent way. -func normaliseDest(cmdName, workingDir, requested string) (string, error) { +func normaliseDest(workingDir, requested string) (string, error) { dest := filepath.FromSlash(requested) endsInSlash := strings.HasSuffix(dest, string(os.PathSeparator)) @@ -31,7 +32,7 @@ func normaliseDest(cmdName, workingDir, requested string) (string, error) { // we only want to validate where the DriveColon part has been supplied. 
if filepath.IsAbs(dest) { if strings.ToUpper(string(dest[0])) != "C" { - return "", fmt.Errorf("Windows does not support %s with a destinations not on the system drive (C:)", cmdName) + return "", fmt.Errorf("Windows does not support destinations not on the system drive (C:)") } dest = dest[2:] // Strip the drive letter } @@ -43,7 +44,7 @@ func normaliseDest(cmdName, workingDir, requested string) (string, error) { } if !system.IsAbs(dest) { if string(workingDir[0]) != "C" { - return "", fmt.Errorf("Windows does not support %s with relative paths when WORKDIR is not the system drive", cmdName) + return "", fmt.Errorf("Windows does not support relative paths when WORKDIR is not the system drive") } dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest) // Make sure we preserve any trailing slash @@ -64,3 +65,31 @@ func containsWildcards(name string) bool { } return false } + +var pathBlacklist = map[string]bool{ + "c:\\": true, + "c:\\windows": true, +} + +func validateCopySourcePath(imageSource *imageMount, origPath string) error { + // validate windows paths from other images + if imageSource == nil { + return nil + } + origPath = filepath.FromSlash(origPath) + p := strings.ToLower(filepath.Clean(origPath)) + if !filepath.IsAbs(p) { + if filepath.VolumeName(p) != "" { + if p[len(p)-2:] == ":." { // case where clean returns weird c:. 
paths + p = p[:len(p)-1] + } + p += "\\" + } else { + p = filepath.Join("c:\\", p) + } + } + if _, blacklisted := pathBlacklist[p]; blacklisted { + return errors.New("copy from c:\\ or c:\\windows is not allowed on windows") + } + return nil +} diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/internals_windows_test.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/internals_windows_test.go index 868a6671a..b4c8d4b3c 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/internals_windows_test.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/internals_windows_test.go @@ -2,16 +2,22 @@ package dockerfile -import "testing" +import ( + "fmt" + "testing" + + "github.com/docker/docker/pkg/testutil" + "github.com/stretchr/testify/assert" +) func TestNormaliseDest(t *testing.T) { tests := []struct{ current, requested, expected, etext string }{ - {``, `D:\`, ``, `Windows does not support TEST with a destinations not on the system drive (C:)`}, - {``, `e:/`, ``, `Windows does not support TEST with a destinations not on the system drive (C:)`}, + {``, `D:\`, ``, `Windows does not support destinations not on the system drive (C:)`}, + {``, `e:/`, ``, `Windows does not support destinations not on the system drive (C:)`}, {`invalid`, `./c1`, ``, `Current WorkingDir invalid is not platform consistent`}, {`C:`, ``, ``, `Current WorkingDir C: is not platform consistent`}, {`C`, ``, ``, `Current WorkingDir C is not platform consistent`}, - {`D:\`, `.`, ``, "Windows does not support TEST with relative paths when WORKDIR is not the system drive"}, + {`D:\`, `.`, ``, "Windows does not support relative paths when WORKDIR is not the system drive"}, {``, `D`, `D`, ``}, {``, `./a1`, `.\a1`, ``}, {``, `.\b1`, `.\b1`, ``}, @@ -32,20 +38,16 @@ func TestNormaliseDest(t *testing.T) { {`C:\wdm`, `foo/bar/`, `\wdm\foo\bar\`, ``}, {`C:\wdn`, `foo\bar/`, `\wdn\foo\bar\`, ``}, } - for _, i := range tests { - got, err := normaliseDest("TEST", 
i.current, i.requested) - if err != nil && i.etext == "" { - t.Fatalf("TestNormaliseDest Got unexpected error %q for %s %s. ", err.Error(), i.current, i.requested) - } - if i.etext != "" && ((err == nil) || (err != nil && err.Error() != i.etext)) { - if err == nil { - t.Fatalf("TestNormaliseDest Expected an error for %s %s but didn't get one", i.current, i.requested) - } else { - t.Fatalf("TestNormaliseDest Wrong error text for %s %s - %s", i.current, i.requested, err.Error()) + for _, testcase := range tests { + msg := fmt.Sprintf("Input: %s, %s", testcase.current, testcase.requested) + actual, err := normaliseDest(testcase.current, testcase.requested) + if testcase.etext == "" { + if !assert.NoError(t, err, msg) { + continue } - } - if i.etext == "" && got != i.expected { - t.Fatalf("TestNormaliseDest Expected %q for %q and %q. Got %q", i.expected, i.current, i.requested, got) + assert.Equal(t, testcase.expected, actual, msg) + } else { + testutil.ErrorContains(t, err, testcase.etext) } } } diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/metrics.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/metrics.go new file mode 100644 index 000000000..5aa953aa7 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/metrics.go @@ -0,0 +1,44 @@ +package dockerfile + +import ( + "github.com/docker/go-metrics" +) + +var ( + buildsTriggered metrics.Counter + buildsFailed metrics.LabeledCounter +) + +// Build metrics prometheus messages, these values must be initialized before +// using them. See the example below in the "builds_failed" metric definition. 
+const ( + metricsDockerfileSyntaxError = "dockerfile_syntax_error" + metricsDockerfileEmptyError = "dockerfile_empty_error" + metricsCommandNotSupportedError = "command_not_supported_error" + metricsErrorProcessingCommandsError = "error_processing_commands_error" + metricsBuildTargetNotReachableError = "build_target_not_reachable_error" + metricsMissingOnbuildArgumentsError = "missing_onbuild_arguments_error" + metricsUnknownInstructionError = "unknown_instruction_error" + metricsBuildCanceled = "build_canceled" +) + +func init() { + buildMetrics := metrics.NewNamespace("builder", "", nil) + + buildsTriggered = buildMetrics.NewCounter("builds_triggered", "Number of triggered image builds") + buildsFailed = buildMetrics.NewLabeledCounter("builds_failed", "Number of failed image builds", "reason") + for _, r := range []string{ + metricsDockerfileSyntaxError, + metricsDockerfileEmptyError, + metricsCommandNotSupportedError, + metricsErrorProcessingCommandsError, + metricsBuildTargetNotReachableError, + metricsMissingOnbuildArgumentsError, + metricsUnknownInstructionError, + metricsBuildCanceled, + } { + buildsFailed.WithValues(r) + } + + metrics.Register(buildMetrics) +} diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/mockbackend_test.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/mockbackend_test.go index 4c0356967..adc22762e 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/mockbackend_test.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/mockbackend_test.go @@ -1,43 +1,34 @@ package dockerfile import ( + "encoding/json" "io" - "time" - "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" "github.com/docker/docker/builder" - "github.com/docker/docker/image" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/layer" "golang.org/x/net/context" ) // MockBackend 
implements the builder.Backend interface for unit testing type MockBackend struct { - getImageOnBuildFunc func(string) (builder.Image, error) + containerCreateFunc func(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + commitFunc func(string, *backend.ContainerCommitConfig) (string, error) + getImageFunc func(string) (builder.Image, builder.ReleaseableLayer, error) + makeImageCacheFunc func(cacheFrom []string, platform string) builder.ImageCache } -func (m *MockBackend) GetImageOnBuild(name string) (builder.Image, error) { - if m.getImageOnBuildFunc != nil { - return m.getImageOnBuildFunc(name) - } - return &mockImage{id: "theid"}, nil -} - -func (m *MockBackend) TagImageWithReference(image.ID, reference.Named) error { - return nil -} - -func (m *MockBackend) PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (builder.Image, error) { - return nil, nil -} - -func (m *MockBackend) ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error { +func (m *MockBackend) ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error { return nil } func (m *MockBackend) ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) { + if m.containerCreateFunc != nil { + return m.containerCreateFunc(config) + } return container.ContainerCreateCreatedBody{}, nil } @@ -45,7 +36,10 @@ func (m *MockBackend) ContainerRm(name string, config *types.ContainerRmConfig) return nil } -func (m *MockBackend) Commit(string, *backend.ContainerCommitConfig) (string, error) { +func (m *MockBackend) Commit(cID string, cfg *backend.ContainerCommitConfig) (string, error) { + if m.commitFunc != nil { + return m.commitFunc(cID, cfg) + } return "", nil } @@ -57,32 +51,35 @@ func (m *MockBackend) ContainerStart(containerID string, hostConfig *container.H return nil } -func (m 
*MockBackend) ContainerWait(containerID string, timeout time.Duration) (int, error) { - return 0, nil -} - -func (m *MockBackend) ContainerUpdateCmdOnBuild(containerID string, cmd []string) error { - return nil +func (m *MockBackend) ContainerWait(ctx context.Context, containerID string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) { + return nil, nil } func (m *MockBackend) ContainerCreateWorkdir(containerID string) error { return nil } -func (m *MockBackend) CopyOnBuild(containerID string, destPath string, src builder.FileInfo, decompress bool) error { +func (m *MockBackend) CopyOnBuild(containerID string, destPath string, srcRoot string, srcPath string, decompress bool) error { return nil } -func (m *MockBackend) HasExperimental() bool { - return false +func (m *MockBackend) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ReleaseableLayer, error) { + if m.getImageFunc != nil { + return m.getImageFunc(refOrID) + } + + return &mockImage{id: "theid"}, &mockLayer{}, nil } -func (m *MockBackend) SquashImage(from string, to string) (string, error) { - return "", nil +func (m *MockBackend) MakeImageCache(cacheFrom []string, platform string) builder.ImageCache { + if m.makeImageCacheFunc != nil { + return m.makeImageCacheFunc(cacheFrom, platform) + } + return nil } -func (m *MockBackend) MountImage(name string) (string, func() error, error) { - return "", func() error { return nil }, nil +func (m *MockBackend) CreateImage(config []byte, parent string, platform string) (builder.Image, error) { + return nil, nil } type mockImage struct { @@ -97,3 +94,37 @@ func (i *mockImage) ImageID() string { func (i *mockImage) RunConfig() *container.Config { return i.config } + +func (i *mockImage) MarshalJSON() ([]byte, error) { + type rawImage mockImage + return json.Marshal(rawImage(*i)) +} + +type mockImageCache struct { + getCacheFunc func(parentID string, cfg 
*container.Config) (string, error) +} + +func (mic *mockImageCache) GetCache(parentID string, cfg *container.Config) (string, error) { + if mic.getCacheFunc != nil { + return mic.getCacheFunc(parentID, cfg) + } + return "", nil +} + +type mockLayer struct{} + +func (l *mockLayer) Release() error { + return nil +} + +func (l *mockLayer) Mount() (string, error) { + return "mountPath", nil +} + +func (l *mockLayer) Commit(string) (builder.ReleaseableLayer, error) { + return nil, nil +} + +func (l *mockLayer) DiffID() layer.DiffID { + return layer.DiffID("abcdef") +} diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/dumper/main.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/dumper/main.go index fff3046fd..ea6205073 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/dumper/main.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/dumper/main.go @@ -23,14 +23,10 @@ func main() { } defer f.Close() - d := parser.Directive{LookingForDirectives: true} - parser.SetEscapeToken(parser.DefaultEscapeToken, &d) - - ast, err := parser.Parse(f, &d) + result, err := parser.Parse(f) if err != nil { panic(err) - } else { - fmt.Println(ast.Dump()) } + fmt.Println(result.AST.Dump()) } } diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/json_test.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/json_test.go index 60d74d9c3..d4489191d 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/json_test.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/json_test.go @@ -28,10 +28,9 @@ var validJSONArraysOfStrings = map[string][]string{ func TestJSONArraysOfStrings(t *testing.T) { for json, expected := range validJSONArraysOfStrings { - d := Directive{} - SetEscapeToken(DefaultEscapeToken, &d) + d := NewDefaultDirective() - if node, _, err := parseJSON(json, &d); err != nil { + if node, _, err := parseJSON(json, d); err != nil { 
t.Fatalf("%q should be a valid JSON array of strings, but wasn't! (err: %q)", json, err) } else { i := 0 @@ -51,10 +50,9 @@ func TestJSONArraysOfStrings(t *testing.T) { } } for _, json := range invalidJSONArraysOfStrings { - d := Directive{} - SetEscapeToken(DefaultEscapeToken, &d) + d := NewDefaultDirective() - if _, _, err := parseJSON(json, &d); err != errDockerfileNotStringArray { + if _, _, err := parseJSON(json, d); err != errDockerfileNotStringArray { t.Fatalf("%q should be an invalid JSON array of strings, but wasn't!", json) } } diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go index 0334834e6..d0e182e8e 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go @@ -42,7 +42,7 @@ func parseSubCommand(rest string, d *Directive) (*Node, map[string]bool, error) return nil, nil, nil } - _, child, err := ParseLine(rest, d, false) + child, err := newNodeFromLine(rest, d) if err != nil { return nil, nil, err } @@ -103,7 +103,7 @@ func parseWords(rest string, d *Directive) []string { blankOK = true phase = inQuote } - if ch == d.EscapeToken { + if ch == d.escapeToken { if pos+chWidth == len(rest) { continue // just skip an escape token at end of line } @@ -122,7 +122,7 @@ func parseWords(rest string, d *Directive) []string { phase = inWord } // The escape token is special except for ' quotes - can't escape anything for ' - if ch == d.EscapeToken && quote != '\'' { + if ch == d.escapeToken && quote != '\'' { if pos+chWidth == len(rest) { phase = inWord continue // just skip the escape token at end diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers_test.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers_test.go index 30b6bdd82..cf0b21bb5 100644 --- 
a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers_test.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers_test.go @@ -1,26 +1,27 @@ package parser import ( - "github.com/docker/docker/pkg/testutil/assert" "testing" + + "github.com/stretchr/testify/assert" ) func TestParseNameValOldFormat(t *testing.T) { directive := Directive{} node, err := parseNameVal("foo bar", "LABEL", &directive) - assert.NilError(t, err) + assert.NoError(t, err) expected := &Node{ Value: "foo", Next: &Node{Value: "bar"}, } - assert.DeepEqual(t, node, expected) + assert.Equal(t, expected, node) } func TestParseNameValNewFormat(t *testing.T) { directive := Directive{} node, err := parseNameVal("foo=bar thing=star", "LABEL", &directive) - assert.NilError(t, err) + assert.NoError(t, err) expected := &Node{ Value: "foo", @@ -34,7 +35,7 @@ func TestParseNameValNewFormat(t *testing.T) { }, }, } - assert.DeepEqual(t, node, expected) + assert.Equal(t, expected, node) } func TestNodeFromLabels(t *testing.T) { @@ -60,6 +61,14 @@ func TestNodeFromLabels(t *testing.T) { } node := NodeFromLabels(labels) - assert.DeepEqual(t, node, expected) + assert.Equal(t, expected, node) } + +func TestParseNameValWithoutVal(t *testing.T) { + directive := Directive{} + // In Config.Env, a variable without `=` is removed from the environment. (#31634) + // However, in Dockerfile, we don't allow "unsetting" an environment variable. 
(#11922) + _, err := parseNameVal("foo", "ENV", &directive) + assert.Error(t, err, "ENV must have two arguments") +} diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go index adc8a90eb..7f07ff215 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go @@ -7,11 +7,14 @@ import ( "fmt" "io" "regexp" + "runtime" "strconv" "strings" "unicode" "github.com/docker/docker/builder/dockerfile/command" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" ) // Node is a structure used to represent a parse tree. @@ -34,7 +37,7 @@ type Node struct { Original string // original line used before parsing Flags []string // only top Node should have this set StartLine int // the line in the original dockerfile where the node begins - EndLine int // the line in the original dockerfile where the node ends + endLine int // the line in the original dockerfile where the node ends } // Dump dumps the AST defined by `node` as a list of sexps. @@ -62,35 +65,122 @@ func (node *Node) Dump() string { return strings.TrimSpace(str) } -// Directive is the structure used during a build run to hold the state of -// parsing directives. 
-type Directive struct { - EscapeToken rune // Current escape token - LineContinuationRegex *regexp.Regexp // Current line continuation regex - LookingForDirectives bool // Whether we are currently looking for directives - EscapeSeen bool // Whether the escape directive has been seen +func (node *Node) lines(start, end int) { + node.StartLine = start + node.endLine = end +} + +// AddChild adds a new child node, and updates line information +func (node *Node) AddChild(child *Node, startLine, endLine int) { + child.lines(startLine, endLine) + if node.StartLine < 0 { + node.StartLine = startLine + } + node.endLine = endLine + node.Children = append(node.Children, child) } var ( - dispatch map[string]func(string, *Directive) (*Node, map[string]bool, error) - tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) - tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P.).*$`) - tokenComment = regexp.MustCompile(`^#.*$`) + dispatch map[string]func(string, *Directive) (*Node, map[string]bool, error) + tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) + tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P.).*$`) + tokenPlatformCommand = regexp.MustCompile(`^#[ \t]*platform[ \t]*=[ \t]*(?P.*)$`) + tokenComment = regexp.MustCompile(`^#.*$`) ) // DefaultEscapeToken is the default escape token -const DefaultEscapeToken = "\\" +const DefaultEscapeToken = '\\' -// SetEscapeToken sets the default token for escaping characters in a Dockerfile. -func SetEscapeToken(s string, d *Directive) error { +// defaultPlatformToken is the platform assumed for the build if not explicitly provided +var defaultPlatformToken = runtime.GOOS + +// Directive is the structure used during a build run to hold the state of +// parsing directives. 
+type Directive struct { + escapeToken rune // Current escape token + platformToken string // Current platform token + lineContinuationRegex *regexp.Regexp // Current line continuation regex + processingComplete bool // Whether we are done looking for directives + escapeSeen bool // Whether the escape directive has been seen + platformSeen bool // Whether the platform directive has been seen +} + +// setEscapeToken sets the default token for escaping characters in a Dockerfile. +func (d *Directive) setEscapeToken(s string) error { if s != "`" && s != "\\" { return fmt.Errorf("invalid ESCAPE '%s'. Must be ` or \\", s) } - d.EscapeToken = rune(s[0]) - d.LineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`) + d.escapeToken = rune(s[0]) + d.lineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`) return nil } +// setPlatformToken sets the default platform for pulling images in a Dockerfile. +func (d *Directive) setPlatformToken(s string) error { + s = strings.ToLower(s) + valid := []string{runtime.GOOS} + if system.LCOWSupported() { + valid = append(valid, "linux") + } + for _, item := range valid { + if s == item { + d.platformToken = s + return nil + } + } + return fmt.Errorf("invalid PLATFORM '%s'. Must be one of %v", s, valid) +} + +// possibleParserDirective looks for one or more parser directives '# escapeToken=' and +// '# platform='. Parser directives must precede any builder instruction +// or other comments, and cannot be repeated. 
+func (d *Directive) possibleParserDirective(line string) error { + if d.processingComplete { + return nil + } + + tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line)) + if len(tecMatch) != 0 { + for i, n := range tokenEscapeCommand.SubexpNames() { + if n == "escapechar" { + if d.escapeSeen == true { + return errors.New("only one escape parser directive can be used") + } + d.escapeSeen = true + return d.setEscapeToken(tecMatch[i]) + } + } + } + + // TODO @jhowardmsft LCOW Support: Eventually this check can be removed, + // but only recognise a platform token if running in LCOW mode. + if system.LCOWSupported() { + tpcMatch := tokenPlatformCommand.FindStringSubmatch(strings.ToLower(line)) + if len(tpcMatch) != 0 { + for i, n := range tokenPlatformCommand.SubexpNames() { + if n == "platform" { + if d.platformSeen == true { + return errors.New("only one platform parser directive can be used") + } + d.platformSeen = true + return d.setPlatformToken(tpcMatch[i]) + } + } + } + } + + d.processingComplete = true + return nil +} + +// NewDefaultDirective returns a new Directive with the default escapeToken token +func NewDefaultDirective() *Directive { + directive := Directive{} + directive.setEscapeToken(string(DefaultEscapeToken)) + directive.setPlatformToken(defaultPlatformToken) + return &directive +} + func init() { // Dispatch Table. see line_parsers.go for the parse functions. // The command is parsed and mapped to the line parser. The line parser @@ -120,28 +210,6 @@ func init() { } } -// ParseLine parses a line and returns the remainder. 
-func ParseLine(line string, d *Directive, ignoreCont bool) (string, *Node, error) { - if escapeFound, err := handleParserDirective(line, d); err != nil || escapeFound { - d.EscapeSeen = escapeFound - return "", nil, err - } - - d.LookingForDirectives = false - - if line = stripComments(line); line == "" { - return "", nil, nil - } - - if !ignoreCont && d.LineContinuationRegex.MatchString(line) { - line = d.LineContinuationRegex.ReplaceAllString(line, "") - return line, nil, nil - } - - node, err := newNodeFromLine(line, d) - return "", node, err -} - // newNodeFromLine splits the line into parts, and dispatches to a function // based on the command and command arguments. A Node is created from the // result of the dispatch. @@ -170,109 +238,118 @@ func newNodeFromLine(line string, directive *Directive) (*Node, error) { }, nil } -// Handle the parser directive '# escape=. Parser directives must precede -// any builder instruction or other comments, and cannot be repeated. -func handleParserDirective(line string, d *Directive) (bool, error) { - if !d.LookingForDirectives { - return false, nil - } - tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line)) - if len(tecMatch) == 0 { - return false, nil - } - if d.EscapeSeen == true { - return false, fmt.Errorf("only one escape parser directive can be used") - } - for i, n := range tokenEscapeCommand.SubexpNames() { - if n == "escapechar" { - if err := SetEscapeToken(tecMatch[i], d); err != nil { - return false, err - } - return true, nil - } - } - return false, nil +// Result is the result of parsing a Dockerfile +type Result struct { + AST *Node + EscapeToken rune + Platform string + Warnings []string } -// Parse is the main parse routine. -// It handles an io.ReadWriteCloser and returns the root of the AST. 
-func Parse(rwc io.Reader, d *Directive) (*Node, error) { - currentLine := 0 - root := &Node{} - root.StartLine = -1 - scanner := bufio.NewScanner(rwc) +// PrintWarnings to the writer +func (r *Result) PrintWarnings(out io.Writer) { + if len(r.Warnings) == 0 { + return + } + fmt.Fprintf(out, strings.Join(r.Warnings, "\n")+"\n") +} - utf8bom := []byte{0xEF, 0xBB, 0xBF} +// Parse reads lines from a Reader, parses the lines into an AST and returns +// the AST and escape token +func Parse(rwc io.Reader) (*Result, error) { + d := NewDefaultDirective() + currentLine := 0 + root := &Node{StartLine: -1} + scanner := bufio.NewScanner(rwc) + warnings := []string{} + + var err error for scanner.Scan() { - scannedBytes := scanner.Bytes() - // We trim UTF8 BOM + bytesRead := scanner.Bytes() if currentLine == 0 { - scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) + // First line, strip the byte-order-marker if present + bytesRead = bytes.TrimPrefix(bytesRead, utf8bom) } - scannedLine := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace) - currentLine++ - line, child, err := ParseLine(scannedLine, d, false) + bytesRead, err = processLine(d, bytesRead, true) if err != nil { return nil, err } + currentLine++ + startLine := currentLine - - if line != "" && child == nil { - for scanner.Scan() { - newline := scanner.Text() - currentLine++ - - if stripComments(strings.TrimSpace(newline)) == "" { - continue - } - - line, child, err = ParseLine(line+newline, d, false) - if err != nil { - return nil, err - } - - if child != nil { - break - } - } - if child == nil && line != "" { - // When we call ParseLine we'll pass in 'true' for - // the ignoreCont param if we're at the EOF. This will - // prevent the func from returning immediately w/o - // parsing the line thinking that there's more input - // to come. 
- - _, child, err = ParseLine(line, d, scanner.Err() == nil) - if err != nil { - return nil, err - } - } + line, isEndOfLine := trimContinuationCharacter(string(bytesRead), d) + if isEndOfLine && line == "" { + continue } - if child != nil { - // Update the line information for the current child. - child.StartLine = startLine - child.EndLine = currentLine - // Update the line information for the root. The starting line of the root is always the - // starting line of the first child and the ending line is the ending line of the last child. - if root.StartLine < 0 { - root.StartLine = currentLine + var hasEmptyContinuationLine bool + for !isEndOfLine && scanner.Scan() { + bytesRead, err := processLine(d, scanner.Bytes(), false) + if err != nil { + return nil, err } - root.EndLine = currentLine - root.Children = append(root.Children, child) + currentLine++ + + if isEmptyContinuationLine(bytesRead) { + hasEmptyContinuationLine = true + continue + } + + continuationLine := string(bytesRead) + continuationLine, isEndOfLine = trimContinuationCharacter(continuationLine, d) + line += continuationLine } + + if hasEmptyContinuationLine { + warning := "[WARNING]: Empty continuation line found in:\n " + line + warnings = append(warnings, warning) + } + + child, err := newNodeFromLine(line, d) + if err != nil { + return nil, err + } + root.AddChild(child, startLine, currentLine) } - return root, nil -} - -// covers comments and empty lines. Lines should be trimmed before passing to -// this function. 
-func stripComments(line string) string { - // string is already trimmed at this point - if tokenComment.MatchString(line) { - return tokenComment.ReplaceAllString(line, "") + if len(warnings) > 0 { + warnings = append(warnings, "[WARNING]: Empty continuation lines will become errors in a future release.") } - - return line + return &Result{ + AST: root, + Warnings: warnings, + EscapeToken: d.escapeToken, + Platform: d.platformToken, + }, nil +} + +func trimComments(src []byte) []byte { + return tokenComment.ReplaceAll(src, []byte{}) +} + +func trimWhitespace(src []byte) []byte { + return bytes.TrimLeftFunc(src, unicode.IsSpace) +} + +func isEmptyContinuationLine(line []byte) bool { + return len(trimComments(trimWhitespace(line))) == 0 +} + +var utf8bom = []byte{0xEF, 0xBB, 0xBF} + +func trimContinuationCharacter(line string, d *Directive) (string, bool) { + if d.lineContinuationRegex.MatchString(line) { + line = d.lineContinuationRegex.ReplaceAllString(line, "") + return line, false + } + return line, true +} + +// TODO: remove stripLeftWhitespace after deprecation period. It seems silly +// to preserve whitespace on continuation lines. Why is that done? 
+func processLine(d *Directive, token []byte, stripLeftWhitespace bool) ([]byte, error) { + if stripLeftWhitespace { + token = trimWhitespace(token) + } + return trimComments(token), d.possibleParserDirective(string(token)) } diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/parser_test.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/parser_test.go index ee9461c82..bb057ecab 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/parser_test.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/parser_test.go @@ -8,6 +8,9 @@ import ( "path/filepath" "runtime" "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const testDir = "testfiles" @@ -16,72 +19,47 @@ const testFileLineInfo = "testfile-line/Dockerfile" func getDirs(t *testing.T, dir string) []string { f, err := os.Open(dir) - if err != nil { - t.Fatal(err) - } - + require.NoError(t, err) defer f.Close() dirs, err := f.Readdirnames(0) - if err != nil { - t.Fatal(err) - } - + require.NoError(t, err) return dirs } -func TestTestNegative(t *testing.T) { +func TestParseErrorCases(t *testing.T) { for _, dir := range getDirs(t, negativeTestDir) { dockerfile := filepath.Join(negativeTestDir, dir, "Dockerfile") df, err := os.Open(dockerfile) - if err != nil { - t.Fatalf("Dockerfile missing for %s: %v", dir, err) - } + require.NoError(t, err, dockerfile) defer df.Close() - d := Directive{LookingForDirectives: true} - SetEscapeToken(DefaultEscapeToken, &d) - _, err = Parse(df, &d) - if err == nil { - t.Fatalf("No error parsing broken dockerfile for %s", dir) - } + _, err = Parse(df) + assert.Error(t, err, dockerfile) } } -func TestTestData(t *testing.T) { +func TestParseCases(t *testing.T) { for _, dir := range getDirs(t, testDir) { dockerfile := filepath.Join(testDir, dir, "Dockerfile") resultfile := filepath.Join(testDir, dir, "result") df, err := os.Open(dockerfile) - if err != nil { - 
t.Fatalf("Dockerfile missing for %s: %v", dir, err) - } + require.NoError(t, err, dockerfile) defer df.Close() - d := Directive{LookingForDirectives: true} - SetEscapeToken(DefaultEscapeToken, &d) - ast, err := Parse(df, &d) - if err != nil { - t.Fatalf("Error parsing %s's dockerfile: %v", dir, err) - } + result, err := Parse(df) + require.NoError(t, err, dockerfile) content, err := ioutil.ReadFile(resultfile) - if err != nil { - t.Fatalf("Error reading %s's result file: %v", dir, err) - } + require.NoError(t, err, resultfile) if runtime.GOOS == "windows" { // CRLF --> CR to match Unix behavior content = bytes.Replace(content, []byte{'\x0d', '\x0a'}, []byte{'\x0a'}, -1) } - - if ast.Dump()+"\n" != string(content) { - fmt.Fprintln(os.Stderr, "Result:\n"+ast.Dump()) - fmt.Fprintln(os.Stderr, "Expected:\n"+string(content)) - t.Fatalf("%s: AST dump of dockerfile does not match result", dir) - } + assert.Equal(t, result.AST.Dump()+"\n", string(content), "In "+dockerfile) } } @@ -122,52 +100,55 @@ func TestParseWords(t *testing.T) { } for _, test := range tests { - d := Directive{LookingForDirectives: true} - SetEscapeToken(DefaultEscapeToken, &d) - words := parseWords(test["input"][0], &d) - if len(words) != len(test["expect"]) { - t.Fatalf("length check failed. input: %v, expect: %q, output: %q", test["input"][0], test["expect"], words) - } - for i, word := range words { - if word != test["expect"][i] { - t.Fatalf("word check failed for word: %q. 
input: %q, expect: %q, output: %q", word, test["input"][0], test["expect"], words) - } - } + words := parseWords(test["input"][0], NewDefaultDirective()) + assert.Equal(t, test["expect"], words) } } -func TestLineInformation(t *testing.T) { +func TestParseIncludesLineNumbers(t *testing.T) { df, err := os.Open(testFileLineInfo) - if err != nil { - t.Fatalf("Dockerfile missing for %s: %v", testFileLineInfo, err) - } + require.NoError(t, err) defer df.Close() - d := Directive{LookingForDirectives: true} - SetEscapeToken(DefaultEscapeToken, &d) - ast, err := Parse(df, &d) - if err != nil { - t.Fatalf("Error parsing dockerfile %s: %v", testFileLineInfo, err) - } + result, err := Parse(df) + require.NoError(t, err) - if ast.StartLine != 5 || ast.EndLine != 31 { - fmt.Fprintf(os.Stderr, "Wrong root line information: expected(%d-%d), actual(%d-%d)\n", 5, 31, ast.StartLine, ast.EndLine) - t.Fatal("Root line information doesn't match result.") - } - if len(ast.Children) != 3 { - fmt.Fprintf(os.Stderr, "Wrong number of child: expected(%d), actual(%d)\n", 3, len(ast.Children)) - t.Fatalf("Root line information doesn't match result for %s", testFileLineInfo) - } + ast := result.AST + assert.Equal(t, 5, ast.StartLine) + assert.Equal(t, 31, ast.endLine) + assert.Len(t, ast.Children, 3) expected := [][]int{ {5, 5}, {11, 12}, {17, 31}, } for i, child := range ast.Children { - if child.StartLine != expected[i][0] || child.EndLine != expected[i][1] { - t.Logf("Wrong line information for child %d: expected(%d-%d), actual(%d-%d)\n", - i, expected[i][0], expected[i][1], child.StartLine, child.EndLine) - t.Fatal("Root line information doesn't match result.") - } + msg := fmt.Sprintf("Child %d", i) + assert.Equal(t, expected[i], []int{child.StartLine, child.endLine}, msg) } } + +func TestParseWarnsOnEmptyContinutationLine(t *testing.T) { + dockerfile := bytes.NewBufferString(` +FROM alpine:3.6 + +RUN something \ + + following \ + + more + +RUN another \ + + thing + `) + + result, err := 
Parse(dockerfile) + require.NoError(t, err) + warnings := result.Warnings + assert.Len(t, warnings, 3) + assert.Contains(t, warnings[0], "Empty continuation line found in") + assert.Contains(t, warnings[0], "RUN something following more") + assert.Contains(t, warnings[1], "RUN another thing") + assert.Contains(t, warnings[2], "will become errors in a future release") +} diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile index 00b444cba..035b4e8bb 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile @@ -1,5 +1,5 @@ FROM ubuntu:14.04 -MAINTAINER Seongyeol Lim +LABEL maintainer Seongyeol Lim COPY . /go/src/github.com/docker/docker ADD . / diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result index 85aee6401..d1f71ecc5 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result @@ -1,5 +1,5 @@ (from "ubuntu:14.04") -(maintainer "Seongyeol Lim ") +(label "maintainer" "Seongyeol Lim ") (copy "." "/go/src/github.com/docker/docker") (add "." 
"/") (add "null" "/") diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile index 0364ef9d9..9c0952acb 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile @@ -1,7 +1,7 @@ #escape=\ FROM brimstone/ubuntu:14.04 -MAINTAINER brimstone@the.narro.ws +LABEL maintainer brimstone@the.narro.ws # TORUN -v /var/run/docker.sock:/var/run/docker.sock diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/result b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/result index 227f748cd..3b45db62b 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/result +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/result @@ -1,5 +1,5 @@ (from "brimstone/ubuntu:14.04") -(maintainer "brimstone@the.narro.ws") +(label "maintainer" "brimstone@the.narro.ws") (env "GOPATH" "/go") (entrypoint "/usr/local/bin/consuldock") (run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continue-at-eof/Dockerfile 
b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continue-at-eof/Dockerfile new file mode 100644 index 000000000..a8ec369ad --- /dev/null +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continue-at-eof/Dockerfile @@ -0,0 +1,3 @@ +FROM alpine:3.5 + +RUN something \ \ No newline at end of file diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continue-at-eof/result b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continue-at-eof/result new file mode 100644 index 000000000..14e4f0932 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continue-at-eof/result @@ -0,0 +1,2 @@ +(from "alpine:3.5") +(run "something") diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/Dockerfile b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/Dockerfile index b191f7554..5153453ff 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/Dockerfile +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/Dockerfile @@ -24,7 +24,7 @@ # FROM ubuntu:14.04 -MAINTAINER Tianon Gravi (@tianon) +LABEL maintainer Tianon Gravi (@tianon) # Packaged dependencies RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/result b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/result index 85011ed41..0c2f22991 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/result +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/result @@ -1,5 +1,5 @@ (from "ubuntu:14.04") -(maintainer "Tianon Gravi (@tianon)") +(label "maintainer" "Tianon Gravi (@tianon)") (run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq 
\tapt-utils \taufs-tools \tautomake \tbtrfs-tools \tbuild-essential \tcurl \tdpkg-sig \tgit \tiptables \tlibapparmor-dev \tlibcap-dev \tmercurial \tpandoc \tparallel \treprepro \truby1.9.1 \truby1.9.1-dev \ts3cmd=1.1.0* \t--no-install-recommends") (run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103") (run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper") diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile index 6def7efdc..18e9a474f 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile @@ -4,6 +4,6 @@ # escape = ` FROM image -MAINTAINER foo@bar.com +LABEL maintainer foo@bar.com ENV GOPATH \ \go \ No newline at end of file diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/result b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/result index 21522a880..9ab119c41 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/result +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/result @@ -1,3 +1,3 @@ (from "image") -(maintainer "foo@bar.com") +(label "maintainer" "foo@bar.com") (env "GOPATH" "\\go") diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile index 08a8cc432..366ee3c36 100644 --- 
a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile @@ -2,6 +2,6 @@ # There is no white space line after the directives. This still succeeds, but goes # against best practices. FROM image -MAINTAINER foo@bar.com +LABEL maintainer foo@bar.com ENV GOPATH ` \go \ No newline at end of file diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/result b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/result index 21522a880..9ab119c41 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/result +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/result @@ -1,3 +1,3 @@ (from "image") -(maintainer "foo@bar.com") +(label "maintainer" "foo@bar.com") (env "GOPATH" "\\go") diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/Dockerfile b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/Dockerfile index ef30414a5..a515af152 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/Dockerfile +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/Dockerfile @@ -1,6 +1,6 @@ #escape = ` FROM image -MAINTAINER foo@bar.com +LABEL maintainer foo@bar.com ENV GOPATH ` \go \ No newline at end of file diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/result b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/result index 21522a880..9ab119c41 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/result +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/result @@ -1,3 +1,3 @@ (from "image") -(maintainer 
"foo@bar.com") +(label "maintainer" "foo@bar.com") (env "GOPATH" "\\go") diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/Dockerfile b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/Dockerfile index 1ffb17ef0..03062394a 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/Dockerfile +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/Dockerfile @@ -1,5 +1,5 @@ FROM ubuntu:14.04 -MAINTAINER Erik \\Hollensbe \" +LABEL maintainer Erik \\Hollensbe \" RUN apt-get \update && \ apt-get \"install znc -y diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/result b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/result index 13e409cb1..98e3e3b73 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/result +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/result @@ -1,5 +1,5 @@ (from "ubuntu:14.04") -(maintainer "Erik \\\\Hollensbe \\\"") +(label "maintainer" "Erik \\\\Hollensbe \\\"") (run "apt-get \\update && apt-get \\\"install znc -y") (add "\\conf\\\\\"" "/.znc") (run "foo bar baz") diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile index 35f9c24aa..728ec9a78 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile @@ -1,5 +1,5 @@ FROM ubuntu:14.04 -MAINTAINER James Turnbull "james@example.com" +LABEL maintainer James Turnbull "james@example.com" ENV REFRESHED_AT 2014-06-01 RUN apt-get update RUN apt-get -y install redis-server 
redis-tools diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result index b5ac6fe44..e774bc4f9 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result @@ -1,5 +1,5 @@ (from "ubuntu:14.04") -(maintainer "James Turnbull \"james@example.com\"") +(label "maintainer" "James Turnbull \"james@example.com\"") (env "REFRESHED_AT" "2014-06-01") (run "apt-get update") (run "apt-get -y install redis-server redis-tools") diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile index 188395fe8..27f28cb92 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile @@ -1,6 +1,6 @@ FROM busybox:buildroot-2014.02 -MAINTAINER docker +LABEL maintainer docker ONBUILD RUN ["echo", "test"] ONBUILD RUN echo test diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result index 6f7d57a39..8a499ff94 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result @@ -1,5 +1,5 @@ (from "busybox:buildroot-2014.02") -(maintainer "docker ") +(label "maintainer" 
"docker ") (onbuild (run "echo" "test")) (onbuild (run "echo test")) (onbuild (copy "." "/")) diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/Dockerfile b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/Dockerfile index bf8368e1c..0a35e2c6b 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/Dockerfile +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/Dockerfile @@ -1,5 +1,5 @@ FROM ubuntu:14.04 -MAINTAINER Erik Hollensbe +LABEL maintainer Erik Hollensbe RUN apt-get update && apt-get install nginx-full -y RUN rm -rf /etc/nginx diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/result b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/result index 56ddb6f25..a895fadbb 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/result +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/result @@ -1,5 +1,5 @@ (from "ubuntu:14.04") -(maintainer "Erik Hollensbe ") +(label "maintainer" "Erik Hollensbe ") (run "apt-get update && apt-get install nginx-full -y") (run "rm -rf /etc/nginx") (add "etc" "/etc/nginx") diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/Dockerfile b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/Dockerfile index 3a4da6e91..626b126d8 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/Dockerfile +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/Dockerfile @@ -1,5 +1,5 @@ FROM ubuntu:14.04 -MAINTAINER Erik Hollensbe +LABEL maintainer Erik Hollensbe RUN apt-get update && apt-get install znc -y ADD conf /.znc diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/result 
b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/result index 5493b255f..bfc7f6513 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/result +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/result @@ -1,5 +1,5 @@ (from "ubuntu:14.04") -(maintainer "Erik Hollensbe ") +(label "maintainer" "Erik Hollensbe ") (run "apt-get update && apt-get install znc -y") (add "conf" "/.znc") (cmd "/usr/bin/znc" "-f" "-r") diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go index 8e210ad6a..b72ac291d 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go @@ -1,31 +1,34 @@ package dockerfile -// This will take a single word and an array of env variables and +import ( + "bytes" + "strings" + "text/scanner" + "unicode" + + "github.com/pkg/errors" +) + +// ShellLex performs shell word splitting and variable expansion. +// +// ShellLex takes a string and an array of env variables and // process all quotes (" and ') as well as $xxx and ${xxx} env variable // tokens. Tries to mimic bash shell process. // It doesn't support all flavors of ${xx:...} formats but new ones can // be added by adding code to the "special ${} format processing" section - -import ( - "fmt" - "runtime" - "strings" - "text/scanner" - "unicode" -) - -type shellWord struct { - word string - scanner scanner.Scanner - envs []string - pos int +type ShellLex struct { escapeToken rune } +// NewShellLex creates a new ShellLex which uses escapeToken to escape quotes. +func NewShellLex(escapeToken rune) *ShellLex { + return &ShellLex{escapeToken: escapeToken} +} + // ProcessWord will use the 'env' list of environment variables, // and replace any env var references in 'word'. 
-func ProcessWord(word string, env []string, escapeToken rune) (string, error) { - word, _, err := process(word, env, escapeToken) +func (s *ShellLex) ProcessWord(word string, env []string) (string, error) { + word, _, err := s.process(word, env) return word, err } @@ -36,24 +39,32 @@ func ProcessWord(word string, env []string, escapeToken rune) (string, error) { // this splitting is done **after** the env var substitutions are done. // Note, each one is trimmed to remove leading and trailing spaces (unless // they are quoted", but ProcessWord retains spaces between words. -func ProcessWords(word string, env []string, escapeToken rune) ([]string, error) { - _, words, err := process(word, env, escapeToken) +func (s *ShellLex) ProcessWords(word string, env []string) ([]string, error) { + _, words, err := s.process(word, env) return words, err } -func process(word string, env []string, escapeToken rune) (string, []string, error) { +func (s *ShellLex) process(word string, env []string) (string, []string, error) { sw := &shellWord{ - word: word, envs: env, - pos: 0, - escapeToken: escapeToken, + escapeToken: s.escapeToken, } sw.scanner.Init(strings.NewReader(word)) - return sw.process() + return sw.process(word) } -func (sw *shellWord) process() (string, []string, error) { - return sw.processStopOn(scanner.EOF) +type shellWord struct { + scanner scanner.Scanner + envs []string + escapeToken rune +} + +func (sw *shellWord) process(source string) (string, []string, error) { + word, words, err := sw.processStopOn(scanner.EOF) + if err != nil { + err = errors.Wrapf(err, "failed to process %q", source) + } + return word, words, err } type wordsStruct struct { @@ -106,7 +117,7 @@ func (w *wordsStruct) getWords() []string { // Process the word, starting at 'pos', and stop when we get to the // end of the word or the 'stopChar' character func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) { - var result string + var result bytes.Buffer var words 
wordsStruct var charFuncMapping = map[rune]func() (string, error){ @@ -128,7 +139,7 @@ func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) { if err != nil { return "", []string{}, err } - result += tmp + result.WriteString(tmp) if ch == rune('$') { words.addString(tmp) @@ -141,7 +152,6 @@ func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) { if ch == sw.escapeToken { // '\' (default escape token, but ` allowed) escapes, except end of line - ch = sw.scanner.Next() if ch == scanner.EOF { @@ -153,135 +163,153 @@ func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) { words.addChar(ch) } - result += string(ch) + result.WriteRune(ch) } } - return result, words.getWords(), nil + return result.String(), words.getWords(), nil } func (sw *shellWord) processSingleQuote() (string, error) { // All chars between single quotes are taken as-is // Note, you can't escape ' - var result string + // + // From the "sh" man page: + // Single Quotes + // Enclosing characters in single quotes preserves the literal meaning of + // all the characters (except single quotes, making it impossible to put + // single-quotes in a single-quoted string). 
+ + var result bytes.Buffer sw.scanner.Next() for { ch := sw.scanner.Next() - if ch == '\'' || ch == scanner.EOF { - break + switch ch { + case scanner.EOF: + return "", errors.New("unexpected end of statement while looking for matching single-quote") + case '\'': + return result.String(), nil } - result += string(ch) + result.WriteRune(ch) } - - return result, nil } func (sw *shellWord) processDoubleQuote() (string, error) { // All chars up to the next " are taken as-is, even ', except any $ chars // But you can escape " with a \ (or ` if escape token set accordingly) - var result string + // + // From the "sh" man page: + // Double Quotes + // Enclosing characters within double quotes preserves the literal meaning + // of all characters except dollarsign ($), backquote (`), and backslash + // (\). The backslash inside double quotes is historically weird, and + // serves to quote only the following characters: + // $ ` " \ . + // Otherwise it remains literal. + + var result bytes.Buffer sw.scanner.Next() - for sw.scanner.Peek() != scanner.EOF { - ch := sw.scanner.Peek() - if ch == '"' { + for { + switch sw.scanner.Peek() { + case scanner.EOF: + return "", errors.New("unexpected end of statement while looking for matching double-quote") + case '"': sw.scanner.Next() - break - } - if ch == '$' { - tmp, err := sw.processDollar() + return result.String(), nil + case '$': + value, err := sw.processDollar() if err != nil { return "", err } - result += tmp - } else { - ch = sw.scanner.Next() + result.WriteString(value) + default: + ch := sw.scanner.Next() if ch == sw.escapeToken { - chNext := sw.scanner.Peek() - - if chNext == scanner.EOF { + switch sw.scanner.Peek() { + case scanner.EOF: // Ignore \ at end of word continue - } - - if chNext == '"' || chNext == '$' { - // \" and \$ can be escaped, all other \'s are left as-is + case '"', '$', sw.escapeToken: + // These chars can be escaped, all other \'s are left as-is + // Note: for now don't do anything special with ` 
chars. + // Not sure what to do with them anyway since we're not going + // to execute the text in there (not now anyway). ch = sw.scanner.Next() } } - result += string(ch) + result.WriteRune(ch) } } - - return result, nil } func (sw *shellWord) processDollar() (string, error) { sw.scanner.Next() - ch := sw.scanner.Peek() - if ch == '{' { - sw.scanner.Next() - name := sw.processName() - ch = sw.scanner.Peek() - if ch == '}' { - // Normal ${xx} case - sw.scanner.Next() - return sw.getEnv(name), nil - } - if ch == ':' { - // Special ${xx:...} format processing - // Yes it allows for recursive $'s in the ... spot - sw.scanner.Next() // skip over : - modifier := sw.scanner.Next() - - word, _, err := sw.processStopOn('}') - if err != nil { - return "", err - } - - // Grab the current value of the variable in question so we - // can use to to determine what to do based on the modifier - newValue := sw.getEnv(name) - - switch modifier { - case '+': - if newValue != "" { - newValue = word - } - return newValue, nil - - case '-': - if newValue == "" { - newValue = word - } - return newValue, nil - - default: - return "", fmt.Errorf("Unsupported modifier (%c) in substitution: %s", modifier, sw.word) - } - } - return "", fmt.Errorf("Missing ':' in substitution: %s", sw.word) - } // $xxx case - name := sw.processName() - if name == "" { - return "$", nil + if sw.scanner.Peek() != '{' { + name := sw.processName() + if name == "" { + return "$", nil + } + return sw.getEnv(name), nil } - return sw.getEnv(name), nil + + sw.scanner.Next() + name := sw.processName() + ch := sw.scanner.Peek() + if ch == '}' { + // Normal ${xx} case + sw.scanner.Next() + return sw.getEnv(name), nil + } + if ch == ':' { + // Special ${xx:...} format processing + // Yes it allows for recursive $'s in the ... 
spot + + sw.scanner.Next() // skip over : + modifier := sw.scanner.Next() + + word, _, err := sw.processStopOn('}') + if err != nil { + return "", err + } + + // Grab the current value of the variable in question so we + // can use to to determine what to do based on the modifier + newValue := sw.getEnv(name) + + switch modifier { + case '+': + if newValue != "" { + newValue = word + } + return newValue, nil + + case '-': + if newValue == "" { + newValue = word + } + return newValue, nil + + default: + return "", errors.Errorf("unsupported modifier (%c) in substitution", modifier) + } + } + return "", errors.Errorf("missing ':' in substitution") } func (sw *shellWord) processName() string { // Read in a name (alphanumeric or _) // If it starts with a numeric then just return $# - var name string + var name bytes.Buffer for sw.scanner.Peek() != scanner.EOF { ch := sw.scanner.Peek() - if len(name) == 0 && unicode.IsDigit(ch) { + if name.Len() == 0 && unicode.IsDigit(ch) { ch = sw.scanner.Next() return string(ch) } @@ -289,24 +317,17 @@ func (sw *shellWord) processName() string { break } ch = sw.scanner.Next() - name += string(ch) + name.WriteRune(ch) } - return name + return name.String() } func (sw *shellWord) getEnv(name string) string { - if runtime.GOOS == "windows" { - // Case-insensitive environment variables on Windows - name = strings.ToUpper(name) - } for _, env := range sw.envs { i := strings.Index(env, "=") if i < 0 { - if runtime.GOOS == "windows" { - env = strings.ToUpper(env) - } - if name == env { + if equalEnvKeys(name, env) { // Should probably never get here, but just in case treat // it like "var" and "var=" are the same return "" @@ -314,10 +335,7 @@ func (sw *shellWord) getEnv(name string) string { continue } compareName := env[:i] - if runtime.GOOS == "windows" { - compareName = strings.ToUpper(compareName) - } - if name != compareName { + if !equalEnvKeys(name, compareName) { continue } return env[i+1:] diff --git 
a/fn/vendor/github.com/docker/docker/builder/dockerfile/shell_parser_test.go b/fn/vendor/github.com/docker/docker/builder/dockerfile/shell_parser_test.go index 6c3fff5b6..c4f7e0efd 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/shell_parser_test.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/shell_parser_test.go @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/docker/docker/pkg/testutil/assert" + "github.com/stretchr/testify/assert" ) func TestShellParser4EnvVars(t *testing.T) { @@ -15,9 +15,10 @@ func TestShellParser4EnvVars(t *testing.T) { lineCount := 0 file, err := os.Open(fn) - assert.NilError(t, err) + assert.NoError(t, err) defer file.Close() + shlex := NewShellLex('\\') scanner := bufio.NewScanner(file) envs := []string{"PWD=/home", "SHELL=bash", "KOREAN=한국어"} for scanner.Scan() { @@ -36,7 +37,7 @@ func TestShellParser4EnvVars(t *testing.T) { } words := strings.Split(line, "|") - assert.Equal(t, len(words), 3) + assert.Len(t, words, 3) platform := strings.TrimSpace(words[0]) source := strings.TrimSpace(words[1]) @@ -49,11 +50,11 @@ func TestShellParser4EnvVars(t *testing.T) { if ((platform == "W" || platform == "A") && runtime.GOOS == "windows") || ((platform == "U" || platform == "A") && runtime.GOOS != "windows") { - newWord, err := ProcessWord(source, envs, '\\') + newWord, err := shlex.ProcessWord(source, envs) if expected == "error" { - assert.Error(t, err, "") + assert.Error(t, err) } else { - assert.NilError(t, err) + assert.NoError(t, err) assert.Equal(t, newWord, expected) } } @@ -69,10 +70,13 @@ func TestShellParser4Words(t *testing.T) { } defer file.Close() + shlex := NewShellLex('\\') envs := []string{} scanner := bufio.NewScanner(file) + lineNum := 0 for scanner.Scan() { line := scanner.Text() + lineNum = lineNum + 1 if strings.HasPrefix(line, "#") { continue @@ -86,34 +90,30 @@ func TestShellParser4Words(t *testing.T) { words := strings.Split(line, "|") if len(words) != 2 { - t.Fatalf("Error in 
'%s' - should be exactly one | in: %q", fn, line) + t.Fatalf("Error in '%s'(line %d) - should be exactly one | in: %q", fn, lineNum, line) } test := strings.TrimSpace(words[0]) expected := strings.Split(strings.TrimLeft(words[1], " "), ",") - result, err := ProcessWords(test, envs, '\\') + result, err := shlex.ProcessWords(test, envs) if err != nil { result = []string{"error"} } if len(result) != len(expected) { - t.Fatalf("Error. %q was suppose to result in %q, but got %q instead", test, expected, result) + t.Fatalf("Error on line %d. %q was suppose to result in %q, but got %q instead", lineNum, test, expected, result) } for i, w := range expected { if w != result[i] { - t.Fatalf("Error. %q was suppose to result in %q, but got %q instead", test, expected, result) + t.Fatalf("Error on line %d. %q was suppose to result in %q, but got %q instead", lineNum, test, expected, result) } } } } func TestGetEnv(t *testing.T) { - sw := &shellWord{ - word: "", - envs: nil, - pos: 0, - } + sw := &shellWord{envs: nil} sw.envs = []string{} if sw.getEnv("foo") != "" { diff --git a/fn/vendor/github.com/docker/docker/builder/dockerfile/wordsTest b/fn/vendor/github.com/docker/docker/builder/dockerfile/wordsTest index fa916c67f..1fd9f1943 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerfile/wordsTest +++ b/fn/vendor/github.com/docker/docker/builder/dockerfile/wordsTest @@ -21,5 +21,10 @@ hel"lo${trailing}" | helloab c hello" there " | hello there hello there | hello,there hello\ there | hello there -hello" there | hello there +hello" there | error hello\" there | hello",there +hello"\\there" | hello\there +hello"\there" | hello\there +hello'\\there' | hello\\there +hello'\there' | hello\there +hello'$there' | hello$there diff --git a/fn/vendor/github.com/docker/docker/builder/dockerignore.go b/fn/vendor/github.com/docker/docker/builder/dockerignore.go deleted file mode 100644 index 3da791336..000000000 --- a/fn/vendor/github.com/docker/docker/builder/dockerignore.go +++ 
/dev/null @@ -1,48 +0,0 @@ -package builder - -import ( - "os" - - "github.com/docker/docker/builder/dockerignore" - "github.com/docker/docker/pkg/fileutils" -) - -// DockerIgnoreContext wraps a ModifiableContext to add a method -// for handling the .dockerignore file at the root of the context. -type DockerIgnoreContext struct { - ModifiableContext -} - -// Process reads the .dockerignore file at the root of the embedded context. -// If .dockerignore does not exist in the context, then nil is returned. -// -// It can take a list of files to be removed after .dockerignore is removed. -// This is used for server-side implementations of builders that need to send -// the .dockerignore file as well as the special files specified in filesToRemove, -// but expect them to be excluded from the context after they were processed. -// -// For example, server-side Dockerfile builders are expected to pass in the name -// of the Dockerfile to be removed after it was parsed. -// -// TODO: Don't require a ModifiableContext (use Context instead) and don't remove -// files, instead handle a list of files to be excluded from the context. -func (c DockerIgnoreContext) Process(filesToRemove []string) error { - f, err := c.Open(".dockerignore") - // Note that a missing .dockerignore file isn't treated as an error - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - excludes, _ := dockerignore.ReadAll(f) - f.Close() - filesToRemove = append([]string{".dockerignore"}, filesToRemove...) 
- for _, fileToRemove := range filesToRemove { - rm, _ := fileutils.Matches(fileToRemove, excludes) - if rm { - c.Remove(fileToRemove) - } - } - return nil -} diff --git a/fn/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go b/fn/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go index 2db67be79..cc2238133 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go @@ -38,8 +38,23 @@ func ReadAll(reader io.Reader) ([]string, error) { if pattern == "" { continue } - pattern = filepath.Clean(pattern) - pattern = filepath.ToSlash(pattern) + // normalize absolute paths to paths relative to the context + // (taking care of '!' prefix) + invert := pattern[0] == '!' + if invert { + pattern = strings.TrimSpace(pattern[1:]) + } + if len(pattern) > 0 { + pattern = filepath.Clean(pattern) + pattern = filepath.ToSlash(pattern) + if len(pattern) > 1 && pattern[0] == '/' { + pattern = pattern[1:] + } + } + if invert { + pattern = "!" + pattern + } + excludes = append(excludes, pattern) } if err := scanner.Err(); err != nil { diff --git a/fn/vendor/github.com/docker/docker/builder/dockerignore/dockerignore_test.go b/fn/vendor/github.com/docker/docker/builder/dockerignore/dockerignore_test.go index 948f9d886..bda38745c 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerignore/dockerignore_test.go +++ b/fn/vendor/github.com/docker/docker/builder/dockerignore/dockerignore_test.go @@ -25,7 +25,7 @@ func TestReadAll(t *testing.T) { } diName := filepath.Join(tmpDir, ".dockerignore") - content := fmt.Sprintf("test1\n/test2\n/a/file/here\n\nlastfile") + content := fmt.Sprintf("test1\n/test2\n/a/file/here\n\nlastfile\n# this is a comment\n! /inverted/abs/path\n!\n! 
\n") err = ioutil.WriteFile(diName, []byte(content), 0777) if err != nil { t.Fatal(err) @@ -42,16 +42,28 @@ func TestReadAll(t *testing.T) { t.Fatal(err) } + if len(di) != 7 { + t.Fatalf("Expected 5 entries, got %v", len(di)) + } if di[0] != "test1" { t.Fatal("First element is not test1") } - if di[1] != "/test2" { - t.Fatal("Second element is not /test2") + if di[1] != "test2" { // according to https://docs.docker.com/engine/reference/builder/#dockerignore-file, /foo/bar should be treated as foo/bar + t.Fatal("Second element is not test2") } - if di[2] != "/a/file/here" { - t.Fatal("Third element is not /a/file/here") + if di[2] != "a/file/here" { // according to https://docs.docker.com/engine/reference/builder/#dockerignore-file, /foo/bar should be treated as foo/bar + t.Fatal("Third element is not a/file/here") } if di[3] != "lastfile" { t.Fatal("Fourth element is not lastfile") } + if di[4] != "!inverted/abs/path" { + t.Fatal("Fifth element is not !inverted/abs/path") + } + if di[5] != "!" { + t.Fatalf("Sixth element is not !, but %s", di[5]) + } + if di[6] != "!" 
{ + t.Fatalf("Sixth element is not !, but %s", di[6]) + } } diff --git a/fn/vendor/github.com/docker/docker/builder/fscache/fscache.go b/fn/vendor/github.com/docker/docker/builder/fscache/fscache.go new file mode 100644 index 000000000..802db96de --- /dev/null +++ b/fn/vendor/github.com/docker/docker/builder/fscache/fscache.go @@ -0,0 +1,602 @@ +package fscache + +import ( + "encoding/json" + "os" + "path/filepath" + "sort" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/boltdb/bolt" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/client/session/filesync" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil" + "golang.org/x/net/context" + "golang.org/x/sync/singleflight" +) + +const dbFile = "fscache.db" +const cacheKey = "cache" +const metaKey = "meta" + +// Backend is a backing implementation for FSCache +type Backend interface { + Get(id string) (string, error) + Remove(id string) error +} + +// FSCache allows syncing remote resources to cached snapshots +type FSCache struct { + opt Opt + transports map[string]Transport + mu sync.Mutex + g singleflight.Group + store *fsCacheStore +} + +// Opt defines options for initializing FSCache +type Opt struct { + Backend Backend + Root string // for storing local metadata + GCPolicy GCPolicy +} + +// GCPolicy defines policy for garbage collection +type GCPolicy struct { + MaxSize uint64 + MaxKeepDuration time.Duration +} + +// NewFSCache returns new FSCache object +func NewFSCache(opt Opt) (*FSCache, error) { + store, err := newFSCacheStore(opt) + if err != nil { + return nil, err + } + return &FSCache{ + store: store, + opt: opt, + transports: make(map[string]Transport), + }, nil +} + +// Transport defines a method for syncing remote data to FSCache +type Transport interface { + Copy(ctx context.Context, id RemoteIdentifier, dest string, cs 
filesync.CacheUpdater) error +} + +// RemoteIdentifier identifies a transfer request +type RemoteIdentifier interface { + Key() string + SharedKey() string + Transport() string +} + +// RegisterTransport registers a new transport method +func (fsc *FSCache) RegisterTransport(id string, transport Transport) error { + fsc.mu.Lock() + defer fsc.mu.Unlock() + if _, ok := fsc.transports[id]; ok { + return errors.Errorf("transport %v already exists", id) + } + fsc.transports[id] = transport + return nil +} + +// SyncFrom returns a source based on a remote identifier +func (fsc *FSCache) SyncFrom(ctx context.Context, id RemoteIdentifier) (builder.Source, error) { // cacheOpt + trasportID := id.Transport() + fsc.mu.Lock() + transport, ok := fsc.transports[id.Transport()] + if !ok { + fsc.mu.Unlock() + return nil, errors.Errorf("invalid transport %s", trasportID) + } + + logrus.Debugf("SyncFrom %s %s", id.Key(), id.SharedKey()) + fsc.mu.Unlock() + sourceRef, err, _ := fsc.g.Do(id.Key(), func() (interface{}, error) { + var sourceRef *cachedSourceRef + sourceRef, err := fsc.store.Get(id.Key()) + if err == nil { + return sourceRef, nil + } + + // check for unused shared cache + sharedKey := id.SharedKey() + if sharedKey != "" { + r, err := fsc.store.Rebase(sharedKey, id.Key()) + if err == nil { + sourceRef = r + } + } + + if sourceRef == nil { + var err error + sourceRef, err = fsc.store.New(id.Key(), sharedKey) + if err != nil { + return nil, errors.Wrap(err, "failed to create remote context") + } + } + + if err := syncFrom(ctx, sourceRef, transport, id); err != nil { + sourceRef.Release() + return nil, err + } + if err := sourceRef.resetSize(-1); err != nil { + return nil, err + } + return sourceRef, nil + }) + if err != nil { + return nil, err + } + ref := sourceRef.(*cachedSourceRef) + if ref.src == nil { // failsafe + return nil, errors.Errorf("invalid empty pull") + } + wc := &wrappedContext{Source: ref.src, closer: func() error { + ref.Release() + return nil + }} + 
return wc, nil +} + +// DiskUsage reports how much data is allocated by the cache +func (fsc *FSCache) DiskUsage() (int64, error) { + return fsc.store.DiskUsage() +} + +// Prune allows manually cleaning up the cache +func (fsc *FSCache) Prune() (uint64, error) { + return fsc.store.Prune() +} + +// Close stops the gc and closes the persistent db +func (fsc *FSCache) Close() error { + return fsc.store.Close() +} + +func syncFrom(ctx context.Context, cs *cachedSourceRef, transport Transport, id RemoteIdentifier) (retErr error) { + src := cs.src + if src == nil { + src = remotecontext.NewCachableSource(cs.Dir()) + } + + if !cs.cached { + if err := cs.storage.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(id.Key())) + dt := b.Get([]byte(cacheKey)) + if dt != nil { + if err := src.UnmarshalBinary(dt); err != nil { + return err + } + } else { + return errors.Wrap(src.Scan(), "failed to scan cache records") + } + return nil + }); err != nil { + return err + } + } + + dc := &detectChanges{f: src.HandleChange} + + // todo: probably send a bucket to `Copy` and let it return source + // but need to make sure that tx is safe + if err := transport.Copy(ctx, id, cs.Dir(), dc); err != nil { + return errors.Wrapf(err, "failed to copy to %s", cs.Dir()) + } + + if !dc.supported { + if err := src.Scan(); err != nil { + return errors.Wrap(err, "failed to scan cache records after transfer") + } + } + cs.cached = true + cs.src = src + return cs.storage.db.Update(func(tx *bolt.Tx) error { + dt, err := src.MarshalBinary() + if err != nil { + return err + } + b := tx.Bucket([]byte(id.Key())) + return b.Put([]byte(cacheKey), dt) + }) +} + +type fsCacheStore struct { + root string + mu sync.Mutex + sources map[string]*cachedSource + db *bolt.DB + fs Backend + gcTimer *time.Timer + gcPolicy GCPolicy +} + +// CachePolicy defines policy for keeping a resource in cache +type CachePolicy struct { + Priority int + LastUsed time.Time +} + +func defaultCachePolicy() CachePolicy { + return 
CachePolicy{Priority: 10, LastUsed: time.Now()} +} + +func newFSCacheStore(opt Opt) (*fsCacheStore, error) { + if err := os.MkdirAll(opt.Root, 0700); err != nil { + return nil, err + } + p := filepath.Join(opt.Root, dbFile) + db, err := bolt.Open(p, 0600, nil) + if err != nil { + return nil, errors.Wrap(err, "failed to open database file %s") + } + s := &fsCacheStore{db: db, sources: make(map[string]*cachedSource), fs: opt.Backend, gcPolicy: opt.GCPolicy} + db.View(func(tx *bolt.Tx) error { + return tx.ForEach(func(name []byte, b *bolt.Bucket) error { + dt := b.Get([]byte(metaKey)) + if dt == nil { + return nil + } + var sm sourceMeta + if err := json.Unmarshal(dt, &sm); err != nil { + return err + } + dir, err := s.fs.Get(sm.BackendID) + if err != nil { + return err // TODO: handle gracefully + } + source := &cachedSource{ + refs: make(map[*cachedSourceRef]struct{}), + id: string(name), + dir: dir, + sourceMeta: sm, + storage: s, + } + s.sources[string(name)] = source + return nil + }) + }) + + s.gcTimer = s.startPeriodicGC(5 * time.Minute) + return s, nil +} + +func (s *fsCacheStore) startPeriodicGC(interval time.Duration) *time.Timer { + var t *time.Timer + t = time.AfterFunc(interval, func() { + if err := s.GC(); err != nil { + logrus.Errorf("build gc error: %v", err) + } + t.Reset(interval) + }) + return t +} + +func (s *fsCacheStore) Close() error { + s.gcTimer.Stop() + return s.db.Close() +} + +func (s *fsCacheStore) New(id, sharedKey string) (*cachedSourceRef, error) { + s.mu.Lock() + defer s.mu.Unlock() + var ret *cachedSource + if err := s.db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte(id)) + if err != nil { + return err + } + backendID := stringid.GenerateRandomID() + dir, err := s.fs.Get(backendID) + if err != nil { + return err + } + source := &cachedSource{ + refs: make(map[*cachedSourceRef]struct{}), + id: id, + dir: dir, + sourceMeta: sourceMeta{ + BackendID: backendID, + SharedKey: sharedKey, + CachePolicy: 
defaultCachePolicy(), + }, + storage: s, + } + dt, err := json.Marshal(source.sourceMeta) + if err != nil { + return err + } + if err := b.Put([]byte(metaKey), dt); err != nil { + return err + } + s.sources[id] = source + ret = source + return nil + }); err != nil { + return nil, err + } + return ret.getRef(), nil +} + +func (s *fsCacheStore) Rebase(sharedKey, newid string) (*cachedSourceRef, error) { + s.mu.Lock() + defer s.mu.Unlock() + var ret *cachedSource + for id, snap := range s.sources { + if snap.SharedKey == sharedKey && len(snap.refs) == 0 { + if err := s.db.Update(func(tx *bolt.Tx) error { + if err := tx.DeleteBucket([]byte(id)); err != nil { + return err + } + b, err := tx.CreateBucket([]byte(newid)) + if err != nil { + return err + } + snap.id = newid + snap.CachePolicy = defaultCachePolicy() + dt, err := json.Marshal(snap.sourceMeta) + if err != nil { + return err + } + if err := b.Put([]byte(metaKey), dt); err != nil { + return err + } + delete(s.sources, id) + s.sources[newid] = snap + return nil + }); err != nil { + return nil, err + } + ret = snap + break + } + } + if ret == nil { + return nil, errors.Errorf("no candidate for rebase") + } + return ret.getRef(), nil +} + +func (s *fsCacheStore) Get(id string) (*cachedSourceRef, error) { + s.mu.Lock() + defer s.mu.Unlock() + src, ok := s.sources[id] + if !ok { + return nil, errors.Errorf("not found") + } + return src.getRef(), nil +} + +// DiskUsage reports how much data is allocated by the cache +func (s *fsCacheStore) DiskUsage() (int64, error) { + s.mu.Lock() + defer s.mu.Unlock() + var size int64 + + for _, snap := range s.sources { + if len(snap.refs) == 0 { + ss, err := snap.getSize() + if err != nil { + return 0, err + } + size += ss + } + } + return size, nil +} + +// Prune allows manually cleaning up the cache +func (s *fsCacheStore) Prune() (uint64, error) { + s.mu.Lock() + defer s.mu.Unlock() + var size uint64 + + for id, snap := range s.sources { + if len(snap.refs) == 0 { + ss, err := 
snap.getSize() + if err != nil { + return size, err + } + if err := s.delete(id); err != nil { + return size, errors.Wrapf(err, "failed to delete %s", id) + } + size += uint64(ss) + } + } + return size, nil +} + +// GC runs a garbage collector on FSCache +func (s *fsCacheStore) GC() error { + s.mu.Lock() + defer s.mu.Unlock() + var size uint64 + + cutoff := time.Now().Add(-s.gcPolicy.MaxKeepDuration) + var blacklist []*cachedSource + + for id, snap := range s.sources { + if len(snap.refs) == 0 { + if cutoff.After(snap.CachePolicy.LastUsed) { + if err := s.delete(id); err != nil { + return errors.Wrapf(err, "failed to delete %s", id) + } + } else { + ss, err := snap.getSize() + if err != nil { + return err + } + size += uint64(ss) + blacklist = append(blacklist, snap) + } + } + } + + sort.Sort(sortableCacheSources(blacklist)) + for _, snap := range blacklist { + if size <= s.gcPolicy.MaxSize { + break + } + ss, err := snap.getSize() + if err != nil { + return err + } + if err := s.delete(snap.id); err != nil { + return errors.Wrapf(err, "failed to delete %s", snap.id) + } + size -= uint64(ss) + } + return nil +} + +// keep mu while calling this +func (s *fsCacheStore) delete(id string) error { + src, ok := s.sources[id] + if !ok { + return nil + } + if len(src.refs) > 0 { + return errors.Errorf("can't delete %s because it has active references", id) + } + delete(s.sources, id) + if err := s.db.Update(func(tx *bolt.Tx) error { + return tx.DeleteBucket([]byte(id)) + }); err != nil { + return err + } + if err := s.fs.Remove(src.BackendID); err != nil { + return err + } + return nil +} + +type sourceMeta struct { + SharedKey string + BackendID string + CachePolicy CachePolicy + Size int64 +} + +type cachedSource struct { + sourceMeta + refs map[*cachedSourceRef]struct{} + id string + dir string + src *remotecontext.CachableSource + storage *fsCacheStore + cached bool // keep track if cache is up to date +} + +type cachedSourceRef struct { + *cachedSource +} + +func (cs 
*cachedSource) Dir() string { + return cs.dir +} + +// hold storage lock before calling +func (cs *cachedSource) getRef() *cachedSourceRef { + ref := &cachedSourceRef{cachedSource: cs} + cs.refs[ref] = struct{}{} + return ref +} + +// hold storage lock before calling +func (cs *cachedSource) getSize() (int64, error) { + if cs.sourceMeta.Size < 0 { + ss, err := directory.Size(cs.dir) + if err != nil { + return 0, err + } + if err := cs.resetSize(ss); err != nil { + return 0, err + } + return ss, nil + } + return cs.sourceMeta.Size, nil +} + +func (cs *cachedSource) resetSize(val int64) error { + cs.sourceMeta.Size = val + return cs.saveMeta() +} +func (cs *cachedSource) saveMeta() error { + return cs.storage.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(cs.id)) + dt, err := json.Marshal(cs.sourceMeta) + if err != nil { + return err + } + return b.Put([]byte(metaKey), dt) + }) +} + +func (csr *cachedSourceRef) Release() error { + csr.cachedSource.storage.mu.Lock() + defer csr.cachedSource.storage.mu.Unlock() + delete(csr.cachedSource.refs, csr) + if len(csr.cachedSource.refs) == 0 { + go csr.cachedSource.storage.GC() + } + return nil +} + +type detectChanges struct { + f fsutil.ChangeFunc + supported bool +} + +func (dc *detectChanges) HandleChange(kind fsutil.ChangeKind, path string, fi os.FileInfo, err error) error { + if dc == nil { + return nil + } + return dc.f(kind, path, fi, err) +} + +func (dc *detectChanges) MarkSupported(v bool) { + if dc == nil { + return + } + dc.supported = v +} + +type wrappedContext struct { + builder.Source + closer func() error +} + +func (wc *wrappedContext) Close() error { + if err := wc.Source.Close(); err != nil { + return err + } + return wc.closer() +} + +type sortableCacheSources []*cachedSource + +// Len is the number of elements in the collection. +func (s sortableCacheSources) Len() int { + return len(s) +} + +// Less reports whether the element with +// index i should sort before the element with index j. 
+func (s sortableCacheSources) Less(i, j int) bool { + return s[i].CachePolicy.LastUsed.Before(s[j].CachePolicy.LastUsed) +} + +// Swap swaps the elements with indexes i and j. +func (s sortableCacheSources) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} diff --git a/fn/vendor/github.com/docker/docker/builder/fscache/fscache_test.go b/fn/vendor/github.com/docker/docker/builder/fscache/fscache_test.go new file mode 100644 index 000000000..c7c0531f2 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/builder/fscache/fscache_test.go @@ -0,0 +1,131 @@ +package fscache + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "github.com/docker/docker/client/session/filesync" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestFSCache(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "fscache") + assert.Nil(t, err) + defer os.RemoveAll(tmpDir) + + backend := NewNaiveCacheBackend(filepath.Join(tmpDir, "backend")) + + opt := Opt{ + Root: tmpDir, + Backend: backend, + GCPolicy: GCPolicy{MaxSize: 15, MaxKeepDuration: time.Hour}, + } + + fscache, err := NewFSCache(opt) + assert.Nil(t, err) + + defer fscache.Close() + + err = fscache.RegisterTransport("test", &testTransport{}) + assert.Nil(t, err) + + src1, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data", "bar"}) + assert.Nil(t, err) + + dt, err := ioutil.ReadFile(filepath.Join(src1.Root(), "foo")) + assert.Nil(t, err) + assert.Equal(t, string(dt), "data") + + // same id doesn't recalculate anything + src2, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data2", "bar"}) + assert.Nil(t, err) + assert.Equal(t, src1.Root(), src2.Root()) + + dt, err = ioutil.ReadFile(filepath.Join(src1.Root(), "foo")) + assert.Nil(t, err) + assert.Equal(t, string(dt), "data") + assert.Nil(t, src2.Close()) + + src3, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo2", "data2", "bar"}) + assert.Nil(t, err) + assert.NotEqual(t, src1.Root(), 
src3.Root()) + + dt, err = ioutil.ReadFile(filepath.Join(src3.Root(), "foo2")) + assert.Nil(t, err) + assert.Equal(t, string(dt), "data2") + + s, err := fscache.DiskUsage() + assert.Nil(t, err) + assert.Equal(t, s, int64(0)) + + assert.Nil(t, src3.Close()) + + s, err = fscache.DiskUsage() + assert.Nil(t, err) + assert.Equal(t, s, int64(5)) + + // new upload with the same shared key should overwrite + src4, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo3", "data3", "bar"}) + assert.Nil(t, err) + assert.NotEqual(t, src1.Root(), src3.Root()) + + dt, err = ioutil.ReadFile(filepath.Join(src3.Root(), "foo3")) + assert.Nil(t, err) + assert.Equal(t, string(dt), "data3") + assert.Equal(t, src4.Root(), src3.Root()) + assert.Nil(t, src4.Close()) + + s, err = fscache.DiskUsage() + assert.Nil(t, err) + assert.Equal(t, s, int64(10)) + + // this one goes over the GC limit + src5, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo4", "datadata", "baz"}) + assert.Nil(t, err) + assert.Nil(t, src5.Close()) + + // GC happens async + time.Sleep(100 * time.Millisecond) + + // only last insertion after GC + s, err = fscache.DiskUsage() + assert.Nil(t, err) + assert.Equal(t, s, int64(8)) + + // prune deletes everything + released, err := fscache.Prune() + assert.Nil(t, err) + assert.Equal(t, released, uint64(8)) + + s, err = fscache.DiskUsage() + assert.Nil(t, err) + assert.Equal(t, s, int64(0)) +} + +type testTransport struct { +} + +func (t *testTransport) Copy(ctx context.Context, id RemoteIdentifier, dest string, cs filesync.CacheUpdater) error { + testid := id.(*testIdentifier) + return ioutil.WriteFile(filepath.Join(dest, testid.filename), []byte(testid.data), 0600) +} + +type testIdentifier struct { + filename string + data string + sharedKey string +} + +func (t *testIdentifier) Key() string { + return t.filename +} +func (t *testIdentifier) SharedKey() string { + return t.sharedKey +} +func (t *testIdentifier) Transport() string { + return "test" +} diff 
--git a/fn/vendor/github.com/docker/docker/builder/fscache/naivedriver.go b/fn/vendor/github.com/docker/docker/builder/fscache/naivedriver.go new file mode 100644 index 000000000..f40ee570f --- /dev/null +++ b/fn/vendor/github.com/docker/docker/builder/fscache/naivedriver.go @@ -0,0 +1,28 @@ +package fscache + +import ( + "os" + "path/filepath" + + "github.com/pkg/errors" +) + +// NewNaiveCacheBackend is a basic backend implementation for fscache +func NewNaiveCacheBackend(root string) Backend { + return &naiveCacheBackend{root: root} +} + +type naiveCacheBackend struct { + root string +} + +func (tcb *naiveCacheBackend) Get(id string) (string, error) { + d := filepath.Join(tcb.root, id) + if err := os.MkdirAll(d, 0700); err != nil { + return "", errors.Wrapf(err, "failed to create tmp dir for %s", d) + } + return d, nil +} +func (tcb *naiveCacheBackend) Remove(id string) error { + return errors.WithStack(os.RemoveAll(filepath.Join(tcb.root, id))) +} diff --git a/fn/vendor/github.com/docker/docker/builder/remotecontext/archive.go b/fn/vendor/github.com/docker/docker/builder/remotecontext/archive.go new file mode 100644 index 000000000..f48cafecd --- /dev/null +++ b/fn/vendor/github.com/docker/docker/builder/remotecontext/archive.go @@ -0,0 +1,128 @@ +package remotecontext + +import ( + "io" + "os" + "path/filepath" + + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/tarsum" + "github.com/pkg/errors" +) + +type archiveContext struct { + root string + sums tarsum.FileInfoSums +} + +func (c *archiveContext) Close() error { + return os.RemoveAll(c.root) +} + +func convertPathError(err error, cleanpath string) error { + if err, ok := err.(*os.PathError); ok { + err.Path = cleanpath + return err + } + return err +} + +type modifiableContext interface { + builder.Source + // Remove 
deletes the entry specified by `path`. + // It is usual for directory entries to delete all its subentries. + Remove(path string) error +} + +// FromArchive returns a build source from a tar stream. +// +// It extracts the tar stream to a temporary folder that is deleted as soon as +// the Context is closed. +// As the extraction happens, a tarsum is calculated for every file, and the set of +// all those sums then becomes the source of truth for all operations on this Context. +// +// Closing tarStream has to be done by the caller. +func FromArchive(tarStream io.Reader) (builder.Source, error) { + root, err := ioutils.TempDir("", "docker-builder") + if err != nil { + return nil, err + } + + tsc := &archiveContext{root: root} + + // Make sure we clean-up upon error. In the happy case the caller + // is expected to manage the clean-up + defer func() { + if err != nil { + tsc.Close() + } + }() + + decompressedStream, err := archive.DecompressStream(tarStream) + if err != nil { + return nil, err + } + + sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1) + if err != nil { + return nil, err + } + + err = chrootarchive.Untar(sum, root, nil) + if err != nil { + return nil, err + } + + tsc.sums = sum.GetSums() + + return tsc, nil +} + +func (c *archiveContext) Root() string { + return c.root +} + +func (c *archiveContext) Remove(path string) error { + _, fullpath, err := normalize(path, c.root) + if err != nil { + return err + } + return os.RemoveAll(fullpath) +} + +func (c *archiveContext) Hash(path string) (string, error) { + cleanpath, fullpath, err := normalize(path, c.root) + if err != nil { + return "", err + } + + rel, err := filepath.Rel(c.root, fullpath) + if err != nil { + return "", convertPathError(err, cleanpath) + } + + // Use the checksum of the followed path(not the possible symlink) because + // this is the file that is actually copied. 
+ if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil { + return tsInfo.Sum(), nil + } + // We set sum to path by default for the case where GetFile returns nil. + // The usual case is if relative path is empty. + return path, nil // backwards compat TODO: see if really needed +} + +func normalize(path, root string) (cleanPath, fullPath string, err error) { + cleanPath = filepath.Clean(string(os.PathSeparator) + path)[1:] + fullPath, err = symlink.FollowSymlinkInScope(filepath.Join(root, path), root) + if err != nil { + return "", "", errors.Wrapf(err, "forbidden path outside the build context: %s (%s)", path, cleanPath) + } + if _, err := os.Lstat(fullPath); err != nil { + return "", "", errors.WithStack(convertPathError(err, path)) + } + return +} diff --git a/fn/vendor/github.com/docker/docker/builder/remotecontext/detect.go b/fn/vendor/github.com/docker/docker/builder/remotecontext/detect.go new file mode 100644 index 000000000..434573680 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/builder/remotecontext/detect.go @@ -0,0 +1,184 @@ +package remotecontext + +import ( + "bufio" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/builder/dockerignore" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/urlutil" + "github.com/pkg/errors" +) + +// ClientSessionRemote is identifier for client-session context transport +const ClientSessionRemote = "client-session" + +// Detect returns a context and dockerfile from remote location or local +// archive. progressReader is only used if remoteURL is actually a URL +// (not empty, and not a Git endpoint). 
+func Detect(config backend.BuildConfig) (remote builder.Source, dockerfile *parser.Result, err error) { + remoteURL := config.Options.RemoteContext + dockerfilePath := config.Options.Dockerfile + + switch { + case remoteURL == "": + remote, dockerfile, err = newArchiveRemote(config.Source, dockerfilePath) + case remoteURL == ClientSessionRemote: + res, err := parser.Parse(config.Source) + if err != nil { + return nil, nil, err + } + return nil, res, nil + case urlutil.IsGitURL(remoteURL): + remote, dockerfile, err = newGitRemote(remoteURL, dockerfilePath) + case urlutil.IsURL(remoteURL): + remote, dockerfile, err = newURLRemote(remoteURL, dockerfilePath, config.ProgressWriter.ProgressReaderFunc) + default: + err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL) + } + return +} + +func newArchiveRemote(rc io.ReadCloser, dockerfilePath string) (builder.Source, *parser.Result, error) { + defer rc.Close() + c, err := FromArchive(rc) + if err != nil { + return nil, nil, err + } + + return withDockerfileFromContext(c.(modifiableContext), dockerfilePath) +} + +func withDockerfileFromContext(c modifiableContext, dockerfilePath string) (builder.Source, *parser.Result, error) { + df, err := openAt(c, dockerfilePath) + if err != nil { + if os.IsNotExist(err) { + if dockerfilePath == builder.DefaultDockerfileName { + lowercase := strings.ToLower(dockerfilePath) + if _, err := StatAt(c, lowercase); err == nil { + return withDockerfileFromContext(c, lowercase) + } + } + return nil, nil, errors.Errorf("Cannot locate specified Dockerfile: %s", dockerfilePath) // backwards compatible error + } + c.Close() + return nil, nil, err + } + + res, err := readAndParseDockerfile(dockerfilePath, df) + if err != nil { + return nil, nil, err + } + + df.Close() + + if err := removeDockerfile(c, dockerfilePath); err != nil { + c.Close() + return nil, nil, err + } + + return c, res, nil +} + +func newGitRemote(gitURL string, dockerfilePath string) (builder.Source, 
*parser.Result, error) { + c, err := MakeGitContext(gitURL) // TODO: change this to NewLazySource + if err != nil { + return nil, nil, err + } + return withDockerfileFromContext(c.(modifiableContext), dockerfilePath) +} + +func newURLRemote(url string, dockerfilePath string, progressReader func(in io.ReadCloser) io.ReadCloser) (builder.Source, *parser.Result, error) { + var dockerfile io.ReadCloser + dockerfileFoundErr := errors.New("found-dockerfile") + c, err := MakeRemoteContext(url, map[string]func(io.ReadCloser) (io.ReadCloser, error){ + mimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { + dockerfile = rc + return nil, dockerfileFoundErr + }, + // fallback handler (tar context) + "": func(rc io.ReadCloser) (io.ReadCloser, error) { + return progressReader(rc), nil + }, + }) + switch { + case err == dockerfileFoundErr: + res, err := parser.Parse(dockerfile) + return nil, res, err + case err != nil: + return nil, nil, err + } + return withDockerfileFromContext(c.(modifiableContext), dockerfilePath) +} + +func removeDockerfile(c modifiableContext, filesToRemove ...string) error { + f, err := openAt(c, ".dockerignore") + // Note that a missing .dockerignore file isn't treated as an error + switch { + case os.IsNotExist(err): + return nil + case err != nil: + return err + } + excludes, err := dockerignore.ReadAll(f) + if err != nil { + f.Close() + return err + } + f.Close() + filesToRemove = append([]string{".dockerignore"}, filesToRemove...) 
+ for _, fileToRemove := range filesToRemove { + if rm, _ := fileutils.Matches(fileToRemove, excludes); rm { + if err := c.Remove(fileToRemove); err != nil { + logrus.Errorf("failed to remove %s: %v", fileToRemove, err) + } + } + } + return nil +} + +func readAndParseDockerfile(name string, rc io.Reader) (*parser.Result, error) { + br := bufio.NewReader(rc) + if _, err := br.Peek(1); err != nil { + if err == io.EOF { + return nil, errors.Errorf("the Dockerfile (%s) cannot be empty", name) + } + return nil, errors.Wrap(err, "unexpected error reading Dockerfile") + } + return parser.Parse(br) +} + +func openAt(remote builder.Source, path string) (*os.File, error) { + fullPath, err := FullPath(remote, path) + if err != nil { + return nil, err + } + return os.Open(fullPath) +} + +// StatAt is a helper for calling Stat on a path from a source +func StatAt(remote builder.Source, path string) (os.FileInfo, error) { + fullPath, err := FullPath(remote, path) + if err != nil { + return nil, err + } + return os.Stat(fullPath) +} + +// FullPath is a helper for getting a full path for a path from a source +func FullPath(remote builder.Source, path string) (string, error) { + fullPath, err := symlink.FollowSymlinkInScope(filepath.Join(remote.Root(), path), remote.Root()) + if err != nil { + return "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullPath) // backwards compat with old error + } + return fullPath, nil +} diff --git a/fn/vendor/github.com/docker/docker/builder/dockerignore_test.go b/fn/vendor/github.com/docker/docker/builder/remotecontext/detect_test.go similarity index 62% rename from fn/vendor/github.com/docker/docker/builder/dockerignore_test.go rename to fn/vendor/github.com/docker/docker/builder/remotecontext/detect_test.go index 3c0ceda4c..6b47ac227 100644 --- a/fn/vendor/github.com/docker/docker/builder/dockerignore_test.go +++ b/fn/vendor/github.com/docker/docker/builder/remotecontext/detect_test.go @@ -1,11 +1,21 @@ -package 
builder +package remotecontext import ( + "errors" "io/ioutil" "log" "os" + "path/filepath" "sort" "testing" + + "github.com/docker/docker/builder" +) + +const ( + dockerfileContents = "FROM busybox" + dockerignoreFilename = ".dockerignore" + testfileContents = "test" ) const shouldStayFilename = "should_stay" @@ -43,10 +53,9 @@ func checkDirectory(t *testing.T, dir string, expectedFiles []string) { } func executeProcess(t *testing.T, contextDir string) { - modifiableCtx := &tarSumContext{root: contextDir} - ctx := DockerIgnoreContext{ModifiableContext: modifiableCtx} + modifiableCtx := &stubRemote{root: contextDir} - err := ctx.Process([]string{DefaultDockerfileName}) + err := removeDockerfile(modifiableCtx, builder.DefaultDockerfileName) if err != nil { t.Fatalf("Error when executing Process: %s", err) @@ -59,7 +68,7 @@ func TestProcessShouldRemoveDockerfileDockerignore(t *testing.T) { createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) createTestTempFile(t, contextDir, dockerignoreFilename, "Dockerfile\n.dockerignore", 0777) - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, dockerfileContents, 0777) executeProcess(t, contextDir) @@ -72,11 +81,11 @@ func TestProcessNoDockerignore(t *testing.T) { defer cleanup() createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, dockerfileContents, 0777) executeProcess(t, contextDir) - checkDirectory(t, contextDir, []string{shouldStayFilename, DefaultDockerfileName}) + checkDirectory(t, contextDir, []string{shouldStayFilename, builder.DefaultDockerfileName}) } @@ -85,11 +94,30 @@ func TestProcessShouldLeaveAllFiles(t *testing.T) { defer cleanup() createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) - 
createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, dockerfileContents, 0777) createTestTempFile(t, contextDir, dockerignoreFilename, "input1\ninput2", 0777) executeProcess(t, contextDir) - checkDirectory(t, contextDir, []string{shouldStayFilename, DefaultDockerfileName, dockerignoreFilename}) + checkDirectory(t, contextDir, []string{shouldStayFilename, builder.DefaultDockerfileName, dockerignoreFilename}) } + +// TODO: remove after moving to a separate pkg +type stubRemote struct { + root string +} + +func (r *stubRemote) Hash(path string) (string, error) { + return "", errors.New("not implemented") +} + +func (r *stubRemote) Root() string { + return r.root +} +func (r *stubRemote) Close() error { + return errors.New("not implemented") +} +func (r *stubRemote) Remove(p string) error { + return os.Remove(filepath.Join(r.root, p)) +} diff --git a/fn/vendor/github.com/docker/docker/builder/remotecontext/filehash.go b/fn/vendor/github.com/docker/docker/builder/remotecontext/filehash.go index a9b324272..417230297 100644 --- a/fn/vendor/github.com/docker/docker/builder/remotecontext/filehash.go +++ b/fn/vendor/github.com/docker/docker/builder/remotecontext/filehash.go @@ -12,10 +12,21 @@ import ( // NewFileHash returns new hash that is used for the builder cache keys func NewFileHash(path, name string, fi os.FileInfo) (hash.Hash, error) { - hdr, err := archive.FileInfoHeader(path, name, fi) + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return nil, err + } + } + hdr, err := archive.FileInfoHeader(name, fi, link) if err != nil { return nil, err } + if err := archive.ReadSecurityXattrToTarHeader(path, hdr); err != nil { + return nil, err + } tsh := &tarsumHash{hdr: hdr, Hash: sha256.New()} tsh.Reset() // initialize header return tsh, nil diff --git 
a/fn/vendor/github.com/docker/docker/builder/remotecontext/generate.go b/fn/vendor/github.com/docker/docker/builder/remotecontext/generate.go new file mode 100644 index 000000000..0b52d4992 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/builder/remotecontext/generate.go @@ -0,0 +1,3 @@ +package remotecontext + +//go:generate protoc --gogoslick_out=. tarsum.proto diff --git a/fn/vendor/github.com/docker/docker/builder/git.go b/fn/vendor/github.com/docker/docker/builder/remotecontext/git.go similarity index 61% rename from fn/vendor/github.com/docker/docker/builder/git.go rename to fn/vendor/github.com/docker/docker/builder/remotecontext/git.go index 74df24461..158bb5ad4 100644 --- a/fn/vendor/github.com/docker/docker/builder/git.go +++ b/fn/vendor/github.com/docker/docker/builder/remotecontext/git.go @@ -1,15 +1,16 @@ -package builder +package remotecontext import ( "os" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/remotecontext/git" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/gitutils" ) // MakeGitContext returns a Context from gitURL that is cloned in a temporary directory. 
-func MakeGitContext(gitURL string) (ModifiableContext, error) { - root, err := gitutils.Clone(gitURL) +func MakeGitContext(gitURL string) (builder.Source, error) { + root, err := git.Clone(gitURL) if err != nil { return nil, err } @@ -24,5 +25,5 @@ func MakeGitContext(gitURL string) (ModifiableContext, error) { c.Close() os.RemoveAll(root) }() - return MakeTarSumContext(c) + return FromArchive(c) } diff --git a/fn/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils.go b/fn/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils.go new file mode 100644 index 000000000..b94d462cd --- /dev/null +++ b/fn/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils.go @@ -0,0 +1,159 @@ +package git + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/urlutil" + "github.com/pkg/errors" +) + +type gitRepo struct { + remote string + ref string + subdir string +} + +// Clone clones a repository into a newly created directory which +// will be under "docker-build-git" +func Clone(remoteURL string) (string, error) { + repo, err := parseRemoteURL(remoteURL) + + if err != nil { + return "", err + } + + fetch := fetchArgs(repo.remote, repo.ref) + + root, err := ioutil.TempDir("", "docker-build-git") + if err != nil { + return "", err + } + + if out, err := gitWithinDir(root, "init"); err != nil { + return "", errors.Wrapf(err, "failed to init repo at %s: %s", root, out) + } + + // Add origin remote for compatibility with previous implementation that + // used "git clone" and also to make sure local refs are created for branches + if out, err := gitWithinDir(root, "remote", "add", "origin", repo.remote); err != nil { + return "", errors.Wrapf(err, "failed add origin repo at %s: %s", repo.remote, out) + } + + if output, err := gitWithinDir(root, fetch...); err != nil { + return "", errors.Wrapf(err, "error fetching: %s", 
output) + } + + return checkoutGit(root, repo.ref, repo.subdir) +} + +func parseRemoteURL(remoteURL string) (gitRepo, error) { + repo := gitRepo{} + + if !isGitTransport(remoteURL) { + remoteURL = "https://" + remoteURL + } + + var fragment string + if strings.HasPrefix(remoteURL, "git@") { + // git@.. is not an URL, so cannot be parsed as URL + parts := strings.SplitN(remoteURL, "#", 2) + + repo.remote = parts[0] + if len(parts) == 2 { + fragment = parts[1] + } + repo.ref, repo.subdir = getRefAndSubdir(fragment) + } else { + u, err := url.Parse(remoteURL) + if err != nil { + return repo, err + } + + repo.ref, repo.subdir = getRefAndSubdir(u.Fragment) + u.Fragment = "" + repo.remote = u.String() + } + return repo, nil +} + +func getRefAndSubdir(fragment string) (ref string, subdir string) { + refAndDir := strings.SplitN(fragment, ":", 2) + ref = "master" + if len(refAndDir[0]) != 0 { + ref = refAndDir[0] + } + if len(refAndDir) > 1 && len(refAndDir[1]) != 0 { + subdir = refAndDir[1] + } + return +} + +func fetchArgs(remoteURL string, ref string) []string { + args := []string{"fetch", "--recurse-submodules=yes"} + shallow := true + + if urlutil.IsURL(remoteURL) { + res, err := http.Head(fmt.Sprintf("%s/info/refs?service=git-upload-pack", remoteURL)) + if err != nil || res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" { + shallow = false + } + } + + if shallow { + args = append(args, "--depth", "1") + } + + return append(args, "origin", ref) +} + +func checkoutGit(root, ref, subdir string) (string, error) { + // Try checking out by ref name first. 
This will work on branches and sets + // .git/HEAD to the current branch name + if output, err := gitWithinDir(root, "checkout", ref); err != nil { + // If checking out by branch name fails check out the last fetched ref + if _, err2 := gitWithinDir(root, "checkout", "FETCH_HEAD"); err2 != nil { + return "", errors.Wrapf(err, "error checking out %s: %s", ref, output) + } + } + + if subdir != "" { + newCtx, err := symlink.FollowSymlinkInScope(filepath.Join(root, subdir), root) + if err != nil { + return "", errors.Wrapf(err, "error setting git context, %q not within git root", subdir) + } + + fi, err := os.Stat(newCtx) + if err != nil { + return "", err + } + if !fi.IsDir() { + return "", errors.Errorf("error setting git context, not a directory: %s", newCtx) + } + root = newCtx + } + + return root, nil +} + +func gitWithinDir(dir string, args ...string) ([]byte, error) { + a := []string{"--work-tree", dir, "--git-dir", filepath.Join(dir, ".git")} + return git(append(a, args...)...) +} + +func git(args ...string) ([]byte, error) { + return exec.Command("git", args...).CombinedOutput() +} + +// isGitTransport returns true if the provided str is a git transport by inspecting +// the prefix of the string for known protocols used in git. 
+func isGitTransport(str string) bool { + return urlutil.IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") +} diff --git a/fn/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils_test.go b/fn/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils_test.go new file mode 100644 index 000000000..c638a498f --- /dev/null +++ b/fn/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils_test.go @@ -0,0 +1,238 @@ +package git + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestParseRemoteURL(t *testing.T) { + dir, err := parseRemoteURL("git://github.com/user/repo.git") + require.NoError(t, err) + assert.NotEmpty(t, dir) + assert.Equal(t, gitRepo{"git://github.com/user/repo.git", "master", ""}, dir) + + dir, err = parseRemoteURL("git://github.com/user/repo.git#mybranch:mydir/mysubdir/") + require.NoError(t, err) + assert.NotEmpty(t, dir) + assert.Equal(t, gitRepo{"git://github.com/user/repo.git", "mybranch", "mydir/mysubdir/"}, dir) + + dir, err = parseRemoteURL("https://github.com/user/repo.git") + require.NoError(t, err) + assert.NotEmpty(t, dir) + assert.Equal(t, gitRepo{"https://github.com/user/repo.git", "master", ""}, dir) + + dir, err = parseRemoteURL("https://github.com/user/repo.git#mybranch:mydir/mysubdir/") + require.NoError(t, err) + assert.NotEmpty(t, dir) + assert.Equal(t, gitRepo{"https://github.com/user/repo.git", "mybranch", "mydir/mysubdir/"}, dir) + + dir, err = parseRemoteURL("git@github.com:user/repo.git") + require.NoError(t, err) + assert.NotEmpty(t, dir) + assert.Equal(t, gitRepo{"git@github.com:user/repo.git", "master", ""}, dir) + + dir, err = parseRemoteURL("git@github.com:user/repo.git#mybranch:mydir/mysubdir/") + require.NoError(t, err) + assert.NotEmpty(t, dir) + assert.Equal(t, 
gitRepo{"git@github.com:user/repo.git", "mybranch", "mydir/mysubdir/"}, dir) +} + +func TestCloneArgsSmartHttp(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/repo.git" + + mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { + q := r.URL.Query().Get("service") + w.Header().Set("Content-Type", fmt.Sprintf("application/x-%s-advertisement", q)) + }) + + args := fetchArgs(serverURL.String(), "master") + exp := []string{"fetch", "--recurse-submodules=yes", "--depth", "1", "origin", "master"} + assert.Equal(t, exp, args) +} + +func TestCloneArgsDumbHttp(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/repo.git" + + mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + }) + + args := fetchArgs(serverURL.String(), "master") + exp := []string{"fetch", "--recurse-submodules=yes", "origin", "master"} + assert.Equal(t, exp, args) +} + +func TestCloneArgsGit(t *testing.T) { + args := fetchArgs("git://github.com/docker/docker", "master") + exp := []string{"fetch", "--recurse-submodules=yes", "--depth", "1", "origin", "master"} + assert.Equal(t, exp, args) +} + +func gitGetConfig(name string) string { + b, err := git([]string{"config", "--get", name}...) + if err != nil { + // since we are interested in empty or non empty string, + // we can safely ignore the err here. 
+ return "" + } + return strings.TrimSpace(string(b)) +} + +func TestCheckoutGit(t *testing.T) { + root, err := ioutil.TempDir("", "docker-build-git-checkout") + require.NoError(t, err) + defer os.RemoveAll(root) + + autocrlf := gitGetConfig("core.autocrlf") + if !(autocrlf == "true" || autocrlf == "false" || + autocrlf == "input" || autocrlf == "") { + t.Logf("unknown core.autocrlf value: \"%s\"", autocrlf) + } + eol := "\n" + if autocrlf == "true" { + eol = "\r\n" + } + + gitDir := filepath.Join(root, "repo") + _, err = git("init", gitDir) + require.NoError(t, err) + + _, err = gitWithinDir(gitDir, "config", "user.email", "test@docker.com") + require.NoError(t, err) + + _, err = gitWithinDir(gitDir, "config", "user.name", "Docker test") + require.NoError(t, err) + + err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch"), 0644) + require.NoError(t, err) + + subDir := filepath.Join(gitDir, "subdir") + require.NoError(t, os.Mkdir(subDir, 0755)) + + err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 5000"), 0644) + require.NoError(t, err) + + if runtime.GOOS != "windows" { + if err = os.Symlink("../subdir", filepath.Join(gitDir, "parentlink")); err != nil { + t.Fatal(err) + } + + if err = os.Symlink("/subdir", filepath.Join(gitDir, "absolutelink")); err != nil { + t.Fatal(err) + } + } + + _, err = gitWithinDir(gitDir, "add", "-A") + require.NoError(t, err) + + _, err = gitWithinDir(gitDir, "commit", "-am", "First commit") + require.NoError(t, err) + + _, err = gitWithinDir(gitDir, "checkout", "-b", "test") + require.NoError(t, err) + + err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 3000"), 0644) + require.NoError(t, err) + + err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM busybox\nEXPOSE 5000"), 0644) + require.NoError(t, err) + + _, err = gitWithinDir(gitDir, "add", "-A") + require.NoError(t, err) + + _, err = gitWithinDir(gitDir, 
"commit", "-am", "Branch commit") + require.NoError(t, err) + + _, err = gitWithinDir(gitDir, "checkout", "master") + require.NoError(t, err) + + type singleCase struct { + frag string + exp string + fail bool + } + + cases := []singleCase{ + {"", "FROM scratch", false}, + {"master", "FROM scratch", false}, + {":subdir", "FROM scratch" + eol + "EXPOSE 5000", false}, + {":nosubdir", "", true}, // missing directory error + {":Dockerfile", "", true}, // not a directory error + {"master:nosubdir", "", true}, + {"master:subdir", "FROM scratch" + eol + "EXPOSE 5000", false}, + {"master:../subdir", "", true}, + {"test", "FROM scratch" + eol + "EXPOSE 3000", false}, + {"test:", "FROM scratch" + eol + "EXPOSE 3000", false}, + {"test:subdir", "FROM busybox" + eol + "EXPOSE 5000", false}, + } + + if runtime.GOOS != "windows" { + // Windows GIT (2.7.1 x64) does not support parentlink/absolutelink. Sample output below + // git --work-tree .\repo --git-dir .\repo\.git add -A + // error: readlink("absolutelink"): Function not implemented + // error: unable to index file absolutelink + // fatal: adding files failed + cases = append(cases, singleCase{frag: "master:absolutelink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false}) + cases = append(cases, singleCase{frag: "master:parentlink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false}) + } + + for _, c := range cases { + ref, subdir := getRefAndSubdir(c.frag) + r, err := checkoutGit(gitDir, ref, subdir) + + if c.fail { + assert.Error(t, err) + continue + } + + b, err := ioutil.ReadFile(filepath.Join(r, "Dockerfile")) + require.NoError(t, err) + assert.Equal(t, c.exp, string(b)) + } +} + +func TestValidGitTransport(t *testing.T) { + gitUrls := []string{ + "git://github.com/docker/docker", + "git@github.com:docker/docker.git", + "git@bitbucket.org:atlassianlabs/atlassian-docker.git", + "https://github.com/docker/docker.git", + "http://github.com/docker/docker.git", + "http://github.com/docker/docker.git#branch", + 
"http://github.com/docker/docker.git#:dir", + } + incompleteGitUrls := []string{ + "github.com/docker/docker", + } + + for _, url := range gitUrls { + if !isGitTransport(url) { + t.Fatalf("%q should be detected as valid Git prefix", url) + } + } + + for _, url := range incompleteGitUrls { + if isGitTransport(url) { + t.Fatalf("%q should not be detected as valid Git prefix", url) + } + } +} diff --git a/fn/vendor/github.com/docker/docker/builder/remotecontext/lazycontext.go b/fn/vendor/github.com/docker/docker/builder/remotecontext/lazycontext.go index 1f89c8d88..b29c413fa 100644 --- a/fn/vendor/github.com/docker/docker/builder/remotecontext/lazycontext.go +++ b/fn/vendor/github.com/docker/docker/builder/remotecontext/lazycontext.go @@ -2,7 +2,6 @@ package remotecontext import ( "encoding/hex" - "io" "os" "path/filepath" "runtime" @@ -10,108 +9,60 @@ import ( "github.com/docker/docker/builder" "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/symlink" "github.com/pkg/errors" ) -// NewLazyContext creates a new LazyContext. LazyContext defines a hashed build +// NewLazySource creates a new LazyContext. LazyContext defines a hashed build // context based on a root directory. Individual files are hashed first time // they are asked. It is not safe to call methods of LazyContext concurrently. 
-func NewLazyContext(root string) (builder.Context, error) { - return &lazyContext{ +func NewLazySource(root string) (builder.Source, error) { + return &lazySource{ root: root, sums: make(map[string]string), }, nil } -type lazyContext struct { +type lazySource struct { root string sums map[string]string } -func (c *lazyContext) Close() error { +func (c *lazySource) Root() string { + return c.root +} + +func (c *lazySource) Close() error { return nil } -func (c *lazyContext) Open(path string) (io.ReadCloser, error) { - cleanPath, fullPath, err := c.normalize(path) +func (c *lazySource) Hash(path string) (string, error) { + cleanPath, fullPath, err := normalize(path, c.root) if err != nil { - return nil, err + return "", err } - r, err := os.Open(fullPath) + fi, err := os.Lstat(fullPath) if err != nil { - return nil, errors.WithStack(convertPathError(err, cleanPath)) - } - return r, nil -} - -func (c *lazyContext) Stat(path string) (string, builder.FileInfo, error) { - // TODO: although stat returns builder.FileInfo it builder.Context actually requires Hashed - cleanPath, fullPath, err := c.normalize(path) - if err != nil { - return "", nil, err + return "", errors.WithStack(err) } - st, err := os.Lstat(fullPath) + relPath, err := Rel(c.root, fullPath) if err != nil { - return "", nil, errors.WithStack(convertPathError(err, cleanPath)) - } - - relPath, err := rel(c.root, fullPath) - if err != nil { - return "", nil, errors.WithStack(convertPathError(err, cleanPath)) + return "", errors.WithStack(convertPathError(err, cleanPath)) } sum, ok := c.sums[relPath] if !ok { - sum, err = c.prepareHash(relPath, st) + sum, err = c.prepareHash(relPath, fi) if err != nil { - return "", nil, err + return "", err } } - fi := &builder.HashedFileInfo{ - builder.PathFileInfo{st, fullPath, filepath.Base(cleanPath)}, - sum, - } - return relPath, fi, nil + return sum, nil } -func (c *lazyContext) Walk(root string, walkFn builder.WalkFunc) error { - _, fullPath, err := c.normalize(root) - 
if err != nil { - return err - } - return filepath.Walk(fullPath, func(fullPath string, fi os.FileInfo, err error) error { - relPath, err := rel(c.root, fullPath) - if err != nil { - return errors.WithStack(err) - } - if relPath == "." { - return nil - } - - sum, ok := c.sums[relPath] - if !ok { - sum, err = c.prepareHash(relPath, fi) - if err != nil { - return err - } - } - - hfi := &builder.HashedFileInfo{ - builder.PathFileInfo{FileInfo: fi, FilePath: fullPath}, - sum, - } - if err := walkFn(relPath, hfi, nil); err != nil { - return err - } - return nil - }) -} - -func (c *lazyContext) prepareHash(relPath string, fi os.FileInfo) (string, error) { +func (c *lazySource) prepareHash(relPath string, fi os.FileInfo) (string, error) { p := filepath.Join(c.root, relPath) h, err := NewFileHash(p, relPath, fi) if err != nil { @@ -132,25 +83,9 @@ func (c *lazyContext) prepareHash(relPath string, fi os.FileInfo) (string, error return sum, nil } -func (c *lazyContext) normalize(path string) (cleanPath, fullPath string, err error) { - // todo: combine these helpers with tarsum after they are moved to same package - cleanPath = filepath.Clean(string(os.PathSeparator) + path)[1:] - fullPath, err = symlink.FollowSymlinkInScope(filepath.Join(c.root, path), c.root) - if err != nil { - return "", "", errors.Wrapf(err, "forbidden path outside the build context: %s (%s)", path, fullPath) - } - return -} - -func convertPathError(err error, cleanpath string) error { - if err, ok := err.(*os.PathError); ok { - err.Path = cleanpath - return err - } - return err -} - -func rel(basepath, targpath string) (string, error) { +// Rel makes a path relative to base path. Same as `filepath.Rel` but can also +// handle UUID paths in windows. 
+func Rel(basepath, targpath string) (string, error) { // filepath.Rel can't handle UUID paths in windows if runtime.GOOS == "windows" { pfx := basepath + `\` diff --git a/fn/vendor/github.com/docker/docker/pkg/httputils/mimetype.go b/fn/vendor/github.com/docker/docker/builder/remotecontext/mimetype.go similarity index 73% rename from fn/vendor/github.com/docker/docker/pkg/httputils/mimetype.go rename to fn/vendor/github.com/docker/docker/builder/remotecontext/mimetype.go index abef9e9e8..083d60997 100644 --- a/fn/vendor/github.com/docker/docker/pkg/httputils/mimetype.go +++ b/fn/vendor/github.com/docker/docker/builder/remotecontext/mimetype.go @@ -1,29 +1,27 @@ -package httputils +package remotecontext import ( "mime" "net/http" ) -// MimeTypes stores the MIME content type. -var MimeTypes = struct { +// mimeTypes stores the MIME content type. +var mimeTypes = struct { TextPlain string OctetStream string }{"text/plain", "application/octet-stream"} -// DetectContentType returns a best guess representation of the MIME +// detectContentType returns a best guess representation of the MIME // content type for the bytes at c. The value detected by // http.DetectContentType is guaranteed not be nil, defaulting to // application/octet-stream when a better guess cannot be made. The // result of this detection is then run through mime.ParseMediaType() // which separates the actual MIME string from any parameters. 
-func DetectContentType(c []byte) (string, map[string]string, error) { - +func detectContentType(c []byte) (string, map[string]string, error) { ct := http.DetectContentType(c) contentType, args, err := mime.ParseMediaType(ct) if err != nil { return "", nil, err } - return contentType, args, nil } diff --git a/fn/vendor/github.com/docker/docker/builder/remotecontext/mimetype_test.go b/fn/vendor/github.com/docker/docker/builder/remotecontext/mimetype_test.go new file mode 100644 index 000000000..8c00ec286 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/builder/remotecontext/mimetype_test.go @@ -0,0 +1,16 @@ +package remotecontext + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDetectContentType(t *testing.T) { + input := []byte("That is just a plain text") + + contentType, _, err := detectContentType(input) + require.NoError(t, err) + assert.Equal(t, "text/plain", contentType) +} diff --git a/fn/vendor/github.com/docker/docker/builder/remote.go b/fn/vendor/github.com/docker/docker/builder/remotecontext/remote.go similarity index 63% rename from fn/vendor/github.com/docker/docker/builder/remote.go rename to fn/vendor/github.com/docker/docker/builder/remotecontext/remote.go index b79030161..4afd516be 100644 --- a/fn/vendor/github.com/docker/docker/builder/remote.go +++ b/fn/vendor/github.com/docker/docker/builder/remotecontext/remote.go @@ -1,16 +1,15 @@ -package builder +package remotecontext import ( "bytes" - "errors" "fmt" "io" "io/ioutil" + "net/http" "regexp" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/httputils" - "github.com/docker/docker/pkg/urlutil" + "github.com/docker/docker/builder" + "github.com/pkg/errors" ) // When downloading remote contexts, limit the amount (in bytes) @@ -29,9 +28,9 @@ var mimeRe = regexp.MustCompile(acceptableRemoteMIME) // // If a match is found, then the body is sent to the contentType handler and a (potentially compressed) 
tar stream is expected // to be returned. If no match is found, it is assumed the body is a tar stream (compressed or not). -// In either case, an (assumed) tar stream is passed to MakeTarSumContext whose result is returned. -func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.ReadCloser) (io.ReadCloser, error)) (ModifiableContext, error) { - f, err := httputils.Download(remoteURL) +// In either case, an (assumed) tar stream is passed to FromArchive whose result is returned. +func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.ReadCloser) (io.ReadCloser, error)) (builder.Source, error) { + f, err := GetWithStatusError(remoteURL) if err != nil { return nil, fmt.Errorf("error downloading remote context %s: %v", remoteURL, err) } @@ -64,47 +63,25 @@ func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io. // Pass through - this is a pre-packaged context, presumably // with a Dockerfile with the right name inside it. - return MakeTarSumContext(contextReader) + return FromArchive(contextReader) } -// DetectContextFromRemoteURL returns a context and in certain cases the name of the dockerfile to be used -// irrespective of user input. -// progressReader is only used if remoteURL is actually a URL (not empty, and not a Git endpoint). 
-func DetectContextFromRemoteURL(r io.ReadCloser, remoteURL string, createProgressReader func(in io.ReadCloser) io.ReadCloser) (context ModifiableContext, dockerfileName string, err error) { - switch { - case remoteURL == "": - context, err = MakeTarSumContext(r) - case urlutil.IsGitURL(remoteURL): - context, err = MakeGitContext(remoteURL) - case urlutil.IsURL(remoteURL): - context, err = MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){ - httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { - dockerfile, err := ioutil.ReadAll(rc) - if err != nil { - return nil, err - } - - // dockerfileName is set to signal that the remote was interpreted as a single Dockerfile, in which case the caller - // should use dockerfileName as the new name for the Dockerfile, irrespective of any other user input. - dockerfileName = DefaultDockerfileName - - // TODO: return a context without tarsum - r, err := archive.Generate(dockerfileName, string(dockerfile)) - if err != nil { - return nil, err - } - - return ioutil.NopCloser(r), nil - }, - // fallback handler (tar context) - "": func(rc io.ReadCloser) (io.ReadCloser, error) { - return createProgressReader(rc), nil - }, - }) - default: - err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL) +// GetWithStatusError does an http.Get() and returns an error if the +// status code is 4xx or 5xx. 
+func GetWithStatusError(url string) (resp *http.Response, err error) { + if resp, err = http.Get(url); err != nil { + return nil, err } - return + if resp.StatusCode < 400 { + return resp, nil + } + msg := fmt.Sprintf("failed to GET %s with status %s", url, resp.Status) + body, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, errors.Wrapf(err, msg+": error reading body") + } + return nil, errors.Errorf(msg+": %s", bytes.TrimSpace(body)) } // inspectResponse looks into the http response data at r to determine whether its @@ -135,8 +112,8 @@ func inspectResponse(ct string, r io.ReadCloser, clen int64) (string, io.ReadClo // content type for files without an extension (e.g. 'Dockerfile') // so if we receive this value we better check for text content contentType := ct - if len(ct) == 0 || ct == httputils.MimeTypes.OctetStream { - contentType, _, err = httputils.DetectContentType(preamble) + if len(ct) == 0 || ct == mimeTypes.OctetStream { + contentType, _, err = detectContentType(preamble) if err != nil { return contentType, bodyReader, err } diff --git a/fn/vendor/github.com/docker/docker/builder/remote_test.go b/fn/vendor/github.com/docker/docker/builder/remotecontext/remote_test.go similarity index 75% rename from fn/vendor/github.com/docker/docker/builder/remote_test.go rename to fn/vendor/github.com/docker/docker/builder/remotecontext/remote_test.go index b44b59fbe..c698726e8 100644 --- a/fn/vendor/github.com/docker/docker/builder/remote_test.go +++ b/fn/vendor/github.com/docker/docker/builder/remotecontext/remote_test.go @@ -1,4 +1,4 @@ -package builder +package remotecontext import ( "bytes" @@ -9,8 +9,11 @@ import ( "net/url" "testing" + "github.com/docker/docker/builder" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var binaryContext = []byte{0xFD, 0x37, 0x7A, 0x58, 
0x5A, 0x00} //xz magic @@ -175,25 +178,25 @@ func TestMakeRemoteContext(t *testing.T) { contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") defer cleanup() - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, dockerfileContents, 0777) mux := http.NewServeMux() server := httptest.NewServer(mux) serverURL, _ := url.Parse(server.URL) - serverURL.Path = "/" + DefaultDockerfileName + serverURL.Path = "/" + builder.DefaultDockerfileName remoteURL := serverURL.String() mux.Handle("/", http.FileServer(http.Dir(contextDir))) remoteContext, err := MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){ - httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { + mimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { dockerfile, err := ioutil.ReadAll(rc) if err != nil { return nil, err } - r, err := archive.Generate(DefaultDockerfileName, string(dockerfile)) + r, err := archive.Generate(builder.DefaultDockerfileName, string(dockerfile)) if err != nil { return nil, err } @@ -209,25 +212,52 @@ func TestMakeRemoteContext(t *testing.T) { t.Fatal("Remote context should not be nil") } - tarSumCtx, ok := remoteContext.(*tarSumContext) - - if !ok { - t.Fatal("Cast error, remote context should be casted to tarSumContext") + h, err := remoteContext.Hash(builder.DefaultDockerfileName) + if err != nil { + t.Fatalf("failed to compute hash %s", err) } - fileInfoSums := tarSumCtx.sums - - if fileInfoSums.Len() != 1 { - t.Fatalf("Size of file info sums should be 1, got: %d", fileInfoSums.Len()) - } - - fileInfo := fileInfoSums.GetFile(DefaultDockerfileName) - - if fileInfo == nil { - t.Fatalf("There should be file named %s in fileInfoSums", DefaultDockerfileName) - } - - if fileInfo.Pos() != 0 { - t.Fatalf("File %s should have position 0, got %d", DefaultDockerfileName, fileInfo.Pos()) + if expected, actual := 
"7b6b6b66bee9e2102fbdc2228be6c980a2a23adf371962a37286a49f7de0f7cc", h; expected != actual { + t.Fatalf("There should be file named %s %s in fileInfoSums", expected, actual) + } +} + +func TestGetWithStatusError(t *testing.T) { + var testcases = []struct { + err error + statusCode int + expectedErr string + expectedBody string + }{ + { + statusCode: 200, + expectedBody: "THE BODY", + }, + { + statusCode: 400, + expectedErr: "with status 400 Bad Request: broke", + expectedBody: "broke", + }, + } + for _, testcase := range testcases { + ts := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + buffer := bytes.NewBufferString(testcase.expectedBody) + w.WriteHeader(testcase.statusCode) + w.Write(buffer.Bytes()) + }), + ) + defer ts.Close() + response, err := GetWithStatusError(ts.URL) + + if testcase.expectedErr == "" { + require.NoError(t, err) + + body, err := testutil.ReadBody(response.Body) + require.NoError(t, err) + assert.Contains(t, string(body), testcase.expectedBody) + } else { + testutil.ErrorContains(t, err, testcase.expectedErr) + } } } diff --git a/fn/vendor/github.com/docker/docker/builder/remotecontext/tarsum.go b/fn/vendor/github.com/docker/docker/builder/remotecontext/tarsum.go new file mode 100644 index 000000000..3ae9d8242 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/builder/remotecontext/tarsum.go @@ -0,0 +1,174 @@ +package remotecontext + +import ( + "fmt" + "os" + "path/filepath" + "sync" + + "github.com/docker/docker/pkg/symlink" + iradix "github.com/hashicorp/go-immutable-radix" + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil" +) + +type hashed interface { + Hash() string +} + +// CachableSource is a source that contains cache records for its contents +type CachableSource struct { + mu sync.Mutex + root string + tree *iradix.Tree + txn *iradix.Txn +} + +// NewCachableSource creates new CachableSource +func NewCachableSource(root string) *CachableSource { + ts := &CachableSource{ + tree: 
iradix.New(), + root: root, + } + return ts +} + +// MarshalBinary marshals current cache information to a byte array +func (cs *CachableSource) MarshalBinary() ([]byte, error) { + b := TarsumBackup{Hashes: make(map[string]string)} + root := cs.getRoot() + root.Walk(func(k []byte, v interface{}) bool { + b.Hashes[string(k)] = v.(*fileInfo).sum + return false + }) + return b.Marshal() +} + +// UnmarshalBinary decodes cache information for presented byte array +func (cs *CachableSource) UnmarshalBinary(data []byte) error { + var b TarsumBackup + if err := b.Unmarshal(data); err != nil { + return err + } + txn := iradix.New().Txn() + for p, v := range b.Hashes { + txn.Insert([]byte(p), &fileInfo{sum: v}) + } + cs.mu.Lock() + defer cs.mu.Unlock() + cs.tree = txn.Commit() + return nil +} + +// Scan rescans the cache information from the file system +func (cs *CachableSource) Scan() error { + lc, err := NewLazySource(cs.root) + if err != nil { + return err + } + txn := iradix.New().Txn() + err = filepath.Walk(cs.root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return errors.Wrapf(err, "failed to walk %s", path) + } + rel, err := Rel(cs.root, path) + if err != nil { + return err + } + h, err := lc.Hash(rel) + if err != nil { + return err + } + txn.Insert([]byte(rel), &fileInfo{sum: h}) + return nil + }) + if err != nil { + return err + } + cs.mu.Lock() + defer cs.mu.Unlock() + cs.tree = txn.Commit() + return nil +} + +// HandleChange notifies the source about a modification operation +func (cs *CachableSource) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) (retErr error) { + cs.mu.Lock() + if cs.txn == nil { + cs.txn = cs.tree.Txn() + } + if kind == fsutil.ChangeKindDelete { + cs.txn.Delete([]byte(p)) + cs.mu.Unlock() + return + } + + h, ok := fi.(hashed) + if !ok { + cs.mu.Unlock() + return errors.Errorf("invalid fileinfo: %s", p) + } + + hfi := &fileInfo{ + sum: h.Hash(), + } + cs.txn.Insert([]byte(p), hfi) + 
cs.mu.Unlock() + return nil +} + +func (cs *CachableSource) getRoot() *iradix.Node { + cs.mu.Lock() + if cs.txn != nil { + cs.tree = cs.txn.Commit() + cs.txn = nil + } + t := cs.tree + cs.mu.Unlock() + return t.Root() +} + +// Close closes the source +func (cs *CachableSource) Close() error { + return nil +} + +func (cs *CachableSource) normalize(path string) (cleanpath, fullpath string, err error) { + cleanpath = filepath.Clean(string(os.PathSeparator) + path)[1:] + fullpath, err = symlink.FollowSymlinkInScope(filepath.Join(cs.root, path), cs.root) + if err != nil { + return "", "", fmt.Errorf("Forbidden path outside the context: %s (%s)", path, fullpath) + } + _, err = os.Lstat(fullpath) + if err != nil { + return "", "", convertPathError(err, path) + } + return +} + +// Hash returns a hash for a single file in the source +func (cs *CachableSource) Hash(path string) (string, error) { + n := cs.getRoot() + sum := "" + // TODO: check this for symlinks + v, ok := n.Get([]byte(path)) + if !ok { + sum = path + } else { + sum = v.(*fileInfo).sum + } + return sum, nil +} + +// Root returns a root directory for the source +func (cs *CachableSource) Root() string { + return cs.root +} + +type fileInfo struct { + sum string +} + +func (fi *fileInfo) Hash() string { + return fi.sum +} diff --git a/fn/vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go b/fn/vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go new file mode 100644 index 000000000..561a7f636 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go @@ -0,0 +1,525 @@ +// Code generated by protoc-gen-gogo. +// source: tarsum.proto +// DO NOT EDIT! + +/* +Package remotecontext is a generated protocol buffer package. 
+ +It is generated from these files: + tarsum.proto + +It has these top-level messages: + TarsumBackup +*/ +package remotecontext + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type TarsumBackup struct { + Hashes map[string]string `protobuf:"bytes,1,rep,name=Hashes" json:"Hashes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *TarsumBackup) Reset() { *m = TarsumBackup{} } +func (*TarsumBackup) ProtoMessage() {} +func (*TarsumBackup) Descriptor() ([]byte, []int) { return fileDescriptorTarsum, []int{0} } + +func (m *TarsumBackup) GetHashes() map[string]string { + if m != nil { + return m.Hashes + } + return nil +} + +func init() { + proto.RegisterType((*TarsumBackup)(nil), "remotecontext.TarsumBackup") +} +func (this *TarsumBackup) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*TarsumBackup) + if !ok { + that2, ok := that.(TarsumBackup) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.Hashes) != len(that1.Hashes) { + return false + } + for i := range this.Hashes { + if this.Hashes[i] != that1.Hashes[i] { + return false + } + } + 
return true +} +func (this *TarsumBackup) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&remotecontext.TarsumBackup{") + keysForHashes := make([]string, 0, len(this.Hashes)) + for k, _ := range this.Hashes { + keysForHashes = append(keysForHashes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHashes) + mapStringForHashes := "map[string]string{" + for _, k := range keysForHashes { + mapStringForHashes += fmt.Sprintf("%#v: %#v,", k, this.Hashes[k]) + } + mapStringForHashes += "}" + if this.Hashes != nil { + s = append(s, "Hashes: "+mapStringForHashes+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringTarsum(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *TarsumBackup) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TarsumBackup) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hashes) > 0 { + for k, _ := range m.Hashes { + dAtA[i] = 0xa + i++ + v := m.Hashes[k] + mapSize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v))) + i = encodeVarintTarsum(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTarsum(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTarsum(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func encodeFixed64Tarsum(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 
48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Tarsum(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintTarsum(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *TarsumBackup) Size() (n int) { + var l int + _ = l + if len(m.Hashes) > 0 { + for k, v := range m.Hashes { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v))) + n += mapEntrySize + 1 + sovTarsum(uint64(mapEntrySize)) + } + } + return n +} + +func sovTarsum(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozTarsum(x uint64) (n int) { + return sovTarsum(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *TarsumBackup) String() string { + if this == nil { + return "nil" + } + keysForHashes := make([]string, 0, len(this.Hashes)) + for k, _ := range this.Hashes { + keysForHashes = append(keysForHashes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHashes) + mapStringForHashes := "map[string]string{" + for _, k := range keysForHashes { + mapStringForHashes += fmt.Sprintf("%v: %v,", k, this.Hashes[k]) + } + mapStringForHashes += "}" + s := strings.Join([]string{`&TarsumBackup{`, + `Hashes:` + mapStringForHashes + `,`, + `}`, + }, "") + return s +} +func valueToStringTarsum(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *TarsumBackup) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TarsumBackup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TarsumBackup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hashes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTarsum + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTarsum + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Hashes == nil { + m.Hashes = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTarsum + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Hashes[mapkey] = mapvalue + } else { + var mapvalue string + m.Hashes[mapkey] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTarsum(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTarsum + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTarsum(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthTarsum + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTarsum(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTarsum = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTarsum = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("tarsum.proto", fileDescriptorTarsum) } + +var fileDescriptorTarsum = []byte{ + // 196 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x49, 0x2c, 0x2a, + 0x2e, 0xcd, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x2d, 0x4a, 0xcd, 0xcd, 0x2f, 0x49, + 0x4d, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0x51, 0xea, 0x62, 0xe4, 0xe2, 0x09, 0x01, 0xcb, 0x3b, + 0x25, 0x26, 0x67, 0x97, 0x16, 0x08, 0xd9, 0x73, 0xb1, 0x79, 0x24, 0x16, 0x67, 0xa4, 0x16, 0x4b, + 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0xa9, 0xeb, 0xa1, 0x68, 0xd0, 0x43, 0x56, 0xac, 0x07, 0x51, + 0xe9, 0x9a, 0x57, 0x52, 0x54, 0x19, 0x04, 0xd5, 0x26, 0x65, 0xc9, 0xc5, 0x8d, 0x24, 0x2c, 0x24, + 0xc0, 0xc5, 0x9c, 0x9d, 0x5a, 0x29, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 
0x62, 0x0a, 0x89, + 0x70, 0xb1, 0x96, 0x25, 0xe6, 0x94, 0xa6, 0x4a, 0x30, 0x81, 0xc5, 0x20, 0x1c, 0x2b, 0x26, 0x0b, + 0x46, 0x27, 0x9d, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xb1, + 0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, + 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8b, 0x47, 0x72, 0x0c, 0x1f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, + 0xc7, 0x90, 0xc4, 0x06, 0xf6, 0x90, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x89, 0x57, 0x7d, 0x3f, + 0xe0, 0x00, 0x00, 0x00, +} diff --git a/fn/vendor/github.com/docker/docker/builder/remotecontext/tarsum.proto b/fn/vendor/github.com/docker/docker/builder/remotecontext/tarsum.proto new file mode 100644 index 000000000..cb94240ba --- /dev/null +++ b/fn/vendor/github.com/docker/docker/builder/remotecontext/tarsum.proto @@ -0,0 +1,7 @@ +syntax = "proto3"; + +package remotecontext; // no namespace because only used internally + +message TarsumBackup { + map Hashes = 1; +} \ No newline at end of file diff --git a/fn/vendor/github.com/docker/docker/builder/remotecontext/tarsum_test.go b/fn/vendor/github.com/docker/docker/builder/remotecontext/tarsum_test.go new file mode 100644 index 000000000..8a9d69bb7 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/builder/remotecontext/tarsum_test.go @@ -0,0 +1,157 @@ +package remotecontext + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/pkg/errors" +) + +const ( + filename = "test" + contents = "contents test" +) + +func init() { + reexec.Init() +} + +func TestCloseRootDirectory(t *testing.T) { + contextDir, err := ioutil.TempDir("", "builder-tarsum-test") + defer os.RemoveAll(contextDir) + if err != nil { + t.Fatalf("Error with creating temporary directory: %s", err) + } + + src := makeTestArchiveContext(t, contextDir) + err = src.Close() + + if err != nil { + t.Fatalf("Error 
while executing Close: %s", err) + } + + _, err = os.Stat(src.Root()) + + if !os.IsNotExist(err) { + t.Fatal("Directory should not exist at this point") + } +} + +func TestHashFile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + createTestTempFile(t, contextDir, filename, contents, 0755) + + tarSum := makeTestArchiveContext(t, contextDir) + + sum, err := tarSum.Hash(filename) + + if err != nil { + t.Fatalf("Error when executing Stat: %s", err) + } + + if len(sum) == 0 { + t.Fatalf("Hash returned empty sum") + } + + expected := "1149ab94af7be6cc1da1335e398f24ee1cf4926b720044d229969dfc248ae7ec" + + if actual := sum; expected != actual { + t.Fatalf("invalid checksum. expected %s, got %s", expected, actual) + } +} + +func TestHashSubdir(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + contextSubdir := filepath.Join(contextDir, "builder-tarsum-test-subdir") + err := os.Mkdir(contextSubdir, 0755) + if err != nil { + t.Fatalf("Failed to make directory: %s", contextSubdir) + } + + testFilename := createTestTempFile(t, contextSubdir, filename, contents, 0755) + + tarSum := makeTestArchiveContext(t, contextDir) + + relativePath, err := filepath.Rel(contextDir, testFilename) + + if err != nil { + t.Fatalf("Error when getting relative path: %s", err) + } + + sum, err := tarSum.Hash(relativePath) + + if err != nil { + t.Fatalf("Error when executing Stat: %s", err) + } + + if len(sum) == 0 { + t.Fatalf("Hash returned empty sum") + } + + expected := "d7f8d6353dee4816f9134f4156bf6a9d470fdadfb5d89213721f7e86744a4e69" + + if actual := sum; expected != actual { + t.Fatalf("invalid checksum. 
expected %s, got %s", expected, actual) + } +} + +func TestStatNotExisting(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + src := makeTestArchiveContext(t, contextDir) + _, err := src.Hash("not-existing") + if !os.IsNotExist(errors.Cause(err)) { + t.Fatalf("This file should not exist: %s", err) + } +} + +func TestRemoveDirectory(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") + + relativePath, err := filepath.Rel(contextDir, contextSubdir) + + if err != nil { + t.Fatalf("Error when getting relative path: %s", err) + } + + src := makeTestArchiveContext(t, contextDir) + + tarSum := src.(modifiableContext) + + err = tarSum.Remove(relativePath) + if err != nil { + t.Fatalf("Error when executing Remove: %s", err) + } + + _, err = src.Hash(contextSubdir) + + if !os.IsNotExist(errors.Cause(err)) { + t.Fatal("Directory should not exist at this point") + } +} + +func makeTestArchiveContext(t *testing.T, dir string) builder.Source { + tarStream, err := archive.Tar(dir, archive.Uncompressed) + if err != nil { + t.Fatalf("error: %s", err) + } + defer tarStream.Close() + tarSum, err := FromArchive(tarStream) + if err != nil { + t.Fatalf("Error when executing FromArchive: %s", err) + } + return tarSum +} diff --git a/fn/vendor/github.com/docker/docker/builder/utils_test.go b/fn/vendor/github.com/docker/docker/builder/remotecontext/utils_test.go similarity index 92% rename from fn/vendor/github.com/docker/docker/builder/utils_test.go rename to fn/vendor/github.com/docker/docker/builder/remotecontext/utils_test.go index adc264539..1e23ab4f7 100644 --- a/fn/vendor/github.com/docker/docker/builder/utils_test.go +++ b/fn/vendor/github.com/docker/docker/builder/remotecontext/utils_test.go @@ -1,4 +1,4 @@ -package builder +package remotecontext import ( "io/ioutil" @@ -7,12 +7,6 
@@ import ( "testing" ) -const ( - dockerfileContents = "FROM busybox" - dockerignoreFilename = ".dockerignore" - testfileContents = "test" -) - // createTestTempDir creates a temporary directory for testing. // It returns the created path and a cleanup function which is meant to be used as deferred call. // When an error occurs, it terminates the test. diff --git a/fn/vendor/github.com/docker/docker/builder/tarsum.go b/fn/vendor/github.com/docker/docker/builder/tarsum.go deleted file mode 100644 index 77d3142e7..000000000 --- a/fn/vendor/github.com/docker/docker/builder/tarsum.go +++ /dev/null @@ -1,159 +0,0 @@ -package builder - -import ( - "fmt" - "io" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/pkg/tarsum" -) - -type tarSumContext struct { - root string - sums tarsum.FileInfoSums -} - -func (c *tarSumContext) Close() error { - return os.RemoveAll(c.root) -} - -func convertPathError(err error, cleanpath string) error { - if err, ok := err.(*os.PathError); ok { - err.Path = cleanpath - return err - } - return err -} - -func (c *tarSumContext) Open(path string) (io.ReadCloser, error) { - cleanpath, fullpath, err := c.normalize(path) - if err != nil { - return nil, err - } - r, err := os.Open(fullpath) - if err != nil { - return nil, convertPathError(err, cleanpath) - } - return r, nil -} - -func (c *tarSumContext) Stat(path string) (string, FileInfo, error) { - cleanpath, fullpath, err := c.normalize(path) - if err != nil { - return "", nil, err - } - - st, err := os.Lstat(fullpath) - if err != nil { - return "", nil, convertPathError(err, cleanpath) - } - - rel, err := filepath.Rel(c.root, fullpath) - if err != nil { - return "", nil, convertPathError(err, cleanpath) - } - - // We set sum to path by default for the case where GetFile returns nil. 
- // The usual case is if relative path is empty. - sum := path - // Use the checksum of the followed path(not the possible symlink) because - // this is the file that is actually copied. - if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil { - sum = tsInfo.Sum() - } - fi := &HashedFileInfo{PathFileInfo{st, fullpath, filepath.Base(cleanpath)}, sum} - return rel, fi, nil -} - -// MakeTarSumContext returns a build Context from a tar stream. -// -// It extracts the tar stream to a temporary folder that is deleted as soon as -// the Context is closed. -// As the extraction happens, a tarsum is calculated for every file, and the set of -// all those sums then becomes the source of truth for all operations on this Context. -// -// Closing tarStream has to be done by the caller. -func MakeTarSumContext(tarStream io.Reader) (ModifiableContext, error) { - root, err := ioutils.TempDir("", "docker-builder") - if err != nil { - return nil, err - } - - tsc := &tarSumContext{root: root} - - // Make sure we clean-up upon error. 
In the happy case the caller - // is expected to manage the clean-up - defer func() { - if err != nil { - tsc.Close() - } - }() - - decompressedStream, err := archive.DecompressStream(tarStream) - if err != nil { - return nil, err - } - - sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1) - if err != nil { - return nil, err - } - - err = chrootarchive.Untar(sum, root, nil) - if err != nil { - return nil, err - } - - tsc.sums = sum.GetSums() - - return tsc, nil -} - -func (c *tarSumContext) normalize(path string) (cleanpath, fullpath string, err error) { - cleanpath = filepath.Clean(string(os.PathSeparator) + path)[1:] - fullpath, err = symlink.FollowSymlinkInScope(filepath.Join(c.root, path), c.root) - if err != nil { - return "", "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullpath) - } - _, err = os.Lstat(fullpath) - if err != nil { - return "", "", convertPathError(err, path) - } - return -} - -func (c *tarSumContext) Walk(root string, walkFn WalkFunc) error { - root = filepath.Join(c.root, filepath.Join(string(filepath.Separator), root)) - return filepath.Walk(root, func(fullpath string, info os.FileInfo, err error) error { - rel, err := filepath.Rel(c.root, fullpath) - if err != nil { - return err - } - if rel == "." 
{ - return nil - } - - sum := rel - if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil { - sum = tsInfo.Sum() - } - fi := &HashedFileInfo{PathFileInfo{FileInfo: info, FilePath: fullpath}, sum} - if err := walkFn(rel, fi, nil); err != nil { - return err - } - return nil - }) -} - -func (c *tarSumContext) Remove(path string) error { - _, fullpath, err := c.normalize(path) - if err != nil { - return err - } - return os.RemoveAll(fullpath) -} diff --git a/fn/vendor/github.com/docker/docker/builder/tarsum_test.go b/fn/vendor/github.com/docker/docker/builder/tarsum_test.go deleted file mode 100644 index b3a0876b3..000000000 --- a/fn/vendor/github.com/docker/docker/builder/tarsum_test.go +++ /dev/null @@ -1,265 +0,0 @@ -package builder - -import ( - "bufio" - "bytes" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/reexec" -) - -const ( - filename = "test" - contents = "contents test" -) - -func init() { - reexec.Init() -} - -func TestCloseRootDirectory(t *testing.T) { - contextDir, err := ioutil.TempDir("", "builder-tarsum-test") - - if err != nil { - t.Fatalf("Error with creating temporary directory: %s", err) - } - - tarsum := &tarSumContext{root: contextDir} - - err = tarsum.Close() - - if err != nil { - t.Fatalf("Error while executing Close: %s", err) - } - - _, err = os.Stat(contextDir) - - if !os.IsNotExist(err) { - t.Fatal("Directory should not exist at this point") - defer os.RemoveAll(contextDir) - } -} - -func TestOpenFile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - createTestTempFile(t, contextDir, filename, contents, 0777) - - tarSum := &tarSumContext{root: contextDir} - - file, err := tarSum.Open(filename) - - if err != nil { - t.Fatalf("Error when executing Open: %s", err) - } - - defer file.Close() - - scanner := bufio.NewScanner(file) - buff := bytes.NewBufferString("") - - for scanner.Scan() { - 
buff.WriteString(scanner.Text()) - } - - if contents != buff.String() { - t.Fatalf("Contents are not equal. Expected: %s, got: %s", contents, buff.String()) - } - -} - -func TestOpenNotExisting(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - tarSum := &tarSumContext{root: contextDir} - - file, err := tarSum.Open("not-existing") - - if file != nil { - t.Fatal("Opened file should be nil") - } - - if !os.IsNotExist(err) { - t.Fatalf("Error when executing Open: %s", err) - } -} - -func TestStatFile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - testFilename := createTestTempFile(t, contextDir, filename, contents, 0777) - - tarSum := &tarSumContext{root: contextDir} - - relPath, fileInfo, err := tarSum.Stat(filename) - - if err != nil { - t.Fatalf("Error when executing Stat: %s", err) - } - - if relPath != filename { - t.Fatalf("Relative path should be equal to %s, got %s", filename, relPath) - } - - if fileInfo.Path() != testFilename { - t.Fatalf("Full path should be equal to %s, got %s", testFilename, fileInfo.Path()) - } -} - -func TestStatSubdir(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") - - testFilename := createTestTempFile(t, contextSubdir, filename, contents, 0777) - - tarSum := &tarSumContext{root: contextDir} - - relativePath, err := filepath.Rel(contextDir, testFilename) - - if err != nil { - t.Fatalf("Error when getting relative path: %s", err) - } - - relPath, fileInfo, err := tarSum.Stat(relativePath) - - if err != nil { - t.Fatalf("Error when executing Stat: %s", err) - } - - if relPath != relativePath { - t.Fatalf("Relative path should be equal to %s, got %s", relativePath, relPath) - } - - if fileInfo.Path() != testFilename { - t.Fatalf("Full path should be equal to %s, got %s", 
testFilename, fileInfo.Path()) - } -} - -func TestStatNotExisting(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - tarSum := &tarSumContext{root: contextDir} - - relPath, fileInfo, err := tarSum.Stat("not-existing") - - if relPath != "" { - t.Fatal("Relative path should be nil") - } - - if fileInfo != nil { - t.Fatal("File info should be nil") - } - - if !os.IsNotExist(err) { - t.Fatalf("This file should not exist: %s", err) - } -} - -func TestRemoveDirectory(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") - - relativePath, err := filepath.Rel(contextDir, contextSubdir) - - if err != nil { - t.Fatalf("Error when getting relative path: %s", err) - } - - tarSum := &tarSumContext{root: contextDir} - - err = tarSum.Remove(relativePath) - - if err != nil { - t.Fatalf("Error when executing Remove: %s", err) - } - - _, err = os.Stat(contextSubdir) - - if !os.IsNotExist(err) { - t.Fatal("Directory should not exist at this point") - } -} - -func TestMakeTarSumContext(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - createTestTempFile(t, contextDir, filename, contents, 0777) - - tarStream, err := archive.Tar(contextDir, archive.Uncompressed) - - if err != nil { - t.Fatalf("error: %s", err) - } - - defer tarStream.Close() - - tarSum, err := MakeTarSumContext(tarStream) - - if err != nil { - t.Fatalf("Error when executing MakeTarSumContext: %s", err) - } - - if tarSum == nil { - t.Fatal("Tar sum context should not be nil") - } -} - -func TestWalkWithoutError(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") - - createTestTempFile(t, contextSubdir, filename, contents, 
0777) - - tarSum := &tarSumContext{root: contextDir} - - walkFun := func(path string, fi FileInfo, err error) error { - return nil - } - - err := tarSum.Walk(contextSubdir, walkFun) - - if err != nil { - t.Fatalf("Error when executing Walk: %s", err) - } -} - -type WalkError struct { -} - -func (we WalkError) Error() string { - return "Error when executing Walk" -} - -func TestWalkWithError(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") - - tarSum := &tarSumContext{root: contextDir} - - walkFun := func(path string, fi FileInfo, err error) error { - return WalkError{} - } - - err := tarSum.Walk(contextSubdir, walkFun) - - if err == nil { - t.Fatal("Error should not be nil") - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile.go b/fn/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile.go deleted file mode 100644 index 07e2c8b08..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile.go +++ /dev/null @@ -1,70 +0,0 @@ -package bundlefile - -import ( - "encoding/json" - "io" - - "github.com/pkg/errors" -) - -// Bundlefile stores the contents of a bundlefile -type Bundlefile struct { - Version string - Services map[string]Service -} - -// Service is a service from a bundlefile -type Service struct { - Image string - Command []string `json:",omitempty"` - Args []string `json:",omitempty"` - Env []string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Ports []Port `json:",omitempty"` - WorkingDir *string `json:",omitempty"` - User *string `json:",omitempty"` - Networks []string `json:",omitempty"` -} - -// Port is a port as defined in a bundlefile -type Port struct { - Protocol string - Port uint32 -} - -// LoadFile loads a bundlefile from a path to the file -func LoadFile(reader io.Reader) (*Bundlefile, error) { - bundlefile := 
&Bundlefile{} - - decoder := json.NewDecoder(reader) - if err := decoder.Decode(bundlefile); err != nil { - switch jsonErr := err.(type) { - case *json.SyntaxError: - return nil, errors.Errorf( - "JSON syntax error at byte %v: %s", - jsonErr.Offset, - jsonErr.Error()) - case *json.UnmarshalTypeError: - return nil, errors.Errorf( - "Unexpected type at byte %v. Expected %s but received %s.", - jsonErr.Offset, - jsonErr.Type, - jsonErr.Value) - } - return nil, err - } - - return bundlefile, nil -} - -// Print writes the contents of the bundlefile to the output writer -// as human readable json -func Print(out io.Writer, bundle *Bundlefile) error { - bytes, err := json.MarshalIndent(*bundle, "", " ") - if err != nil { - return err - } - - _, err = out.Write(bytes) - return err -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile_test.go b/fn/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile_test.go deleted file mode 100644 index c343410df..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package bundlefile - -import ( - "bytes" - "strings" - "testing" - - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestLoadFileV01Success(t *testing.T) { - reader := strings.NewReader(`{ - "Version": "0.1", - "Services": { - "redis": { - "Image": "redis@sha256:4b24131101fa0117bcaa18ac37055fffd9176aa1a240392bb8ea85e0be50f2ce", - "Networks": ["default"] - }, - "web": { - "Image": "dockercloud/hello-world@sha256:fe79a2cfbd17eefc344fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d", - "Networks": ["default"], - "User": "web" - } - } - }`) - - bundle, err := LoadFile(reader) - assert.NilError(t, err) - assert.Equal(t, bundle.Version, "0.1") - assert.Equal(t, len(bundle.Services), 2) -} - -func TestLoadFileSyntaxError(t *testing.T) { - reader := strings.NewReader(`{ - "Version": "0.1", - "Services": unquoted string - }`) - - _, err := LoadFile(reader) - 
assert.Error(t, err, "syntax error at byte 37: invalid character 'u'") -} - -func TestLoadFileTypeError(t *testing.T) { - reader := strings.NewReader(`{ - "Version": "0.1", - "Services": { - "web": { - "Image": "redis", - "Networks": "none" - } - } - }`) - - _, err := LoadFile(reader) - assert.Error(t, err, "Unexpected type at byte 94. Expected []string but received string") -} - -func TestPrint(t *testing.T) { - var buffer bytes.Buffer - bundle := &Bundlefile{ - Version: "0.1", - Services: map[string]Service{ - "web": { - Image: "image", - Command: []string{"echo", "something"}, - }, - }, - } - assert.NilError(t, Print(&buffer, bundle)) - output := buffer.String() - assert.Contains(t, output, "\"Image\": \"image\"") - assert.Contains(t, output, - `"Command": [ - "echo", - "something" - ]`) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/checkpoint/cmd.go b/fn/vendor/github.com/docker/docker/cli/command/checkpoint/cmd.go deleted file mode 100644 index d5705a4da..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/checkpoint/cmd.go +++ /dev/null @@ -1,24 +0,0 @@ -package checkpoint - -import ( - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -// NewCheckpointCommand returns the `checkpoint` subcommand (only in experimental) -func NewCheckpointCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "checkpoint", - Short: "Manage checkpoints", - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - Tags: map[string]string{"experimental": "", "version": "1.25"}, - } - cmd.AddCommand( - newCreateCommand(dockerCli), - newListCommand(dockerCli), - newRemoveCommand(dockerCli), - ) - return cmd -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/checkpoint/create.go b/fn/vendor/github.com/docker/docker/cli/command/checkpoint/create.go deleted file mode 100644 index 473a94173..000000000 --- 
a/fn/vendor/github.com/docker/docker/cli/command/checkpoint/create.go +++ /dev/null @@ -1,58 +0,0 @@ -package checkpoint - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type createOptions struct { - container string - checkpoint string - checkpointDir string - leaveRunning bool -} - -func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts createOptions - - cmd := &cobra.Command{ - Use: "create [OPTIONS] CONTAINER CHECKPOINT", - Short: "Create a checkpoint from a running container", - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - opts.checkpoint = args[1] - return runCreate(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.BoolVar(&opts.leaveRunning, "leave-running", false, "Leave the container running after checkpoint") - flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory") - - return cmd -} - -func runCreate(dockerCli *command.DockerCli, opts createOptions) error { - client := dockerCli.Client() - - checkpointOpts := types.CheckpointCreateOptions{ - CheckpointID: opts.checkpoint, - CheckpointDir: opts.checkpointDir, - Exit: !opts.leaveRunning, - } - - err := client.CheckpointCreate(context.Background(), opts.container, checkpointOpts) - if err != nil { - return err - } - - fmt.Fprintf(dockerCli.Out(), "%s\n", opts.checkpoint) - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/checkpoint/list.go b/fn/vendor/github.com/docker/docker/cli/command/checkpoint/list.go deleted file mode 100644 index 20e7d6d73..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/checkpoint/list.go +++ /dev/null @@ -1,54 +0,0 @@ -package checkpoint - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - 
"github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - "github.com/spf13/cobra" -) - -type listOptions struct { - checkpointDir string -} - -func newListCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts listOptions - - cmd := &cobra.Command{ - Use: "ls [OPTIONS] CONTAINER", - Aliases: []string{"list"}, - Short: "List checkpoints for a container", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runList(dockerCli, args[0], opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory") - - return cmd - -} - -func runList(dockerCli *command.DockerCli, container string, opts listOptions) error { - client := dockerCli.Client() - - listOpts := types.CheckpointListOptions{ - CheckpointDir: opts.checkpointDir, - } - - checkpoints, err := client.CheckpointList(context.Background(), container, listOpts) - if err != nil { - return err - } - - cpCtx := formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewCheckpointFormat(formatter.TableFormatKey), - } - return formatter.CheckpointWrite(cpCtx, checkpoints) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/checkpoint/remove.go b/fn/vendor/github.com/docker/docker/cli/command/checkpoint/remove.go deleted file mode 100644 index ec39fa7b5..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/checkpoint/remove.go +++ /dev/null @@ -1,44 +0,0 @@ -package checkpoint - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type removeOptions struct { - checkpointDir string -} - -func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts removeOptions - - cmd := &cobra.Command{ - Use: "rm [OPTIONS] CONTAINER CHECKPOINT", - Aliases: 
[]string{"remove"}, - Short: "Remove a checkpoint", - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - return runRemove(dockerCli, args[0], args[1], opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory") - - return cmd -} - -func runRemove(dockerCli *command.DockerCli, container string, checkpoint string, opts removeOptions) error { - client := dockerCli.Client() - - removeOpts := types.CheckpointDeleteOptions{ - CheckpointID: checkpoint, - CheckpointDir: opts.checkpointDir, - } - - return client.CheckpointDelete(context.Background(), container, removeOpts) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/cli.go b/fn/vendor/github.com/docker/docker/cli/command/cli.go deleted file mode 100644 index 45752d7d5..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/cli.go +++ /dev/null @@ -1,303 +0,0 @@ -package command - -import ( - "fmt" - "io" - "net/http" - "os" - "path/filepath" - "runtime" - - "github.com/docker/docker/api" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" - cliconfig "github.com/docker/docker/cli/config" - "github.com/docker/docker/cli/config/configfile" - "github.com/docker/docker/cli/config/credentials" - cliflags "github.com/docker/docker/cli/flags" - "github.com/docker/docker/client" - "github.com/docker/docker/dockerversion" - dopts "github.com/docker/docker/opts" - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" - "github.com/docker/notary/passphrase" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -// Streams is an interface which exposes the standard input and output streams -type Streams interface { - In() *InStream - Out() *OutStream - Err() io.Writer -} - -// Cli represents the docker command line client. 
-type Cli interface { - Client() client.APIClient - Out() *OutStream - Err() io.Writer - In() *InStream - ConfigFile() *configfile.ConfigFile -} - -// DockerCli is an instance the docker command line client. -// Instances of the client can be returned from NewDockerCli. -type DockerCli struct { - configFile *configfile.ConfigFile - in *InStream - out *OutStream - err io.Writer - keyFile string - client client.APIClient - defaultVersion string - server ServerInfo -} - -// DefaultVersion returns api.defaultVersion or DOCKER_API_VERSION if specified. -func (cli *DockerCli) DefaultVersion() string { - return cli.defaultVersion -} - -// Client returns the APIClient -func (cli *DockerCli) Client() client.APIClient { - return cli.client -} - -// Out returns the writer used for stdout -func (cli *DockerCli) Out() *OutStream { - return cli.out -} - -// Err returns the writer used for stderr -func (cli *DockerCli) Err() io.Writer { - return cli.err -} - -// In returns the reader used for stdin -func (cli *DockerCli) In() *InStream { - return cli.in -} - -// ShowHelp shows the command help. -func (cli *DockerCli) ShowHelp(cmd *cobra.Command, args []string) error { - cmd.SetOutput(cli.err) - cmd.HelpFunc()(cmd, args) - return nil -} - -// ConfigFile returns the ConfigFile -func (cli *DockerCli) ConfigFile() *configfile.ConfigFile { - return cli.configFile -} - -// ServerInfo returns the server version details for the host this client is -// connected to -func (cli *DockerCli) ServerInfo() ServerInfo { - return cli.server -} - -// GetAllCredentials returns all of the credentials stored in all of the -// configured credential stores. 
-func (cli *DockerCli) GetAllCredentials() (map[string]types.AuthConfig, error) { - auths := make(map[string]types.AuthConfig) - for registry := range cli.configFile.CredentialHelpers { - helper := cli.CredentialsStore(registry) - newAuths, err := helper.GetAll() - if err != nil { - return nil, err - } - addAll(auths, newAuths) - } - defaultStore := cli.CredentialsStore("") - newAuths, err := defaultStore.GetAll() - if err != nil { - return nil, err - } - addAll(auths, newAuths) - return auths, nil -} - -func addAll(to, from map[string]types.AuthConfig) { - for reg, ac := range from { - to[reg] = ac - } -} - -// CredentialsStore returns a new credentials store based -// on the settings provided in the configuration file. Empty string returns -// the default credential store. -func (cli *DockerCli) CredentialsStore(serverAddress string) credentials.Store { - if helper := getConfiguredCredentialStore(cli.configFile, serverAddress); helper != "" { - return credentials.NewNativeStore(cli.configFile, helper) - } - return credentials.NewFileStore(cli.configFile) -} - -// getConfiguredCredentialStore returns the credential helper configured for the -// given registry, the default credsStore, or the empty string if neither are -// configured. -func getConfiguredCredentialStore(c *configfile.ConfigFile, serverAddress string) string { - if c.CredentialHelpers != nil && serverAddress != "" { - if helper, exists := c.CredentialHelpers[serverAddress]; exists { - return helper - } - } - return c.CredentialsStore -} - -// Initialize the dockerCli runs initialization that must happen after command -// line flags are parsed. 
-func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions) error { - cli.configFile = LoadDefaultConfigFile(cli.err) - - var err error - cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile) - if tlsconfig.IsErrEncryptedKey(err) { - var ( - passwd string - giveup bool - ) - passRetriever := passphrase.PromptRetrieverWithInOut(cli.In(), cli.Out(), nil) - - for attempts := 0; tlsconfig.IsErrEncryptedKey(err); attempts++ { - // some code and comments borrowed from notary/trustmanager/keystore.go - passwd, giveup, err = passRetriever("private", "encrypted TLS private", false, attempts) - // Check if the passphrase retriever got an error or if it is telling us to give up - if giveup || err != nil { - return errors.Wrap(err, "private key is encrypted, but could not get passphrase") - } - - opts.Common.TLSOptions.Passphrase = passwd - cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile) - } - } - - if err != nil { - return err - } - - cli.defaultVersion = cli.client.ClientVersion() - - if opts.Common.TrustKey == "" { - cli.keyFile = filepath.Join(cliconfig.Dir(), cliflags.DefaultTrustKeyFile) - } else { - cli.keyFile = opts.Common.TrustKey - } - - if ping, err := cli.client.Ping(context.Background()); err == nil { - cli.server = ServerInfo{ - HasExperimental: ping.Experimental, - OSType: ping.OSType, - } - - // since the new header was added in 1.25, assume server is 1.24 if header is not present. - if ping.APIVersion == "" { - ping.APIVersion = "1.24" - } - - // if server version is lower than the current cli, downgrade - if versions.LessThan(ping.APIVersion, cli.client.ClientVersion()) { - cli.client.UpdateClientVersion(ping.APIVersion) - } - } - - return nil -} - -// ServerInfo stores details about the supported features and platform of the -// server -type ServerInfo struct { - HasExperimental bool - OSType string -} - -// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err. 
-func NewDockerCli(in io.ReadCloser, out, err io.Writer) *DockerCli { - return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err} -} - -// LoadDefaultConfigFile attempts to load the default config file and returns -// an initialized ConfigFile struct if none is found. -func LoadDefaultConfigFile(err io.Writer) *configfile.ConfigFile { - configFile, e := cliconfig.Load(cliconfig.Dir()) - if e != nil { - fmt.Fprintf(err, "WARNING: Error loading config file:%v\n", e) - } - if !configFile.ContainsAuth() { - credentials.DetectDefaultStore(configFile) - } - return configFile -} - -// NewAPIClientFromFlags creates a new APIClient from command line flags -func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) { - host, err := getServerHost(opts.Hosts, opts.TLSOptions) - if err != nil { - return &client.Client{}, err - } - - customHeaders := configFile.HTTPHeaders - if customHeaders == nil { - customHeaders = map[string]string{} - } - customHeaders["User-Agent"] = UserAgent() - - verStr := api.DefaultVersion - if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" { - verStr = tmpStr - } - - httpClient, err := newHTTPClient(host, opts.TLSOptions) - if err != nil { - return &client.Client{}, err - } - - return client.NewClient(host, verStr, httpClient, customHeaders) -} - -func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (host string, err error) { - switch len(hosts) { - case 0: - host = os.Getenv("DOCKER_HOST") - case 1: - host = hosts[0] - default: - return "", errors.New("Please specify only one -H") - } - - host, err = dopts.ParseHost(tlsOptions != nil, host) - return -} - -func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, error) { - if tlsOptions == nil { - // let the api client configure the default transport. 
- return nil, nil - } - opts := *tlsOptions - opts.ExclusiveRootPools = true - config, err := tlsconfig.Client(opts) - if err != nil { - return nil, err - } - tr := &http.Transport{ - TLSClientConfig: config, - } - proto, addr, _, err := client.ParseHost(host) - if err != nil { - return nil, err - } - - sockets.ConfigureTransport(tr, proto, addr) - - return &http.Client{ - Transport: tr, - }, nil -} - -// UserAgent returns the user agent string used for making API requests -func UserAgent() string { - return "Docker-Client/" + dockerversion.Version + " (" + runtime.GOOS + ")" -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/commands/commands.go b/fn/vendor/github.com/docker/docker/cli/command/commands/commands.go deleted file mode 100644 index 0db7f3a40..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/commands/commands.go +++ /dev/null @@ -1,121 +0,0 @@ -package commands - -import ( - "os" - - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/checkpoint" - "github.com/docker/docker/cli/command/container" - "github.com/docker/docker/cli/command/image" - "github.com/docker/docker/cli/command/network" - "github.com/docker/docker/cli/command/node" - "github.com/docker/docker/cli/command/plugin" - "github.com/docker/docker/cli/command/registry" - "github.com/docker/docker/cli/command/secret" - "github.com/docker/docker/cli/command/service" - "github.com/docker/docker/cli/command/stack" - "github.com/docker/docker/cli/command/swarm" - "github.com/docker/docker/cli/command/system" - "github.com/docker/docker/cli/command/volume" - "github.com/spf13/cobra" -) - -// AddCommands adds all the commands from cli/command to the root command -func AddCommands(cmd *cobra.Command, dockerCli *command.DockerCli) { - cmd.AddCommand( - // checkpoint - checkpoint.NewCheckpointCommand(dockerCli), - - // container - container.NewContainerCommand(dockerCli), - container.NewRunCommand(dockerCli), - - // image - 
image.NewImageCommand(dockerCli), - image.NewBuildCommand(dockerCli), - - // node - node.NewNodeCommand(dockerCli), - - // network - network.NewNetworkCommand(dockerCli), - - // plugin - plugin.NewPluginCommand(dockerCli), - - // registry - registry.NewLoginCommand(dockerCli), - registry.NewLogoutCommand(dockerCli), - registry.NewSearchCommand(dockerCli), - - // secret - secret.NewSecretCommand(dockerCli), - - // service - service.NewServiceCommand(dockerCli), - - // system - system.NewSystemCommand(dockerCli), - system.NewVersionCommand(dockerCli), - - // stack - stack.NewStackCommand(dockerCli), - stack.NewTopLevelDeployCommand(dockerCli), - - // swarm - swarm.NewSwarmCommand(dockerCli), - - // volume - volume.NewVolumeCommand(dockerCli), - - // legacy commands may be hidden - hide(system.NewEventsCommand(dockerCli)), - hide(system.NewInfoCommand(dockerCli)), - hide(system.NewInspectCommand(dockerCli)), - hide(container.NewAttachCommand(dockerCli)), - hide(container.NewCommitCommand(dockerCli)), - hide(container.NewCopyCommand(dockerCli)), - hide(container.NewCreateCommand(dockerCli)), - hide(container.NewDiffCommand(dockerCli)), - hide(container.NewExecCommand(dockerCli)), - hide(container.NewExportCommand(dockerCli)), - hide(container.NewKillCommand(dockerCli)), - hide(container.NewLogsCommand(dockerCli)), - hide(container.NewPauseCommand(dockerCli)), - hide(container.NewPortCommand(dockerCli)), - hide(container.NewPsCommand(dockerCli)), - hide(container.NewRenameCommand(dockerCli)), - hide(container.NewRestartCommand(dockerCli)), - hide(container.NewRmCommand(dockerCli)), - hide(container.NewStartCommand(dockerCli)), - hide(container.NewStatsCommand(dockerCli)), - hide(container.NewStopCommand(dockerCli)), - hide(container.NewTopCommand(dockerCli)), - hide(container.NewUnpauseCommand(dockerCli)), - hide(container.NewUpdateCommand(dockerCli)), - hide(container.NewWaitCommand(dockerCli)), - hide(image.NewHistoryCommand(dockerCli)), - 
hide(image.NewImagesCommand(dockerCli)), - hide(image.NewImportCommand(dockerCli)), - hide(image.NewLoadCommand(dockerCli)), - hide(image.NewPullCommand(dockerCli)), - hide(image.NewPushCommand(dockerCli)), - hide(image.NewRemoveCommand(dockerCli)), - hide(image.NewSaveCommand(dockerCli)), - hide(image.NewTagCommand(dockerCli)), - ) - -} - -func hide(cmd *cobra.Command) *cobra.Command { - // If the environment variable with name "DOCKER_HIDE_LEGACY_COMMANDS" is not empty, - // these legacy commands (such as `docker ps`, `docker exec`, etc) - // will not be shown in output console. - if os.Getenv("DOCKER_HIDE_LEGACY_COMMANDS") == "" { - return cmd - } - cmdCopy := *cmd - cmdCopy.Hidden = true - cmdCopy.Aliases = []string{} - return &cmdCopy -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/attach.go b/fn/vendor/github.com/docker/docker/cli/command/container/attach.go deleted file mode 100644 index 0564bdcd0..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/attach.go +++ /dev/null @@ -1,129 +0,0 @@ -package container - -import ( - "io" - "net/http/httputil" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/signal" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type attachOptions struct { - noStdin bool - proxy bool - detachKeys string - - container string -} - -// NewAttachCommand creates a new cobra.Command for `docker attach` -func NewAttachCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts attachOptions - - cmd := &cobra.Command{ - Use: "attach [OPTIONS] CONTAINER", - Short: "Attach local standard input, output, and error streams to a running container", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - return runAttach(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - 
flags.BoolVar(&opts.noStdin, "no-stdin", false, "Do not attach STDIN") - flags.BoolVar(&opts.proxy, "sig-proxy", true, "Proxy all received signals to the process") - flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") - return cmd -} - -func runAttach(dockerCli *command.DockerCli, opts *attachOptions) error { - ctx := context.Background() - client := dockerCli.Client() - - c, err := client.ContainerInspect(ctx, opts.container) - if err != nil { - return err - } - - if !c.State.Running { - return errors.New("You cannot attach to a stopped container, start it first") - } - - if c.State.Paused { - return errors.New("You cannot attach to a paused container, unpause it first") - } - - if err := dockerCli.In().CheckTty(!opts.noStdin, c.Config.Tty); err != nil { - return err - } - - if opts.detachKeys != "" { - dockerCli.ConfigFile().DetachKeys = opts.detachKeys - } - - options := types.ContainerAttachOptions{ - Stream: true, - Stdin: !opts.noStdin && c.Config.OpenStdin, - Stdout: true, - Stderr: true, - DetachKeys: dockerCli.ConfigFile().DetachKeys, - } - - var in io.ReadCloser - if options.Stdin { - in = dockerCli.In() - } - - if opts.proxy && !c.Config.Tty { - sigc := ForwardAllSignals(ctx, dockerCli, opts.container) - defer signal.StopCatch(sigc) - } - - resp, errAttach := client.ContainerAttach(ctx, opts.container, options) - if errAttach != nil && errAttach != httputil.ErrPersistEOF { - // ContainerAttach returns an ErrPersistEOF (connection closed) - // means server met an error and put it in Hijacked connection - // keep the error and read detailed error message from hijacked connection later - return errAttach - } - defer resp.Close() - - if c.Config.Tty && dockerCli.Out().IsTerminal() { - height, width := dockerCli.Out().GetTtySize() - // To handle the case where a user repeatedly attaches/detaches without resizing their - // terminal, the only way to get the shell prompt to display for attaches 2+ is to 
artificially - // resize it, then go back to normal. Without this, every attach after the first will - // require the user to manually resize or hit enter. - resizeTtyTo(ctx, client, opts.container, height+1, width+1, false) - - // After the above resizing occurs, the call to MonitorTtySize below will handle resetting back - // to the actual size. - if err := MonitorTtySize(ctx, dockerCli, opts.container, false); err != nil { - logrus.Debugf("Error monitoring TTY size: %s", err) - } - } - if err := holdHijackedConnection(ctx, dockerCli, c.Config.Tty, in, dockerCli.Out(), dockerCli.Err(), resp); err != nil { - return err - } - - if errAttach != nil { - return errAttach - } - - _, status, err := getExitCode(ctx, dockerCli, opts.container) - if err != nil { - return err - } - if status != 0 { - return cli.StatusError{StatusCode: status} - } - - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/cmd.go b/fn/vendor/github.com/docker/docker/cli/command/container/cmd.go deleted file mode 100644 index b78411e0a..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/cmd.go +++ /dev/null @@ -1,45 +0,0 @@ -package container - -import ( - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -// NewContainerCommand returns a cobra command for `container` subcommands -func NewContainerCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "container", - Short: "Manage containers", - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - } - cmd.AddCommand( - NewAttachCommand(dockerCli), - NewCommitCommand(dockerCli), - NewCopyCommand(dockerCli), - NewCreateCommand(dockerCli), - NewDiffCommand(dockerCli), - NewExecCommand(dockerCli), - NewExportCommand(dockerCli), - NewKillCommand(dockerCli), - NewLogsCommand(dockerCli), - NewPauseCommand(dockerCli), - NewPortCommand(dockerCli), - NewRenameCommand(dockerCli), - NewRestartCommand(dockerCli), - 
NewRmCommand(dockerCli), - NewRunCommand(dockerCli), - NewStartCommand(dockerCli), - NewStatsCommand(dockerCli), - NewStopCommand(dockerCli), - NewTopCommand(dockerCli), - NewUnpauseCommand(dockerCli), - NewUpdateCommand(dockerCli), - NewWaitCommand(dockerCli), - newListCommand(dockerCli), - newInspectCommand(dockerCli), - NewPruneCommand(dockerCli), - ) - return cmd -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/commit.go b/fn/vendor/github.com/docker/docker/cli/command/container/commit.go deleted file mode 100644 index 8f67d96d8..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/commit.go +++ /dev/null @@ -1,75 +0,0 @@ -package container - -import ( - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - dockeropts "github.com/docker/docker/opts" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type commitOptions struct { - container string - reference string - - pause bool - comment string - author string - changes dockeropts.ListOpts -} - -// NewCommitCommand creates a new cobra.Command for `docker commit` -func NewCommitCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts commitOptions - - cmd := &cobra.Command{ - Use: "commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]", - Short: "Create a new image from a container's changes", - Args: cli.RequiresRangeArgs(1, 2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - if len(args) > 1 { - opts.reference = args[1] - } - return runCommit(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.SetInterspersed(false) - - flags.BoolVarP(&opts.pause, "pause", "p", true, "Pause container during commit") - flags.StringVarP(&opts.comment, "message", "m", "", "Commit message") - flags.StringVarP(&opts.author, "author", "a", "", "Author (e.g., \"John Hannibal Smith \")") - - opts.changes = dockeropts.NewListOpts(nil) - flags.VarP(&opts.changes, 
"change", "c", "Apply Dockerfile instruction to the created image") - - return cmd -} - -func runCommit(dockerCli *command.DockerCli, opts *commitOptions) error { - ctx := context.Background() - - name := opts.container - reference := opts.reference - - options := types.ContainerCommitOptions{ - Reference: reference, - Comment: opts.comment, - Author: opts.author, - Changes: opts.changes.GetAll(), - Pause: opts.pause, - } - - response, err := dockerCli.Client().ContainerCommit(ctx, name, options) - if err != nil { - return err - } - - fmt.Fprintln(dockerCli.Out(), response.ID) - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/cp.go b/fn/vendor/github.com/docker/docker/cli/command/container/cp.go deleted file mode 100644 index a1d7110a6..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/cp.go +++ /dev/null @@ -1,302 +0,0 @@ -package container - -import ( - "io" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/system" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type copyOptions struct { - source string - destination string - followLink bool -} - -type copyDirection int - -const ( - fromContainer copyDirection = (1 << iota) - toContainer - acrossContainers = fromContainer | toContainer -) - -type cpConfig struct { - followLink bool -} - -// NewCopyCommand creates a new `docker cp` command -func NewCopyCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts copyOptions - - cmd := &cobra.Command{ - Use: `cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|- - docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH`, - Short: "Copy files/folders between a container and the local filesystem", - Long: strings.Join([]string{ - "Copy files/folders between a container and the local filesystem\n", - "\nUse 
'-' as the source to read a tar archive from stdin\n", - "and extract it to a directory destination in a container.\n", - "Use '-' as the destination to stream a tar archive of a\n", - "container source to stdout.", - }, ""), - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - if args[0] == "" { - return errors.New("source can not be empty") - } - if args[1] == "" { - return errors.New("destination can not be empty") - } - opts.source = args[0] - opts.destination = args[1] - return runCopy(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.followLink, "follow-link", "L", false, "Always follow symbol link in SRC_PATH") - - return cmd -} - -func runCopy(dockerCli *command.DockerCli, opts copyOptions) error { - srcContainer, srcPath := splitCpArg(opts.source) - dstContainer, dstPath := splitCpArg(opts.destination) - - var direction copyDirection - if srcContainer != "" { - direction |= fromContainer - } - if dstContainer != "" { - direction |= toContainer - } - - cpParam := &cpConfig{ - followLink: opts.followLink, - } - - ctx := context.Background() - - switch direction { - case fromContainer: - return copyFromContainer(ctx, dockerCli, srcContainer, srcPath, dstPath, cpParam) - case toContainer: - return copyToContainer(ctx, dockerCli, srcPath, dstContainer, dstPath, cpParam) - case acrossContainers: - // Copying between containers isn't supported. - return errors.New("copying between containers is not supported") - default: - // User didn't specify any container. 
- return errors.New("must specify at least one container source") - } -} - -func statContainerPath(ctx context.Context, dockerCli *command.DockerCli, containerName, path string) (types.ContainerPathStat, error) { - return dockerCli.Client().ContainerStatPath(ctx, containerName, path) -} - -func resolveLocalPath(localPath string) (absPath string, err error) { - if absPath, err = filepath.Abs(localPath); err != nil { - return - } - - return archive.PreserveTrailingDotOrSeparator(absPath, localPath), nil -} - -func copyFromContainer(ctx context.Context, dockerCli *command.DockerCli, srcContainer, srcPath, dstPath string, cpParam *cpConfig) (err error) { - if dstPath != "-" { - // Get an absolute destination path. - dstPath, err = resolveLocalPath(dstPath) - if err != nil { - return err - } - } - - // if client requests to follow symbol link, then must decide target file to be copied - var rebaseName string - if cpParam.followLink { - srcStat, err := statContainerPath(ctx, dockerCli, srcContainer, srcPath) - - // If the destination is a symbolic link, we should follow it. - if err == nil && srcStat.Mode&os.ModeSymlink != 0 { - linkTarget := srcStat.LinkTarget - if !system.IsAbs(linkTarget) { - // Join with the parent directory. - srcParent, _ := archive.SplitPathDirEntry(srcPath) - linkTarget = filepath.Join(srcParent, linkTarget) - } - - linkTarget, rebaseName = archive.GetRebaseName(srcPath, linkTarget) - srcPath = linkTarget - } - - } - - content, stat, err := dockerCli.Client().CopyFromContainer(ctx, srcContainer, srcPath) - if err != nil { - return err - } - defer content.Close() - - if dstPath == "-" { - // Send the response to STDOUT. - _, err = io.Copy(os.Stdout, content) - - return err - } - - // Prepare source copy info. 
- srcInfo := archive.CopyInfo{ - Path: srcPath, - Exists: true, - IsDir: stat.Mode.IsDir(), - RebaseName: rebaseName, - } - - preArchive := content - if len(srcInfo.RebaseName) != 0 { - _, srcBase := archive.SplitPathDirEntry(srcInfo.Path) - preArchive = archive.RebaseArchiveEntries(content, srcBase, srcInfo.RebaseName) - } - // See comments in the implementation of `archive.CopyTo` for exactly what - // goes into deciding how and whether the source archive needs to be - // altered for the correct copy behavior. - return archive.CopyTo(preArchive, srcInfo, dstPath) -} - -func copyToContainer(ctx context.Context, dockerCli *command.DockerCli, srcPath, dstContainer, dstPath string, cpParam *cpConfig) (err error) { - if srcPath != "-" { - // Get an absolute source path. - srcPath, err = resolveLocalPath(srcPath) - if err != nil { - return err - } - } - - // In order to get the copy behavior right, we need to know information - // about both the source and destination. The API is a simple tar - // archive/extract API but we can use the stat info header about the - // destination to be more informed about exactly what the destination is. - - // Prepare destination copy info by stat-ing the container path. - dstInfo := archive.CopyInfo{Path: dstPath} - dstStat, err := statContainerPath(ctx, dockerCli, dstContainer, dstPath) - - // If the destination is a symbolic link, we should evaluate it. - if err == nil && dstStat.Mode&os.ModeSymlink != 0 { - linkTarget := dstStat.LinkTarget - if !system.IsAbs(linkTarget) { - // Join with the parent directory. - dstParent, _ := archive.SplitPathDirEntry(dstPath) - linkTarget = filepath.Join(dstParent, linkTarget) - } - - dstInfo.Path = linkTarget - dstStat, err = statContainerPath(ctx, dockerCli, dstContainer, linkTarget) - } - - // Ignore any error and assume that the parent directory of the destination - // path exists, in which case the copy may still succeed. 
If there is any - // type of conflict (e.g., non-directory overwriting an existing directory - // or vice versa) the extraction will fail. If the destination simply did - // not exist, but the parent directory does, the extraction will still - // succeed. - if err == nil { - dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir() - } - - var ( - content io.Reader - resolvedDstPath string - ) - - if srcPath == "-" { - // Use STDIN. - content = os.Stdin - resolvedDstPath = dstInfo.Path - if !dstInfo.IsDir { - return errors.Errorf("destination \"%s:%s\" must be a directory", dstContainer, dstPath) - } - } else { - // Prepare source copy info. - srcInfo, err := archive.CopyInfoSourcePath(srcPath, cpParam.followLink) - if err != nil { - return err - } - - srcArchive, err := archive.TarResource(srcInfo) - if err != nil { - return err - } - defer srcArchive.Close() - - // With the stat info about the local source as well as the - // destination, we have enough information to know whether we need to - // alter the archive that we upload so that when the server extracts - // it to the specified directory in the container we get the desired - // copy behavior. - - // See comments in the implementation of `archive.PrepareArchiveCopy` - // for exactly what goes into deciding how and whether the source - // archive needs to be altered for the correct copy behavior when it is - // extracted. This function also infers from the source and destination - // info which directory to extract to, which may be the parent of the - // destination that the user specified. 
- dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) - if err != nil { - return err - } - defer preparedArchive.Close() - - resolvedDstPath = dstDir - content = preparedArchive - } - - options := types.CopyToContainerOptions{ - AllowOverwriteDirWithFile: false, - } - - return dockerCli.Client().CopyToContainer(ctx, dstContainer, resolvedDstPath, content, options) -} - -// We use `:` as a delimiter between CONTAINER and PATH, but `:` could also be -// in a valid LOCALPATH, like `file:name.txt`. We can resolve this ambiguity by -// requiring a LOCALPATH with a `:` to be made explicit with a relative or -// absolute path: -// `/path/to/file:name.txt` or `./file:name.txt` -// -// This is apparently how `scp` handles this as well: -// http://www.cyberciti.biz/faq/rsync-scp-file-name-with-colon-punctuation-in-it/ -// -// We can't simply check for a filepath separator because container names may -// have a separator, e.g., "host0/cname1" if container is in a Docker cluster, -// so we have to check for a `/` or `.` prefix. Also, in the case of a Windows -// client, a `:` could be part of an absolute Windows path, in which case it -// is immediately proceeded by a backslash. -func splitCpArg(arg string) (container, path string) { - if system.IsAbs(arg) { - // Explicit local absolute path, e.g., `C:\foo` or `/foo`. - return "", arg - } - - parts := strings.SplitN(arg, ":", 2) - - if len(parts) == 1 || strings.HasPrefix(parts[0], ".") { - // Either there's no `:` in the arg - // OR it's an explicit local relative path like `./file:name.txt`. 
- return "", arg - } - - return parts[0], parts[1] -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/create.go b/fn/vendor/github.com/docker/docker/cli/command/container/create.go deleted file mode 100644 index 9222b4060..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/create.go +++ /dev/null @@ -1,224 +0,0 @@ -package container - -import ( - "fmt" - "io" - "os" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/image" - apiclient "github.com/docker/docker/client" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/registry" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "golang.org/x/net/context" -) - -type createOptions struct { - name string -} - -// NewCreateCommand creates a new cobra.Command for `docker create` -func NewCreateCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts createOptions - var copts *containerOptions - - cmd := &cobra.Command{ - Use: "create [OPTIONS] IMAGE [COMMAND] [ARG...]", - Short: "Create a new container", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - copts.Image = args[0] - if len(args) > 1 { - copts.Args = args[1:] - } - return runCreate(dockerCli, cmd.Flags(), &opts, copts) - }, - } - - flags := cmd.Flags() - flags.SetInterspersed(false) - - flags.StringVar(&opts.name, "name", "", "Assign a name to the container") - - // Add an explicit help that doesn't have a `-h` to prevent the conflict - // with hostname - flags.Bool("help", false, "Print usage") - - command.AddTrustVerificationFlags(flags) - copts = addFlags(flags) - return cmd -} - -func runCreate(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts *createOptions, copts *containerOptions) error { - containerConfig, 
err := parse(flags, copts) - if err != nil { - reportError(dockerCli.Err(), "create", err.Error(), true) - return cli.StatusError{StatusCode: 125} - } - response, err := createContainer(context.Background(), dockerCli, containerConfig, opts.name) - if err != nil { - return err - } - fmt.Fprintln(dockerCli.Out(), response.ID) - return nil -} - -func pullImage(ctx context.Context, dockerCli *command.DockerCli, image string, out io.Writer) error { - ref, err := reference.ParseNormalizedNamed(image) - if err != nil { - return err - } - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := registry.ParseRepositoryInfo(ref) - if err != nil { - return err - } - - authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) - encodedAuth, err := command.EncodeAuthToBase64(authConfig) - if err != nil { - return err - } - - options := types.ImageCreateOptions{ - RegistryAuth: encodedAuth, - } - - responseBody, err := dockerCli.Client().ImageCreate(ctx, image, options) - if err != nil { - return err - } - defer responseBody.Close() - - return jsonmessage.DisplayJSONMessagesStream( - responseBody, - out, - dockerCli.Out().FD(), - dockerCli.Out().IsTerminal(), - nil) -} - -type cidFile struct { - path string - file *os.File - written bool -} - -func (cid *cidFile) Close() error { - cid.file.Close() - - if cid.written { - return nil - } - if err := os.Remove(cid.path); err != nil { - return errors.Errorf("failed to remove the CID file '%s': %s \n", cid.path, err) - } - - return nil -} - -func (cid *cidFile) Write(id string) error { - if _, err := cid.file.Write([]byte(id)); err != nil { - return errors.Errorf("Failed to write the container ID to the file: %s", err) - } - cid.written = true - return nil -} - -func newCIDFile(path string) (*cidFile, error) { - if _, err := os.Stat(path); err == nil { - return nil, errors.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path) - } - - f, err := 
os.Create(path) - if err != nil { - return nil, errors.Errorf("Failed to create the container ID file: %s", err) - } - - return &cidFile{path: path, file: f}, nil -} - -func createContainer(ctx context.Context, dockerCli *command.DockerCli, containerConfig *containerConfig, name string) (*container.ContainerCreateCreatedBody, error) { - config := containerConfig.Config - hostConfig := containerConfig.HostConfig - networkingConfig := containerConfig.NetworkingConfig - stderr := dockerCli.Err() - - var ( - containerIDFile *cidFile - trustedRef reference.Canonical - namedRef reference.Named - ) - - cidfile := hostConfig.ContainerIDFile - if cidfile != "" { - var err error - if containerIDFile, err = newCIDFile(cidfile); err != nil { - return nil, err - } - defer containerIDFile.Close() - } - - ref, err := reference.ParseAnyReference(config.Image) - if err != nil { - return nil, err - } - if named, ok := ref.(reference.Named); ok { - namedRef = reference.TagNameOnly(named) - - if taggedRef, ok := namedRef.(reference.NamedTagged); ok && command.IsTrusted() { - var err error - trustedRef, err = image.TrustedReference(ctx, dockerCli, taggedRef, nil) - if err != nil { - return nil, err - } - config.Image = reference.FamiliarString(trustedRef) - } - } - - //create the container - response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, name) - - //if image not found try to pull it - if err != nil { - if apiclient.IsErrImageNotFound(err) && namedRef != nil { - fmt.Fprintf(stderr, "Unable to find image '%s' locally\n", reference.FamiliarString(namedRef)) - - // we don't want to write to stdout anything apart from container.ID - if err = pullImage(ctx, dockerCli, config.Image, stderr); err != nil { - return nil, err - } - if taggedRef, ok := namedRef.(reference.NamedTagged); ok && trustedRef != nil { - if err := image.TagTrusted(ctx, dockerCli, trustedRef, taggedRef); err != nil { - return nil, err - } - } - // Retry - var retryErr error - 
response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, name) - if retryErr != nil { - return nil, retryErr - } - } else { - return nil, err - } - } - - for _, warning := range response.Warnings { - fmt.Fprintf(stderr, "WARNING: %s\n", warning) - } - if containerIDFile != nil { - if err = containerIDFile.Write(response.ID); err != nil { - return nil, err - } - } - return &response, nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/diff.go b/fn/vendor/github.com/docker/docker/cli/command/container/diff.go deleted file mode 100644 index 816a0a56a..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/diff.go +++ /dev/null @@ -1,46 +0,0 @@ -package container - -import ( - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type diffOptions struct { - container string -} - -// NewDiffCommand creates a new cobra.Command for `docker diff` -func NewDiffCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts diffOptions - - return &cobra.Command{ - Use: "diff CONTAINER", - Short: "Inspect changes to files or directories on a container's filesystem", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - return runDiff(dockerCli, &opts) - }, - } -} - -func runDiff(dockerCli *command.DockerCli, opts *diffOptions) error { - if opts.container == "" { - return errors.New("Container name cannot be empty") - } - ctx := context.Background() - - changes, err := dockerCli.Client().ContainerDiff(ctx, opts.container) - if err != nil { - return err - } - diffCtx := formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewDiffFormat("{{.Type}} {{.Path}}"), - } - return formatter.DiffWrite(diffCtx, changes) -} diff --git 
a/fn/vendor/github.com/docker/docker/cli/command/container/exec.go b/fn/vendor/github.com/docker/docker/cli/command/container/exec.go deleted file mode 100644 index 676708c77..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/exec.go +++ /dev/null @@ -1,205 +0,0 @@ -package container - -import ( - "fmt" - "io" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - apiclient "github.com/docker/docker/client" - options "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/promise" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type execOptions struct { - detachKeys string - interactive bool - tty bool - detach bool - user string - privileged bool - env *options.ListOpts -} - -func newExecOptions() *execOptions { - var values []string - return &execOptions{ - env: options.NewListOptsRef(&values, options.ValidateEnv), - } -} - -// NewExecCommand creates a new cobra.Command for `docker exec` -func NewExecCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := newExecOptions() - - cmd := &cobra.Command{ - Use: "exec [OPTIONS] CONTAINER COMMAND [ARG...]", - Short: "Run a command in a running container", - Args: cli.RequiresMinArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - container := args[0] - execCmd := args[1:] - return runExec(dockerCli, opts, container, execCmd) - }, - } - - flags := cmd.Flags() - flags.SetInterspersed(false) - - flags.StringVarP(&opts.detachKeys, "detach-keys", "", "", "Override the key sequence for detaching a container") - flags.BoolVarP(&opts.interactive, "interactive", "i", false, "Keep STDIN open even if not attached") - flags.BoolVarP(&opts.tty, "tty", "t", false, "Allocate a pseudo-TTY") - flags.BoolVarP(&opts.detach, "detach", "d", false, "Detached mode: run command in the background") - flags.StringVarP(&opts.user, "user", "u", "", "Username or UID (format: [:])") - 
flags.BoolVarP(&opts.privileged, "privileged", "", false, "Give extended privileges to the command") - flags.VarP(opts.env, "env", "e", "Set environment variables") - flags.SetAnnotation("env", "version", []string{"1.25"}) - - return cmd -} - -func runExec(dockerCli *command.DockerCli, opts *execOptions, container string, execCmd []string) error { - execConfig, err := parseExec(opts, execCmd) - // just in case the ParseExec does not exit - if container == "" || err != nil { - return cli.StatusError{StatusCode: 1} - } - - if opts.detachKeys != "" { - dockerCli.ConfigFile().DetachKeys = opts.detachKeys - } - - // Send client escape keys - execConfig.DetachKeys = dockerCli.ConfigFile().DetachKeys - - ctx := context.Background() - client := dockerCli.Client() - - response, err := client.ContainerExecCreate(ctx, container, *execConfig) - if err != nil { - return err - } - - execID := response.ID - if execID == "" { - fmt.Fprintln(dockerCli.Out(), "exec ID empty") - return nil - } - - //Temp struct for execStart so that we don't need to transfer all the execConfig - if !execConfig.Detach { - if err := dockerCli.In().CheckTty(execConfig.AttachStdin, execConfig.Tty); err != nil { - return err - } - } else { - execStartCheck := types.ExecStartCheck{ - Detach: execConfig.Detach, - Tty: execConfig.Tty, - } - - if err := client.ContainerExecStart(ctx, execID, execStartCheck); err != nil { - return err - } - // For now don't print this - wait for when we support exec wait() - // fmt.Fprintf(dockerCli.Out(), "%s\n", execID) - return nil - } - - // Interactive exec requested. 
- var ( - out, stderr io.Writer - in io.ReadCloser - errCh chan error - ) - - if execConfig.AttachStdin { - in = dockerCli.In() - } - if execConfig.AttachStdout { - out = dockerCli.Out() - } - if execConfig.AttachStderr { - if execConfig.Tty { - stderr = dockerCli.Out() - } else { - stderr = dockerCli.Err() - } - } - - resp, err := client.ContainerExecAttach(ctx, execID, *execConfig) - if err != nil { - return err - } - defer resp.Close() - errCh = promise.Go(func() error { - return holdHijackedConnection(ctx, dockerCli, execConfig.Tty, in, out, stderr, resp) - }) - - if execConfig.Tty && dockerCli.In().IsTerminal() { - if err := MonitorTtySize(ctx, dockerCli, execID, true); err != nil { - fmt.Fprintln(dockerCli.Err(), "Error monitoring TTY size:", err) - } - } - - if err := <-errCh; err != nil { - logrus.Debugf("Error hijack: %s", err) - return err - } - - var status int - if _, status, err = getExecExitCode(ctx, client, execID); err != nil { - return err - } - - if status != 0 { - return cli.StatusError{StatusCode: status} - } - - return nil -} - -// getExecExitCode perform an inspect on the exec command. It returns -// the running state and the exit code. -func getExecExitCode(ctx context.Context, client apiclient.ContainerAPIClient, execID string) (bool, int, error) { - resp, err := client.ContainerExecInspect(ctx, execID) - if err != nil { - // If we can't connect, then the daemon probably died. - if !apiclient.IsErrConnectionFailed(err) { - return false, -1, err - } - return false, -1, nil - } - - return resp.Running, resp.ExitCode, nil -} - -// parseExec parses the specified args for the specified command and generates -// an ExecConfig from it. 
-func parseExec(opts *execOptions, execCmd []string) (*types.ExecConfig, error) { - execConfig := &types.ExecConfig{ - User: opts.user, - Privileged: opts.privileged, - Tty: opts.tty, - Cmd: execCmd, - Detach: opts.detach, - } - - // If -d is not set, attach to everything by default - if !opts.detach { - execConfig.AttachStdout = true - execConfig.AttachStderr = true - if opts.interactive { - execConfig.AttachStdin = true - } - } - - if opts.env != nil { - execConfig.Env = opts.env.GetAll() - } - - return execConfig, nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/exec_test.go b/fn/vendor/github.com/docker/docker/cli/command/container/exec_test.go deleted file mode 100644 index baeeaf190..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/exec_test.go +++ /dev/null @@ -1,116 +0,0 @@ -package container - -import ( - "testing" - - "github.com/docker/docker/api/types" -) - -type arguments struct { - options execOptions - execCmd []string -} - -func TestParseExec(t *testing.T) { - valids := map[*arguments]*types.ExecConfig{ - &arguments{ - execCmd: []string{"command"}, - }: { - Cmd: []string{"command"}, - AttachStdout: true, - AttachStderr: true, - }, - &arguments{ - execCmd: []string{"command1", "command2"}, - }: { - Cmd: []string{"command1", "command2"}, - AttachStdout: true, - AttachStderr: true, - }, - &arguments{ - options: execOptions{ - interactive: true, - tty: true, - user: "uid", - }, - execCmd: []string{"command"}, - }: { - User: "uid", - AttachStdin: true, - AttachStdout: true, - AttachStderr: true, - Tty: true, - Cmd: []string{"command"}, - }, - &arguments{ - options: execOptions{ - detach: true, - }, - execCmd: []string{"command"}, - }: { - AttachStdin: false, - AttachStdout: false, - AttachStderr: false, - Detach: true, - Cmd: []string{"command"}, - }, - &arguments{ - options: execOptions{ - tty: true, - interactive: true, - detach: true, - }, - execCmd: []string{"command"}, - }: { - AttachStdin: false, 
- AttachStdout: false, - AttachStderr: false, - Detach: true, - Tty: true, - Cmd: []string{"command"}, - }, - } - - for valid, expectedExecConfig := range valids { - execConfig, err := parseExec(&valid.options, valid.execCmd) - if err != nil { - t.Fatal(err) - } - if !compareExecConfig(expectedExecConfig, execConfig) { - t.Fatalf("Expected [%v] for %v, got [%v]", expectedExecConfig, valid, execConfig) - } - } -} - -func compareExecConfig(config1 *types.ExecConfig, config2 *types.ExecConfig) bool { - if config1.AttachStderr != config2.AttachStderr { - return false - } - if config1.AttachStdin != config2.AttachStdin { - return false - } - if config1.AttachStdout != config2.AttachStdout { - return false - } - if config1.Detach != config2.Detach { - return false - } - if config1.Privileged != config2.Privileged { - return false - } - if config1.Tty != config2.Tty { - return false - } - if config1.User != config2.User { - return false - } - if len(config1.Cmd) != len(config2.Cmd) { - return false - } - for index, value := range config1.Cmd { - if value != config2.Cmd[index] { - return false - } - } - return true -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/export.go b/fn/vendor/github.com/docker/docker/cli/command/container/export.go deleted file mode 100644 index cb0ddfe7a..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/export.go +++ /dev/null @@ -1,58 +0,0 @@ -package container - -import ( - "io" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type exportOptions struct { - container string - output string -} - -// NewExportCommand creates a new `docker export` command -func NewExportCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts exportOptions - - cmd := &cobra.Command{ - Use: "export [OPTIONS] CONTAINER", - Short: "Export a container's filesystem as a tar archive", - Args: 
cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - return runExport(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT") - - return cmd -} - -func runExport(dockerCli *command.DockerCli, opts exportOptions) error { - if opts.output == "" && dockerCli.Out().IsTerminal() { - return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.") - } - - clnt := dockerCli.Client() - - responseBody, err := clnt.ContainerExport(context.Background(), opts.container) - if err != nil { - return err - } - defer responseBody.Close() - - if opts.output == "" { - _, err := io.Copy(dockerCli.Out(), responseBody) - return err - } - - return command.CopyToFile(opts.output, responseBody) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/hijack.go b/fn/vendor/github.com/docker/docker/cli/command/container/hijack.go deleted file mode 100644 index 11acf114f..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/hijack.go +++ /dev/null @@ -1,124 +0,0 @@ -package container - -import ( - "io" - "runtime" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/stdcopy" - "golang.org/x/net/context" -) - -// holdHijackedConnection handles copying input to and output from streams to the -// connection -func holdHijackedConnection(ctx context.Context, streams command.Streams, tty bool, inputStream io.ReadCloser, outputStream, errorStream io.Writer, resp types.HijackedResponse) error { - var ( - err error - restoreOnce sync.Once - ) - if inputStream != nil && tty { - if err := setRawTerminal(streams); err != nil { - return err - } - defer func() { - restoreOnce.Do(func() { - restoreTerminal(streams, inputStream) - }) - }() - } - - receiveStdout := make(chan error, 1) - if outputStream != nil || 
errorStream != nil { - go func() { - // When TTY is ON, use regular copy - if tty && outputStream != nil { - _, err = io.Copy(outputStream, resp.Reader) - // we should restore the terminal as soon as possible once connection end - // so any following print messages will be in normal type. - if inputStream != nil { - restoreOnce.Do(func() { - restoreTerminal(streams, inputStream) - }) - } - } else { - _, err = stdcopy.StdCopy(outputStream, errorStream, resp.Reader) - } - - logrus.Debug("[hijack] End of stdout") - receiveStdout <- err - }() - } - - stdinDone := make(chan struct{}) - go func() { - if inputStream != nil { - io.Copy(resp.Conn, inputStream) - // we should restore the terminal as soon as possible once connection end - // so any following print messages will be in normal type. - if tty { - restoreOnce.Do(func() { - restoreTerminal(streams, inputStream) - }) - } - logrus.Debug("[hijack] End of stdin") - } - - if err := resp.CloseWrite(); err != nil { - logrus.Debugf("Couldn't send EOF: %s", err) - } - close(stdinDone) - }() - - select { - case err := <-receiveStdout: - if err != nil { - logrus.Debugf("Error receiveStdout: %s", err) - return err - } - case <-stdinDone: - if outputStream != nil || errorStream != nil { - select { - case err := <-receiveStdout: - if err != nil { - logrus.Debugf("Error receiveStdout: %s", err) - return err - } - case <-ctx.Done(): - } - } - case <-ctx.Done(): - } - - return nil -} - -func setRawTerminal(streams command.Streams) error { - if err := streams.In().SetRawTerminal(); err != nil { - return err - } - return streams.Out().SetRawTerminal() -} - -func restoreTerminal(streams command.Streams, in io.Closer) error { - streams.In().RestoreTerminal() - streams.Out().RestoreTerminal() - // WARNING: DO NOT REMOVE THE OS CHECKS !!! - // For some reason this Close call blocks on darwin.. - // As the client exits right after, simply discard the close - // until we find a better solution. 
- // - // This can also cause the client on Windows to get stuck in Win32 CloseHandle() - // in some cases. See https://github.com/docker/docker/issues/28267#issuecomment-288237442 - // Tracked internally at Microsoft by VSO #11352156. In the - // Windows case, you hit this if you are using the native/v2 console, - // not the "legacy" console, and you start the client in a new window. eg - // `start docker run --rm -it microsoft/nanoserver cmd /s /c echo foobar` - // will hang. Remove start, and it won't repro. - if in != nil && runtime.GOOS != "darwin" && runtime.GOOS != "windows" { - return in.Close() - } - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/inspect.go b/fn/vendor/github.com/docker/docker/cli/command/container/inspect.go deleted file mode 100644 index d08b38dc9..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/inspect.go +++ /dev/null @@ -1,46 +0,0 @@ -package container - -import ( - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/inspect" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type inspectOptions struct { - format string - size bool - refs []string -} - -// newInspectCommand creates a new cobra.Command for `docker container inspect` -func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Display detailed information on one or more containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.refs = args - return runInspect(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes") - - return cmd -} - -func runInspect(dockerCli *command.DockerCli, opts 
inspectOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - getRefFunc := func(ref string) (interface{}, []byte, error) { - return client.ContainerInspectWithRaw(ctx, ref, opts.size) - } - return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/kill.go b/fn/vendor/github.com/docker/docker/cli/command/container/kill.go deleted file mode 100644 index 4cc3ee0fc..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/kill.go +++ /dev/null @@ -1,56 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type killOptions struct { - signal string - - containers []string -} - -// NewKillCommand creates a new cobra.Command for `docker kill` -func NewKillCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts killOptions - - cmd := &cobra.Command{ - Use: "kill [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Kill one or more running containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runKill(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.signal, "signal", "s", "KILL", "Signal to send to the container") - return cmd -} - -func runKill(dockerCli *command.DockerCli, opts *killOptions) error { - var errs []string - ctx := context.Background() - errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, container string) error { - return dockerCli.Client().ContainerKill(ctx, container, opts.signal) - }) - for _, name := range opts.containers { - if err := <-errChan; err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintln(dockerCli.Out(), name) - } - } - if len(errs) > 0 { - return errors.New(strings.Join(errs, "\n")) - } - 
return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/list.go b/fn/vendor/github.com/docker/docker/cli/command/container/list.go deleted file mode 100644 index e0f4fdf21..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/list.go +++ /dev/null @@ -1,140 +0,0 @@ -package container - -import ( - "io/ioutil" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/templates" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type psOptions struct { - quiet bool - size bool - all bool - noTrunc bool - nLatest bool - last int - format string - filter opts.FilterOpt -} - -// NewPsCommand creates a new cobra.Command for `docker ps` -func NewPsCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := psOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ps [OPTIONS]", - Short: "List containers", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runPs(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display numeric IDs") - flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes") - flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)") - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") - flags.BoolVarP(&opts.nLatest, "latest", "l", false, "Show the latest created container (includes all states)") - flags.IntVarP(&opts.last, "last", "n", -1, "Show n last created containers (includes all states)") - flags.StringVarP(&opts.format, "format", "", "", "Pretty-print containers using a Go template") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func newListCommand(dockerCli 
*command.DockerCli) *cobra.Command { - cmd := *NewPsCommand(dockerCli) - cmd.Aliases = []string{"ps", "list"} - cmd.Use = "ls [OPTIONS]" - return &cmd -} - -// listOptionsProcessor is used to set any container list options which may only -// be embedded in the format template. -// This is passed directly into tmpl.Execute in order to allow the preprocessor -// to set any list options that were not provided by flags (e.g. `.Size`). -// It is using a `map[string]bool` so that unknown fields passed into the -// template format do not cause errors. These errors will get picked up when -// running through the actual template processor. -type listOptionsProcessor map[string]bool - -// Size sets the size of the map when called by a template execution. -func (o listOptionsProcessor) Size() bool { - o["size"] = true - return true -} - -// Label is needed here as it allows the correct pre-processing -// because Label() is a method with arguments -func (o listOptionsProcessor) Label(name string) string { - return "" -} - -func buildContainerListOptions(opts *psOptions) (*types.ContainerListOptions, error) { - options := &types.ContainerListOptions{ - All: opts.all, - Limit: opts.last, - Size: opts.size, - Filters: opts.filter.Value(), - } - - if opts.nLatest && opts.last == -1 { - options.Limit = 1 - } - - tmpl, err := templates.Parse(opts.format) - - if err != nil { - return nil, err - } - - optionsProcessor := listOptionsProcessor{} - // This shouldn't error out but swallowing the error makes it harder - // to track down if preProcessor issues come up. 
Ref #24696 - if err := tmpl.Execute(ioutil.Discard, optionsProcessor); err != nil { - return nil, err - } - // At the moment all we need is to capture .Size for preprocessor - options.Size = opts.size || optionsProcessor["size"] - - return options, nil -} - -func runPs(dockerCli *command.DockerCli, opts *psOptions) error { - ctx := context.Background() - - listOptions, err := buildContainerListOptions(opts) - if err != nil { - return err - } - - containers, err := dockerCli.Client().ContainerList(ctx, *listOptions) - if err != nil { - return err - } - - format := opts.format - if len(format) == 0 { - if len(dockerCli.ConfigFile().PsFormat) > 0 && !opts.quiet { - format = dockerCli.ConfigFile().PsFormat - } else { - format = formatter.TableFormatKey - } - } - - containerCtx := formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewContainerFormat(format, opts.quiet, listOptions.Size), - Trunc: !opts.noTrunc, - } - return formatter.ContainerWrite(containerCtx, containers) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/logs.go b/fn/vendor/github.com/docker/docker/cli/command/container/logs.go deleted file mode 100644 index d8cafaf74..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/logs.go +++ /dev/null @@ -1,76 +0,0 @@ -package container - -import ( - "io" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/stdcopy" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type logsOptions struct { - follow bool - since string - timestamps bool - details bool - tail string - - container string -} - -// NewLogsCommand creates a new cobra.Command for `docker logs` -func NewLogsCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts logsOptions - - cmd := &cobra.Command{ - Use: "logs [OPTIONS] CONTAINER", - Short: "Fetch the logs of a container", - Args: cli.ExactArgs(1), - RunE: func(cmd 
*cobra.Command, args []string) error { - opts.container = args[0] - return runLogs(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.follow, "follow", "f", false, "Follow log output") - flags.StringVar(&opts.since, "since", "", "Show logs since timestamp (e.g. 2013-01-02T13:23:37) or relative (e.g. 42m for 42 minutes)") - flags.BoolVarP(&opts.timestamps, "timestamps", "t", false, "Show timestamps") - flags.BoolVar(&opts.details, "details", false, "Show extra details provided to logs") - flags.StringVar(&opts.tail, "tail", "all", "Number of lines to show from the end of the logs") - return cmd -} - -func runLogs(dockerCli *command.DockerCli, opts *logsOptions) error { - ctx := context.Background() - - options := types.ContainerLogsOptions{ - ShowStdout: true, - ShowStderr: true, - Since: opts.since, - Timestamps: opts.timestamps, - Follow: opts.follow, - Tail: opts.tail, - Details: opts.details, - } - responseBody, err := dockerCli.Client().ContainerLogs(ctx, opts.container, options) - if err != nil { - return err - } - defer responseBody.Close() - - c, err := dockerCli.Client().ContainerInspect(ctx, opts.container) - if err != nil { - return err - } - - if c.Config.Tty { - _, err = io.Copy(dockerCli.Out(), responseBody) - } else { - _, err = stdcopy.StdCopy(dockerCli.Out(), dockerCli.Err(), responseBody) - } - return err -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/opts.go b/fn/vendor/github.com/docker/docker/cli/command/container/opts.go deleted file mode 100644 index 2c717431e..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/opts.go +++ /dev/null @@ -1,900 +0,0 @@ -package container - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "path" - "regexp" - "strconv" - "strings" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types/container" - networktypes "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/strslice" - 
"github.com/docker/docker/opts" - "github.com/docker/docker/pkg/signal" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/go-connections/nat" - "github.com/pkg/errors" - "github.com/spf13/pflag" -) - -var ( - deviceCgroupRuleRegexp = regexp.MustCompile("^[acb] ([0-9]+|\\*):([0-9]+|\\*) [rwm]{1,3}$") -) - -// containerOptions is a data object with all the options for creating a container -type containerOptions struct { - attach opts.ListOpts - volumes opts.ListOpts - tmpfs opts.ListOpts - mounts opts.MountOpt - blkioWeightDevice opts.WeightdeviceOpt - deviceReadBps opts.ThrottledeviceOpt - deviceWriteBps opts.ThrottledeviceOpt - links opts.ListOpts - aliases opts.ListOpts - linkLocalIPs opts.ListOpts - deviceReadIOps opts.ThrottledeviceOpt - deviceWriteIOps opts.ThrottledeviceOpt - env opts.ListOpts - labels opts.ListOpts - deviceCgroupRules opts.ListOpts - devices opts.ListOpts - ulimits *opts.UlimitOpt - sysctls *opts.MapOpts - publish opts.ListOpts - expose opts.ListOpts - dns opts.ListOpts - dnsSearch opts.ListOpts - dnsOptions opts.ListOpts - extraHosts opts.ListOpts - volumesFrom opts.ListOpts - envFile opts.ListOpts - capAdd opts.ListOpts - capDrop opts.ListOpts - groupAdd opts.ListOpts - securityOpt opts.ListOpts - storageOpt opts.ListOpts - labelsFile opts.ListOpts - loggingOpts opts.ListOpts - privileged bool - pidMode string - utsMode string - usernsMode string - publishAll bool - stdin bool - tty bool - oomKillDisable bool - oomScoreAdj int - containerIDFile string - entrypoint string - hostname string - memory opts.MemBytes - memoryReservation opts.MemBytes - memorySwap opts.MemSwapBytes - kernelMemory opts.MemBytes - user string - workingDir string - cpuCount int64 - cpuShares int64 - cpuPercent int64 - cpuPeriod int64 - cpuRealtimePeriod int64 - cpuRealtimeRuntime int64 - cpuQuota int64 - cpus opts.NanoCPUs - cpusetCpus string - cpusetMems string - blkioWeight uint16 - ioMaxBandwidth opts.MemBytes - ioMaxIOps uint64 - 
swappiness int64 - netMode string - macAddress string - ipv4Address string - ipv6Address string - ipcMode string - pidsLimit int64 - restartPolicy string - readonlyRootfs bool - loggingDriver string - cgroupParent string - volumeDriver string - stopSignal string - stopTimeout int - isolation string - shmSize opts.MemBytes - noHealthcheck bool - healthCmd string - healthInterval time.Duration - healthTimeout time.Duration - healthStartPeriod time.Duration - healthRetries int - runtime string - autoRemove bool - init bool - - Image string - Args []string -} - -// addFlags adds all command line flags that will be used by parse to the FlagSet -func addFlags(flags *pflag.FlagSet) *containerOptions { - copts := &containerOptions{ - aliases: opts.NewListOpts(nil), - attach: opts.NewListOpts(validateAttach), - blkioWeightDevice: opts.NewWeightdeviceOpt(opts.ValidateWeightDevice), - capAdd: opts.NewListOpts(nil), - capDrop: opts.NewListOpts(nil), - dns: opts.NewListOpts(opts.ValidateIPAddress), - dnsOptions: opts.NewListOpts(nil), - dnsSearch: opts.NewListOpts(opts.ValidateDNSSearch), - deviceCgroupRules: opts.NewListOpts(validateDeviceCgroupRule), - deviceReadBps: opts.NewThrottledeviceOpt(opts.ValidateThrottleBpsDevice), - deviceReadIOps: opts.NewThrottledeviceOpt(opts.ValidateThrottleIOpsDevice), - deviceWriteBps: opts.NewThrottledeviceOpt(opts.ValidateThrottleBpsDevice), - deviceWriteIOps: opts.NewThrottledeviceOpt(opts.ValidateThrottleIOpsDevice), - devices: opts.NewListOpts(validateDevice), - env: opts.NewListOpts(opts.ValidateEnv), - envFile: opts.NewListOpts(nil), - expose: opts.NewListOpts(nil), - extraHosts: opts.NewListOpts(opts.ValidateExtraHost), - groupAdd: opts.NewListOpts(nil), - labels: opts.NewListOpts(opts.ValidateEnv), - labelsFile: opts.NewListOpts(nil), - linkLocalIPs: opts.NewListOpts(nil), - links: opts.NewListOpts(opts.ValidateLink), - loggingOpts: opts.NewListOpts(nil), - publish: opts.NewListOpts(nil), - securityOpt: opts.NewListOpts(nil), - 
storageOpt: opts.NewListOpts(nil), - sysctls: opts.NewMapOpts(nil, opts.ValidateSysctl), - tmpfs: opts.NewListOpts(nil), - ulimits: opts.NewUlimitOpt(nil), - volumes: opts.NewListOpts(nil), - volumesFrom: opts.NewListOpts(nil), - } - - // General purpose flags - flags.VarP(&copts.attach, "attach", "a", "Attach to STDIN, STDOUT or STDERR") - flags.Var(&copts.deviceCgroupRules, "device-cgroup-rule", "Add a rule to the cgroup allowed devices list") - flags.Var(&copts.devices, "device", "Add a host device to the container") - flags.VarP(&copts.env, "env", "e", "Set environment variables") - flags.Var(&copts.envFile, "env-file", "Read in a file of environment variables") - flags.StringVar(&copts.entrypoint, "entrypoint", "", "Overwrite the default ENTRYPOINT of the image") - flags.Var(&copts.groupAdd, "group-add", "Add additional groups to join") - flags.StringVarP(&copts.hostname, "hostname", "h", "", "Container host name") - flags.BoolVarP(&copts.stdin, "interactive", "i", false, "Keep STDIN open even if not attached") - flags.VarP(&copts.labels, "label", "l", "Set meta data on a container") - flags.Var(&copts.labelsFile, "label-file", "Read in a line delimited file of labels") - flags.BoolVar(&copts.readonlyRootfs, "read-only", false, "Mount the container's root filesystem as read only") - flags.StringVar(&copts.restartPolicy, "restart", "no", "Restart policy to apply when a container exits") - flags.StringVar(&copts.stopSignal, "stop-signal", signal.DefaultStopSignal, "Signal to stop a container") - flags.IntVar(&copts.stopTimeout, "stop-timeout", 0, "Timeout (in seconds) to stop a container") - flags.SetAnnotation("stop-timeout", "version", []string{"1.25"}) - flags.Var(copts.sysctls, "sysctl", "Sysctl options") - flags.BoolVarP(&copts.tty, "tty", "t", false, "Allocate a pseudo-TTY") - flags.Var(copts.ulimits, "ulimit", "Ulimit options") - flags.StringVarP(&copts.user, "user", "u", "", "Username or UID (format: [:])") - flags.StringVarP(&copts.workingDir, 
"workdir", "w", "", "Working directory inside the container") - flags.BoolVar(&copts.autoRemove, "rm", false, "Automatically remove the container when it exits") - - // Security - flags.Var(&copts.capAdd, "cap-add", "Add Linux capabilities") - flags.Var(&copts.capDrop, "cap-drop", "Drop Linux capabilities") - flags.BoolVar(&copts.privileged, "privileged", false, "Give extended privileges to this container") - flags.Var(&copts.securityOpt, "security-opt", "Security Options") - flags.StringVar(&copts.usernsMode, "userns", "", "User namespace to use") - - // Network and port publishing flag - flags.Var(&copts.extraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)") - flags.Var(&copts.dns, "dns", "Set custom DNS servers") - // We allow for both "--dns-opt" and "--dns-option", although the latter is the recommended way. - // This is to be consistent with service create/update - flags.Var(&copts.dnsOptions, "dns-opt", "Set DNS options") - flags.Var(&copts.dnsOptions, "dns-option", "Set DNS options") - flags.MarkHidden("dns-opt") - flags.Var(&copts.dnsSearch, "dns-search", "Set custom DNS search domains") - flags.Var(&copts.expose, "expose", "Expose a port or a range of ports") - flags.StringVar(&copts.ipv4Address, "ip", "", "IPv4 address (e.g., 172.30.100.104)") - flags.StringVar(&copts.ipv6Address, "ip6", "", "IPv6 address (e.g., 2001:db8::33)") - flags.Var(&copts.links, "link", "Add link to another container") - flags.Var(&copts.linkLocalIPs, "link-local-ip", "Container IPv4/IPv6 link-local addresses") - flags.StringVar(&copts.macAddress, "mac-address", "", "Container MAC address (e.g., 92:d0:c6:0a:29:33)") - flags.VarP(&copts.publish, "publish", "p", "Publish a container's port(s) to the host") - flags.BoolVarP(&copts.publishAll, "publish-all", "P", false, "Publish all exposed ports to random ports") - // We allow for both "--net" and "--network", although the latter is the recommended way. 
- flags.StringVar(&copts.netMode, "net", "default", "Connect a container to a network") - flags.StringVar(&copts.netMode, "network", "default", "Connect a container to a network") - flags.MarkHidden("net") - // We allow for both "--net-alias" and "--network-alias", although the latter is the recommended way. - flags.Var(&copts.aliases, "net-alias", "Add network-scoped alias for the container") - flags.Var(&copts.aliases, "network-alias", "Add network-scoped alias for the container") - flags.MarkHidden("net-alias") - - // Logging and storage - flags.StringVar(&copts.loggingDriver, "log-driver", "", "Logging driver for the container") - flags.StringVar(&copts.volumeDriver, "volume-driver", "", "Optional volume driver for the container") - flags.Var(&copts.loggingOpts, "log-opt", "Log driver options") - flags.Var(&copts.storageOpt, "storage-opt", "Storage driver options for the container") - flags.Var(&copts.tmpfs, "tmpfs", "Mount a tmpfs directory") - flags.Var(&copts.volumesFrom, "volumes-from", "Mount volumes from the specified container(s)") - flags.VarP(&copts.volumes, "volume", "v", "Bind mount a volume") - flags.Var(&copts.mounts, "mount", "Attach a filesystem mount to the container") - - // Health-checking - flags.StringVar(&copts.healthCmd, "health-cmd", "", "Command to run to check health") - flags.DurationVar(&copts.healthInterval, "health-interval", 0, "Time between running the check (ns|us|ms|s|m|h) (default 0s)") - flags.IntVar(&copts.healthRetries, "health-retries", 0, "Consecutive failures needed to report unhealthy") - flags.DurationVar(&copts.healthTimeout, "health-timeout", 0, "Maximum time to allow one check to run (ns|us|ms|s|m|h) (default 0s)") - flags.DurationVar(&copts.healthStartPeriod, "health-start-period", 0, "Start period for the container to initialize before starting health-retries countdown (ns|us|ms|s|m|h) (default 0s)") - flags.SetAnnotation("health-start-period", "version", []string{"1.29"}) - flags.BoolVar(&copts.noHealthcheck, 
"no-healthcheck", false, "Disable any container-specified HEALTHCHECK") - - // Resource management - flags.Uint16Var(&copts.blkioWeight, "blkio-weight", 0, "Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)") - flags.Var(&copts.blkioWeightDevice, "blkio-weight-device", "Block IO weight (relative device weight)") - flags.StringVar(&copts.containerIDFile, "cidfile", "", "Write the container ID to the file") - flags.StringVar(&copts.cpusetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") - flags.StringVar(&copts.cpusetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") - flags.Int64Var(&copts.cpuCount, "cpu-count", 0, "CPU count (Windows only)") - flags.SetAnnotation("cpu-count", "ostype", []string{"windows"}) - flags.Int64Var(&copts.cpuPercent, "cpu-percent", 0, "CPU percent (Windows only)") - flags.SetAnnotation("cpu-percent", "ostype", []string{"windows"}) - flags.Int64Var(&copts.cpuPeriod, "cpu-period", 0, "Limit CPU CFS (Completely Fair Scheduler) period") - flags.Int64Var(&copts.cpuQuota, "cpu-quota", 0, "Limit CPU CFS (Completely Fair Scheduler) quota") - flags.Int64Var(&copts.cpuRealtimePeriod, "cpu-rt-period", 0, "Limit CPU real-time period in microseconds") - flags.SetAnnotation("cpu-rt-period", "version", []string{"1.25"}) - flags.Int64Var(&copts.cpuRealtimeRuntime, "cpu-rt-runtime", 0, "Limit CPU real-time runtime in microseconds") - flags.SetAnnotation("cpu-rt-runtime", "version", []string{"1.25"}) - flags.Int64VarP(&copts.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") - flags.Var(&copts.cpus, "cpus", "Number of CPUs") - flags.SetAnnotation("cpus", "version", []string{"1.25"}) - flags.Var(&copts.deviceReadBps, "device-read-bps", "Limit read rate (bytes per second) from a device") - flags.Var(&copts.deviceReadIOps, "device-read-iops", "Limit read rate (IO per second) from a device") - flags.Var(&copts.deviceWriteBps, "device-write-bps", "Limit write rate (bytes per 
second) to a device") - flags.Var(&copts.deviceWriteIOps, "device-write-iops", "Limit write rate (IO per second) to a device") - flags.Var(&copts.ioMaxBandwidth, "io-maxbandwidth", "Maximum IO bandwidth limit for the system drive (Windows only)") - flags.SetAnnotation("io-maxbandwidth", "ostype", []string{"windows"}) - flags.Uint64Var(&copts.ioMaxIOps, "io-maxiops", 0, "Maximum IOps limit for the system drive (Windows only)") - flags.SetAnnotation("io-maxiops", "ostype", []string{"windows"}) - flags.Var(&copts.kernelMemory, "kernel-memory", "Kernel memory limit") - flags.VarP(&copts.memory, "memory", "m", "Memory limit") - flags.Var(&copts.memoryReservation, "memory-reservation", "Memory soft limit") - flags.Var(&copts.memorySwap, "memory-swap", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") - flags.Int64Var(&copts.swappiness, "memory-swappiness", -1, "Tune container memory swappiness (0 to 100)") - flags.BoolVar(&copts.oomKillDisable, "oom-kill-disable", false, "Disable OOM Killer") - flags.IntVar(&copts.oomScoreAdj, "oom-score-adj", 0, "Tune host's OOM preferences (-1000 to 1000)") - flags.Int64Var(&copts.pidsLimit, "pids-limit", 0, "Tune container pids limit (set -1 for unlimited)") - - // Low-level execution (cgroups, namespaces, ...) 
- flags.StringVar(&copts.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") - flags.StringVar(&copts.ipcMode, "ipc", "", "IPC namespace to use") - flags.StringVar(&copts.isolation, "isolation", "", "Container isolation technology") - flags.StringVar(&copts.pidMode, "pid", "", "PID namespace to use") - flags.Var(&copts.shmSize, "shm-size", "Size of /dev/shm") - flags.StringVar(&copts.utsMode, "uts", "", "UTS namespace to use") - flags.StringVar(&copts.runtime, "runtime", "", "Runtime to use for this container") - - flags.BoolVar(&copts.init, "init", false, "Run an init inside the container that forwards signals and reaps processes") - flags.SetAnnotation("init", "version", []string{"1.25"}) - return copts -} - -type containerConfig struct { - Config *container.Config - HostConfig *container.HostConfig - NetworkingConfig *networktypes.NetworkingConfig -} - -// parse parses the args for the specified command and generates a Config, -// a HostConfig and returns them with the specified command. -// If the specified args are not valid, it will return an error. -func parse(flags *pflag.FlagSet, copts *containerOptions) (*containerConfig, error) { - var ( - attachStdin = copts.attach.Get("stdin") - attachStdout = copts.attach.Get("stdout") - attachStderr = copts.attach.Get("stderr") - ) - - // Validate the input mac address - if copts.macAddress != "" { - if _, err := opts.ValidateMACAddress(copts.macAddress); err != nil { - return nil, errors.Errorf("%s is not a valid mac address", copts.macAddress) - } - } - if copts.stdin { - attachStdin = true - } - // If -a is not set, attach to stdout and stderr - if copts.attach.Len() == 0 { - attachStdout = true - attachStderr = true - } - - var err error - - swappiness := copts.swappiness - if swappiness != -1 && (swappiness < 0 || swappiness > 100) { - return nil, errors.Errorf("invalid value: %d. 
Valid memory swappiness range is 0-100", swappiness) - } - - mounts := copts.mounts.Value() - if len(mounts) > 0 && copts.volumeDriver != "" { - logrus.Warn("`--volume-driver` is ignored for volumes specified via `--mount`. Use `--mount type=volume,volume-driver=...` instead.") - } - var binds []string - volumes := copts.volumes.GetMap() - // add any bind targets to the list of container volumes - for bind := range copts.volumes.GetMap() { - if arr := volumeSplitN(bind, 2); len(arr) > 1 { - // after creating the bind mount we want to delete it from the copts.volumes values because - // we do not want bind mounts being committed to image configs - binds = append(binds, bind) - // We should delete from the map (`volumes`) here, as deleting from copts.volumes will not work if - // there are duplicates entries. - delete(volumes, bind) - } - } - - // Can't evaluate options passed into --tmpfs until we actually mount - tmpfs := make(map[string]string) - for _, t := range copts.tmpfs.GetAll() { - if arr := strings.SplitN(t, ":", 2); len(arr) > 1 { - tmpfs[arr[0]] = arr[1] - } else { - tmpfs[arr[0]] = "" - } - } - - var ( - runCmd strslice.StrSlice - entrypoint strslice.StrSlice - ) - - if len(copts.Args) > 0 { - runCmd = strslice.StrSlice(copts.Args) - } - - if copts.entrypoint != "" { - entrypoint = strslice.StrSlice{copts.entrypoint} - } else if flags.Changed("entrypoint") { - // if `--entrypoint=` is parsed then Entrypoint is reset - entrypoint = []string{""} - } - - ports, portBindings, err := nat.ParsePortSpecs(copts.publish.GetAll()) - if err != nil { - return nil, err - } - - // Merge in exposed ports to the map of published ports - for _, e := range copts.expose.GetAll() { - if strings.Contains(e, ":") { - return nil, errors.Errorf("invalid port format for --expose: %s", e) - } - //support two formats for expose, original format /[] or /[] - proto, port := nat.SplitProtoPort(e) - //parse the start and end port and create a sequence of ports to expose - //if expose 
a port, the start and end port are the same - start, end, err := nat.ParsePortRange(port) - if err != nil { - return nil, errors.Errorf("invalid range format for --expose: %s, error: %s", e, err) - } - for i := start; i <= end; i++ { - p, err := nat.NewPort(proto, strconv.FormatUint(i, 10)) - if err != nil { - return nil, err - } - if _, exists := ports[p]; !exists { - ports[p] = struct{}{} - } - } - } - - // parse device mappings - deviceMappings := []container.DeviceMapping{} - for _, device := range copts.devices.GetAll() { - deviceMapping, err := parseDevice(device) - if err != nil { - return nil, err - } - deviceMappings = append(deviceMappings, deviceMapping) - } - - // collect all the environment variables for the container - envVariables, err := runconfigopts.ReadKVStrings(copts.envFile.GetAll(), copts.env.GetAll()) - if err != nil { - return nil, err - } - - // collect all the labels for the container - labels, err := runconfigopts.ReadKVStrings(copts.labelsFile.GetAll(), copts.labels.GetAll()) - if err != nil { - return nil, err - } - - ipcMode := container.IpcMode(copts.ipcMode) - if !ipcMode.Valid() { - return nil, errors.Errorf("--ipc: invalid IPC mode") - } - - pidMode := container.PidMode(copts.pidMode) - if !pidMode.Valid() { - return nil, errors.Errorf("--pid: invalid PID mode") - } - - utsMode := container.UTSMode(copts.utsMode) - if !utsMode.Valid() { - return nil, errors.Errorf("--uts: invalid UTS mode") - } - - usernsMode := container.UsernsMode(copts.usernsMode) - if !usernsMode.Valid() { - return nil, errors.Errorf("--userns: invalid USER mode") - } - - restartPolicy, err := runconfigopts.ParseRestartPolicy(copts.restartPolicy) - if err != nil { - return nil, err - } - - loggingOpts, err := parseLoggingOpts(copts.loggingDriver, copts.loggingOpts.GetAll()) - if err != nil { - return nil, err - } - - securityOpts, err := parseSecurityOpts(copts.securityOpt.GetAll()) - if err != nil { - return nil, err - } - - storageOpts, err := 
parseStorageOpts(copts.storageOpt.GetAll()) - if err != nil { - return nil, err - } - - // Healthcheck - var healthConfig *container.HealthConfig - haveHealthSettings := copts.healthCmd != "" || - copts.healthInterval != 0 || - copts.healthTimeout != 0 || - copts.healthStartPeriod != 0 || - copts.healthRetries != 0 - if copts.noHealthcheck { - if haveHealthSettings { - return nil, errors.Errorf("--no-healthcheck conflicts with --health-* options") - } - test := strslice.StrSlice{"NONE"} - healthConfig = &container.HealthConfig{Test: test} - } else if haveHealthSettings { - var probe strslice.StrSlice - if copts.healthCmd != "" { - args := []string{"CMD-SHELL", copts.healthCmd} - probe = strslice.StrSlice(args) - } - if copts.healthInterval < 0 { - return nil, errors.Errorf("--health-interval cannot be negative") - } - if copts.healthTimeout < 0 { - return nil, errors.Errorf("--health-timeout cannot be negative") - } - if copts.healthRetries < 0 { - return nil, errors.Errorf("--health-retries cannot be negative") - } - if copts.healthStartPeriod < 0 { - return nil, fmt.Errorf("--health-start-period cannot be negative") - } - - healthConfig = &container.HealthConfig{ - Test: probe, - Interval: copts.healthInterval, - Timeout: copts.healthTimeout, - StartPeriod: copts.healthStartPeriod, - Retries: copts.healthRetries, - } - } - - resources := container.Resources{ - CgroupParent: copts.cgroupParent, - Memory: copts.memory.Value(), - MemoryReservation: copts.memoryReservation.Value(), - MemorySwap: copts.memorySwap.Value(), - MemorySwappiness: &copts.swappiness, - KernelMemory: copts.kernelMemory.Value(), - OomKillDisable: &copts.oomKillDisable, - NanoCPUs: copts.cpus.Value(), - CPUCount: copts.cpuCount, - CPUPercent: copts.cpuPercent, - CPUShares: copts.cpuShares, - CPUPeriod: copts.cpuPeriod, - CpusetCpus: copts.cpusetCpus, - CpusetMems: copts.cpusetMems, - CPUQuota: copts.cpuQuota, - CPURealtimePeriod: copts.cpuRealtimePeriod, - CPURealtimeRuntime: 
copts.cpuRealtimeRuntime, - PidsLimit: copts.pidsLimit, - BlkioWeight: copts.blkioWeight, - BlkioWeightDevice: copts.blkioWeightDevice.GetList(), - BlkioDeviceReadBps: copts.deviceReadBps.GetList(), - BlkioDeviceWriteBps: copts.deviceWriteBps.GetList(), - BlkioDeviceReadIOps: copts.deviceReadIOps.GetList(), - BlkioDeviceWriteIOps: copts.deviceWriteIOps.GetList(), - IOMaximumIOps: copts.ioMaxIOps, - IOMaximumBandwidth: uint64(copts.ioMaxBandwidth), - Ulimits: copts.ulimits.GetList(), - DeviceCgroupRules: copts.deviceCgroupRules.GetAll(), - Devices: deviceMappings, - } - - config := &container.Config{ - Hostname: copts.hostname, - ExposedPorts: ports, - User: copts.user, - Tty: copts.tty, - // TODO: deprecated, it comes from -n, --networking - // it's still needed internally to set the network to disabled - // if e.g. bridge is none in daemon opts, and in inspect - NetworkDisabled: false, - OpenStdin: copts.stdin, - AttachStdin: attachStdin, - AttachStdout: attachStdout, - AttachStderr: attachStderr, - Env: envVariables, - Cmd: runCmd, - Image: copts.Image, - Volumes: volumes, - MacAddress: copts.macAddress, - Entrypoint: entrypoint, - WorkingDir: copts.workingDir, - Labels: runconfigopts.ConvertKVStringsToMap(labels), - Healthcheck: healthConfig, - } - if flags.Changed("stop-signal") { - config.StopSignal = copts.stopSignal - } - if flags.Changed("stop-timeout") { - config.StopTimeout = &copts.stopTimeout - } - - hostConfig := &container.HostConfig{ - Binds: binds, - ContainerIDFile: copts.containerIDFile, - OomScoreAdj: copts.oomScoreAdj, - AutoRemove: copts.autoRemove, - Privileged: copts.privileged, - PortBindings: portBindings, - Links: copts.links.GetAll(), - PublishAllPorts: copts.publishAll, - // Make sure the dns fields are never nil. - // New containers don't ever have those fields nil, - // but pre created containers can still have those nil values. 
- // See https://github.com/docker/docker/pull/17779 - // for a more detailed explanation on why we don't want that. - DNS: copts.dns.GetAllOrEmpty(), - DNSSearch: copts.dnsSearch.GetAllOrEmpty(), - DNSOptions: copts.dnsOptions.GetAllOrEmpty(), - ExtraHosts: copts.extraHosts.GetAll(), - VolumesFrom: copts.volumesFrom.GetAll(), - NetworkMode: container.NetworkMode(copts.netMode), - IpcMode: ipcMode, - PidMode: pidMode, - UTSMode: utsMode, - UsernsMode: usernsMode, - CapAdd: strslice.StrSlice(copts.capAdd.GetAll()), - CapDrop: strslice.StrSlice(copts.capDrop.GetAll()), - GroupAdd: copts.groupAdd.GetAll(), - RestartPolicy: restartPolicy, - SecurityOpt: securityOpts, - StorageOpt: storageOpts, - ReadonlyRootfs: copts.readonlyRootfs, - LogConfig: container.LogConfig{Type: copts.loggingDriver, Config: loggingOpts}, - VolumeDriver: copts.volumeDriver, - Isolation: container.Isolation(copts.isolation), - ShmSize: copts.shmSize.Value(), - Resources: resources, - Tmpfs: tmpfs, - Sysctls: copts.sysctls.GetAll(), - Runtime: copts.runtime, - Mounts: mounts, - } - - if copts.autoRemove && !hostConfig.RestartPolicy.IsNone() { - return nil, errors.Errorf("Conflicting options: --restart and --rm") - } - - // only set this value if the user provided the flag, else it should default to nil - if flags.Changed("init") { - hostConfig.Init = &copts.init - } - - // When allocating stdin in attached mode, close stdin at client disconnect - if config.OpenStdin && config.AttachStdin { - config.StdinOnce = true - } - - networkingConfig := &networktypes.NetworkingConfig{ - EndpointsConfig: make(map[string]*networktypes.EndpointSettings), - } - - if copts.ipv4Address != "" || copts.ipv6Address != "" || copts.linkLocalIPs.Len() > 0 { - epConfig := &networktypes.EndpointSettings{} - networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig - - epConfig.IPAMConfig = &networktypes.EndpointIPAMConfig{ - IPv4Address: copts.ipv4Address, - IPv6Address: copts.ipv6Address, - } - - if 
copts.linkLocalIPs.Len() > 0 { - epConfig.IPAMConfig.LinkLocalIPs = make([]string, copts.linkLocalIPs.Len()) - copy(epConfig.IPAMConfig.LinkLocalIPs, copts.linkLocalIPs.GetAll()) - } - } - - if hostConfig.NetworkMode.IsUserDefined() && len(hostConfig.Links) > 0 { - epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] - if epConfig == nil { - epConfig = &networktypes.EndpointSettings{} - } - epConfig.Links = make([]string, len(hostConfig.Links)) - copy(epConfig.Links, hostConfig.Links) - networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig - } - - if copts.aliases.Len() > 0 { - epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] - if epConfig == nil { - epConfig = &networktypes.EndpointSettings{} - } - epConfig.Aliases = make([]string, copts.aliases.Len()) - copy(epConfig.Aliases, copts.aliases.GetAll()) - networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig - } - - return &containerConfig{ - Config: config, - HostConfig: hostConfig, - NetworkingConfig: networkingConfig, - }, nil -} - -func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]string, error) { - loggingOptsMap := runconfigopts.ConvertKVStringsToMap(loggingOpts) - if loggingDriver == "none" && len(loggingOpts) > 0 { - return map[string]string{}, errors.Errorf("invalid logging opts for driver %s", loggingDriver) - } - return loggingOptsMap, nil -} - -// takes a local seccomp daemon, reads the file contents for sending to the daemon -func parseSecurityOpts(securityOpts []string) ([]string, error) { - for key, opt := range securityOpts { - con := strings.SplitN(opt, "=", 2) - if len(con) == 1 && con[0] != "no-new-privileges" { - if strings.Contains(opt, ":") { - con = strings.SplitN(opt, ":", 2) - } else { - return securityOpts, errors.Errorf("Invalid --security-opt: %q", opt) - } - } - if con[0] == "seccomp" && con[1] != "unconfined" { - f, err := ioutil.ReadFile(con[1]) - if err != nil { - 
return securityOpts, errors.Errorf("opening seccomp profile (%s) failed: %v", con[1], err) - } - b := bytes.NewBuffer(nil) - if err := json.Compact(b, f); err != nil { - return securityOpts, errors.Errorf("compacting json for seccomp profile (%s) failed: %v", con[1], err) - } - securityOpts[key] = fmt.Sprintf("seccomp=%s", b.Bytes()) - } - } - - return securityOpts, nil -} - -// parses storage options per container into a map -func parseStorageOpts(storageOpts []string) (map[string]string, error) { - m := make(map[string]string) - for _, option := range storageOpts { - if strings.Contains(option, "=") { - opt := strings.SplitN(option, "=", 2) - m[opt[0]] = opt[1] - } else { - return nil, errors.Errorf("invalid storage option") - } - } - return m, nil -} - -// parseDevice parses a device mapping string to a container.DeviceMapping struct -func parseDevice(device string) (container.DeviceMapping, error) { - src := "" - dst := "" - permissions := "rwm" - arr := strings.Split(device, ":") - switch len(arr) { - case 3: - permissions = arr[2] - fallthrough - case 2: - if validDeviceMode(arr[1]) { - permissions = arr[1] - } else { - dst = arr[1] - } - fallthrough - case 1: - src = arr[0] - default: - return container.DeviceMapping{}, errors.Errorf("invalid device specification: %s", device) - } - - if dst == "" { - dst = src - } - - deviceMapping := container.DeviceMapping{ - PathOnHost: src, - PathInContainer: dst, - CgroupPermissions: permissions, - } - return deviceMapping, nil -} - -// validateDeviceCgroupRule validates a device cgroup rule string format -// It will make sure 'val' is in the form: -// 'type major:minor mode' -func validateDeviceCgroupRule(val string) (string, error) { - if deviceCgroupRuleRegexp.MatchString(val) { - return val, nil - } - - return val, errors.Errorf("invalid device cgroup format '%s'", val) -} - -// validDeviceMode checks if the mode for device is valid or not. -// Valid mode is a composition of r (read), w (write), and m (mknod). 
-func validDeviceMode(mode string) bool { - var legalDeviceMode = map[rune]bool{ - 'r': true, - 'w': true, - 'm': true, - } - if mode == "" { - return false - } - for _, c := range mode { - if !legalDeviceMode[c] { - return false - } - legalDeviceMode[c] = false - } - return true -} - -// validateDevice validates a path for devices -// It will make sure 'val' is in the form: -// [host-dir:]container-path[:mode] -// It also validates the device mode. -func validateDevice(val string) (string, error) { - return validatePath(val, validDeviceMode) -} - -func validatePath(val string, validator func(string) bool) (string, error) { - var containerPath string - var mode string - - if strings.Count(val, ":") > 2 { - return val, errors.Errorf("bad format for path: %s", val) - } - - split := strings.SplitN(val, ":", 3) - if split[0] == "" { - return val, errors.Errorf("bad format for path: %s", val) - } - switch len(split) { - case 1: - containerPath = split[0] - val = path.Clean(containerPath) - case 2: - if isValid := validator(split[1]); isValid { - containerPath = split[0] - mode = split[1] - val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode) - } else { - containerPath = split[1] - val = fmt.Sprintf("%s:%s", split[0], path.Clean(containerPath)) - } - case 3: - containerPath = split[1] - mode = split[2] - if isValid := validator(split[2]); !isValid { - return val, errors.Errorf("bad mode specified: %s", mode) - } - val = fmt.Sprintf("%s:%s:%s", split[0], containerPath, mode) - } - - if !path.IsAbs(containerPath) { - return val, errors.Errorf("%s is not an absolute path", containerPath) - } - return val, nil -} - -// volumeSplitN splits raw into a maximum of n parts, separated by a separator colon. -// A separator colon is the last `:` character in the regex `[:\\]?[a-zA-Z]:` (note `\\` is `\` escaped). -// In Windows driver letter appears in two situations: -// a. 
`^[a-zA-Z]:` (A colon followed by `^[a-zA-Z]:` is OK as colon is the separator in volume option) -// b. A string in the format like `\\?\C:\Windows\...` (UNC). -// Therefore, a driver letter can only follow either a `:` or `\\` -// This allows to correctly split strings such as `C:\foo:D:\:rw` or `/tmp/q:/foo`. -func volumeSplitN(raw string, n int) []string { - var array []string - if len(raw) == 0 || raw[0] == ':' { - // invalid - return nil - } - // numberOfParts counts the number of parts separated by a separator colon - numberOfParts := 0 - // left represents the left-most cursor in raw, updated at every `:` character considered as a separator. - left := 0 - // right represents the right-most cursor in raw incremented with the loop. Note this - // starts at index 1 as index 0 is already handle above as a special case. - for right := 1; right < len(raw); right++ { - // stop parsing if reached maximum number of parts - if n >= 0 && numberOfParts >= n { - break - } - if raw[right] != ':' { - continue - } - potentialDriveLetter := raw[right-1] - if (potentialDriveLetter >= 'A' && potentialDriveLetter <= 'Z') || (potentialDriveLetter >= 'a' && potentialDriveLetter <= 'z') { - if right > 1 { - beforePotentialDriveLetter := raw[right-2] - // Only `:` or `\\` are checked (`/` could fall into the case of `/tmp/q:/foo`) - if beforePotentialDriveLetter != ':' && beforePotentialDriveLetter != '\\' { - // e.g. `C:` is not preceded by any delimiter, therefore it was not a drive letter but a path ending with `C:`. - array = append(array, raw[left:right]) - left = right + 1 - numberOfParts++ - } - // else, `C:` is considered as a drive letter and not as a delimiter, so we continue parsing. - } - // if right == 1, then `C:` is the beginning of the raw string, therefore `:` is again not considered a delimiter and we continue parsing. - } else { - // if `:` is not preceded by a potential drive letter, then consider it as a delimiter. 
- array = append(array, raw[left:right]) - left = right + 1 - numberOfParts++ - } - } - // need to take care of the last part - if left < len(raw) { - if n >= 0 && numberOfParts >= n { - // if the maximum number of parts is reached, just append the rest to the last part - // left-1 is at the last `:` that needs to be included since not considered a separator. - array[n-1] += raw[left-1:] - } else { - array = append(array, raw[left:]) - } - } - return array -} - -// validateAttach validates that the specified string is a valid attach option. -func validateAttach(val string) (string, error) { - s := strings.ToLower(val) - for _, str := range []string{"stdin", "stdout", "stderr"} { - if s == str { - return s, nil - } - } - return val, errors.Errorf("valid streams are STDIN, STDOUT and STDERR") -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/opts_test.go b/fn/vendor/github.com/docker/docker/cli/command/container/opts_test.go deleted file mode 100644 index 575b214ed..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/opts_test.go +++ /dev/null @@ -1,869 +0,0 @@ -package container - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "runtime" - "strings" - "testing" - "time" - - "github.com/docker/docker/api/types/container" - networktypes "github.com/docker/docker/api/types/network" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/docker/docker/runconfig" - "github.com/docker/go-connections/nat" - "github.com/pkg/errors" - "github.com/spf13/pflag" -) - -func TestValidateAttach(t *testing.T) { - valid := []string{ - "stdin", - "stdout", - "stderr", - "STDIN", - "STDOUT", - "STDERR", - } - if _, err := validateAttach("invalid"); err == nil { - t.Fatal("Expected error with [valid streams are STDIN, STDOUT and STDERR], got nothing") - } - - for _, attach := range valid { - value, err := validateAttach(attach) - if err != nil { - t.Fatal(err) - } - if value != strings.ToLower(attach) { - 
t.Fatalf("Expected [%v], got [%v]", attach, value) - } - } -} - -func parseRun(args []string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { - flags := pflag.NewFlagSet("run", pflag.ContinueOnError) - flags.SetOutput(ioutil.Discard) - flags.Usage = nil - copts := addFlags(flags) - if err := flags.Parse(args); err != nil { - return nil, nil, nil, err - } - // TODO: fix tests to accept ContainerConfig - containerConfig, err := parse(flags, copts) - if err != nil { - return nil, nil, nil, err - } - return containerConfig.Config, containerConfig.HostConfig, containerConfig.NetworkingConfig, err -} - -func parsetest(t *testing.T, args string) (*container.Config, *container.HostConfig, error) { - config, hostConfig, _, err := parseRun(strings.Split(args+" ubuntu bash", " ")) - return config, hostConfig, err -} - -func mustParse(t *testing.T, args string) (*container.Config, *container.HostConfig) { - config, hostConfig, err := parsetest(t, args) - if err != nil { - t.Fatal(err) - } - return config, hostConfig -} - -func TestParseRunLinks(t *testing.T) { - if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { - t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) - } - if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { - t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links) - } - if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 { - t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links) - } -} - -func TestParseRunAttach(t *testing.T) { - if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. 
Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, "-i"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect Stdin enabled. 
Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - - if _, _, err := parsetest(t, "-a"); err == nil { - t.Fatal("Error parsing attach flags, `-a` should be an error but is not") - } - if _, _, err := parsetest(t, "-a invalid"); err == nil { - t.Fatal("Error parsing attach flags, `-a invalid` should be an error but is not") - } - if _, _, err := parsetest(t, "-a invalid -a stdout"); err == nil { - t.Fatal("Error parsing attach flags, `-a stdout -a invalid` should be an error but is not") - } - if _, _, err := parsetest(t, "-a stdout -a stderr -d"); err == nil { - t.Fatal("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not") - } - if _, _, err := parsetest(t, "-a stdin -d"); err == nil { - t.Fatal("Error parsing attach flags, `-a stdin -d` should be an error but is not") - } - if _, _, err := parsetest(t, "-a stdout -d"); err == nil { - t.Fatal("Error parsing attach flags, `-a stdout -d` should be an error but is not") - } - if _, _, err := parsetest(t, "-a stderr -d"); err == nil { - t.Fatal("Error parsing attach flags, `-a stderr -d` should be an error but is not") - } - if _, _, err := parsetest(t, "-d --rm"); err == nil { - t.Fatal("Error parsing attach flags, `-d --rm` should be an error but is not") - } -} - -func TestParseRunVolumes(t *testing.T) { - - // A single volume - arr, tryit := setupPlatformVolume([]string{`/tmp`}, []string{`c:\tmp`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds) - } else if _, exists := config.Volumes[arr[0]]; !exists { - t.Fatalf("Error parsing volume flags, %q is missing from volumes. 
Received %v", tryit, config.Volumes) - } - - // Two volumes - arr, tryit = setupPlatformVolume([]string{`/tmp`, `/var`}, []string{`c:\tmp`, `c:\var`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds) - } else if _, exists := config.Volumes[arr[0]]; !exists { - t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[0], config.Volumes) - } else if _, exists := config.Volumes[arr[1]]; !exists { - t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[1], config.Volumes) - } - - // A single bind-mount - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || hostConfig.Binds[0] != arr[0] { - t.Fatalf("Error parsing volume flags, %q should mount-bind the path before the colon into the path after the colon. Received %v %v", arr[0], hostConfig.Binds, config.Volumes) - } - - // Two bind-mounts. - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/hostVar:/containerVar`}, []string{os.Getenv("ProgramData") + `:c:\ContainerPD`, os.Getenv("TEMP") + `:c:\containerTmp`}) - if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { - t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) - } - - // Two bind-mounts, first read-only, second read-write. - // TODO Windows: The Windows version uses read-write as that's the only mode it supports. 
Can change this post TP4 - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro`, `/hostVar:/containerVar:rw`}, []string{os.Getenv("TEMP") + `:c:\containerTmp:rw`, os.Getenv("ProgramData") + `:c:\ContainerPD:rw`}) - if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { - t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) - } - - // Similar to previous test but with alternate modes which are only supported by Linux - if runtime.GOOS != "windows" { - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro,Z`, `/hostVar:/containerVar:rw,Z`}, []string{}) - if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { - t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) - } - - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:Z`, `/hostVar:/containerVar:z`}, []string{}) - if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { - t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) - } - } - - // One bind mount and one volume - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/containerVar`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`, `c:\containerTmp`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] { - t.Fatalf("Error parsing volume flags, %s and %s should only one and only one bind mount %s. 
Received %s", arr[0], arr[1], arr[0], hostConfig.Binds) - } else if _, exists := config.Volumes[arr[1]]; !exists { - t.Fatalf("Error parsing volume flags %s and %s. %s is missing from volumes. Received %v", arr[0], arr[1], arr[1], config.Volumes) - } - - // Root to non-c: drive letter (Windows specific) - if runtime.GOOS == "windows" { - arr, tryit = setupPlatformVolume([]string{}, []string{os.Getenv("SystemDrive") + `\:d:`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] || len(config.Volumes) != 0 { - t.Fatalf("Error parsing %s. Should have a single bind mount and no volumes", arr[0]) - } - } - -} - -// setupPlatformVolume takes two arrays of volume specs - a Unix style -// spec and a Windows style spec. Depending on the platform being unit tested, -// it returns one of them, along with a volume string that would be passed -// on the docker CLI (e.g. -v /bar -v /foo). -func setupPlatformVolume(u []string, w []string) ([]string, string) { - var a []string - if runtime.GOOS == "windows" { - a = w - } else { - a = u - } - s := "" - for _, v := range a { - s = s + "-v " + v + " " - } - return a, s -} - -// check if (a == c && b == d) || (a == d && b == c) -// because maps are randomized -func compareRandomizedStrings(a, b, c, d string) error { - if a == c && b == d { - return nil - } - if a == d && b == c { - return nil - } - return errors.Errorf("strings don't match") -} - -// Simple parse with MacAddress validation -func TestParseWithMacAddress(t *testing.T) { - invalidMacAddress := "--mac-address=invalidMacAddress" - validMacAddress := "--mac-address=92:d0:c6:0a:29:33" - if _, _, _, err := parseRun([]string{invalidMacAddress, "img", "cmd"}); err != nil && err.Error() != "invalidMacAddress is not a valid mac address" { - t.Fatalf("Expected an error with %v mac-address, got %v", invalidMacAddress, err) - } - if config, _ := mustParse(t, validMacAddress); config.MacAddress != 
"92:d0:c6:0a:29:33" { - t.Fatalf("Expected the config to have '92:d0:c6:0a:29:33' as MacAddress, got '%v'", config.MacAddress) - } -} - -func TestParseWithMemory(t *testing.T) { - invalidMemory := "--memory=invalid" - _, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"}) - assert.Error(t, err, invalidMemory) - - _, hostconfig := mustParse(t, "--memory=1G") - assert.Equal(t, hostconfig.Memory, int64(1073741824)) -} - -func TestParseWithMemorySwap(t *testing.T) { - invalidMemory := "--memory-swap=invalid" - - _, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"}) - assert.Error(t, err, invalidMemory) - - _, hostconfig := mustParse(t, "--memory-swap=1G") - assert.Equal(t, hostconfig.MemorySwap, int64(1073741824)) - - _, hostconfig = mustParse(t, "--memory-swap=-1") - assert.Equal(t, hostconfig.MemorySwap, int64(-1)) -} - -func TestParseHostname(t *testing.T) { - validHostnames := map[string]string{ - "hostname": "hostname", - "host-name": "host-name", - "hostname123": "hostname123", - "123hostname": "123hostname", - "hostname-of-63-bytes-long-should-be-valid-and-without-any-error": "hostname-of-63-bytes-long-should-be-valid-and-without-any-error", - } - hostnameWithDomain := "--hostname=hostname.domainname" - hostnameWithDomainTld := "--hostname=hostname.domainname.tld" - for hostname, expectedHostname := range validHostnames { - if config, _ := mustParse(t, fmt.Sprintf("--hostname=%s", hostname)); config.Hostname != expectedHostname { - t.Fatalf("Expected the config to have 'hostname' as hostname, got '%v'", config.Hostname) - } - } - if config, _ := mustParse(t, hostnameWithDomain); config.Hostname != "hostname.domainname" && config.Domainname != "" { - t.Fatalf("Expected the config to have 'hostname' as hostname.domainname, got '%v'", config.Hostname) - } - if config, _ := mustParse(t, hostnameWithDomainTld); config.Hostname != "hostname.domainname.tld" && config.Domainname != "" { - t.Fatalf("Expected the config to have 'hostname' as 
hostname.domainname.tld, got '%v'", config.Hostname) - } -} - -func TestParseWithExpose(t *testing.T) { - invalids := map[string]string{ - ":": "invalid port format for --expose: :", - "8080:9090": "invalid port format for --expose: 8080:9090", - "/tcp": "invalid range format for --expose: /tcp, error: Empty string specified for ports.", - "/udp": "invalid range format for --expose: /udp, error: Empty string specified for ports.", - "NaN/tcp": `invalid range format for --expose: NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, - "NaN-NaN/tcp": `invalid range format for --expose: NaN-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, - "8080-NaN/tcp": `invalid range format for --expose: 8080-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, - "1234567890-8080/tcp": `invalid range format for --expose: 1234567890-8080/tcp, error: strconv.ParseUint: parsing "1234567890": value out of range`, - } - valids := map[string][]nat.Port{ - "8080/tcp": {"8080/tcp"}, - "8080/udp": {"8080/udp"}, - "8080/ncp": {"8080/ncp"}, - "8080-8080/udp": {"8080/udp"}, - "8080-8082/tcp": {"8080/tcp", "8081/tcp", "8082/tcp"}, - } - for expose, expectedError := range invalids { - if _, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}); err == nil || err.Error() != expectedError { - t.Fatalf("Expected error '%v' with '--expose=%v', got '%v'", expectedError, expose, err) - } - } - for expose, exposedPorts := range valids { - config, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.ExposedPorts) != len(exposedPorts) { - t.Fatalf("Expected %v exposed port, got %v", len(exposedPorts), len(config.ExposedPorts)) - } - for _, port := range exposedPorts { - if _, ok := config.ExposedPorts[port]; !ok { - t.Fatalf("Expected %v, got %v", exposedPorts, config.ExposedPorts) - } - } - } - // Merge with actual published port - config, _, _, err 
:= parseRun([]string{"--publish=80", "--expose=80-81/tcp", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.ExposedPorts) != 2 { - t.Fatalf("Expected 2 exposed ports, got %v", config.ExposedPorts) - } - ports := []nat.Port{"80/tcp", "81/tcp"} - for _, port := range ports { - if _, ok := config.ExposedPorts[port]; !ok { - t.Fatalf("Expected %v, got %v", ports, config.ExposedPorts) - } - } -} - -func TestParseDevice(t *testing.T) { - valids := map[string]container.DeviceMapping{ - "/dev/snd": { - PathOnHost: "/dev/snd", - PathInContainer: "/dev/snd", - CgroupPermissions: "rwm", - }, - "/dev/snd:rw": { - PathOnHost: "/dev/snd", - PathInContainer: "/dev/snd", - CgroupPermissions: "rw", - }, - "/dev/snd:/something": { - PathOnHost: "/dev/snd", - PathInContainer: "/something", - CgroupPermissions: "rwm", - }, - "/dev/snd:/something:rw": { - PathOnHost: "/dev/snd", - PathInContainer: "/something", - CgroupPermissions: "rw", - }, - } - for device, deviceMapping := range valids { - _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--device=%v", device), "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(hostconfig.Devices) != 1 { - t.Fatalf("Expected 1 devices, got %v", hostconfig.Devices) - } - if hostconfig.Devices[0] != deviceMapping { - t.Fatalf("Expected %v, got %v", deviceMapping, hostconfig.Devices) - } - } - -} - -func TestParseModes(t *testing.T) { - // ipc ko - if _, _, _, err := parseRun([]string{"--ipc=container:", "img", "cmd"}); err == nil || err.Error() != "--ipc: invalid IPC mode" { - t.Fatalf("Expected an error with message '--ipc: invalid IPC mode', got %v", err) - } - // ipc ok - _, hostconfig, _, err := parseRun([]string{"--ipc=host", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if !hostconfig.IpcMode.Valid() { - t.Fatalf("Expected a valid IpcMode, got %v", hostconfig.IpcMode) - } - // pid ko - if _, _, _, err := parseRun([]string{"--pid=container:", "img", "cmd"}); err == nil || err.Error() != "--pid: invalid 
PID mode" { - t.Fatalf("Expected an error with message '--pid: invalid PID mode', got %v", err) - } - // pid ok - _, hostconfig, _, err = parseRun([]string{"--pid=host", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if !hostconfig.PidMode.Valid() { - t.Fatalf("Expected a valid PidMode, got %v", hostconfig.PidMode) - } - // uts ko - if _, _, _, err := parseRun([]string{"--uts=container:", "img", "cmd"}); err == nil || err.Error() != "--uts: invalid UTS mode" { - t.Fatalf("Expected an error with message '--uts: invalid UTS mode', got %v", err) - } - // uts ok - _, hostconfig, _, err = parseRun([]string{"--uts=host", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if !hostconfig.UTSMode.Valid() { - t.Fatalf("Expected a valid UTSMode, got %v", hostconfig.UTSMode) - } - // shm-size ko - expectedErr := `invalid argument "a128m" for --shm-size=a128m: invalid size: 'a128m'` - if _, _, _, err = parseRun([]string{"--shm-size=a128m", "img", "cmd"}); err == nil || err.Error() != expectedErr { - t.Fatalf("Expected an error with message '%v', got %v", expectedErr, err) - } - // shm-size ok - _, hostconfig, _, err = parseRun([]string{"--shm-size=128m", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if hostconfig.ShmSize != 134217728 { - t.Fatalf("Expected a valid ShmSize, got %d", hostconfig.ShmSize) - } -} - -func TestParseRestartPolicy(t *testing.T) { - invalids := map[string]string{ - "always:2:3": "invalid restart policy format", - "on-failure:invalid": "maximum retry count must be an integer", - } - valids := map[string]container.RestartPolicy{ - "": {}, - "always": { - Name: "always", - MaximumRetryCount: 0, - }, - "on-failure:1": { - Name: "on-failure", - MaximumRetryCount: 1, - }, - } - for restart, expectedError := range invalids { - if _, _, _, err := parseRun([]string{fmt.Sprintf("--restart=%s", restart), "img", "cmd"}); err == nil || err.Error() != expectedError { - t.Fatalf("Expected an error with message '%v' for %v, got %v", expectedError, 
restart, err) - } - } - for restart, expected := range valids { - _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--restart=%v", restart), "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if hostconfig.RestartPolicy != expected { - t.Fatalf("Expected %v, got %v", expected, hostconfig.RestartPolicy) - } - } -} - -func TestParseRestartPolicyAutoRemove(t *testing.T) { - expected := "Conflicting options: --restart and --rm" - _, _, _, err := parseRun([]string{"--rm", "--restart=always", "img", "cmd"}) - if err == nil || err.Error() != expected { - t.Fatalf("Expected error %v, but got none", expected) - } -} - -func TestParseHealth(t *testing.T) { - checkOk := func(args ...string) *container.HealthConfig { - config, _, _, err := parseRun(args) - if err != nil { - t.Fatalf("%#v: %v", args, err) - } - return config.Healthcheck - } - checkError := func(expected string, args ...string) { - config, _, _, err := parseRun(args) - if err == nil { - t.Fatalf("Expected error, but got %#v", config) - } - if err.Error() != expected { - t.Fatalf("Expected %#v, got %#v", expected, err) - } - } - health := checkOk("--no-healthcheck", "img", "cmd") - if health == nil || len(health.Test) != 1 || health.Test[0] != "NONE" { - t.Fatalf("--no-healthcheck failed: %#v", health) - } - - health = checkOk("--health-cmd=/check.sh -q", "img", "cmd") - if len(health.Test) != 2 || health.Test[0] != "CMD-SHELL" || health.Test[1] != "/check.sh -q" { - t.Fatalf("--health-cmd: got %#v", health.Test) - } - if health.Timeout != 0 { - t.Fatalf("--health-cmd: timeout = %s", health.Timeout) - } - - checkError("--no-healthcheck conflicts with --health-* options", - "--no-healthcheck", "--health-cmd=/check.sh -q", "img", "cmd") - - health = checkOk("--health-timeout=2s", "--health-retries=3", "--health-interval=4.5s", "--health-start-period=5s", "img", "cmd") - if health.Timeout != 2*time.Second || health.Retries != 3 || health.Interval != 4500*time.Millisecond || health.StartPeriod != 
5*time.Second { - t.Fatalf("--health-*: got %#v", health) - } -} - -func TestParseLoggingOpts(t *testing.T) { - // logging opts ko - if _, _, _, err := parseRun([]string{"--log-driver=none", "--log-opt=anything", "img", "cmd"}); err == nil || err.Error() != "invalid logging opts for driver none" { - t.Fatalf("Expected an error with message 'invalid logging opts for driver none', got %v", err) - } - // logging opts ok - _, hostconfig, _, err := parseRun([]string{"--log-driver=syslog", "--log-opt=something", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if hostconfig.LogConfig.Type != "syslog" || len(hostconfig.LogConfig.Config) != 1 { - t.Fatalf("Expected a 'syslog' LogConfig with one config, got %v", hostconfig.RestartPolicy) - } -} - -func TestParseEnvfileVariables(t *testing.T) { - e := "open nonexistent: no such file or directory" - if runtime.GOOS == "windows" { - e = "open nonexistent: The system cannot find the file specified." - } - // env ko - if _, _, _, err := parseRun([]string{"--env-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { - t.Fatalf("Expected an error with message '%s', got %v", e, err) - } - // env ok - config, _, _, err := parseRun([]string{"--env-file=testdata/valid.env", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.Env) != 1 || config.Env[0] != "ENV1=value1" { - t.Fatalf("Expected a config with [ENV1=value1], got %v", config.Env) - } - config, _, _, err = parseRun([]string{"--env-file=testdata/valid.env", "--env=ENV2=value2", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.Env) != 2 || config.Env[0] != "ENV1=value1" || config.Env[1] != "ENV2=value2" { - t.Fatalf("Expected a config with [ENV1=value1 ENV2=value2], got %v", config.Env) - } -} - -func TestParseEnvfileVariablesWithBOMUnicode(t *testing.T) { - // UTF8 with BOM - config, _, _, err := parseRun([]string{"--env-file=testdata/utf8.env", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - env := []string{"FOO=BAR", 
"HELLO=" + string([]byte{0xe6, 0x82, 0xa8, 0xe5, 0xa5, 0xbd}), "BAR=FOO"} - if len(config.Env) != len(env) { - t.Fatalf("Expected a config with %d env variables, got %v: %v", len(env), len(config.Env), config.Env) - } - for i, v := range env { - if config.Env[i] != v { - t.Fatalf("Expected a config with [%s], got %v", v, []byte(config.Env[i])) - } - } - - // UTF16 with BOM - e := "contains invalid utf8 bytes at line" - if _, _, _, err := parseRun([]string{"--env-file=testdata/utf16.env", "img", "cmd"}); err == nil || !strings.Contains(err.Error(), e) { - t.Fatalf("Expected an error with message '%s', got %v", e, err) - } - // UTF16BE with BOM - if _, _, _, err := parseRun([]string{"--env-file=testdata/utf16be.env", "img", "cmd"}); err == nil || !strings.Contains(err.Error(), e) { - t.Fatalf("Expected an error with message '%s', got %v", e, err) - } -} - -func TestParseLabelfileVariables(t *testing.T) { - e := "open nonexistent: no such file or directory" - if runtime.GOOS == "windows" { - e = "open nonexistent: The system cannot find the file specified." 
- } - // label ko - if _, _, _, err := parseRun([]string{"--label-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { - t.Fatalf("Expected an error with message '%s', got %v", e, err) - } - // label ok - config, _, _, err := parseRun([]string{"--label-file=testdata/valid.label", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.Labels) != 1 || config.Labels["LABEL1"] != "value1" { - t.Fatalf("Expected a config with [LABEL1:value1], got %v", config.Labels) - } - config, _, _, err = parseRun([]string{"--label-file=testdata/valid.label", "--label=LABEL2=value2", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.Labels) != 2 || config.Labels["LABEL1"] != "value1" || config.Labels["LABEL2"] != "value2" { - t.Fatalf("Expected a config with [LABEL1:value1 LABEL2:value2], got %v", config.Labels) - } -} - -func TestParseEntryPoint(t *testing.T) { - config, _, _, err := parseRun([]string{"--entrypoint=anything", "cmd", "img"}) - if err != nil { - t.Fatal(err) - } - if len(config.Entrypoint) != 1 && config.Entrypoint[0] != "anything" { - t.Fatalf("Expected entrypoint 'anything', got %v", config.Entrypoint) - } -} - -// This tests the cases for binds which are generated through -// DecodeContainerConfig rather than Parse() -func TestDecodeContainerConfigVolumes(t *testing.T) { - - // Root to root - bindsOrVols, _ := setupPlatformVolume([]string{`/:/`}, []string{os.Getenv("SystemDrive") + `\:c:\`}) - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("volume %v should have failed", bindsOrVols) - } - - // No destination path - bindsOrVols, _ = setupPlatformVolume([]string{`/tmp:`}, []string{os.Getenv("TEMP") + `\:`}) - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, 
err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("volume %v should have failed", bindsOrVols) - } - - // // No destination path or mode - bindsOrVols, _ = setupPlatformVolume([]string{`/tmp::`}, []string{os.Getenv("TEMP") + `\::`}) - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("volume %v should have failed", bindsOrVols) - } - - // A whole lot of nothing - bindsOrVols = []string{`:`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("volume %v should have failed", bindsOrVols) - } - - // A whole lot of nothing with no mode - bindsOrVols = []string{`::`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("volume %v should have failed", bindsOrVols) - } - - // Too much including an invalid mode - wTmp := os.Getenv("TEMP") - bindsOrVols, _ = setupPlatformVolume([]string{`/tmp:/tmp:/tmp:/tmp`}, []string{wTmp + ":" + wTmp + ":" + wTmp + ":" + wTmp}) - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("volume %v should have failed", bindsOrVols) - } - - // Windows specific error tests - if runtime.GOOS == "windows" { - // Volume which does not include a drive letter - bindsOrVols = []string{`\tmp`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := 
callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("volume %v should have failed", bindsOrVols) - } - - // Root to C-Drive - bindsOrVols = []string{os.Getenv("SystemDrive") + `\:c:`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("volume %v should have failed", bindsOrVols) - } - - // Container path that does not include a drive letter - bindsOrVols = []string{`c:\windows:\somewhere`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("volume %v should have failed", bindsOrVols) - } - } - - // Linux-specific error tests - if runtime.GOOS != "windows" { - // Just root - bindsOrVols = []string{`/`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("volume %v should have failed", bindsOrVols) - } - - // A single volume that looks like a bind mount passed in Volumes. - // This should be handled as a bind mount, not a volume. - vols := []string{`/foo:/bar`} - if config, hostConfig, err := callDecodeContainerConfig(vols, nil); err != nil { - t.Fatal("Volume /foo:/bar should have succeeded as a volume name") - } else if hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, /foo:/bar should not mount-bind anything. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes[vols[0]]; !exists { - t.Fatalf("Error parsing volume flags, /foo:/bar is missing from volumes. 
Received %v", config.Volumes) - } - - } -} - -// callDecodeContainerConfig is a utility function used by TestDecodeContainerConfigVolumes -// to call DecodeContainerConfig. It effectively does what a client would -// do when calling the daemon by constructing a JSON stream of a -// ContainerConfigWrapper which is populated by the set of volume specs -// passed into it. It returns a config and a hostconfig which can be -// validated to ensure DecodeContainerConfig has manipulated the structures -// correctly. -func callDecodeContainerConfig(volumes []string, binds []string) (*container.Config, *container.HostConfig, error) { - var ( - b []byte - err error - c *container.Config - h *container.HostConfig - ) - w := runconfig.ContainerConfigWrapper{ - Config: &container.Config{ - Volumes: map[string]struct{}{}, - }, - HostConfig: &container.HostConfig{ - NetworkMode: "none", - Binds: binds, - }, - } - for _, v := range volumes { - w.Config.Volumes[v] = struct{}{} - } - if b, err = json.Marshal(w); err != nil { - return nil, nil, errors.Errorf("Error on marshal %s", err.Error()) - } - c, h, _, err = runconfig.DecodeContainerConfig(bytes.NewReader(b)) - if err != nil { - return nil, nil, errors.Errorf("Error parsing %s: %v", string(b), err) - } - if c == nil || h == nil { - return nil, nil, errors.Errorf("Empty config or hostconfig") - } - - return c, h, err -} - -func TestVolumeSplitN(t *testing.T) { - for _, x := range []struct { - input string - n int - expected []string - }{ - {`C:\foo:d:`, -1, []string{`C:\foo`, `d:`}}, - {`:C:\foo:d:`, -1, nil}, - {`/foo:/bar:ro`, 3, []string{`/foo`, `/bar`, `ro`}}, - {`/foo:/bar:ro`, 2, []string{`/foo`, `/bar:ro`}}, - {`C:\foo\:/foo`, -1, []string{`C:\foo\`, `/foo`}}, - - {`d:\`, -1, []string{`d:\`}}, - {`d:`, -1, []string{`d:`}}, - {`d:\path`, -1, []string{`d:\path`}}, - {`d:\path with space`, -1, []string{`d:\path with space`}}, - {`d:\pathandmode:rw`, -1, []string{`d:\pathandmode`, `rw`}}, - {`c:\:d:\`, -1, []string{`c:\`, 
`d:\`}}, - {`c:\windows\:d:`, -1, []string{`c:\windows\`, `d:`}}, - {`c:\windows:d:\s p a c e`, -1, []string{`c:\windows`, `d:\s p a c e`}}, - {`c:\windows:d:\s p a c e:RW`, -1, []string{`c:\windows`, `d:\s p a c e`, `RW`}}, - {`c:\program files:d:\s p a c e i n h o s t d i r`, -1, []string{`c:\program files`, `d:\s p a c e i n h o s t d i r`}}, - {`0123456789name:d:`, -1, []string{`0123456789name`, `d:`}}, - {`MiXeDcAsEnAmE:d:`, -1, []string{`MiXeDcAsEnAmE`, `d:`}}, - {`name:D:`, -1, []string{`name`, `D:`}}, - {`name:D::rW`, -1, []string{`name`, `D:`, `rW`}}, - {`name:D::RW`, -1, []string{`name`, `D:`, `RW`}}, - {`c:/:d:/forward/slashes/are/good/too`, -1, []string{`c:/`, `d:/forward/slashes/are/good/too`}}, - {`c:\Windows`, -1, []string{`c:\Windows`}}, - {`c:\Program Files (x86)`, -1, []string{`c:\Program Files (x86)`}}, - - {``, -1, nil}, - {`.`, -1, []string{`.`}}, - {`..\`, -1, []string{`..\`}}, - {`c:\:..\`, -1, []string{`c:\`, `..\`}}, - {`c:\:d:\:xyzzy`, -1, []string{`c:\`, `d:\`, `xyzzy`}}, - - // Cover directories with one-character name - {`/tmp/x/y:/foo/x/y`, -1, []string{`/tmp/x/y`, `/foo/x/y`}}, - } { - res := volumeSplitN(x.input, x.n) - if len(res) < len(x.expected) { - t.Fatalf("input: %v, expected: %v, got: %v", x.input, x.expected, res) - } - for i, e := range res { - if e != x.expected[i] { - t.Fatalf("input: %v, expected: %v, got: %v", x.input, x.expected, res) - } - } - } -} - -func TestValidateDevice(t *testing.T) { - valid := []string{ - "/home", - "/home:/home", - "/home:/something/else", - "/with space", - "/home:/with space", - "relative:/absolute-path", - "hostPath:/containerPath:r", - "/hostPath:/containerPath:rw", - "/hostPath:/containerPath:mrw", - } - invalid := map[string]string{ - "": "bad format for path: ", - "./": "./ is not an absolute path", - "../": "../ is not an absolute path", - "/:../": "../ is not an absolute path", - "/:path": "path is not an absolute path", - ":": "bad format for path: :", - "/tmp:": " is not an 
absolute path", - ":test": "bad format for path: :test", - ":/test": "bad format for path: :/test", - "tmp:": " is not an absolute path", - ":test:": "bad format for path: :test:", - "::": "bad format for path: ::", - ":::": "bad format for path: :::", - "/tmp:::": "bad format for path: /tmp:::", - ":/tmp::": "bad format for path: :/tmp::", - "path:ro": "ro is not an absolute path", - "path:rr": "rr is not an absolute path", - "a:/b:ro": "bad mode specified: ro", - "a:/b:rr": "bad mode specified: rr", - } - - for _, path := range valid { - if _, err := validateDevice(path); err != nil { - t.Fatalf("ValidateDevice(`%q`) should succeed: error %q", path, err) - } - } - - for path, expectedError := range invalid { - if _, err := validateDevice(path); err == nil { - t.Fatalf("ValidateDevice(`%q`) should have failed validation", path) - } else { - if err.Error() != expectedError { - t.Fatalf("ValidateDevice(`%q`) error should contain %q, got %q", path, expectedError, err.Error()) - } - } - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/pause.go b/fn/vendor/github.com/docker/docker/cli/command/container/pause.go deleted file mode 100644 index 095a0db2c..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/pause.go +++ /dev/null @@ -1,49 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type pauseOptions struct { - containers []string -} - -// NewPauseCommand creates a new cobra.Command for `docker pause` -func NewPauseCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts pauseOptions - - return &cobra.Command{ - Use: "pause CONTAINER [CONTAINER...]", - Short: "Pause all processes within one or more containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return 
runPause(dockerCli, &opts) - }, - } -} - -func runPause(dockerCli *command.DockerCli, opts *pauseOptions) error { - ctx := context.Background() - - var errs []string - errChan := parallelOperation(ctx, opts.containers, dockerCli.Client().ContainerPause) - for _, container := range opts.containers { - if err := <-errChan; err != nil { - errs = append(errs, err.Error()) - continue - } - fmt.Fprintln(dockerCli.Out(), container) - } - if len(errs) > 0 { - return errors.New(strings.Join(errs, "\n")) - } - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/port.go b/fn/vendor/github.com/docker/docker/cli/command/container/port.go deleted file mode 100644 index 2793f6bc6..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/port.go +++ /dev/null @@ -1,78 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/go-connections/nat" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type portOptions struct { - container string - - port string -} - -// NewPortCommand creates a new cobra.Command for `docker port` -func NewPortCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts portOptions - - cmd := &cobra.Command{ - Use: "port CONTAINER [PRIVATE_PORT[/PROTO]]", - Short: "List port mappings or a specific mapping for the container", - Args: cli.RequiresRangeArgs(1, 2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - if len(args) > 1 { - opts.port = args[1] - } - return runPort(dockerCli, &opts) - }, - } - return cmd -} - -func runPort(dockerCli *command.DockerCli, opts *portOptions) error { - ctx := context.Background() - - c, err := dockerCli.Client().ContainerInspect(ctx, opts.container) - if err != nil { - return err - } - - if opts.port != "" { - port := opts.port - proto := "tcp" - parts := strings.SplitN(port, "/", 2) - - if len(parts) 
== 2 && len(parts[1]) != 0 { - port = parts[0] - proto = parts[1] - } - natPort := port + "/" + proto - newP, err := nat.NewPort(proto, port) - if err != nil { - return err - } - if frontends, exists := c.NetworkSettings.Ports[newP]; exists && frontends != nil { - for _, frontend := range frontends { - fmt.Fprintf(dockerCli.Out(), "%s:%s\n", frontend.HostIP, frontend.HostPort) - } - return nil - } - return errors.Errorf("Error: No public port '%s' published for %s", natPort, opts.container) - } - - for from, frontends := range c.NetworkSettings.Ports { - for _, frontend := range frontends { - fmt.Fprintf(dockerCli.Out(), "%s -> %s:%s\n", from, frontend.HostIP, frontend.HostPort) - } - } - - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/prune.go b/fn/vendor/github.com/docker/docker/cli/command/container/prune.go deleted file mode 100644 index cf12dc71f..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/prune.go +++ /dev/null @@ -1,78 +0,0 @@ -package container - -import ( - "fmt" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/opts" - units "github.com/docker/go-units" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type pruneOptions struct { - force bool - filter opts.FilterOpt -} - -// NewPruneCommand returns a new cobra prune command for containers -func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := pruneOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "prune [OPTIONS]", - Short: "Remove all stopped containers", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - spaceReclaimed, output, err := runPrune(dockerCli, opts) - if err != nil { - return err - } - if output != "" { - fmt.Fprintln(dockerCli.Out(), output) - } - fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) - return nil - }, - Tags: 
map[string]string{"version": "1.25"}, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation") - flags.Var(&opts.filter, "filter", "Provide filter values (e.g. 'until=')") - - return cmd -} - -const warning = `WARNING! This will remove all stopped containers. -Are you sure you want to continue?` - -func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed uint64, output string, err error) { - pruneFilters := command.PruneFilters(dockerCli, opts.filter.Value()) - - if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { - return - } - - report, err := dockerCli.Client().ContainersPrune(context.Background(), pruneFilters) - if err != nil { - return - } - - if len(report.ContainersDeleted) > 0 { - output = "Deleted Containers:\n" - for _, id := range report.ContainersDeleted { - output += id + "\n" - } - spaceReclaimed = report.SpaceReclaimed - } - - return -} - -// RunPrune calls the Container Prune API -// This returns the amount of space reclaimed and a detailed output string -func RunPrune(dockerCli *command.DockerCli, filter opts.FilterOpt) (uint64, string, error) { - return runPrune(dockerCli, pruneOptions{force: true, filter: filter}) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/ps_test.go b/fn/vendor/github.com/docker/docker/cli/command/container/ps_test.go deleted file mode 100644 index 62b054527..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/ps_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package container - -import ( - "testing" - - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestBuildContainerListOptions(t *testing.T) { - filters := opts.NewFilterOpt() - assert.NilError(t, filters.Set("foo=bar")) - assert.NilError(t, filters.Set("baz=foo")) - - contexts := []struct { - psOpts *psOptions - expectedAll bool - expectedSize bool - expectedLimit int - 
expectedFilters map[string]string - }{ - { - psOpts: &psOptions{ - all: true, - size: true, - last: 5, - filter: filters, - }, - expectedAll: true, - expectedSize: true, - expectedLimit: 5, - expectedFilters: map[string]string{ - "foo": "bar", - "baz": "foo", - }, - }, - { - psOpts: &psOptions{ - all: true, - size: true, - last: -1, - nLatest: true, - }, - expectedAll: true, - expectedSize: true, - expectedLimit: 1, - expectedFilters: make(map[string]string), - }, - { - psOpts: &psOptions{ - all: true, - size: false, - last: 5, - filter: filters, - // With .Size, size should be true - format: "{{.Size}}", - }, - expectedAll: true, - expectedSize: true, - expectedLimit: 5, - expectedFilters: map[string]string{ - "foo": "bar", - "baz": "foo", - }, - }, - { - psOpts: &psOptions{ - all: true, - size: false, - last: 5, - filter: filters, - // With .Size, size should be true - format: "{{.Size}} {{.CreatedAt}} {{.Networks}}", - }, - expectedAll: true, - expectedSize: true, - expectedLimit: 5, - expectedFilters: map[string]string{ - "foo": "bar", - "baz": "foo", - }, - }, - { - psOpts: &psOptions{ - all: true, - size: false, - last: 5, - filter: filters, - // Without .Size, size should be false - format: "{{.CreatedAt}} {{.Networks}}", - }, - expectedAll: true, - expectedSize: false, - expectedLimit: 5, - expectedFilters: map[string]string{ - "foo": "bar", - "baz": "foo", - }, - }, - } - - for _, c := range contexts { - options, err := buildContainerListOptions(c.psOpts) - assert.NilError(t, err) - - assert.Equal(t, c.expectedAll, options.All) - assert.Equal(t, c.expectedSize, options.Size) - assert.Equal(t, c.expectedLimit, options.Limit) - assert.Equal(t, options.Filters.Len(), len(c.expectedFilters)) - - for k, v := range c.expectedFilters { - f := options.Filters - if !f.ExactMatch(k, v) { - t.Fatalf("Expected filter with key %s to be %s but got %s", k, v, f.Get(k)) - } - } - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/rename.go 
b/fn/vendor/github.com/docker/docker/cli/command/container/rename.go deleted file mode 100644 index 07b4852f4..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/rename.go +++ /dev/null @@ -1,51 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type renameOptions struct { - oldName string - newName string -} - -// NewRenameCommand creates a new cobra.Command for `docker rename` -func NewRenameCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts renameOptions - - cmd := &cobra.Command{ - Use: "rename CONTAINER NEW_NAME", - Short: "Rename a container", - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.oldName = args[0] - opts.newName = args[1] - return runRename(dockerCli, &opts) - }, - } - return cmd -} - -func runRename(dockerCli *command.DockerCli, opts *renameOptions) error { - ctx := context.Background() - - oldName := strings.TrimSpace(opts.oldName) - newName := strings.TrimSpace(opts.newName) - - if oldName == "" || newName == "" { - return errors.New("Error: Neither old nor new names may be empty") - } - - if err := dockerCli.Client().ContainerRename(ctx, oldName, newName); err != nil { - fmt.Fprintln(dockerCli.Err(), err) - return errors.Errorf("Error: failed to rename container named %s", oldName) - } - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/restart.go b/fn/vendor/github.com/docker/docker/cli/command/container/restart.go deleted file mode 100644 index 73cd2507e..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/restart.go +++ /dev/null @@ -1,62 +0,0 @@ -package container - -import ( - "fmt" - "strings" - "time" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" - 
"golang.org/x/net/context" -) - -type restartOptions struct { - nSeconds int - nSecondsChanged bool - - containers []string -} - -// NewRestartCommand creates a new cobra.Command for `docker restart` -func NewRestartCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts restartOptions - - cmd := &cobra.Command{ - Use: "restart [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Restart one or more containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - opts.nSecondsChanged = cmd.Flags().Changed("time") - return runRestart(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.IntVarP(&opts.nSeconds, "time", "t", 10, "Seconds to wait for stop before killing the container") - return cmd -} - -func runRestart(dockerCli *command.DockerCli, opts *restartOptions) error { - ctx := context.Background() - var errs []string - var timeout *time.Duration - if opts.nSecondsChanged { - timeoutValue := time.Duration(opts.nSeconds) * time.Second - timeout = &timeoutValue - } - - for _, name := range opts.containers { - if err := dockerCli.Client().ContainerRestart(ctx, name, timeout); err != nil { - errs = append(errs, err.Error()) - continue - } - fmt.Fprintln(dockerCli.Out(), name) - } - if len(errs) > 0 { - return errors.New(strings.Join(errs, "\n")) - } - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/rm.go b/fn/vendor/github.com/docker/docker/cli/command/container/rm.go deleted file mode 100644 index 887b5c5d3..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/rm.go +++ /dev/null @@ -1,73 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type rmOptions struct { - rmVolumes bool - rmLink bool - force bool - - containers []string 
-} - -// NewRmCommand creates a new cobra.Command for `docker rm` -func NewRmCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts rmOptions - - cmd := &cobra.Command{ - Use: "rm [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Remove one or more containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runRm(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.rmVolumes, "volumes", "v", false, "Remove the volumes associated with the container") - flags.BoolVarP(&opts.rmLink, "link", "l", false, "Remove the specified link") - flags.BoolVarP(&opts.force, "force", "f", false, "Force the removal of a running container (uses SIGKILL)") - return cmd -} - -func runRm(dockerCli *command.DockerCli, opts *rmOptions) error { - ctx := context.Background() - - var errs []string - options := types.ContainerRemoveOptions{ - RemoveVolumes: opts.rmVolumes, - RemoveLinks: opts.rmLink, - Force: opts.force, - } - - errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, container string) error { - container = strings.Trim(container, "/") - if container == "" { - return errors.New("Container name cannot be empty") - } - return dockerCli.Client().ContainerRemove(ctx, container, options) - }) - - for _, name := range opts.containers { - if err := <-errChan; err != nil { - errs = append(errs, err.Error()) - continue - } - fmt.Fprintln(dockerCli.Out(), name) - } - if len(errs) > 0 { - return errors.New(strings.Join(errs, "\n")) - } - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/run.go b/fn/vendor/github.com/docker/docker/cli/command/container/run.go deleted file mode 100644 index bab6a9cf1..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/run.go +++ /dev/null @@ -1,296 +0,0 @@ -package container - -import ( - "fmt" - "io" - "net/http/httputil" - "os" - "runtime" - "strings" - "syscall" - - 
"github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/promise" - "github.com/docker/docker/pkg/signal" - "github.com/docker/libnetwork/resolvconf/dns" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "golang.org/x/net/context" -) - -type runOptions struct { - detach bool - sigProxy bool - name string - detachKeys string -} - -// NewRunCommand create a new `docker run` command -func NewRunCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts runOptions - var copts *containerOptions - - cmd := &cobra.Command{ - Use: "run [OPTIONS] IMAGE [COMMAND] [ARG...]", - Short: "Run a command in a new container", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - copts.Image = args[0] - if len(args) > 1 { - copts.Args = args[1:] - } - return runRun(dockerCli, cmd.Flags(), &opts, copts) - }, - } - - flags := cmd.Flags() - flags.SetInterspersed(false) - - // These are flags not stored in Config/HostConfig - flags.BoolVarP(&opts.detach, "detach", "d", false, "Run container in background and print container ID") - flags.BoolVar(&opts.sigProxy, "sig-proxy", true, "Proxy received signals to the process") - flags.StringVar(&opts.name, "name", "", "Assign a name to the container") - flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") - - // Add an explicit help that doesn't have a `-h` to prevent the conflict - // with hostname - flags.Bool("help", false, "Print usage") - - command.AddTrustVerificationFlags(flags) - copts = addFlags(flags) - return cmd -} - -func warnOnOomKillDisable(hostConfig container.HostConfig, stderr io.Writer) { - if hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable && hostConfig.Memory == 0 { - fmt.Fprintln(stderr, "WARNING: Disabling the OOM 
killer on containers without setting a '-m/--memory' limit may be dangerous.") - } -} - -// check the DNS settings passed via --dns against localhost regexp to warn if -// they are trying to set a DNS to a localhost address -func warnOnLocalhostDNS(hostConfig container.HostConfig, stderr io.Writer) { - for _, dnsIP := range hostConfig.DNS { - if dns.IsLocalhost(dnsIP) { - fmt.Fprintf(stderr, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP) - return - } - } -} - -func runRun(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts *runOptions, copts *containerOptions) error { - containerConfig, err := parse(flags, copts) - // just in case the parse does not exit - if err != nil { - reportError(dockerCli.Err(), "run", err.Error(), true) - return cli.StatusError{StatusCode: 125} - } - return runContainer(dockerCli, opts, copts, containerConfig) -} - -func runContainer(dockerCli *command.DockerCli, opts *runOptions, copts *containerOptions, containerConfig *containerConfig) error { - config := containerConfig.Config - hostConfig := containerConfig.HostConfig - stdout, stderr := dockerCli.Out(), dockerCli.Err() - client := dockerCli.Client() - - // TODO: pass this as an argument - cmdPath := "run" - - warnOnOomKillDisable(*hostConfig, stderr) - warnOnLocalhostDNS(*hostConfig, stderr) - - config.ArgsEscaped = false - - if !opts.detach { - if err := dockerCli.In().CheckTty(config.AttachStdin, config.Tty); err != nil { - return err - } - } else { - if copts.attach.Len() != 0 { - return errors.New("Conflicting options: -a and -d") - } - - config.AttachStdin = false - config.AttachStdout = false - config.AttachStderr = false - config.StdinOnce = false - } - - // Disable sigProxy when in TTY mode - if config.Tty { - opts.sigProxy = false - } - - // Telling the Windows daemon the initial size of the tty during start makes - // a far better user experience rather than relying on subsequent resizes - // to cause things to catch up. 
- if runtime.GOOS == "windows" { - hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.Out().GetTtySize() - } - - ctx, cancelFun := context.WithCancel(context.Background()) - - createResponse, err := createContainer(ctx, dockerCli, containerConfig, opts.name) - if err != nil { - reportError(stderr, cmdPath, err.Error(), true) - return runStartContainerErr(err) - } - if opts.sigProxy { - sigc := ForwardAllSignals(ctx, dockerCli, createResponse.ID) - defer signal.StopCatch(sigc) - } - var ( - waitDisplayID chan struct{} - errCh chan error - ) - if !config.AttachStdout && !config.AttachStderr { - // Make this asynchronous to allow the client to write to stdin before having to read the ID - waitDisplayID = make(chan struct{}) - go func() { - defer close(waitDisplayID) - fmt.Fprintln(stdout, createResponse.ID) - }() - } - attach := config.AttachStdin || config.AttachStdout || config.AttachStderr - if attach { - if opts.detachKeys != "" { - dockerCli.ConfigFile().DetachKeys = opts.detachKeys - } - - close, err := attachContainer(ctx, dockerCli, &errCh, config, createResponse.ID) - defer close() - if err != nil { - return err - } - } - - statusChan := waitExitOrRemoved(ctx, dockerCli, createResponse.ID, copts.autoRemove) - - //start the container - if err := client.ContainerStart(ctx, createResponse.ID, types.ContainerStartOptions{}); err != nil { - // If we have holdHijackedConnection, we should notify - // holdHijackedConnection we are going to exit and wait - // to avoid the terminal are not restored. 
- if attach { - cancelFun() - <-errCh - } - - reportError(stderr, cmdPath, err.Error(), false) - if copts.autoRemove { - // wait container to be removed - <-statusChan - } - return runStartContainerErr(err) - } - - if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && dockerCli.Out().IsTerminal() { - if err := MonitorTtySize(ctx, dockerCli, createResponse.ID, false); err != nil { - fmt.Fprintln(stderr, "Error monitoring TTY size:", err) - } - } - - if errCh != nil { - if err := <-errCh; err != nil { - logrus.Debugf("Error hijack: %s", err) - return err - } - } - - // Detached mode: wait for the id to be displayed and return. - if !config.AttachStdout && !config.AttachStderr { - // Detached mode - <-waitDisplayID - return nil - } - - status := <-statusChan - if status != 0 { - return cli.StatusError{StatusCode: status} - } - return nil -} - -func attachContainer( - ctx context.Context, - dockerCli *command.DockerCli, - errCh *chan error, - config *container.Config, - containerID string, -) (func(), error) { - stdout, stderr := dockerCli.Out(), dockerCli.Err() - var ( - out, cerr io.Writer - in io.ReadCloser - ) - if config.AttachStdin { - in = dockerCli.In() - } - if config.AttachStdout { - out = stdout - } - if config.AttachStderr { - if config.Tty { - cerr = stdout - } else { - cerr = stderr - } - } - - options := types.ContainerAttachOptions{ - Stream: true, - Stdin: config.AttachStdin, - Stdout: config.AttachStdout, - Stderr: config.AttachStderr, - DetachKeys: dockerCli.ConfigFile().DetachKeys, - } - - resp, errAttach := dockerCli.Client().ContainerAttach(ctx, containerID, options) - if errAttach != nil && errAttach != httputil.ErrPersistEOF { - // ContainerAttach returns an ErrPersistEOF (connection closed) - // means server met an error and put it in Hijacked connection - // keep the error and read detailed error message from hijacked connection later - return nil, errAttach - } - - *errCh = promise.Go(func() error { - if 
errHijack := holdHijackedConnection(ctx, dockerCli, config.Tty, in, out, cerr, resp); errHijack != nil { - return errHijack - } - return errAttach - }) - return resp.Close, nil -} - -// reportError is a utility method that prints a user-friendly message -// containing the error that occurred during parsing and a suggestion to get help -func reportError(stderr io.Writer, name string, str string, withHelp bool) { - str = strings.TrimSuffix(str, ".") + "." - if withHelp { - str += "\nSee '" + os.Args[0] + " " + name + " --help'." - } - fmt.Fprintf(stderr, "%s: %s\n", os.Args[0], str) -} - -// if container start fails with 'not found'/'no such' error, return 127 -// if container start fails with 'permission denied' error, return 126 -// return 125 for generic docker daemon failures -func runStartContainerErr(err error) error { - trimmedErr := strings.TrimPrefix(err.Error(), "Error response from daemon: ") - statusError := cli.StatusError{StatusCode: 125} - if strings.Contains(trimmedErr, "executable file not found") || - strings.Contains(trimmedErr, "no such file or directory") || - strings.Contains(trimmedErr, "system cannot find the file specified") { - statusError = cli.StatusError{StatusCode: 127} - } else if strings.Contains(trimmedErr, syscall.EACCES.Error()) { - statusError = cli.StatusError{StatusCode: 126} - } - - return statusError -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/start.go b/fn/vendor/github.com/docker/docker/cli/command/container/start.go deleted file mode 100644 index 7702cd4a7..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/start.go +++ /dev/null @@ -1,179 +0,0 @@ -package container - -import ( - "fmt" - "io" - "net/http/httputil" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/promise" - "github.com/docker/docker/pkg/signal" - "github.com/pkg/errors" - "github.com/spf13/cobra" - 
"golang.org/x/net/context" -) - -type startOptions struct { - attach bool - openStdin bool - detachKeys string - checkpoint string - checkpointDir string - - containers []string -} - -// NewStartCommand creates a new cobra.Command for `docker start` -func NewStartCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts startOptions - - cmd := &cobra.Command{ - Use: "start [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Start one or more stopped containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runStart(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.attach, "attach", "a", false, "Attach STDOUT/STDERR and forward signals") - flags.BoolVarP(&opts.openStdin, "interactive", "i", false, "Attach container's STDIN") - flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") - - flags.StringVar(&opts.checkpoint, "checkpoint", "", "Restore from this checkpoint") - flags.SetAnnotation("checkpoint", "experimental", nil) - flags.StringVar(&opts.checkpointDir, "checkpoint-dir", "", "Use a custom checkpoint storage directory") - flags.SetAnnotation("checkpoint-dir", "experimental", nil) - return cmd -} - -func runStart(dockerCli *command.DockerCli, opts *startOptions) error { - ctx, cancelFun := context.WithCancel(context.Background()) - - if opts.attach || opts.openStdin { - // We're going to attach to a container. - // 1. Ensure we only have one container. - if len(opts.containers) > 1 { - return errors.New("You cannot start and attach multiple containers at once.") - } - - // 2. Attach to the container. 
- container := opts.containers[0] - c, err := dockerCli.Client().ContainerInspect(ctx, container) - if err != nil { - return err - } - - // We always use c.ID instead of container to maintain consistency during `docker start` - if !c.Config.Tty { - sigc := ForwardAllSignals(ctx, dockerCli, c.ID) - defer signal.StopCatch(sigc) - } - - if opts.detachKeys != "" { - dockerCli.ConfigFile().DetachKeys = opts.detachKeys - } - - options := types.ContainerAttachOptions{ - Stream: true, - Stdin: opts.openStdin && c.Config.OpenStdin, - Stdout: true, - Stderr: true, - DetachKeys: dockerCli.ConfigFile().DetachKeys, - } - - var in io.ReadCloser - - if options.Stdin { - in = dockerCli.In() - } - - resp, errAttach := dockerCli.Client().ContainerAttach(ctx, c.ID, options) - if errAttach != nil && errAttach != httputil.ErrPersistEOF { - // ContainerAttach return an ErrPersistEOF (connection closed) - // means server met an error and already put it in Hijacked connection, - // we would keep the error and read the detailed error message from hijacked connection - return errAttach - } - defer resp.Close() - cErr := promise.Go(func() error { - errHijack := holdHijackedConnection(ctx, dockerCli, c.Config.Tty, in, dockerCli.Out(), dockerCli.Err(), resp) - if errHijack == nil { - return errAttach - } - return errHijack - }) - - // 3. We should open a channel for receiving status code of the container - // no matter it's detached, removed on daemon side(--rm) or exit normally. - statusChan := waitExitOrRemoved(ctx, dockerCli, c.ID, c.HostConfig.AutoRemove) - startOptions := types.ContainerStartOptions{ - CheckpointID: opts.checkpoint, - CheckpointDir: opts.checkpointDir, - } - - // 4. Start the container. - if err := dockerCli.Client().ContainerStart(ctx, c.ID, startOptions); err != nil { - cancelFun() - <-cErr - if c.HostConfig.AutoRemove { - // wait container to be removed - <-statusChan - } - return err - } - - // 5. Wait for attachment to break. 
- if c.Config.Tty && dockerCli.Out().IsTerminal() { - if err := MonitorTtySize(ctx, dockerCli, c.ID, false); err != nil { - fmt.Fprintln(dockerCli.Err(), "Error monitoring TTY size:", err) - } - } - if attchErr := <-cErr; attchErr != nil { - return attchErr - } - - if status := <-statusChan; status != 0 { - return cli.StatusError{StatusCode: status} - } - } else if opts.checkpoint != "" { - if len(opts.containers) > 1 { - return errors.New("You cannot restore multiple containers at once.") - } - container := opts.containers[0] - startOptions := types.ContainerStartOptions{ - CheckpointID: opts.checkpoint, - CheckpointDir: opts.checkpointDir, - } - return dockerCli.Client().ContainerStart(ctx, container, startOptions) - - } else { - // We're not going to attach to anything. - // Start as many containers as we want. - return startContainersWithoutAttachments(ctx, dockerCli, opts.containers) - } - - return nil -} - -func startContainersWithoutAttachments(ctx context.Context, dockerCli *command.DockerCli, containers []string) error { - var failedContainers []string - for _, container := range containers { - if err := dockerCli.Client().ContainerStart(ctx, container, types.ContainerStartOptions{}); err != nil { - fmt.Fprintln(dockerCli.Err(), err) - failedContainers = append(failedContainers, container) - continue - } - fmt.Fprintln(dockerCli.Out(), container) - } - - if len(failedContainers) > 0 { - return errors.Errorf("Error: failed to start containers: %s", strings.Join(failedContainers, ", ")) - } - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/stats.go b/fn/vendor/github.com/docker/docker/cli/command/container/stats.go deleted file mode 100644 index c420e8151..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/stats.go +++ /dev/null @@ -1,242 +0,0 @@ -package container - -import ( - "fmt" - "io" - "strings" - "sync" - "time" - - "github.com/docker/docker/api/types" - 
"github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type statsOptions struct { - all bool - noStream bool - format string - containers []string -} - -// NewStatsCommand creates a new cobra.Command for `docker stats` -func NewStatsCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts statsOptions - - cmd := &cobra.Command{ - Use: "stats [OPTIONS] [CONTAINER...]", - Short: "Display a live stream of container(s) resource usage statistics", - Args: cli.RequiresMinArgs(0), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runStats(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)") - flags.BoolVar(&opts.noStream, "no-stream", false, "Disable streaming stats and only pull the first result") - flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template") - return cmd -} - -// runStats displays a live stream of resource usage statistics for one or more containers. -// This shows real-time information on CPU usage, memory usage, and network I/O. -func runStats(dockerCli *command.DockerCli, opts *statsOptions) error { - showAll := len(opts.containers) == 0 - closeChan := make(chan error) - - ctx := context.Background() - - // monitorContainerEvents watches for container creation and removal (only - // used when calling `docker stats` without arguments). 
- monitorContainerEvents := func(started chan<- struct{}, c chan events.Message) { - f := filters.NewArgs() - f.Add("type", "container") - options := types.EventsOptions{ - Filters: f, - } - - eventq, errq := dockerCli.Client().Events(ctx, options) - - // Whether we successfully subscribed to eventq or not, we can now - // unblock the main goroutine. - close(started) - - for { - select { - case event := <-eventq: - c <- event - case err := <-errq: - closeChan <- err - return - } - } - } - - // Get the daemonOSType if not set already - if daemonOSType == "" { - svctx := context.Background() - sv, err := dockerCli.Client().ServerVersion(svctx) - if err != nil { - return err - } - daemonOSType = sv.Os - } - - // waitFirst is a WaitGroup to wait first stat data's reach for each container - waitFirst := &sync.WaitGroup{} - - cStats := stats{} - // getContainerList simulates creation event for all previously existing - // containers (only used when calling `docker stats` without arguments). - getContainerList := func() { - options := types.ContainerListOptions{ - All: opts.all, - } - cs, err := dockerCli.Client().ContainerList(ctx, options) - if err != nil { - closeChan <- err - } - for _, container := range cs { - s := formatter.NewContainerStats(container.ID[:12], daemonOSType) - if cStats.add(s) { - waitFirst.Add(1) - go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) - } - } - } - - if showAll { - // If no names were specified, start a long running goroutine which - // monitors container events. We make sure we're subscribed before - // retrieving the list of running containers to avoid a race where we - // would "miss" a creation. 
- started := make(chan struct{}) - eh := command.InitEventHandler() - eh.Handle("create", func(e events.Message) { - if opts.all { - s := formatter.NewContainerStats(e.ID[:12], daemonOSType) - if cStats.add(s) { - waitFirst.Add(1) - go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) - } - } - }) - - eh.Handle("start", func(e events.Message) { - s := formatter.NewContainerStats(e.ID[:12], daemonOSType) - if cStats.add(s) { - waitFirst.Add(1) - go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) - } - }) - - eh.Handle("die", func(e events.Message) { - if !opts.all { - cStats.remove(e.ID[:12]) - } - }) - - eventChan := make(chan events.Message) - go eh.Watch(eventChan) - go monitorContainerEvents(started, eventChan) - defer close(eventChan) - <-started - - // Start a short-lived goroutine to retrieve the initial list of - // containers. - getContainerList() - } else { - // Artificially send creation events for the containers we were asked to - // monitor (same code path than we use when monitoring all containers). - for _, name := range opts.containers { - s := formatter.NewContainerStats(name, daemonOSType) - if cStats.add(s) { - waitFirst.Add(1) - go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) - } - } - - // We don't expect any asynchronous errors: closeChan can be closed. - close(closeChan) - - // Do a quick pause to detect any error with the provided list of - // container names. 
- time.Sleep(1500 * time.Millisecond) - var errs []string - cStats.mu.Lock() - for _, c := range cStats.cs { - if err := c.GetError(); err != nil { - errs = append(errs, err.Error()) - } - } - cStats.mu.Unlock() - if len(errs) > 0 { - return errors.New(strings.Join(errs, "\n")) - } - } - - // before print to screen, make sure each container get at least one valid stat data - waitFirst.Wait() - format := opts.format - if len(format) == 0 { - if len(dockerCli.ConfigFile().StatsFormat) > 0 { - format = dockerCli.ConfigFile().StatsFormat - } else { - format = formatter.TableFormatKey - } - } - statsCtx := formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewStatsFormat(format, daemonOSType), - } - cleanScreen := func() { - if !opts.noStream { - fmt.Fprint(dockerCli.Out(), "\033[2J") - fmt.Fprint(dockerCli.Out(), "\033[H") - } - } - - var err error - for range time.Tick(500 * time.Millisecond) { - cleanScreen() - ccstats := []formatter.StatsEntry{} - cStats.mu.Lock() - for _, c := range cStats.cs { - ccstats = append(ccstats, c.GetStatistics()) - } - cStats.mu.Unlock() - if err = formatter.ContainerStatsWrite(statsCtx, ccstats, daemonOSType); err != nil { - break - } - if len(cStats.cs) == 0 && !showAll { - break - } - if opts.noStream { - break - } - select { - case err, ok := <-closeChan: - if ok { - if err != nil { - // this is suppressing "unexpected EOF" in the cli when the - // daemon restarts so it shutdowns cleanly - if err == io.ErrUnexpectedEOF { - return nil - } - return err - } - } - default: - // just skip - } - } - return err -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/stats_helpers.go b/fn/vendor/github.com/docker/docker/cli/command/container/stats_helpers.go deleted file mode 100644 index 5cbcf03e4..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/stats_helpers.go +++ /dev/null @@ -1,229 +0,0 @@ -package container - -import ( - "encoding/json" - "io" - "strings" - "sync" - "time" - - 
"github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli/command/formatter" - "github.com/docker/docker/client" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -type stats struct { - ostype string - mu sync.Mutex - cs []*formatter.ContainerStats -} - -// daemonOSType is set once we have at least one stat for a container -// from the daemon. It is used to ensure we print the right header based -// on the daemon platform. -var daemonOSType string - -func (s *stats) add(cs *formatter.ContainerStats) bool { - s.mu.Lock() - defer s.mu.Unlock() - if _, exists := s.isKnownContainer(cs.Container); !exists { - s.cs = append(s.cs, cs) - return true - } - return false -} - -func (s *stats) remove(id string) { - s.mu.Lock() - if i, exists := s.isKnownContainer(id); exists { - s.cs = append(s.cs[:i], s.cs[i+1:]...) - } - s.mu.Unlock() -} - -func (s *stats) isKnownContainer(cid string) (int, bool) { - for i, c := range s.cs { - if c.Container == cid { - return i, true - } - } - return -1, false -} - -func collect(ctx context.Context, s *formatter.ContainerStats, cli client.APIClient, streamStats bool, waitFirst *sync.WaitGroup) { - logrus.Debugf("collecting stats for %s", s.Container) - var ( - getFirst bool - previousCPU uint64 - previousSystem uint64 - u = make(chan error, 1) - ) - - defer func() { - // if error happens and we get nothing of stats, release wait group whatever - if !getFirst { - getFirst = true - waitFirst.Done() - } - }() - - response, err := cli.ContainerStats(ctx, s.Container, streamStats) - if err != nil { - s.SetError(err) - return - } - defer response.Body.Close() - - dec := json.NewDecoder(response.Body) - go func() { - for { - var ( - v *types.StatsJSON - memPercent, cpuPercent float64 - blkRead, blkWrite uint64 // Only used on Linux - mem, memLimit, memPerc float64 - pidsStatsCurrent uint64 - ) - - if err := dec.Decode(&v); err != nil { - dec = json.NewDecoder(io.MultiReader(dec.Buffered(), 
response.Body)) - u <- err - if err == io.EOF { - break - } - time.Sleep(100 * time.Millisecond) - continue - } - - daemonOSType = response.OSType - - if daemonOSType != "windows" { - // MemoryStats.Limit will never be 0 unless the container is not running and we haven't - // got any data from cgroup - if v.MemoryStats.Limit != 0 { - memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0 - } - previousCPU = v.PreCPUStats.CPUUsage.TotalUsage - previousSystem = v.PreCPUStats.SystemUsage - cpuPercent = calculateCPUPercentUnix(previousCPU, previousSystem, v) - blkRead, blkWrite = calculateBlockIO(v.BlkioStats) - mem = float64(v.MemoryStats.Usage) - memLimit = float64(v.MemoryStats.Limit) - memPerc = memPercent - pidsStatsCurrent = v.PidsStats.Current - } else { - cpuPercent = calculateCPUPercentWindows(v) - blkRead = v.StorageStats.ReadSizeBytes - blkWrite = v.StorageStats.WriteSizeBytes - mem = float64(v.MemoryStats.PrivateWorkingSet) - } - netRx, netTx := calculateNetwork(v.Networks) - s.SetStatistics(formatter.StatsEntry{ - Name: v.Name, - ID: v.ID, - CPUPercentage: cpuPercent, - Memory: mem, - MemoryPercentage: memPerc, - MemoryLimit: memLimit, - NetworkRx: netRx, - NetworkTx: netTx, - BlockRead: float64(blkRead), - BlockWrite: float64(blkWrite), - PidsCurrent: pidsStatsCurrent, - }) - u <- nil - if !streamStats { - return - } - } - }() - for { - select { - case <-time.After(2 * time.Second): - // zero out the values if we have not received an update within - // the specified duration. 
- s.SetErrorAndReset(errors.New("timeout waiting for stats")) - // if this is the first stat you get, release WaitGroup - if !getFirst { - getFirst = true - waitFirst.Done() - } - case err := <-u: - s.SetError(err) - if err == io.EOF { - break - } - if err != nil { - continue - } - // if this is the first stat you get, release WaitGroup - if !getFirst { - getFirst = true - waitFirst.Done() - } - } - if !streamStats { - return - } - } -} - -func calculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 { - var ( - cpuPercent = 0.0 - // calculate the change for the cpu usage of the container in between readings - cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU) - // calculate the change for the entire system between readings - systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem) - onlineCPUs = float64(v.CPUStats.OnlineCPUs) - ) - - if onlineCPUs == 0.0 { - onlineCPUs = float64(len(v.CPUStats.CPUUsage.PercpuUsage)) - } - if systemDelta > 0.0 && cpuDelta > 0.0 { - cpuPercent = (cpuDelta / systemDelta) * onlineCPUs * 100.0 - } - return cpuPercent -} - -func calculateCPUPercentWindows(v *types.StatsJSON) float64 { - // Max number of 100ns intervals between the previous time read and now - possIntervals := uint64(v.Read.Sub(v.PreRead).Nanoseconds()) // Start with number of ns intervals - possIntervals /= 100 // Convert to number of 100ns intervals - possIntervals *= uint64(v.NumProcs) // Multiple by the number of processors - - // Intervals used - intervalsUsed := v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage - - // Percentage avoiding divide-by-zero - if possIntervals > 0 { - return float64(intervalsUsed) / float64(possIntervals) * 100.0 - } - return 0.00 -} - -func calculateBlockIO(blkio types.BlkioStats) (blkRead uint64, blkWrite uint64) { - for _, bioEntry := range blkio.IoServiceBytesRecursive { - switch strings.ToLower(bioEntry.Op) { - case "read": - blkRead = blkRead + 
bioEntry.Value - case "write": - blkWrite = blkWrite + bioEntry.Value - } - } - return -} - -func calculateNetwork(network map[string]types.NetworkStats) (float64, float64) { - var rx, tx float64 - - for _, v := range network { - rx += float64(v.RxBytes) - tx += float64(v.TxBytes) - } - return rx, tx -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/stats_unit_test.go b/fn/vendor/github.com/docker/docker/cli/command/container/stats_unit_test.go deleted file mode 100644 index 612914c9c..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/stats_unit_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package container - -import ( - "testing" - - "github.com/docker/docker/api/types" -) - -func TestCalculateBlockIO(t *testing.T) { - blkio := types.BlkioStats{ - IoServiceBytesRecursive: []types.BlkioStatEntry{{Major: 8, Minor: 0, Op: "read", Value: 1234}, {Major: 8, Minor: 1, Op: "read", Value: 4567}, {Major: 8, Minor: 0, Op: "write", Value: 123}, {Major: 8, Minor: 1, Op: "write", Value: 456}}, - } - blkRead, blkWrite := calculateBlockIO(blkio) - if blkRead != 5801 { - t.Fatalf("blkRead = %d, want 5801", blkRead) - } - if blkWrite != 579 { - t.Fatalf("blkWrite = %d, want 579", blkWrite) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/stop.go b/fn/vendor/github.com/docker/docker/cli/command/container/stop.go deleted file mode 100644 index 32729e1ea..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/stop.go +++ /dev/null @@ -1,67 +0,0 @@ -package container - -import ( - "fmt" - "strings" - "time" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type stopOptions struct { - time int - timeChanged bool - - containers []string -} - -// NewStopCommand creates a new cobra.Command for `docker stop` -func NewStopCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts 
stopOptions - - cmd := &cobra.Command{ - Use: "stop [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Stop one or more running containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - opts.timeChanged = cmd.Flags().Changed("time") - return runStop(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.IntVarP(&opts.time, "time", "t", 10, "Seconds to wait for stop before killing it") - return cmd -} - -func runStop(dockerCli *command.DockerCli, opts *stopOptions) error { - ctx := context.Background() - - var timeout *time.Duration - if opts.timeChanged { - timeoutValue := time.Duration(opts.time) * time.Second - timeout = &timeoutValue - } - - var errs []string - - errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, id string) error { - return dockerCli.Client().ContainerStop(ctx, id, timeout) - }) - for _, container := range opts.containers { - if err := <-errChan; err != nil { - errs = append(errs, err.Error()) - continue - } - fmt.Fprintln(dockerCli.Out(), container) - } - if len(errs) > 0 { - return errors.New(strings.Join(errs, "\n")) - } - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/testdata/utf16.env b/fn/vendor/github.com/docker/docker/cli/command/container/testdata/utf16.env deleted file mode 100755 index 3a73358fffbc0d5d3d4df985ccf2f4a1a29cdb2a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 54 ucmezW&yB$!2yGdh7#tab7 0 { - return errors.New(strings.Join(errs, "\n")) - } - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/update.go b/fn/vendor/github.com/docker/docker/cli/command/container/update.go deleted file mode 100644 index a650815e8..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/update.go +++ /dev/null @@ -1,134 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - containertypes 
"github.com/docker/docker/api/types/container" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type updateOptions struct { - blkioWeight uint16 - cpuPeriod int64 - cpuQuota int64 - cpuRealtimePeriod int64 - cpuRealtimeRuntime int64 - cpusetCpus string - cpusetMems string - cpuShares int64 - memory opts.MemBytes - memoryReservation opts.MemBytes - memorySwap opts.MemSwapBytes - kernelMemory opts.MemBytes - restartPolicy string - cpus opts.NanoCPUs - - nFlag int - - containers []string -} - -// NewUpdateCommand creates a new cobra.Command for `docker update` -func NewUpdateCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts updateOptions - - cmd := &cobra.Command{ - Use: "update [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Update configuration of one or more containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - opts.nFlag = cmd.Flags().NFlag() - return runUpdate(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.Uint16Var(&opts.blkioWeight, "blkio-weight", 0, "Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)") - flags.Int64Var(&opts.cpuPeriod, "cpu-period", 0, "Limit CPU CFS (Completely Fair Scheduler) period") - flags.Int64Var(&opts.cpuQuota, "cpu-quota", 0, "Limit CPU CFS (Completely Fair Scheduler) quota") - flags.Int64Var(&opts.cpuRealtimePeriod, "cpu-rt-period", 0, "Limit the CPU real-time period in microseconds") - flags.SetAnnotation("cpu-rt-period", "version", []string{"1.25"}) - flags.Int64Var(&opts.cpuRealtimeRuntime, "cpu-rt-runtime", 0, "Limit the CPU real-time runtime in microseconds") - flags.SetAnnotation("cpu-rt-runtime", "version", []string{"1.25"}) - flags.StringVar(&opts.cpusetCpus, "cpuset-cpus", "", "CPUs in which to 
allow execution (0-3, 0,1)") - flags.StringVar(&opts.cpusetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") - flags.Int64VarP(&opts.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") - flags.VarP(&opts.memory, "memory", "m", "Memory limit") - flags.Var(&opts.memoryReservation, "memory-reservation", "Memory soft limit") - flags.Var(&opts.memorySwap, "memory-swap", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") - flags.Var(&opts.kernelMemory, "kernel-memory", "Kernel memory limit") - flags.StringVar(&opts.restartPolicy, "restart", "", "Restart policy to apply when a container exits") - - flags.Var(&opts.cpus, "cpus", "Number of CPUs") - flags.SetAnnotation("cpus", "version", []string{"1.29"}) - - return cmd -} - -func runUpdate(dockerCli *command.DockerCli, opts *updateOptions) error { - var err error - - if opts.nFlag == 0 { - return errors.New("You must provide one or more flags when using this command.") - } - - var restartPolicy containertypes.RestartPolicy - if opts.restartPolicy != "" { - restartPolicy, err = runconfigopts.ParseRestartPolicy(opts.restartPolicy) - if err != nil { - return err - } - } - - resources := containertypes.Resources{ - BlkioWeight: opts.blkioWeight, - CpusetCpus: opts.cpusetCpus, - CpusetMems: opts.cpusetMems, - CPUShares: opts.cpuShares, - Memory: opts.memory.Value(), - MemoryReservation: opts.memoryReservation.Value(), - MemorySwap: opts.memorySwap.Value(), - KernelMemory: opts.kernelMemory.Value(), - CPUPeriod: opts.cpuPeriod, - CPUQuota: opts.cpuQuota, - CPURealtimePeriod: opts.cpuRealtimePeriod, - CPURealtimeRuntime: opts.cpuRealtimeRuntime, - NanoCPUs: opts.cpus.Value(), - } - - updateConfig := containertypes.UpdateConfig{ - Resources: resources, - RestartPolicy: restartPolicy, - } - - ctx := context.Background() - - var ( - warns []string - errs []string - ) - for _, container := range opts.containers { - r, err := dockerCli.Client().ContainerUpdate(ctx, container, 
updateConfig) - if err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintln(dockerCli.Out(), container) - } - warns = append(warns, r.Warnings...) - } - if len(warns) > 0 { - fmt.Fprintln(dockerCli.Out(), strings.Join(warns, "\n")) - } - if len(errs) > 0 { - return errors.New(strings.Join(errs, "\n")) - } - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/utils.go b/fn/vendor/github.com/docker/docker/cli/command/container/utils.go deleted file mode 100644 index e4664b745..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/utils.go +++ /dev/null @@ -1,142 +0,0 @@ -package container - -import ( - "strconv" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/cli/command" - clientapi "github.com/docker/docker/client" - "golang.org/x/net/context" -) - -func waitExitOrRemoved(ctx context.Context, dockerCli *command.DockerCli, containerID string, waitRemove bool) chan int { - if len(containerID) == 0 { - // containerID can never be empty - panic("Internal Error: waitExitOrRemoved needs a containerID as parameter") - } - - var removeErr error - statusChan := make(chan int) - exitCode := 125 - - // Get events via Events API - f := filters.NewArgs() - f.Add("type", "container") - f.Add("container", containerID) - options := types.EventsOptions{ - Filters: f, - } - eventCtx, cancel := context.WithCancel(ctx) - eventq, errq := dockerCli.Client().Events(eventCtx, options) - - eventProcessor := func(e events.Message) bool { - stopProcessing := false - switch e.Status { - case "die": - if v, ok := e.Actor.Attributes["exitCode"]; ok { - code, cerr := strconv.Atoi(v) - if cerr != nil { - logrus.Errorf("failed to convert exitcode '%q' to int: %v", v, cerr) - } else { - exitCode = code - } - } - if !waitRemove { - 
stopProcessing = true - } else { - // If we are talking to an older daemon, `AutoRemove` is not supported. - // We need to fall back to the old behavior, which is client-side removal - if versions.LessThan(dockerCli.Client().ClientVersion(), "1.25") { - go func() { - removeErr = dockerCli.Client().ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{RemoveVolumes: true}) - if removeErr != nil { - logrus.Errorf("error removing container: %v", removeErr) - cancel() // cancel the event Q - } - }() - } - } - case "detach": - exitCode = 0 - stopProcessing = true - case "destroy": - stopProcessing = true - } - return stopProcessing - } - - go func() { - defer func() { - statusChan <- exitCode // must always send an exit code or the caller will block - cancel() - }() - - for { - select { - case <-eventCtx.Done(): - if removeErr != nil { - return - } - case evt := <-eventq: - if eventProcessor(evt) { - return - } - case err := <-errq: - logrus.Errorf("error getting events from daemon: %v", err) - return - } - } - }() - - return statusChan -} - -// getExitCode performs an inspect on the container. It returns -// the running state and the exit code. -func getExitCode(ctx context.Context, dockerCli *command.DockerCli, containerID string) (bool, int, error) { - c, err := dockerCli.Client().ContainerInspect(ctx, containerID) - if err != nil { - // If we can't connect, then the daemon probably died. 
- if !clientapi.IsErrConnectionFailed(err) { - return false, -1, err - } - return false, -1, nil - } - return c.State.Running, c.State.ExitCode, nil -} - -func parallelOperation(ctx context.Context, containers []string, op func(ctx context.Context, container string) error) chan error { - if len(containers) == 0 { - return nil - } - const defaultParallel int = 50 - sem := make(chan struct{}, defaultParallel) - errChan := make(chan error) - - // make sure result is printed in correct order - output := map[string]chan error{} - for _, c := range containers { - output[c] = make(chan error, 1) - } - go func() { - for _, c := range containers { - err := <-output[c] - errChan <- err - } - }() - - go func() { - for _, c := range containers { - sem <- struct{}{} // Wait for active queue sem to drain. - go func(container string) { - output[container] <- op(ctx, container) - <-sem - }(c) - } - }() - return errChan -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/container/wait.go b/fn/vendor/github.com/docker/docker/cli/command/container/wait.go deleted file mode 100644 index f978207b9..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/container/wait.go +++ /dev/null @@ -1,50 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type waitOptions struct { - containers []string -} - -// NewWaitCommand creates a new cobra.Command for `docker wait` -func NewWaitCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts waitOptions - - cmd := &cobra.Command{ - Use: "wait CONTAINER [CONTAINER...]", - Short: "Block until one or more containers stop, then print their exit codes", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runWait(dockerCli, &opts) - }, - } - return cmd -} - -func runWait(dockerCli 
*command.DockerCli, opts *waitOptions) error { - ctx := context.Background() - - var errs []string - for _, container := range opts.containers { - status, err := dockerCli.Client().ContainerWait(ctx, container) - if err != nil { - errs = append(errs, err.Error()) - continue - } - fmt.Fprintf(dockerCli.Out(), "%d\n", status) - } - if len(errs) > 0 { - return errors.New(strings.Join(errs, "\n")) - } - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/events_utils.go b/fn/vendor/github.com/docker/docker/cli/command/events_utils.go deleted file mode 100644 index e710c9757..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/events_utils.go +++ /dev/null @@ -1,49 +0,0 @@ -package command - -import ( - "sync" - - "github.com/Sirupsen/logrus" - eventtypes "github.com/docker/docker/api/types/events" -) - -type eventProcessor func(eventtypes.Message, error) error - -// EventHandler is abstract interface for user to customize -// own handle functions of each type of events -type EventHandler interface { - Handle(action string, h func(eventtypes.Message)) - Watch(c <-chan eventtypes.Message) -} - -// InitEventHandler initializes and returns an EventHandler -func InitEventHandler() EventHandler { - return &eventHandler{handlers: make(map[string]func(eventtypes.Message))} -} - -type eventHandler struct { - handlers map[string]func(eventtypes.Message) - mu sync.Mutex -} - -func (w *eventHandler) Handle(action string, h func(eventtypes.Message)) { - w.mu.Lock() - w.handlers[action] = h - w.mu.Unlock() -} - -// Watch ranges over the passed in event chan and processes the events based on the -// handlers created for a given action. -// To stop watching, close the event chan. 
-func (w *eventHandler) Watch(c <-chan eventtypes.Message) { - for e := range c { - w.mu.Lock() - h, exists := w.handlers[e.Action] - w.mu.Unlock() - if !exists { - continue - } - logrus.Debugf("event handler: received event: %v", e) - go h(e) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/checkpoint.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/checkpoint.go deleted file mode 100644 index 041fcafb7..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/checkpoint.go +++ /dev/null @@ -1,52 +0,0 @@ -package formatter - -import "github.com/docker/docker/api/types" - -const ( - defaultCheckpointFormat = "table {{.Name}}" - - checkpointNameHeader = "CHECKPOINT NAME" -) - -// NewCheckpointFormat returns a format for use with a checkpoint Context -func NewCheckpointFormat(source string) Format { - switch source { - case TableFormatKey: - return defaultCheckpointFormat - } - return Format(source) -} - -// CheckpointWrite writes formatted checkpoints using the Context -func CheckpointWrite(ctx Context, checkpoints []types.Checkpoint) error { - render := func(format func(subContext subContext) error) error { - for _, checkpoint := range checkpoints { - if err := format(&checkpointContext{c: checkpoint}); err != nil { - return err - } - } - return nil - } - return ctx.Write(newCheckpointContext(), render) -} - -type checkpointContext struct { - HeaderContext - c types.Checkpoint -} - -func newCheckpointContext() *checkpointContext { - cpCtx := checkpointContext{} - cpCtx.header = volumeHeaderContext{ - "Name": checkpointNameHeader, - } - return &cpCtx -} - -func (c *checkpointContext) MarshalJSON() ([]byte, error) { - return marshalJSON(c) -} - -func (c *checkpointContext) Name() string { - return c.c.Name -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/checkpoint_test.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/checkpoint_test.go deleted file mode 100644 index 
e88c4d013..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/checkpoint_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package formatter - -import ( - "bytes" - "testing" - - "github.com/docker/docker/api/types" - "github.com/stretchr/testify/assert" -) - -func TestCheckpointContextFormatWrite(t *testing.T) { - cases := []struct { - context Context - expected string - }{ - { - Context{Format: NewCheckpointFormat(defaultCheckpointFormat)}, - `CHECKPOINT NAME -checkpoint-1 -checkpoint-2 -checkpoint-3 -`, - }, - { - Context{Format: NewCheckpointFormat("{{.Name}}")}, - `checkpoint-1 -checkpoint-2 -checkpoint-3 -`, - }, - { - Context{Format: NewCheckpointFormat("{{.Name}}:")}, - `checkpoint-1: -checkpoint-2: -checkpoint-3: -`, - }, - } - - checkpoints := []types.Checkpoint{ - {"checkpoint-1"}, - {"checkpoint-2"}, - {"checkpoint-3"}, - } - for _, testcase := range cases { - out := bytes.NewBufferString("") - testcase.context.Output = out - err := CheckpointWrite(testcase.context, checkpoints) - if err != nil { - assert.Error(t, err, testcase.expected) - } else { - assert.Equal(t, out.String(), testcase.expected) - } - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/container.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/container.go deleted file mode 100644 index 9b5c24636..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/container.go +++ /dev/null @@ -1,259 +0,0 @@ -package formatter - -import ( - "fmt" - "strconv" - "strings" - "time" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api" - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/stringutils" - units "github.com/docker/go-units" -) - -const ( - defaultContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.RunningFor}}\t{{.Status}}\t{{.Ports}}\t{{.Names}}" - - containerIDHeader = "CONTAINER ID" - namesHeader = "NAMES" - 
commandHeader = "COMMAND" - runningForHeader = "CREATED" - statusHeader = "STATUS" - portsHeader = "PORTS" - mountsHeader = "MOUNTS" - localVolumes = "LOCAL VOLUMES" - networksHeader = "NETWORKS" -) - -// NewContainerFormat returns a Format for rendering using a Context -func NewContainerFormat(source string, quiet bool, size bool) Format { - switch source { - case TableFormatKey: - if quiet { - return defaultQuietFormat - } - format := defaultContainerTableFormat - if size { - format += `\t{{.Size}}` - } - return Format(format) - case RawFormatKey: - if quiet { - return `container_id: {{.ID}}` - } - format := `container_id: {{.ID}} -image: {{.Image}} -command: {{.Command}} -created_at: {{.CreatedAt}} -status: {{- pad .Status 1 0}} -names: {{.Names}} -labels: {{- pad .Labels 1 0}} -ports: {{- pad .Ports 1 0}} -` - if size { - format += `size: {{.Size}}\n` - } - return Format(format) - } - return Format(source) -} - -// ContainerWrite renders the context for a list of containers -func ContainerWrite(ctx Context, containers []types.Container) error { - render := func(format func(subContext subContext) error) error { - for _, container := range containers { - err := format(&containerContext{trunc: ctx.Trunc, c: container}) - if err != nil { - return err - } - } - return nil - } - return ctx.Write(newContainerContext(), render) -} - -type containerHeaderContext map[string]string - -func (c containerHeaderContext) Label(name string) string { - n := strings.Split(name, ".") - r := strings.NewReplacer("-", " ", "_", " ") - h := r.Replace(n[len(n)-1]) - - return h -} - -type containerContext struct { - HeaderContext - trunc bool - c types.Container -} - -func newContainerContext() *containerContext { - containerCtx := containerContext{} - containerCtx.header = containerHeaderContext{ - "ID": containerIDHeader, - "Names": namesHeader, - "Image": imageHeader, - "Command": commandHeader, - "CreatedAt": createdAtHeader, - "RunningFor": runningForHeader, - "Ports": portsHeader, 
- "Status": statusHeader, - "Size": sizeHeader, - "Labels": labelsHeader, - "Mounts": mountsHeader, - "LocalVolumes": localVolumes, - "Networks": networksHeader, - } - return &containerCtx -} - -func (c *containerContext) MarshalJSON() ([]byte, error) { - return marshalJSON(c) -} - -func (c *containerContext) ID() string { - if c.trunc { - return stringid.TruncateID(c.c.ID) - } - return c.c.ID -} - -func (c *containerContext) Names() string { - names := stripNamePrefix(c.c.Names) - if c.trunc { - for _, name := range names { - if len(strings.Split(name, "/")) == 1 { - names = []string{name} - break - } - } - } - return strings.Join(names, ",") -} - -func (c *containerContext) Image() string { - if c.c.Image == "" { - return "" - } - if c.trunc { - if trunc := stringid.TruncateID(c.c.ImageID); trunc == stringid.TruncateID(c.c.Image) { - return trunc - } - // truncate digest if no-trunc option was not selected - ref, err := reference.ParseNormalizedNamed(c.c.Image) - if err == nil { - if nt, ok := ref.(reference.NamedTagged); ok { - // case for when a tag is provided - if namedTagged, err := reference.WithTag(reference.TrimNamed(nt), nt.Tag()); err == nil { - return reference.FamiliarString(namedTagged) - } - } else { - // case for when a tag is not provided - named := reference.TrimNamed(ref) - return reference.FamiliarString(named) - } - } - } - - return c.c.Image -} - -func (c *containerContext) Command() string { - command := c.c.Command - if c.trunc { - command = stringutils.Ellipsis(command, 20) - } - return strconv.Quote(command) -} - -func (c *containerContext) CreatedAt() string { - return time.Unix(int64(c.c.Created), 0).String() -} - -func (c *containerContext) RunningFor() string { - createdAt := time.Unix(int64(c.c.Created), 0) - return units.HumanDuration(time.Now().UTC().Sub(createdAt)) + " ago" -} - -func (c *containerContext) Ports() string { - return api.DisplayablePorts(c.c.Ports) -} - -func (c *containerContext) Status() string { - return 
c.c.Status -} - -func (c *containerContext) Size() string { - srw := units.HumanSizeWithPrecision(float64(c.c.SizeRw), 3) - sv := units.HumanSizeWithPrecision(float64(c.c.SizeRootFs), 3) - - sf := srw - if c.c.SizeRootFs > 0 { - sf = fmt.Sprintf("%s (virtual %s)", srw, sv) - } - return sf -} - -func (c *containerContext) Labels() string { - if c.c.Labels == nil { - return "" - } - - var joinLabels []string - for k, v := range c.c.Labels { - joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) - } - return strings.Join(joinLabels, ",") -} - -func (c *containerContext) Label(name string) string { - if c.c.Labels == nil { - return "" - } - return c.c.Labels[name] -} - -func (c *containerContext) Mounts() string { - var name string - var mounts []string - for _, m := range c.c.Mounts { - if m.Name == "" { - name = m.Source - } else { - name = m.Name - } - if c.trunc { - name = stringutils.Ellipsis(name, 15) - } - mounts = append(mounts, name) - } - return strings.Join(mounts, ",") -} - -func (c *containerContext) LocalVolumes() string { - count := 0 - for _, m := range c.c.Mounts { - if m.Driver == "local" { - count++ - } - } - - return fmt.Sprintf("%d", count) -} - -func (c *containerContext) Networks() string { - if c.c.NetworkSettings == nil { - return "" - } - - networks := []string{} - for k := range c.c.NetworkSettings.Networks { - networks = append(networks, k) - } - - return strings.Join(networks, ",") -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/container_test.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/container_test.go deleted file mode 100644 index a5615d176..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/container_test.go +++ /dev/null @@ -1,385 +0,0 @@ -package formatter - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" - "testing" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/stringid" - 
"github.com/docker/docker/pkg/testutil/assert" -) - -func TestContainerPsContext(t *testing.T) { - containerID := stringid.GenerateRandomID() - unix := time.Now().Add(-65 * time.Second).Unix() - - var ctx containerContext - cases := []struct { - container types.Container - trunc bool - expValue string - call func() string - }{ - {types.Container{ID: containerID}, true, stringid.TruncateID(containerID), ctx.ID}, - {types.Container{ID: containerID}, false, containerID, ctx.ID}, - {types.Container{Names: []string{"/foobar_baz"}}, true, "foobar_baz", ctx.Names}, - {types.Container{Image: "ubuntu"}, true, "ubuntu", ctx.Image}, - {types.Container{Image: "verylongimagename"}, true, "verylongimagename", ctx.Image}, - {types.Container{Image: "verylongimagename"}, false, "verylongimagename", ctx.Image}, - {types.Container{ - Image: "a5a665ff33eced1e0803148700880edab4", - ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5", - }, - true, - "a5a665ff33ec", - ctx.Image, - }, - {types.Container{ - Image: "a5a665ff33eced1e0803148700880edab4", - ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5", - }, - false, - "a5a665ff33eced1e0803148700880edab4", - ctx.Image, - }, - {types.Container{Image: ""}, true, "", ctx.Image}, - {types.Container{Command: "sh -c 'ls -la'"}, true, `"sh -c 'ls -la'"`, ctx.Command}, - {types.Container{Created: unix}, true, time.Unix(unix, 0).String(), ctx.CreatedAt}, - {types.Container{Ports: []types.Port{{PrivatePort: 8080, PublicPort: 8080, Type: "tcp"}}}, true, "8080/tcp", ctx.Ports}, - {types.Container{Status: "RUNNING"}, true, "RUNNING", ctx.Status}, - {types.Container{SizeRw: 10}, true, "10B", ctx.Size}, - {types.Container{SizeRw: 10, SizeRootFs: 20}, true, "10B (virtual 20B)", ctx.Size}, - {types.Container{}, true, "", ctx.Labels}, - {types.Container{Labels: map[string]string{"cpu": "6", "storage": "ssd"}}, true, "cpu=6,storage=ssd", ctx.Labels}, - {types.Container{Created: unix}, true, "About a minute 
ago", ctx.RunningFor}, - {types.Container{ - Mounts: []types.MountPoint{ - { - Name: "this-is-a-long-volume-name-and-will-be-truncated-if-trunc-is-set", - Driver: "local", - Source: "/a/path", - }, - }, - }, true, "this-is-a-lo...", ctx.Mounts}, - {types.Container{ - Mounts: []types.MountPoint{ - { - Driver: "local", - Source: "/a/path", - }, - }, - }, false, "/a/path", ctx.Mounts}, - {types.Container{ - Mounts: []types.MountPoint{ - { - Name: "733908409c91817de8e92b0096373245f329f19a88e2c849f02460e9b3d1c203", - Driver: "local", - Source: "/a/path", - }, - }, - }, false, "733908409c91817de8e92b0096373245f329f19a88e2c849f02460e9b3d1c203", ctx.Mounts}, - } - - for _, c := range cases { - ctx = containerContext{c: c.container, trunc: c.trunc} - v := c.call() - if strings.Contains(v, ",") { - compareMultipleValues(t, v, c.expValue) - } else if v != c.expValue { - t.Fatalf("Expected %s, was %s\n", c.expValue, v) - } - } - - c1 := types.Container{Labels: map[string]string{"com.docker.swarm.swarm-id": "33", "com.docker.swarm.node_name": "ubuntu"}} - ctx = containerContext{c: c1, trunc: true} - - sid := ctx.Label("com.docker.swarm.swarm-id") - node := ctx.Label("com.docker.swarm.node_name") - if sid != "33" { - t.Fatalf("Expected 33, was %s\n", sid) - } - - if node != "ubuntu" { - t.Fatalf("Expected ubuntu, was %s\n", node) - } - - c2 := types.Container{} - ctx = containerContext{c: c2, trunc: true} - - label := ctx.Label("anything.really") - if label != "" { - t.Fatalf("Expected an empty string, was %s", label) - } -} - -func TestContainerContextWrite(t *testing.T) { - unixTime := time.Now().AddDate(0, 0, -1).Unix() - expectedTime := time.Unix(unixTime, 0).String() - - cases := []struct { - context Context - expected string - }{ - // Errors - { - Context{Format: "{{InvalidFunction}}"}, - `Template parsing error: template: :1: function "InvalidFunction" not defined -`, - }, - { - Context{Format: "{{nil}}"}, - `Template parsing error: template: :1:2: executing "" at : nil 
is not a command -`, - }, - // Table Format - { - Context{Format: NewContainerFormat("table", false, true)}, - `CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES SIZE -containerID1 ubuntu "" 24 hours ago foobar_baz 0B -containerID2 ubuntu "" 24 hours ago foobar_bar 0B -`, - }, - { - Context{Format: NewContainerFormat("table", false, false)}, - `CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -containerID1 ubuntu "" 24 hours ago foobar_baz -containerID2 ubuntu "" 24 hours ago foobar_bar -`, - }, - { - Context{Format: NewContainerFormat("table {{.Image}}", false, false)}, - "IMAGE\nubuntu\nubuntu\n", - }, - { - Context{Format: NewContainerFormat("table {{.Image}}", false, true)}, - "IMAGE\nubuntu\nubuntu\n", - }, - { - Context{Format: NewContainerFormat("table {{.Image}}", true, false)}, - "IMAGE\nubuntu\nubuntu\n", - }, - { - Context{Format: NewContainerFormat("table", true, false)}, - "containerID1\ncontainerID2\n", - }, - // Raw Format - { - Context{Format: NewContainerFormat("raw", false, false)}, - fmt.Sprintf(`container_id: containerID1 -image: ubuntu -command: "" -created_at: %s -status: -names: foobar_baz -labels: -ports: - -container_id: containerID2 -image: ubuntu -command: "" -created_at: %s -status: -names: foobar_bar -labels: -ports: - -`, expectedTime, expectedTime), - }, - { - Context{Format: NewContainerFormat("raw", false, true)}, - fmt.Sprintf(`container_id: containerID1 -image: ubuntu -command: "" -created_at: %s -status: -names: foobar_baz -labels: -ports: -size: 0B - -container_id: containerID2 -image: ubuntu -command: "" -created_at: %s -status: -names: foobar_bar -labels: -ports: -size: 0B - -`, expectedTime, expectedTime), - }, - { - Context{Format: NewContainerFormat("raw", true, false)}, - "container_id: containerID1\ncontainer_id: containerID2\n", - }, - // Custom Format - { - Context{Format: "{{.Image}}"}, - "ubuntu\nubuntu\n", - }, - { - Context{Format: NewContainerFormat("{{.Image}}", false, true)}, - "ubuntu\nubuntu\n", - }, - 
// Special headers for customerized table format - { - Context{Format: NewContainerFormat(`table {{truncate .ID 5}}\t{{json .Image}} {{.RunningFor}}/{{title .Status}}/{{pad .Ports 2 2}}.{{upper .Names}} {{lower .Status}}`, false, true)}, - `CONTAINER ID IMAGE CREATED/STATUS/ PORTS .NAMES STATUS -conta "ubuntu" 24 hours ago//.FOOBAR_BAZ -conta "ubuntu" 24 hours ago//.FOOBAR_BAR -`, - }, - } - - for _, testcase := range cases { - containers := []types.Container{ - {ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu", Created: unixTime}, - {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu", Created: unixTime}, - } - out := bytes.NewBufferString("") - testcase.context.Output = out - err := ContainerWrite(testcase.context, containers) - if err != nil { - assert.Error(t, err, testcase.expected) - } else { - assert.Equal(t, out.String(), testcase.expected) - } - } -} - -func TestContainerContextWriteWithNoContainers(t *testing.T) { - out := bytes.NewBufferString("") - containers := []types.Container{} - - contexts := []struct { - context Context - expected string - }{ - { - Context{ - Format: "{{.Image}}", - Output: out, - }, - "", - }, - { - Context{ - Format: "table {{.Image}}", - Output: out, - }, - "IMAGE\n", - }, - { - Context{ - Format: NewContainerFormat("{{.Image}}", false, true), - Output: out, - }, - "", - }, - { - Context{ - Format: NewContainerFormat("table {{.Image}}", false, true), - Output: out, - }, - "IMAGE\n", - }, - { - Context{ - Format: "table {{.Image}}\t{{.Size}}", - Output: out, - }, - "IMAGE SIZE\n", - }, - { - Context{ - Format: NewContainerFormat("table {{.Image}}\t{{.Size}}", false, true), - Output: out, - }, - "IMAGE SIZE\n", - }, - } - - for _, context := range contexts { - ContainerWrite(context.context, containers) - assert.Equal(t, context.expected, out.String()) - // Clean buffer - out.Reset() - } -} - -func TestContainerContextWriteJSON(t *testing.T) { - unix := time.Now().Add(-65 * time.Second).Unix() - 
containers := []types.Container{ - {ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu", Created: unix}, - {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu", Created: unix}, - } - expectedCreated := time.Unix(unix, 0).String() - expectedJSONs := []map[string]interface{}{ - {"Command": "\"\"", "CreatedAt": expectedCreated, "ID": "containerID1", "Image": "ubuntu", "Labels": "", "LocalVolumes": "0", "Mounts": "", "Names": "foobar_baz", "Networks": "", "Ports": "", "RunningFor": "About a minute ago", "Size": "0B", "Status": ""}, - {"Command": "\"\"", "CreatedAt": expectedCreated, "ID": "containerID2", "Image": "ubuntu", "Labels": "", "LocalVolumes": "0", "Mounts": "", "Names": "foobar_bar", "Networks": "", "Ports": "", "RunningFor": "About a minute ago", "Size": "0B", "Status": ""}, - } - out := bytes.NewBufferString("") - err := ContainerWrite(Context{Format: "{{json .}}", Output: out}, containers) - if err != nil { - t.Fatal(err) - } - for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { - t.Logf("Output: line %d: %s", i, line) - var m map[string]interface{} - if err := json.Unmarshal([]byte(line), &m); err != nil { - t.Fatal(err) - } - assert.DeepEqual(t, m, expectedJSONs[i]) - } -} - -func TestContainerContextWriteJSONField(t *testing.T) { - containers := []types.Container{ - {ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu"}, - {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu"}, - } - out := bytes.NewBufferString("") - err := ContainerWrite(Context{Format: "{{json .ID}}", Output: out}, containers) - if err != nil { - t.Fatal(err) - } - for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { - t.Logf("Output: line %d: %s", i, line) - var s string - if err := json.Unmarshal([]byte(line), &s); err != nil { - t.Fatal(err) - } - assert.Equal(t, s, containers[i].ID) - } -} - -func TestContainerBackCompat(t *testing.T) { - containers := 
[]types.Container{{ID: "brewhaha"}} - cases := []string{ - "ID", - "Names", - "Image", - "Command", - "CreatedAt", - "RunningFor", - "Ports", - "Status", - "Size", - "Labels", - "Mounts", - } - buf := bytes.NewBuffer(nil) - for _, c := range cases { - ctx := Context{Format: Format(fmt.Sprintf("{{ .%s }}", c)), Output: buf} - if err := ContainerWrite(ctx, containers); err != nil { - t.Logf("could not render template for field '%s': %v", c, err) - t.Fail() - } - buf.Reset() - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/custom.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/custom.go deleted file mode 100644 index 73487f63e..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/custom.go +++ /dev/null @@ -1,35 +0,0 @@ -package formatter - -const ( - imageHeader = "IMAGE" - createdSinceHeader = "CREATED" - createdAtHeader = "CREATED AT" - sizeHeader = "SIZE" - labelsHeader = "LABELS" - nameHeader = "NAME" - driverHeader = "DRIVER" - scopeHeader = "SCOPE" -) - -type subContext interface { - FullHeader() interface{} -} - -// HeaderContext provides the subContext interface for managing headers -type HeaderContext struct { - header interface{} -} - -// FullHeader returns the header as an interface -func (c *HeaderContext) FullHeader() interface{} { - return c.header -} - -func stripNamePrefix(ss []string) []string { - sss := make([]string, len(ss)) - for i, s := range ss { - sss[i] = s[1:] - } - - return sss -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/custom_test.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/custom_test.go deleted file mode 100644 index da42039dc..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/custom_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package formatter - -import ( - "reflect" - "strings" - "testing" -) - -func compareMultipleValues(t *testing.T, value, expected string) { - // comma-separated values means probably a map 
input, which won't - // be guaranteed to have the same order as our expected value - // We'll create maps and use reflect.DeepEquals to check instead: - entriesMap := make(map[string]string) - expMap := make(map[string]string) - entries := strings.Split(value, ",") - expectedEntries := strings.Split(expected, ",") - for _, entry := range entries { - keyval := strings.Split(entry, "=") - entriesMap[keyval[0]] = keyval[1] - } - for _, expected := range expectedEntries { - keyval := strings.Split(expected, "=") - expMap[keyval[0]] = keyval[1] - } - if !reflect.DeepEqual(expMap, entriesMap) { - t.Fatalf("Expected entries: %v, got: %v", expected, value) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/diff.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/diff.go deleted file mode 100644 index 9b4681934..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/diff.go +++ /dev/null @@ -1,72 +0,0 @@ -package formatter - -import ( - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/pkg/archive" -) - -const ( - defaultDiffTableFormat = "table {{.Type}}\t{{.Path}}" - - changeTypeHeader = "CHANGE TYPE" - pathHeader = "PATH" -) - -// NewDiffFormat returns a format for use with a diff Context -func NewDiffFormat(source string) Format { - switch source { - case TableFormatKey: - return defaultDiffTableFormat - } - return Format(source) -} - -// DiffWrite writes formatted diff using the Context -func DiffWrite(ctx Context, changes []container.ContainerChangeResponseItem) error { - - render := func(format func(subContext subContext) error) error { - for _, change := range changes { - if err := format(&diffContext{c: change}); err != nil { - return err - } - } - return nil - } - return ctx.Write(newDiffContext(), render) -} - -type diffContext struct { - HeaderContext - c container.ContainerChangeResponseItem -} - -func newDiffContext() *diffContext { - diffCtx := diffContext{} - diffCtx.header = 
map[string]string{ - "Type": changeTypeHeader, - "Path": pathHeader, - } - return &diffCtx -} - -func (d *diffContext) MarshalJSON() ([]byte, error) { - return marshalJSON(d) -} - -func (d *diffContext) Type() string { - var kind string - switch d.c.Kind { - case archive.ChangeModify: - kind = "C" - case archive.ChangeAdd: - kind = "A" - case archive.ChangeDelete: - kind = "D" - } - return kind - -} - -func (d *diffContext) Path() string { - return d.c.Path -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/diff_test.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/diff_test.go deleted file mode 100644 index 52080354f..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/diff_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package formatter - -import ( - "bytes" - "testing" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestDiffContextFormatWrite(t *testing.T) { - // Check default output format (verbose and non-verbose mode) for table headers - cases := []struct { - context Context - expected string - }{ - { - Context{Format: NewDiffFormat("table")}, - `CHANGE TYPE PATH -C /var/log/app.log -A /usr/app/app.js -D /usr/app/old_app.js -`, - }, - { - Context{Format: NewDiffFormat("table {{.Path}}")}, - `PATH -/var/log/app.log -/usr/app/app.js -/usr/app/old_app.js -`, - }, - { - Context{Format: NewDiffFormat("{{.Type}}: {{.Path}}")}, - `C: /var/log/app.log -A: /usr/app/app.js -D: /usr/app/old_app.js -`, - }, - } - - diffs := []container.ContainerChangeResponseItem{ - {archive.ChangeModify, "/var/log/app.log"}, - {archive.ChangeAdd, "/usr/app/app.js"}, - {archive.ChangeDelete, "/usr/app/old_app.js"}, - } - - for _, testcase := range cases { - out := bytes.NewBufferString("") - testcase.context.Output = out - err := DiffWrite(testcase.context, diffs) - if err != nil { - assert.Error(t, err, testcase.expected) - } else { - 
assert.Equal(t, out.String(), testcase.expected) - } - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/disk_usage.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/disk_usage.go deleted file mode 100644 index 7170411e1..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/disk_usage.go +++ /dev/null @@ -1,339 +0,0 @@ -package formatter - -import ( - "bytes" - "fmt" - "strings" - "text/template" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - units "github.com/docker/go-units" -) - -const ( - defaultDiskUsageImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.VirtualSize}}\t{{.SharedSize}}\t{{.UniqueSize}}\t{{.Containers}}" - defaultDiskUsageContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Names}}" - defaultDiskUsageVolumeTableFormat = "table {{.Name}}\t{{.Links}}\t{{.Size}}" - defaultDiskUsageTableFormat = "table {{.Type}}\t{{.TotalCount}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}" - - typeHeader = "TYPE" - totalHeader = "TOTAL" - activeHeader = "ACTIVE" - reclaimableHeader = "RECLAIMABLE" - containersHeader = "CONTAINERS" - sharedSizeHeader = "SHARED SIZE" - uniqueSizeHeader = "UNIQUE SiZE" -) - -// DiskUsageContext contains disk usage specific information required by the formatter, encapsulate a Context struct. 
-type DiskUsageContext struct { - Context - Verbose bool - LayersSize int64 - Images []*types.ImageSummary - Containers []*types.Container - Volumes []*types.Volume -} - -func (ctx *DiskUsageContext) startSubsection(format string) (*template.Template, error) { - ctx.buffer = bytes.NewBufferString("") - ctx.header = "" - ctx.Format = Format(format) - ctx.preFormat() - - return ctx.parseFormat() -} - -func (ctx *DiskUsageContext) Write() { - if ctx.Verbose == false { - ctx.buffer = bytes.NewBufferString("") - ctx.Format = defaultDiskUsageTableFormat - ctx.preFormat() - - tmpl, err := ctx.parseFormat() - if err != nil { - return - } - - err = ctx.contextFormat(tmpl, &diskUsageImagesContext{ - totalSize: ctx.LayersSize, - images: ctx.Images, - }) - if err != nil { - return - } - err = ctx.contextFormat(tmpl, &diskUsageContainersContext{ - containers: ctx.Containers, - }) - if err != nil { - return - } - - err = ctx.contextFormat(tmpl, &diskUsageVolumesContext{ - volumes: ctx.Volumes, - }) - if err != nil { - return - } - - diskUsageContainersCtx := diskUsageContainersContext{containers: []*types.Container{}} - diskUsageContainersCtx.header = map[string]string{ - "Type": typeHeader, - "TotalCount": totalHeader, - "Active": activeHeader, - "Size": sizeHeader, - "Reclaimable": reclaimableHeader, - } - ctx.postFormat(tmpl, &diskUsageContainersCtx) - - return - } - - // First images - tmpl, err := ctx.startSubsection(defaultDiskUsageImageTableFormat) - if err != nil { - return - } - - ctx.Output.Write([]byte("Images space usage:\n\n")) - for _, i := range ctx.Images { - repo := "" - tag := "" - if len(i.RepoTags) > 0 && !isDangling(*i) { - // Only show the first tag - ref, err := reference.ParseNormalizedNamed(i.RepoTags[0]) - if err != nil { - continue - } - if nt, ok := ref.(reference.NamedTagged); ok { - repo = reference.FamiliarName(ref) - tag = nt.Tag() - } - } - - err = ctx.contextFormat(tmpl, &imageContext{ - repo: repo, - tag: tag, - trunc: true, - i: *i, - }) - if 
err != nil { - return - } - } - ctx.postFormat(tmpl, newImageContext()) - - // Now containers - ctx.Output.Write([]byte("\nContainers space usage:\n\n")) - tmpl, err = ctx.startSubsection(defaultDiskUsageContainerTableFormat) - if err != nil { - return - } - for _, c := range ctx.Containers { - // Don't display the virtual size - c.SizeRootFs = 0 - err = ctx.contextFormat(tmpl, &containerContext{ - trunc: true, - c: *c, - }) - if err != nil { - return - } - } - ctx.postFormat(tmpl, newContainerContext()) - - // And volumes - ctx.Output.Write([]byte("\nLocal Volumes space usage:\n\n")) - tmpl, err = ctx.startSubsection(defaultDiskUsageVolumeTableFormat) - if err != nil { - return - } - for _, v := range ctx.Volumes { - err = ctx.contextFormat(tmpl, &volumeContext{ - v: *v, - }) - if err != nil { - return - } - } - ctx.postFormat(tmpl, newVolumeContext()) -} - -type diskUsageImagesContext struct { - HeaderContext - totalSize int64 - images []*types.ImageSummary -} - -func (c *diskUsageImagesContext) MarshalJSON() ([]byte, error) { - return marshalJSON(c) -} - -func (c *diskUsageImagesContext) Type() string { - return "Images" -} - -func (c *diskUsageImagesContext) TotalCount() string { - return fmt.Sprintf("%d", len(c.images)) -} - -func (c *diskUsageImagesContext) Active() string { - used := 0 - for _, i := range c.images { - if i.Containers > 0 { - used++ - } - } - - return fmt.Sprintf("%d", used) -} - -func (c *diskUsageImagesContext) Size() string { - return units.HumanSize(float64(c.totalSize)) - -} - -func (c *diskUsageImagesContext) Reclaimable() string { - var used int64 - - for _, i := range c.images { - if i.Containers != 0 { - if i.VirtualSize == -1 || i.SharedSize == -1 { - continue - } - used += i.VirtualSize - i.SharedSize - } - } - - reclaimable := c.totalSize - used - if c.totalSize > 0 { - return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/c.totalSize) - } - return fmt.Sprintf("%s", 
units.HumanSize(float64(reclaimable))) -} - -type diskUsageContainersContext struct { - HeaderContext - verbose bool - containers []*types.Container -} - -func (c *diskUsageContainersContext) MarshalJSON() ([]byte, error) { - return marshalJSON(c) -} - -func (c *diskUsageContainersContext) Type() string { - return "Containers" -} - -func (c *diskUsageContainersContext) TotalCount() string { - return fmt.Sprintf("%d", len(c.containers)) -} - -func (c *diskUsageContainersContext) isActive(container types.Container) bool { - return strings.Contains(container.State, "running") || - strings.Contains(container.State, "paused") || - strings.Contains(container.State, "restarting") -} - -func (c *diskUsageContainersContext) Active() string { - used := 0 - for _, container := range c.containers { - if c.isActive(*container) { - used++ - } - } - - return fmt.Sprintf("%d", used) -} - -func (c *diskUsageContainersContext) Size() string { - var size int64 - - for _, container := range c.containers { - size += container.SizeRw - } - - return units.HumanSize(float64(size)) -} - -func (c *diskUsageContainersContext) Reclaimable() string { - var reclaimable int64 - var totalSize int64 - - for _, container := range c.containers { - if !c.isActive(*container) { - reclaimable += container.SizeRw - } - totalSize += container.SizeRw - } - - if totalSize > 0 { - return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize) - } - - return fmt.Sprintf("%s", units.HumanSize(float64(reclaimable))) -} - -type diskUsageVolumesContext struct { - HeaderContext - verbose bool - volumes []*types.Volume -} - -func (c *diskUsageVolumesContext) MarshalJSON() ([]byte, error) { - return marshalJSON(c) -} - -func (c *diskUsageVolumesContext) Type() string { - return "Local Volumes" -} - -func (c *diskUsageVolumesContext) TotalCount() string { - return fmt.Sprintf("%d", len(c.volumes)) -} - -func (c *diskUsageVolumesContext) Active() string { - - used := 0 - for _, v 
:= range c.volumes { - if v.UsageData.RefCount > 0 { - used++ - } - } - - return fmt.Sprintf("%d", used) -} - -func (c *diskUsageVolumesContext) Size() string { - var size int64 - - for _, v := range c.volumes { - if v.UsageData.Size != -1 { - size += v.UsageData.Size - } - } - - return units.HumanSize(float64(size)) -} - -func (c *diskUsageVolumesContext) Reclaimable() string { - var reclaimable int64 - var totalSize int64 - - for _, v := range c.volumes { - if v.UsageData.Size != -1 { - if v.UsageData.RefCount == 0 { - reclaimable += v.UsageData.Size - } - totalSize += v.UsageData.Size - } - } - - if totalSize > 0 { - return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize) - } - - return fmt.Sprintf("%s", units.HumanSize(float64(reclaimable))) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/disk_usage_test.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/disk_usage_test.go deleted file mode 100644 index 318e1692b..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/disk_usage_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package formatter - -import ( - "bytes" - "testing" - - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestDiskUsageContextFormatWrite(t *testing.T) { - // Check default output format (verbose and non-verbose mode) for table headers - cases := []struct { - context DiskUsageContext - expected string - }{ - { - DiskUsageContext{Verbose: false}, - `TYPE TOTAL ACTIVE SIZE RECLAIMABLE -Images 0 0 0B 0B -Containers 0 0 0B 0B -Local Volumes 0 0 0B 0B -`, - }, - { - DiskUsageContext{Verbose: true}, - `Images space usage: - -REPOSITORY TAG IMAGE ID CREATED ago SIZE SHARED SIZE UNIQUE SiZE CONTAINERS - -Containers space usage: - -CONTAINER ID IMAGE COMMAND LOCAL VOLUMES SIZE CREATED ago STATUS NAMES - -Local Volumes space usage: - -VOLUME NAME LINKS SIZE -`, - }, - } - - for _, testcase := range cases { - out := bytes.NewBufferString("") - 
testcase.context.Output = out - testcase.context.Write() - assert.Equal(t, out.String(), testcase.expected) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/formatter.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/formatter.go deleted file mode 100644 index 3f07aee96..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/formatter.go +++ /dev/null @@ -1,119 +0,0 @@ -package formatter - -import ( - "bytes" - "io" - "strings" - "text/tabwriter" - "text/template" - - "github.com/docker/docker/pkg/templates" - "github.com/pkg/errors" -) - -// Format keys used to specify certain kinds of output formats -const ( - TableFormatKey = "table" - RawFormatKey = "raw" - PrettyFormatKey = "pretty" - - defaultQuietFormat = "{{.ID}}" -) - -// Format is the format string rendered using the Context -type Format string - -// IsTable returns true if the format is a table-type format -func (f Format) IsTable() bool { - return strings.HasPrefix(string(f), TableFormatKey) -} - -// Contains returns true if the format contains the substring -func (f Format) Contains(sub string) bool { - return strings.Contains(string(f), sub) -} - -// Context contains information required by the formatter to print the output as desired. -type Context struct { - // Output is the output stream to which the formatted string is written. - Output io.Writer - // Format is used to choose raw, table or custom format for the output. - Format Format - // Trunc when set to true will truncate the output of certain fields such as Container ID. 
- Trunc bool - - // internal element - finalFormat string - header interface{} - buffer *bytes.Buffer -} - -func (c *Context) preFormat() { - c.finalFormat = string(c.Format) - - // TODO: handle this in the Format type - if c.Format.IsTable() { - c.finalFormat = c.finalFormat[len(TableFormatKey):] - } - - c.finalFormat = strings.Trim(c.finalFormat, " ") - r := strings.NewReplacer(`\t`, "\t", `\n`, "\n") - c.finalFormat = r.Replace(c.finalFormat) -} - -func (c *Context) parseFormat() (*template.Template, error) { - tmpl, err := templates.Parse(c.finalFormat) - if err != nil { - return tmpl, errors.Errorf("Template parsing error: %v\n", err) - } - return tmpl, err -} - -func (c *Context) postFormat(tmpl *template.Template, subContext subContext) { - if c.Format.IsTable() { - t := tabwriter.NewWriter(c.Output, 20, 1, 3, ' ', 0) - buffer := bytes.NewBufferString("") - tmpl.Funcs(templates.HeaderFunctions).Execute(buffer, subContext.FullHeader()) - buffer.WriteTo(t) - t.Write([]byte("\n")) - c.buffer.WriteTo(t) - t.Flush() - } else { - c.buffer.WriteTo(c.Output) - } -} - -func (c *Context) contextFormat(tmpl *template.Template, subContext subContext) error { - if err := tmpl.Execute(c.buffer, subContext); err != nil { - return errors.Errorf("Template parsing error: %v\n", err) - } - if c.Format.IsTable() && c.header != nil { - c.header = subContext.FullHeader() - } - c.buffer.WriteString("\n") - return nil -} - -// SubFormat is a function type accepted by Write() -type SubFormat func(func(subContext) error) error - -// Write the template to the buffer using this Context -func (c *Context) Write(sub subContext, f SubFormat) error { - c.buffer = bytes.NewBufferString("") - c.preFormat() - - tmpl, err := c.parseFormat() - if err != nil { - return err - } - - subFormat := func(subContext subContext) error { - return c.contextFormat(tmpl, subContext) - } - if err := f(subFormat); err != nil { - return err - } - - c.postFormat(tmpl, sub) - return nil -} diff --git 
a/fn/vendor/github.com/docker/docker/cli/command/formatter/image.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/image.go deleted file mode 100644 index 3aae34ea1..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/image.go +++ /dev/null @@ -1,272 +0,0 @@ -package formatter - -import ( - "fmt" - "time" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/stringid" - units "github.com/docker/go-units" -) - -const ( - defaultImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}}\t{{.Size}}" - defaultImageTableFormatWithDigest = "table {{.Repository}}\t{{.Tag}}\t{{.Digest}}\t{{.ID}}\t{{.CreatedSince}}\t{{.Size}}" - - imageIDHeader = "IMAGE ID" - repositoryHeader = "REPOSITORY" - tagHeader = "TAG" - digestHeader = "DIGEST" -) - -// ImageContext contains image specific information required by the formatter, encapsulate a Context struct. -type ImageContext struct { - Context - Digest bool -} - -func isDangling(image types.ImageSummary) bool { - return len(image.RepoTags) == 1 && image.RepoTags[0] == ":" && len(image.RepoDigests) == 1 && image.RepoDigests[0] == "@" -} - -// NewImageFormat returns a format for rendering an ImageContext -func NewImageFormat(source string, quiet bool, digest bool) Format { - switch source { - case TableFormatKey: - switch { - case quiet: - return defaultQuietFormat - case digest: - return defaultImageTableFormatWithDigest - default: - return defaultImageTableFormat - } - case RawFormatKey: - switch { - case quiet: - return `image_id: {{.ID}}` - case digest: - return `repository: {{ .Repository }} -tag: {{.Tag}} -digest: {{.Digest}} -image_id: {{.ID}} -created_at: {{.CreatedAt}} -virtual_size: {{.Size}} -` - default: - return `repository: {{ .Repository }} -tag: {{.Tag}} -image_id: {{.ID}} -created_at: {{.CreatedAt}} -virtual_size: {{.Size}} -` - } - } - - format := Format(source) - if format.IsTable() && digest && 
!format.Contains("{{.Digest}}") { - format += "\t{{.Digest}}" - } - return format -} - -// ImageWrite writes the formatter images using the ImageContext -func ImageWrite(ctx ImageContext, images []types.ImageSummary) error { - render := func(format func(subContext subContext) error) error { - return imageFormat(ctx, images, format) - } - return ctx.Write(newImageContext(), render) -} - -func imageFormat(ctx ImageContext, images []types.ImageSummary, format func(subContext subContext) error) error { - for _, image := range images { - images := []*imageContext{} - if isDangling(image) { - images = append(images, &imageContext{ - trunc: ctx.Trunc, - i: image, - repo: "", - tag: "", - digest: "", - }) - } else { - repoTags := map[string][]string{} - repoDigests := map[string][]string{} - - for _, refString := range image.RepoTags { - ref, err := reference.ParseNormalizedNamed(refString) - if err != nil { - continue - } - if nt, ok := ref.(reference.NamedTagged); ok { - familiarRef := reference.FamiliarName(ref) - repoTags[familiarRef] = append(repoTags[familiarRef], nt.Tag()) - } - } - for _, refString := range image.RepoDigests { - ref, err := reference.ParseNormalizedNamed(refString) - if err != nil { - continue - } - if c, ok := ref.(reference.Canonical); ok { - familiarRef := reference.FamiliarName(ref) - repoDigests[familiarRef] = append(repoDigests[familiarRef], c.Digest().String()) - } - } - - for repo, tags := range repoTags { - digests := repoDigests[repo] - - // Do not display digests as their own row - delete(repoDigests, repo) - - if !ctx.Digest { - // Ignore digest references, just show tag once - digests = nil - } - - for _, tag := range tags { - if len(digests) == 0 { - images = append(images, &imageContext{ - trunc: ctx.Trunc, - i: image, - repo: repo, - tag: tag, - digest: "", - }) - continue - } - // Display the digests for each tag - for _, dgst := range digests { - images = append(images, &imageContext{ - trunc: ctx.Trunc, - i: image, - repo: repo, 
- tag: tag, - digest: dgst, - }) - } - - } - } - - // Show rows for remaining digest only references - for repo, digests := range repoDigests { - // If digests are displayed, show row per digest - if ctx.Digest { - for _, dgst := range digests { - images = append(images, &imageContext{ - trunc: ctx.Trunc, - i: image, - repo: repo, - tag: "", - digest: dgst, - }) - } - } else { - images = append(images, &imageContext{ - trunc: ctx.Trunc, - i: image, - repo: repo, - tag: "", - }) - } - } - } - for _, imageCtx := range images { - if err := format(imageCtx); err != nil { - return err - } - } - } - return nil -} - -type imageContext struct { - HeaderContext - trunc bool - i types.ImageSummary - repo string - tag string - digest string -} - -func newImageContext() *imageContext { - imageCtx := imageContext{} - imageCtx.header = map[string]string{ - "ID": imageIDHeader, - "Repository": repositoryHeader, - "Tag": tagHeader, - "Digest": digestHeader, - "CreatedSince": createdSinceHeader, - "CreatedAt": createdAtHeader, - "Size": sizeHeader, - "Containers": containersHeader, - "VirtualSize": sizeHeader, - "SharedSize": sharedSizeHeader, - "UniqueSize": uniqueSizeHeader, - } - return &imageCtx -} - -func (c *imageContext) MarshalJSON() ([]byte, error) { - return marshalJSON(c) -} - -func (c *imageContext) ID() string { - if c.trunc { - return stringid.TruncateID(c.i.ID) - } - return c.i.ID -} - -func (c *imageContext) Repository() string { - return c.repo -} - -func (c *imageContext) Tag() string { - return c.tag -} - -func (c *imageContext) Digest() string { - return c.digest -} - -func (c *imageContext) CreatedSince() string { - createdAt := time.Unix(int64(c.i.Created), 0) - return units.HumanDuration(time.Now().UTC().Sub(createdAt)) + " ago" -} - -func (c *imageContext) CreatedAt() string { - return time.Unix(int64(c.i.Created), 0).String() -} - -func (c *imageContext) Size() string { - return units.HumanSizeWithPrecision(float64(c.i.Size), 3) -} - -func (c *imageContext) 
Containers() string { - if c.i.Containers == -1 { - return "N/A" - } - return fmt.Sprintf("%d", c.i.Containers) -} - -func (c *imageContext) VirtualSize() string { - return units.HumanSize(float64(c.i.VirtualSize)) -} - -func (c *imageContext) SharedSize() string { - if c.i.SharedSize == -1 { - return "N/A" - } - return units.HumanSize(float64(c.i.SharedSize)) -} - -func (c *imageContext) UniqueSize() string { - if c.i.VirtualSize == -1 || c.i.SharedSize == -1 { - return "N/A" - } - return units.HumanSize(float64(c.i.VirtualSize - c.i.SharedSize)) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/image_test.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/image_test.go deleted file mode 100644 index e7c15dbf5..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/image_test.go +++ /dev/null @@ -1,327 +0,0 @@ -package formatter - -import ( - "bytes" - "fmt" - "strings" - "testing" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestImageContext(t *testing.T) { - imageID := stringid.GenerateRandomID() - unix := time.Now().Unix() - - var ctx imageContext - cases := []struct { - imageCtx imageContext - expValue string - call func() string - }{ - {imageContext{ - i: types.ImageSummary{ID: imageID}, - trunc: true, - }, stringid.TruncateID(imageID), ctx.ID}, - {imageContext{ - i: types.ImageSummary{ID: imageID}, - trunc: false, - }, imageID, ctx.ID}, - {imageContext{ - i: types.ImageSummary{Size: 10, VirtualSize: 10}, - trunc: true, - }, "10B", ctx.Size}, - {imageContext{ - i: types.ImageSummary{Created: unix}, - trunc: true, - }, time.Unix(unix, 0).String(), ctx.CreatedAt}, - // FIXME - // {imageContext{ - // i: types.ImageSummary{Created: unix}, - // trunc: true, - // }, units.HumanDuration(time.Unix(unix, 0)), createdSinceHeader, ctx.CreatedSince}, - {imageContext{ - i: types.ImageSummary{}, - repo: "busybox", - 
}, "busybox", ctx.Repository}, - {imageContext{ - i: types.ImageSummary{}, - tag: "latest", - }, "latest", ctx.Tag}, - {imageContext{ - i: types.ImageSummary{}, - digest: "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a", - }, "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a", ctx.Digest}, - } - - for _, c := range cases { - ctx = c.imageCtx - v := c.call() - if strings.Contains(v, ",") { - compareMultipleValues(t, v, c.expValue) - } else if v != c.expValue { - t.Fatalf("Expected %s, was %s\n", c.expValue, v) - } - } -} - -func TestImageContextWrite(t *testing.T) { - unixTime := time.Now().AddDate(0, 0, -1).Unix() - expectedTime := time.Unix(unixTime, 0).String() - - cases := []struct { - context ImageContext - expected string - }{ - // Errors - { - ImageContext{ - Context: Context{ - Format: "{{InvalidFunction}}", - }, - }, - `Template parsing error: template: :1: function "InvalidFunction" not defined -`, - }, - { - ImageContext{ - Context: Context{ - Format: "{{nil}}", - }, - }, - `Template parsing error: template: :1:2: executing "" at : nil is not a command -`, - }, - // Table Format - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("table", false, false), - }, - }, - `REPOSITORY TAG IMAGE ID CREATED SIZE -image tag1 imageID1 24 hours ago 0B -image tag2 imageID2 24 hours ago 0B - imageID3 24 hours ago 0B -`, - }, - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("table {{.Repository}}", false, false), - }, - }, - "REPOSITORY\nimage\nimage\n\n", - }, - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("table {{.Repository}}", false, true), - }, - Digest: true, - }, - `REPOSITORY DIGEST -image sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf -image - -`, - }, - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("table {{.Repository}}", true, false), - }, - }, - "REPOSITORY\nimage\nimage\n\n", - }, - { - ImageContext{ - Context: Context{ - 
Format: NewImageFormat("table", true, false), - }, - }, - "imageID1\nimageID2\nimageID3\n", - }, - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("table", false, true), - }, - Digest: true, - }, - `REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE -image tag1 sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf imageID1 24 hours ago 0B -image tag2 imageID2 24 hours ago 0B - imageID3 24 hours ago 0B -`, - }, - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("table", true, true), - }, - Digest: true, - }, - "imageID1\nimageID2\nimageID3\n", - }, - // Raw Format - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("raw", false, false), - }, - }, - fmt.Sprintf(`repository: image -tag: tag1 -image_id: imageID1 -created_at: %s -virtual_size: 0B - -repository: image -tag: tag2 -image_id: imageID2 -created_at: %s -virtual_size: 0B - -repository: -tag: -image_id: imageID3 -created_at: %s -virtual_size: 0B - -`, expectedTime, expectedTime, expectedTime), - }, - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("raw", false, true), - }, - Digest: true, - }, - fmt.Sprintf(`repository: image -tag: tag1 -digest: sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf -image_id: imageID1 -created_at: %s -virtual_size: 0B - -repository: image -tag: tag2 -digest: -image_id: imageID2 -created_at: %s -virtual_size: 0B - -repository: -tag: -digest: -image_id: imageID3 -created_at: %s -virtual_size: 0B - -`, expectedTime, expectedTime, expectedTime), - }, - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("raw", true, false), - }, - }, - `image_id: imageID1 -image_id: imageID2 -image_id: imageID3 -`, - }, - // Custom Format - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("{{.Repository}}", false, false), - }, - }, - "image\nimage\n\n", - }, - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("{{.Repository}}", false, true), - }, - Digest: true, - }, - 
"image\nimage\n\n", - }, - } - - for _, testcase := range cases { - images := []types.ImageSummary{ - {ID: "imageID1", RepoTags: []string{"image:tag1"}, RepoDigests: []string{"image@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"}, Created: unixTime}, - {ID: "imageID2", RepoTags: []string{"image:tag2"}, Created: unixTime}, - {ID: "imageID3", RepoTags: []string{":"}, RepoDigests: []string{"@"}, Created: unixTime}, - } - out := bytes.NewBufferString("") - testcase.context.Output = out - err := ImageWrite(testcase.context, images) - if err != nil { - assert.Error(t, err, testcase.expected) - } else { - assert.Equal(t, out.String(), testcase.expected) - } - } -} - -func TestImageContextWriteWithNoImage(t *testing.T) { - out := bytes.NewBufferString("") - images := []types.ImageSummary{} - - contexts := []struct { - context ImageContext - expected string - }{ - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("{{.Repository}}", false, false), - Output: out, - }, - }, - "", - }, - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("table {{.Repository}}", false, false), - Output: out, - }, - }, - "REPOSITORY\n", - }, - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("{{.Repository}}", false, true), - Output: out, - }, - }, - "", - }, - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("table {{.Repository}}", false, true), - Output: out, - }, - }, - "REPOSITORY DIGEST\n", - }, - } - - for _, context := range contexts { - ImageWrite(context.context, images) - assert.Equal(t, out.String(), context.expected) - // Clean buffer - out.Reset() - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/network.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/network.go deleted file mode 100644 index 4aeebd175..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/network.go +++ /dev/null @@ -1,129 +0,0 @@ -package formatter - -import ( - "fmt" - 
"strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/stringid" -) - -const ( - defaultNetworkTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Driver}}\t{{.Scope}}" - - networkIDHeader = "NETWORK ID" - ipv6Header = "IPV6" - internalHeader = "INTERNAL" -) - -// NewNetworkFormat returns a Format for rendering using a network Context -func NewNetworkFormat(source string, quiet bool) Format { - switch source { - case TableFormatKey: - if quiet { - return defaultQuietFormat - } - return defaultNetworkTableFormat - case RawFormatKey: - if quiet { - return `network_id: {{.ID}}` - } - return `network_id: {{.ID}}\nname: {{.Name}}\ndriver: {{.Driver}}\nscope: {{.Scope}}\n` - } - return Format(source) -} - -// NetworkWrite writes the context -func NetworkWrite(ctx Context, networks []types.NetworkResource) error { - render := func(format func(subContext subContext) error) error { - for _, network := range networks { - networkCtx := &networkContext{trunc: ctx.Trunc, n: network} - if err := format(networkCtx); err != nil { - return err - } - } - return nil - } - networkCtx := networkContext{} - networkCtx.header = networkHeaderContext{ - "ID": networkIDHeader, - "Name": nameHeader, - "Driver": driverHeader, - "Scope": scopeHeader, - "IPv6": ipv6Header, - "Internal": internalHeader, - "Labels": labelsHeader, - "CreatedAt": createdAtHeader, - } - return ctx.Write(&networkCtx, render) -} - -type networkHeaderContext map[string]string - -func (c networkHeaderContext) Label(name string) string { - n := strings.Split(name, ".") - r := strings.NewReplacer("-", " ", "_", " ") - h := r.Replace(n[len(n)-1]) - - return h -} - -type networkContext struct { - HeaderContext - trunc bool - n types.NetworkResource -} - -func (c *networkContext) MarshalJSON() ([]byte, error) { - return marshalJSON(c) -} - -func (c *networkContext) ID() string { - if c.trunc { - return stringid.TruncateID(c.n.ID) - } - return c.n.ID -} - -func (c *networkContext) Name() string { - return 
c.n.Name -} - -func (c *networkContext) Driver() string { - return c.n.Driver -} - -func (c *networkContext) Scope() string { - return c.n.Scope -} - -func (c *networkContext) IPv6() string { - return fmt.Sprintf("%v", c.n.EnableIPv6) -} - -func (c *networkContext) Internal() string { - return fmt.Sprintf("%v", c.n.Internal) -} - -func (c *networkContext) Labels() string { - if c.n.Labels == nil { - return "" - } - - var joinLabels []string - for k, v := range c.n.Labels { - joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) - } - return strings.Join(joinLabels, ",") -} - -func (c *networkContext) Label(name string) string { - if c.n.Labels == nil { - return "" - } - return c.n.Labels[name] -} - -func (c *networkContext) CreatedAt() string { - return c.n.Created.String() -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/network_test.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/network_test.go deleted file mode 100644 index 24bf46d25..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/network_test.go +++ /dev/null @@ -1,213 +0,0 @@ -package formatter - -import ( - "bytes" - "encoding/json" - "strings" - "testing" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestNetworkContext(t *testing.T) { - networkID := stringid.GenerateRandomID() - - var ctx networkContext - cases := []struct { - networkCtx networkContext - expValue string - call func() string - }{ - {networkContext{ - n: types.NetworkResource{ID: networkID}, - trunc: false, - }, networkID, ctx.ID}, - {networkContext{ - n: types.NetworkResource{ID: networkID}, - trunc: true, - }, stringid.TruncateID(networkID), ctx.ID}, - {networkContext{ - n: types.NetworkResource{Name: "network_name"}, - }, "network_name", ctx.Name}, - {networkContext{ - n: types.NetworkResource{Driver: "driver_name"}, - }, "driver_name", ctx.Driver}, - 
{networkContext{ - n: types.NetworkResource{EnableIPv6: true}, - }, "true", ctx.IPv6}, - {networkContext{ - n: types.NetworkResource{EnableIPv6: false}, - }, "false", ctx.IPv6}, - {networkContext{ - n: types.NetworkResource{Internal: true}, - }, "true", ctx.Internal}, - {networkContext{ - n: types.NetworkResource{Internal: false}, - }, "false", ctx.Internal}, - {networkContext{ - n: types.NetworkResource{}, - }, "", ctx.Labels}, - {networkContext{ - n: types.NetworkResource{Labels: map[string]string{"label1": "value1", "label2": "value2"}}, - }, "label1=value1,label2=value2", ctx.Labels}, - } - - for _, c := range cases { - ctx = c.networkCtx - v := c.call() - if strings.Contains(v, ",") { - compareMultipleValues(t, v, c.expValue) - } else if v != c.expValue { - t.Fatalf("Expected %s, was %s\n", c.expValue, v) - } - } -} - -func TestNetworkContextWrite(t *testing.T) { - cases := []struct { - context Context - expected string - }{ - - // Errors - { - Context{Format: "{{InvalidFunction}}"}, - `Template parsing error: template: :1: function "InvalidFunction" not defined -`, - }, - { - Context{Format: "{{nil}}"}, - `Template parsing error: template: :1:2: executing "" at : nil is not a command -`, - }, - // Table format - { - Context{Format: NewNetworkFormat("table", false)}, - `NETWORK ID NAME DRIVER SCOPE -networkID1 foobar_baz foo local -networkID2 foobar_bar bar local -`, - }, - { - Context{Format: NewNetworkFormat("table", true)}, - `networkID1 -networkID2 -`, - }, - { - Context{Format: NewNetworkFormat("table {{.Name}}", false)}, - `NAME -foobar_baz -foobar_bar -`, - }, - { - Context{Format: NewNetworkFormat("table {{.Name}}", true)}, - `NAME -foobar_baz -foobar_bar -`, - }, - // Raw Format - { - Context{Format: NewNetworkFormat("raw", false)}, - `network_id: networkID1 -name: foobar_baz -driver: foo -scope: local - -network_id: networkID2 -name: foobar_bar -driver: bar -scope: local - -`, - }, - { - Context{Format: NewNetworkFormat("raw", true)}, - `network_id: 
networkID1 -network_id: networkID2 -`, - }, - // Custom Format - { - Context{Format: NewNetworkFormat("{{.Name}}", false)}, - `foobar_baz -foobar_bar -`, - }, - // Custom Format with CreatedAt - { - Context{Format: NewNetworkFormat("{{.Name}} {{.CreatedAt}}", false)}, - `foobar_baz 2016-01-01 00:00:00 +0000 UTC -foobar_bar 2017-01-01 00:00:00 +0000 UTC -`, - }, - } - - timestamp1, _ := time.Parse("2006-01-02", "2016-01-01") - timestamp2, _ := time.Parse("2006-01-02", "2017-01-01") - - for _, testcase := range cases { - networks := []types.NetworkResource{ - {ID: "networkID1", Name: "foobar_baz", Driver: "foo", Scope: "local", Created: timestamp1}, - {ID: "networkID2", Name: "foobar_bar", Driver: "bar", Scope: "local", Created: timestamp2}, - } - out := bytes.NewBufferString("") - testcase.context.Output = out - err := NetworkWrite(testcase.context, networks) - if err != nil { - assert.Error(t, err, testcase.expected) - } else { - assert.Equal(t, out.String(), testcase.expected) - } - } -} - -func TestNetworkContextWriteJSON(t *testing.T) { - networks := []types.NetworkResource{ - {ID: "networkID1", Name: "foobar_baz"}, - {ID: "networkID2", Name: "foobar_bar"}, - } - expectedJSONs := []map[string]interface{}{ - {"Driver": "", "ID": "networkID1", "IPv6": "false", "Internal": "false", "Labels": "", "Name": "foobar_baz", "Scope": "", "CreatedAt": "0001-01-01 00:00:00 +0000 UTC"}, - {"Driver": "", "ID": "networkID2", "IPv6": "false", "Internal": "false", "Labels": "", "Name": "foobar_bar", "Scope": "", "CreatedAt": "0001-01-01 00:00:00 +0000 UTC"}, - } - - out := bytes.NewBufferString("") - err := NetworkWrite(Context{Format: "{{json .}}", Output: out}, networks) - if err != nil { - t.Fatal(err) - } - for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { - t.Logf("Output: line %d: %s", i, line) - var m map[string]interface{} - if err := json.Unmarshal([]byte(line), &m); err != nil { - t.Fatal(err) - } - assert.DeepEqual(t, m, expectedJSONs[i]) - } 
-} - -func TestNetworkContextWriteJSONField(t *testing.T) { - networks := []types.NetworkResource{ - {ID: "networkID1", Name: "foobar_baz"}, - {ID: "networkID2", Name: "foobar_bar"}, - } - out := bytes.NewBufferString("") - err := NetworkWrite(Context{Format: "{{json .ID}}", Output: out}, networks) - if err != nil { - t.Fatal(err) - } - for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { - t.Logf("Output: line %d: %s", i, line) - var s string - if err := json.Unmarshal([]byte(line), &s); err != nil { - t.Fatal(err) - } - assert.Equal(t, s, networks[i].ID) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/node.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/node.go deleted file mode 100644 index 4d7f293f5..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/node.go +++ /dev/null @@ -1,292 +0,0 @@ -package formatter - -import ( - "fmt" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/inspect" - units "github.com/docker/go-units" -) - -const ( - defaultNodeTableFormat = "table {{.ID}} {{if .Self}}*{{else}} {{ end }}\t{{.Hostname}}\t{{.Status}}\t{{.Availability}}\t{{.ManagerStatus}}" - nodeInspectPrettyTemplate Format = `ID: {{.ID}} -{{- if .Name }} -Name: {{.Name}} -{{- end }} -{{- if .Labels }} -Labels: -{{- range $k, $v := .Labels }} - - {{ $k }}{{if $v }}={{ $v }}{{ end }} -{{- end }}{{ end }} -Hostname: {{.Hostname}} -Joined at: {{.CreatedAt}} -Status: - State: {{.StatusState}} - {{- if .HasStatusMessage}} - Message: {{.StatusMessage}} - {{- end}} - Availability: {{.SpecAvailability}} - {{- if .Status.Addr}} - Address: {{.StatusAddr}} - {{- end}} -{{- if .HasManagerStatus}} -Manager Status: - Address: {{.ManagerStatusAddr}} - Raft Status: {{.ManagerStatusReachability}} - {{- if .IsManagerStatusLeader}} - Leader: Yes - {{- else}} - Leader: No - {{- 
end}} -{{- end}} -Platform: - Operating System: {{.PlatformOS}} - Architecture: {{.PlatformArchitecture}} -Resources: - CPUs: {{.ResourceNanoCPUs}} - Memory: {{.ResourceMemory}} -{{- if .HasEnginePlugins}} -Plugins: -{{- range $k, $v := .EnginePlugins }} - {{ $k }}:{{if $v }} {{ $v }}{{ end }} -{{- end }} -{{- end }} -Engine Version: {{.EngineVersion}} -{{- if .EngineLabels}} -Engine Labels: -{{- range $k, $v := .EngineLabels }} - - {{ $k }}{{if $v }}={{ $v }}{{ end }} -{{- end }}{{- end }} -` - nodeIDHeader = "ID" - selfHeader = "" - hostnameHeader = "HOSTNAME" - availabilityHeader = "AVAILABILITY" - managerStatusHeader = "MANAGER STATUS" -) - -// NewNodeFormat returns a Format for rendering using a node Context -func NewNodeFormat(source string, quiet bool) Format { - switch source { - case PrettyFormatKey: - return nodeInspectPrettyTemplate - case TableFormatKey: - if quiet { - return defaultQuietFormat - } - return defaultNodeTableFormat - case RawFormatKey: - if quiet { - return `node_id: {{.ID}}` - } - return `node_id: {{.ID}}\nhostname: {{.Hostname}}\nstatus: {{.Status}}\navailability: {{.Availability}}\nmanager_status: {{.ManagerStatus}}\n` - } - return Format(source) -} - -// NodeWrite writes the context -func NodeWrite(ctx Context, nodes []swarm.Node, info types.Info) error { - render := func(format func(subContext subContext) error) error { - for _, node := range nodes { - nodeCtx := &nodeContext{n: node, info: info} - if err := format(nodeCtx); err != nil { - return err - } - } - return nil - } - nodeCtx := nodeContext{} - nodeCtx.header = nodeHeaderContext{ - "ID": nodeIDHeader, - "Self": selfHeader, - "Hostname": hostnameHeader, - "Status": statusHeader, - "Availability": availabilityHeader, - "ManagerStatus": managerStatusHeader, - } - return ctx.Write(&nodeCtx, render) -} - -type nodeHeaderContext map[string]string - -type nodeContext struct { - HeaderContext - n swarm.Node - info types.Info -} - -func (c *nodeContext) MarshalJSON() ([]byte, error) 
{ - return marshalJSON(c) -} - -func (c *nodeContext) ID() string { - return c.n.ID -} - -func (c *nodeContext) Self() bool { - return c.n.ID == c.info.Swarm.NodeID -} - -func (c *nodeContext) Hostname() string { - return c.n.Description.Hostname -} - -func (c *nodeContext) Status() string { - return command.PrettyPrint(string(c.n.Status.State)) -} - -func (c *nodeContext) Availability() string { - return command.PrettyPrint(string(c.n.Spec.Availability)) -} - -func (c *nodeContext) ManagerStatus() string { - reachability := "" - if c.n.ManagerStatus != nil { - if c.n.ManagerStatus.Leader { - reachability = "Leader" - } else { - reachability = string(c.n.ManagerStatus.Reachability) - } - } - return command.PrettyPrint(reachability) -} - -// NodeInspectWrite renders the context for a list of services -func NodeInspectWrite(ctx Context, refs []string, getRef inspect.GetRefFunc) error { - if ctx.Format != nodeInspectPrettyTemplate { - return inspect.Inspect(ctx.Output, refs, string(ctx.Format), getRef) - } - render := func(format func(subContext subContext) error) error { - for _, ref := range refs { - nodeI, _, err := getRef(ref) - if err != nil { - return err - } - node, ok := nodeI.(swarm.Node) - if !ok { - return fmt.Errorf("got wrong object to inspect :%v", ok) - } - if err := format(&nodeInspectContext{Node: node}); err != nil { - return err - } - } - return nil - } - return ctx.Write(&nodeInspectContext{}, render) -} - -type nodeInspectContext struct { - swarm.Node - subContext -} - -func (ctx *nodeInspectContext) ID() string { - return ctx.Node.ID -} - -func (ctx *nodeInspectContext) Name() string { - return ctx.Node.Spec.Name -} - -func (ctx *nodeInspectContext) Labels() map[string]string { - return ctx.Node.Spec.Labels -} - -func (ctx *nodeInspectContext) Hostname() string { - return ctx.Node.Description.Hostname -} - -func (ctx *nodeInspectContext) CreatedAt() string { - return command.PrettyPrint(ctx.Node.CreatedAt) -} - -func (ctx *nodeInspectContext) 
StatusState() string { - return command.PrettyPrint(ctx.Node.Status.State) -} - -func (ctx *nodeInspectContext) HasStatusMessage() bool { - return ctx.Node.Status.Message != "" -} - -func (ctx *nodeInspectContext) StatusMessage() string { - return command.PrettyPrint(ctx.Node.Status.Message) -} - -func (ctx *nodeInspectContext) SpecAvailability() string { - return command.PrettyPrint(ctx.Node.Spec.Availability) -} - -func (ctx *nodeInspectContext) HasStatusAddr() bool { - return ctx.Node.Status.Addr != "" -} - -func (ctx *nodeInspectContext) StatusAddr() string { - return ctx.Node.Status.Addr -} - -func (ctx *nodeInspectContext) HasManagerStatus() bool { - return ctx.Node.ManagerStatus != nil -} - -func (ctx *nodeInspectContext) ManagerStatusAddr() string { - return ctx.Node.ManagerStatus.Addr -} - -func (ctx *nodeInspectContext) ManagerStatusReachability() string { - return command.PrettyPrint(ctx.Node.ManagerStatus.Reachability) -} - -func (ctx *nodeInspectContext) IsManagerStatusLeader() bool { - return ctx.Node.ManagerStatus.Leader -} - -func (ctx *nodeInspectContext) PlatformOS() string { - return ctx.Node.Description.Platform.OS -} - -func (ctx *nodeInspectContext) PlatformArchitecture() string { - return ctx.Node.Description.Platform.Architecture -} - -func (ctx *nodeInspectContext) ResourceNanoCPUs() int { - if ctx.Node.Description.Resources.NanoCPUs == 0 { - return int(0) - } - return int(ctx.Node.Description.Resources.NanoCPUs) / 1e9 -} - -func (ctx *nodeInspectContext) ResourceMemory() string { - if ctx.Node.Description.Resources.MemoryBytes == 0 { - return "" - } - return units.BytesSize(float64(ctx.Node.Description.Resources.MemoryBytes)) -} - -func (ctx *nodeInspectContext) HasEnginePlugins() bool { - return len(ctx.Node.Description.Engine.Plugins) > 0 -} - -func (ctx *nodeInspectContext) EnginePlugins() map[string]string { - pluginMap := map[string][]string{} - for _, p := range ctx.Node.Description.Engine.Plugins { - pluginMap[p.Type] = 
append(pluginMap[p.Type], p.Name) - } - - pluginNamesByType := map[string]string{} - for k, v := range pluginMap { - pluginNamesByType[k] = strings.Join(v, ", ") - } - return pluginNamesByType -} - -func (ctx *nodeInspectContext) EngineLabels() map[string]string { - return ctx.Node.Description.Engine.Labels -} - -func (ctx *nodeInspectContext) EngineVersion() string { - return ctx.Node.Description.Engine.EngineVersion -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/node_test.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/node_test.go deleted file mode 100644 index 86f4979d3..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/node_test.go +++ /dev/null @@ -1,188 +0,0 @@ -package formatter - -import ( - "bytes" - "encoding/json" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestNodeContext(t *testing.T) { - nodeID := stringid.GenerateRandomID() - - var ctx nodeContext - cases := []struct { - nodeCtx nodeContext - expValue string - call func() string - }{ - {nodeContext{ - n: swarm.Node{ID: nodeID}, - }, nodeID, ctx.ID}, - {nodeContext{ - n: swarm.Node{Description: swarm.NodeDescription{Hostname: "node_hostname"}}, - }, "node_hostname", ctx.Hostname}, - {nodeContext{ - n: swarm.Node{Status: swarm.NodeStatus{State: swarm.NodeState("foo")}}, - }, "Foo", ctx.Status}, - {nodeContext{ - n: swarm.Node{Spec: swarm.NodeSpec{Availability: swarm.NodeAvailability("drain")}}, - }, "Drain", ctx.Availability}, - {nodeContext{ - n: swarm.Node{ManagerStatus: &swarm.ManagerStatus{Leader: true}}, - }, "Leader", ctx.ManagerStatus}, - } - - for _, c := range cases { - ctx = c.nodeCtx - v := c.call() - if strings.Contains(v, ",") { - compareMultipleValues(t, v, c.expValue) - } else if v != c.expValue { - t.Fatalf("Expected %s, was %s\n", c.expValue, v) - } - } -} - 
-func TestNodeContextWrite(t *testing.T) { - cases := []struct { - context Context - expected string - }{ - - // Errors - { - Context{Format: "{{InvalidFunction}}"}, - `Template parsing error: template: :1: function "InvalidFunction" not defined -`, - }, - { - Context{Format: "{{nil}}"}, - `Template parsing error: template: :1:2: executing "" at : nil is not a command -`, - }, - // Table format - { - Context{Format: NewNodeFormat("table", false)}, - `ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS -nodeID1 foobar_baz Foo Drain Leader -nodeID2 foobar_bar Bar Active Reachable -`, - }, - { - Context{Format: NewNodeFormat("table", true)}, - `nodeID1 -nodeID2 -`, - }, - { - Context{Format: NewNodeFormat("table {{.Hostname}}", false)}, - `HOSTNAME -foobar_baz -foobar_bar -`, - }, - { - Context{Format: NewNodeFormat("table {{.Hostname}}", true)}, - `HOSTNAME -foobar_baz -foobar_bar -`, - }, - // Raw Format - { - Context{Format: NewNodeFormat("raw", false)}, - `node_id: nodeID1 -hostname: foobar_baz -status: Foo -availability: Drain -manager_status: Leader - -node_id: nodeID2 -hostname: foobar_bar -status: Bar -availability: Active -manager_status: Reachable - -`, - }, - { - Context{Format: NewNodeFormat("raw", true)}, - `node_id: nodeID1 -node_id: nodeID2 -`, - }, - // Custom Format - { - Context{Format: NewNodeFormat("{{.Hostname}}", false)}, - `foobar_baz -foobar_bar -`, - }, - } - - for _, testcase := range cases { - nodes := []swarm.Node{ - {ID: "nodeID1", Description: swarm.NodeDescription{Hostname: "foobar_baz"}, Status: swarm.NodeStatus{State: swarm.NodeState("foo")}, Spec: swarm.NodeSpec{Availability: swarm.NodeAvailability("drain")}, ManagerStatus: &swarm.ManagerStatus{Leader: true}}, - {ID: "nodeID2", Description: swarm.NodeDescription{Hostname: "foobar_bar"}, Status: swarm.NodeStatus{State: swarm.NodeState("bar")}, Spec: swarm.NodeSpec{Availability: swarm.NodeAvailability("active")}, ManagerStatus: &swarm.ManagerStatus{Leader: false, Reachability: 
swarm.Reachability("Reachable")}}, - } - out := bytes.NewBufferString("") - testcase.context.Output = out - err := NodeWrite(testcase.context, nodes, types.Info{}) - if err != nil { - assert.Error(t, err, testcase.expected) - } else { - assert.Equal(t, out.String(), testcase.expected) - } - } -} - -func TestNodeContextWriteJSON(t *testing.T) { - nodes := []swarm.Node{ - {ID: "nodeID1", Description: swarm.NodeDescription{Hostname: "foobar_baz"}}, - {ID: "nodeID2", Description: swarm.NodeDescription{Hostname: "foobar_bar"}}, - } - expectedJSONs := []map[string]interface{}{ - {"Availability": "", "Hostname": "foobar_baz", "ID": "nodeID1", "ManagerStatus": "", "Status": "", "Self": false}, - {"Availability": "", "Hostname": "foobar_bar", "ID": "nodeID2", "ManagerStatus": "", "Status": "", "Self": false}, - } - - out := bytes.NewBufferString("") - err := NodeWrite(Context{Format: "{{json .}}", Output: out}, nodes, types.Info{}) - if err != nil { - t.Fatal(err) - } - for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { - t.Logf("Output: line %d: %s", i, line) - var m map[string]interface{} - if err := json.Unmarshal([]byte(line), &m); err != nil { - t.Fatal(err) - } - assert.DeepEqual(t, m, expectedJSONs[i]) - } -} - -func TestNodeContextWriteJSONField(t *testing.T) { - nodes := []swarm.Node{ - {ID: "nodeID1", Description: swarm.NodeDescription{Hostname: "foobar_baz"}}, - {ID: "nodeID2", Description: swarm.NodeDescription{Hostname: "foobar_bar"}}, - } - out := bytes.NewBufferString("") - err := NodeWrite(Context{Format: "{{json .ID}}", Output: out}, nodes, types.Info{}) - if err != nil { - t.Fatal(err) - } - for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { - t.Logf("Output: line %d: %s", i, line) - var s string - if err := json.Unmarshal([]byte(line), &s); err != nil { - t.Fatal(err) - } - assert.Equal(t, s, nodes[i].ID) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/plugin.go 
b/fn/vendor/github.com/docker/docker/cli/command/formatter/plugin.go deleted file mode 100644 index 2b71281a5..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/plugin.go +++ /dev/null @@ -1,95 +0,0 @@ -package formatter - -import ( - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/stringutils" -) - -const ( - defaultPluginTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Description}}\t{{.Enabled}}" - - pluginIDHeader = "ID" - descriptionHeader = "DESCRIPTION" - enabledHeader = "ENABLED" -) - -// NewPluginFormat returns a Format for rendering using a plugin Context -func NewPluginFormat(source string, quiet bool) Format { - switch source { - case TableFormatKey: - if quiet { - return defaultQuietFormat - } - return defaultPluginTableFormat - case RawFormatKey: - if quiet { - return `plugin_id: {{.ID}}` - } - return `plugin_id: {{.ID}}\nname: {{.Name}}\ndescription: {{.Description}}\nenabled: {{.Enabled}}\n` - } - return Format(source) -} - -// PluginWrite writes the context -func PluginWrite(ctx Context, plugins []*types.Plugin) error { - render := func(format func(subContext subContext) error) error { - for _, plugin := range plugins { - pluginCtx := &pluginContext{trunc: ctx.Trunc, p: *plugin} - if err := format(pluginCtx); err != nil { - return err - } - } - return nil - } - pluginCtx := pluginContext{} - pluginCtx.header = map[string]string{ - "ID": pluginIDHeader, - "Name": nameHeader, - "Description": descriptionHeader, - "Enabled": enabledHeader, - "PluginReference": imageHeader, - } - return ctx.Write(&pluginCtx, render) -} - -type pluginContext struct { - HeaderContext - trunc bool - p types.Plugin -} - -func (c *pluginContext) MarshalJSON() ([]byte, error) { - return marshalJSON(c) -} - -func (c *pluginContext) ID() string { - if c.trunc { - return stringid.TruncateID(c.p.ID) - } - return c.p.ID -} - -func (c *pluginContext) Name() string { - return c.p.Name -} 
- -func (c *pluginContext) Description() string { - desc := strings.Replace(c.p.Config.Description, "\n", "", -1) - desc = strings.Replace(desc, "\r", "", -1) - if c.trunc { - desc = stringutils.Ellipsis(desc, 45) - } - - return desc -} - -func (c *pluginContext) Enabled() bool { - return c.p.Enabled -} - -func (c *pluginContext) PluginReference() string { - return c.p.PluginReference -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/plugin_test.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/plugin_test.go deleted file mode 100644 index 3cc0af8a3..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/plugin_test.go +++ /dev/null @@ -1,182 +0,0 @@ -package formatter - -import ( - "bytes" - "encoding/json" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestPluginContext(t *testing.T) { - pluginID := stringid.GenerateRandomID() - - var ctx pluginContext - cases := []struct { - pluginCtx pluginContext - expValue string - call func() string - }{ - {pluginContext{ - p: types.Plugin{ID: pluginID}, - trunc: false, - }, pluginID, ctx.ID}, - {pluginContext{ - p: types.Plugin{ID: pluginID}, - trunc: true, - }, stringid.TruncateID(pluginID), ctx.ID}, - {pluginContext{ - p: types.Plugin{Name: "plugin_name"}, - }, "plugin_name", ctx.Name}, - {pluginContext{ - p: types.Plugin{Config: types.PluginConfig{Description: "plugin_description"}}, - }, "plugin_description", ctx.Description}, - } - - for _, c := range cases { - ctx = c.pluginCtx - v := c.call() - if strings.Contains(v, ",") { - compareMultipleValues(t, v, c.expValue) - } else if v != c.expValue { - t.Fatalf("Expected %s, was %s\n", c.expValue, v) - } - } -} - -func TestPluginContextWrite(t *testing.T) { - cases := []struct { - context Context - expected string - }{ - - // Errors - { - Context{Format: "{{InvalidFunction}}"}, - `Template parsing 
error: template: :1: function "InvalidFunction" not defined -`, - }, - { - Context{Format: "{{nil}}"}, - `Template parsing error: template: :1:2: executing "" at : nil is not a command -`, - }, - // Table format - { - Context{Format: NewPluginFormat("table", false)}, - `ID NAME DESCRIPTION ENABLED -pluginID1 foobar_baz description 1 true -pluginID2 foobar_bar description 2 false -`, - }, - { - Context{Format: NewPluginFormat("table", true)}, - `pluginID1 -pluginID2 -`, - }, - { - Context{Format: NewPluginFormat("table {{.Name}}", false)}, - `NAME -foobar_baz -foobar_bar -`, - }, - { - Context{Format: NewPluginFormat("table {{.Name}}", true)}, - `NAME -foobar_baz -foobar_bar -`, - }, - // Raw Format - { - Context{Format: NewPluginFormat("raw", false)}, - `plugin_id: pluginID1 -name: foobar_baz -description: description 1 -enabled: true - -plugin_id: pluginID2 -name: foobar_bar -description: description 2 -enabled: false - -`, - }, - { - Context{Format: NewPluginFormat("raw", true)}, - `plugin_id: pluginID1 -plugin_id: pluginID2 -`, - }, - // Custom Format - { - Context{Format: NewPluginFormat("{{.Name}}", false)}, - `foobar_baz -foobar_bar -`, - }, - } - - for _, testcase := range cases { - plugins := []*types.Plugin{ - {ID: "pluginID1", Name: "foobar_baz", Config: types.PluginConfig{Description: "description 1"}, Enabled: true}, - {ID: "pluginID2", Name: "foobar_bar", Config: types.PluginConfig{Description: "description 2"}, Enabled: false}, - } - out := bytes.NewBufferString("") - testcase.context.Output = out - err := PluginWrite(testcase.context, plugins) - if err != nil { - assert.Error(t, err, testcase.expected) - } else { - assert.Equal(t, out.String(), testcase.expected) - } - } -} - -func TestPluginContextWriteJSON(t *testing.T) { - plugins := []*types.Plugin{ - {ID: "pluginID1", Name: "foobar_baz"}, - {ID: "pluginID2", Name: "foobar_bar"}, - } - expectedJSONs := []map[string]interface{}{ - {"Description": "", "Enabled": false, "ID": "pluginID1", "Name": 
"foobar_baz", "PluginReference": ""}, - {"Description": "", "Enabled": false, "ID": "pluginID2", "Name": "foobar_bar", "PluginReference": ""}, - } - - out := bytes.NewBufferString("") - err := PluginWrite(Context{Format: "{{json .}}", Output: out}, plugins) - if err != nil { - t.Fatal(err) - } - for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { - var m map[string]interface{} - if err := json.Unmarshal([]byte(line), &m); err != nil { - t.Fatal(err) - } - assert.DeepEqual(t, m, expectedJSONs[i]) - } -} - -func TestPluginContextWriteJSONField(t *testing.T) { - plugins := []*types.Plugin{ - {ID: "pluginID1", Name: "foobar_baz"}, - {ID: "pluginID2", Name: "foobar_bar"}, - } - out := bytes.NewBufferString("") - err := PluginWrite(Context{Format: "{{json .ID}}", Output: out}, plugins) - if err != nil { - t.Fatal(err) - } - for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { - var s string - if err := json.Unmarshal([]byte(line), &s); err != nil { - t.Fatal(err) - } - assert.Equal(t, s, plugins[i].ID) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/reflect.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/reflect.go deleted file mode 100644 index fd59404d0..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/reflect.go +++ /dev/null @@ -1,66 +0,0 @@ -package formatter - -import ( - "encoding/json" - "reflect" - "unicode" - - "github.com/pkg/errors" -) - -func marshalJSON(x interface{}) ([]byte, error) { - m, err := marshalMap(x) - if err != nil { - return nil, err - } - return json.Marshal(m) -} - -// marshalMap marshals x to map[string]interface{} -func marshalMap(x interface{}) (map[string]interface{}, error) { - val := reflect.ValueOf(x) - if val.Kind() != reflect.Ptr { - return nil, errors.Errorf("expected a pointer to a struct, got %v", val.Kind()) - } - if val.IsNil() { - return nil, errors.Errorf("expected a pointer to a struct, got nil pointer") - } - 
valElem := val.Elem() - if valElem.Kind() != reflect.Struct { - return nil, errors.Errorf("expected a pointer to a struct, got a pointer to %v", valElem.Kind()) - } - typ := val.Type() - m := make(map[string]interface{}) - for i := 0; i < val.NumMethod(); i++ { - k, v, err := marshalForMethod(typ.Method(i), val.Method(i)) - if err != nil { - return nil, err - } - if k != "" { - m[k] = v - } - } - return m, nil -} - -var unmarshallableNames = map[string]struct{}{"FullHeader": {}} - -// marshalForMethod returns the map key and the map value for marshalling the method. -// It returns ("", nil, nil) for valid but non-marshallable parameter. (e.g. "unexportedFunc()") -func marshalForMethod(typ reflect.Method, val reflect.Value) (string, interface{}, error) { - if val.Kind() != reflect.Func { - return "", nil, errors.Errorf("expected func, got %v", val.Kind()) - } - name, numIn, numOut := typ.Name, val.Type().NumIn(), val.Type().NumOut() - _, blackListed := unmarshallableNames[name] - // FIXME: In text/template, (numOut == 2) is marshallable, - // if the type of the second param is error. 
- marshallable := unicode.IsUpper(rune(name[0])) && !blackListed && - numIn == 0 && numOut == 1 - if !marshallable { - return "", nil, nil - } - result := val.Call(make([]reflect.Value, numIn)) - intf := result[0].Interface() - return name, intf, nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/reflect_test.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/reflect_test.go deleted file mode 100644 index e547b1841..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/reflect_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package formatter - -import ( - "reflect" - "testing" -) - -type dummy struct { -} - -func (d *dummy) Func1() string { - return "Func1" -} - -func (d *dummy) func2() string { - return "func2(should not be marshalled)" -} - -func (d *dummy) Func3() (string, int) { - return "Func3(should not be marshalled)", -42 -} - -func (d *dummy) Func4() int { - return 4 -} - -type dummyType string - -func (d *dummy) Func5() dummyType { - return dummyType("Func5") -} - -func (d *dummy) FullHeader() string { - return "FullHeader(should not be marshalled)" -} - -var dummyExpected = map[string]interface{}{ - "Func1": "Func1", - "Func4": 4, - "Func5": dummyType("Func5"), -} - -func TestMarshalMap(t *testing.T) { - d := dummy{} - m, err := marshalMap(&d) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(dummyExpected, m) { - t.Fatalf("expected %+v, got %+v", - dummyExpected, m) - } -} - -func TestMarshalMapBad(t *testing.T) { - if _, err := marshalMap(nil); err == nil { - t.Fatal("expected an error (argument is nil)") - } - if _, err := marshalMap(dummy{}); err == nil { - t.Fatal("expected an error (argument is non-pointer)") - } - x := 42 - if _, err := marshalMap(&x); err == nil { - t.Fatal("expected an error (argument is a pointer to non-struct)") - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/secret.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/secret.go 
deleted file mode 100644 index 7ec6f9a62..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/secret.go +++ /dev/null @@ -1,101 +0,0 @@ -package formatter - -import ( - "fmt" - "strings" - "time" - - "github.com/docker/docker/api/types/swarm" - units "github.com/docker/go-units" -) - -const ( - defaultSecretTableFormat = "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}\t{{.UpdatedAt}}" - secretIDHeader = "ID" - secretNameHeader = "NAME" - secretCreatedHeader = "CREATED" - secretUpdatedHeader = "UPDATED" -) - -// NewSecretFormat returns a Format for rendering using a network Context -func NewSecretFormat(source string, quiet bool) Format { - switch source { - case TableFormatKey: - if quiet { - return defaultQuietFormat - } - return defaultSecretTableFormat - } - return Format(source) -} - -// SecretWrite writes the context -func SecretWrite(ctx Context, secrets []swarm.Secret) error { - render := func(format func(subContext subContext) error) error { - for _, secret := range secrets { - secretCtx := &secretContext{s: secret} - if err := format(secretCtx); err != nil { - return err - } - } - return nil - } - return ctx.Write(newSecretContext(), render) -} - -func newSecretContext() *secretContext { - sCtx := &secretContext{} - - sCtx.header = map[string]string{ - "ID": secretIDHeader, - "Name": nameHeader, - "CreatedAt": secretCreatedHeader, - "UpdatedAt": secretUpdatedHeader, - "Labels": labelsHeader, - } - return sCtx -} - -type secretContext struct { - HeaderContext - s swarm.Secret -} - -func (c *secretContext) MarshalJSON() ([]byte, error) { - return marshalJSON(c) -} - -func (c *secretContext) ID() string { - return c.s.ID -} - -func (c *secretContext) Name() string { - return c.s.Spec.Annotations.Name -} - -func (c *secretContext) CreatedAt() string { - return units.HumanDuration(time.Now().UTC().Sub(c.s.Meta.CreatedAt)) + " ago" -} - -func (c *secretContext) UpdatedAt() string { - return 
units.HumanDuration(time.Now().UTC().Sub(c.s.Meta.UpdatedAt)) + " ago" -} - -func (c *secretContext) Labels() string { - mapLabels := c.s.Spec.Annotations.Labels - if mapLabels == nil { - return "" - } - var joinLabels []string - for k, v := range mapLabels { - joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) - } - return strings.Join(joinLabels, ",") -} - -func (c *secretContext) Label(name string) string { - if c.s.Spec.Annotations.Labels == nil { - return "" - } - return c.s.Spec.Annotations.Labels[name] -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/secret_test.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/secret_test.go deleted file mode 100644 index 722b65056..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/secret_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package formatter - -import ( - "bytes" - "testing" - "time" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestSecretContextFormatWrite(t *testing.T) { - // Check default output format (verbose and non-verbose mode) for table headers - cases := []struct { - context Context - expected string - }{ - // Errors - { - Context{Format: "{{InvalidFunction}}"}, - `Template parsing error: template: :1: function "InvalidFunction" not defined -`, - }, - { - Context{Format: "{{nil}}"}, - `Template parsing error: template: :1:2: executing "" at : nil is not a command -`, - }, - // Table format - {Context{Format: NewSecretFormat("table", false)}, - `ID NAME CREATED UPDATED -1 passwords Less than a second ago Less than a second ago -2 id_rsa Less than a second ago Less than a second ago -`}, - {Context{Format: NewSecretFormat("table {{.Name}}", true)}, - `NAME -passwords -id_rsa -`}, - {Context{Format: NewSecretFormat("{{.ID}}-{{.Name}}", false)}, - `1-passwords -2-id_rsa -`}, - } - - secrets := []swarm.Secret{ - {ID: "1", - Meta: swarm.Meta{CreatedAt: time.Now(), UpdatedAt: time.Now()}, - 
Spec: swarm.SecretSpec{Annotations: swarm.Annotations{Name: "passwords"}}}, - {ID: "2", - Meta: swarm.Meta{CreatedAt: time.Now(), UpdatedAt: time.Now()}, - Spec: swarm.SecretSpec{Annotations: swarm.Annotations{Name: "id_rsa"}}}, - } - for _, testcase := range cases { - out := bytes.NewBufferString("") - testcase.context.Output = out - if err := SecretWrite(testcase.context, secrets); err != nil { - assert.Error(t, err, testcase.expected) - } else { - assert.Equal(t, out.String(), testcase.expected) - } - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/service.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/service.go deleted file mode 100644 index 8f57af22d..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/service.go +++ /dev/null @@ -1,535 +0,0 @@ -package formatter - -import ( - "fmt" - "strings" - "time" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - mounttypes "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/command/inspect" - "github.com/docker/docker/pkg/stringid" - units "github.com/docker/go-units" - "github.com/pkg/errors" -) - -const serviceInspectPrettyTemplate Format = ` -ID: {{.ID}} -Name: {{.Name}} -{{- if .Labels }} -Labels: -{{- range $k, $v := .Labels }} - {{ $k }}{{if $v }}={{ $v }}{{ end }} -{{- end }}{{ end }} -Service Mode: -{{- if .IsModeGlobal }} Global -{{- else if .IsModeReplicated }} Replicated -{{- if .ModeReplicatedReplicas }} - Replicas: {{ .ModeReplicatedReplicas }} -{{- end }}{{ end }} -{{- if .HasUpdateStatus }} -UpdateStatus: - State: {{ .UpdateStatusState }} -{{- if .HasUpdateStatusStarted }} - Started: {{ .UpdateStatusStarted }} -{{- end }} -{{- if .UpdateIsCompleted }} - Completed: {{ .UpdateStatusCompleted }} -{{- end }} - Message: {{ .UpdateStatusMessage }} -{{- end }} -Placement: -{{- if .TaskPlacementConstraints }} - Constraints: {{ 
.TaskPlacementConstraints }} -{{- end }} -{{- if .TaskPlacementPreferences }} - Preferences: {{ .TaskPlacementPreferences }} -{{- end }} -{{- if .HasUpdateConfig }} -UpdateConfig: - Parallelism: {{ .UpdateParallelism }} -{{- if .HasUpdateDelay}} - Delay: {{ .UpdateDelay }} -{{- end }} - On failure: {{ .UpdateOnFailure }} -{{- if .HasUpdateMonitor}} - Monitoring Period: {{ .UpdateMonitor }} -{{- end }} - Max failure ratio: {{ .UpdateMaxFailureRatio }} - Update order: {{ .UpdateOrder }} -{{- end }} -{{- if .HasRollbackConfig }} -RollbackConfig: - Parallelism: {{ .RollbackParallelism }} -{{- if .HasRollbackDelay}} - Delay: {{ .RollbackDelay }} -{{- end }} - On failure: {{ .RollbackOnFailure }} -{{- if .HasRollbackMonitor}} - Monitoring Period: {{ .RollbackMonitor }} -{{- end }} - Max failure ratio: {{ .RollbackMaxFailureRatio }} - Rollback order: {{ .RollbackOrder }} -{{- end }} -ContainerSpec: - Image: {{ .ContainerImage }} -{{- if .ContainerArgs }} - Args: {{ range $arg := .ContainerArgs }}{{ $arg }} {{ end }} -{{- end -}} -{{- if .ContainerEnv }} - Env: {{ range $env := .ContainerEnv }}{{ $env }} {{ end }} -{{- end -}} -{{- if .ContainerWorkDir }} - Dir: {{ .ContainerWorkDir }} -{{- end -}} -{{- if .ContainerUser }} - User: {{ .ContainerUser }} -{{- end }} -{{- if .ContainerMounts }} -Mounts: -{{- end }} -{{- range $mount := .ContainerMounts }} - Target = {{ $mount.Target }} - Source = {{ $mount.Source }} - ReadOnly = {{ $mount.ReadOnly }} - Type = {{ $mount.Type }} -{{- end -}} -{{- if .HasResources }} -Resources: -{{- if .HasResourceReservations }} - Reservations: -{{- if gt .ResourceReservationNanoCPUs 0.0 }} - CPU: {{ .ResourceReservationNanoCPUs }} -{{- end }} -{{- if .ResourceReservationMemory }} - Memory: {{ .ResourceReservationMemory }} -{{- end }}{{ end }} -{{- if .HasResourceLimits }} - Limits: -{{- if gt .ResourceLimitsNanoCPUs 0.0 }} - CPU: {{ .ResourceLimitsNanoCPUs }} -{{- end }} -{{- if .ResourceLimitMemory }} - Memory: {{ .ResourceLimitMemory }} 
-{{- end }}{{ end }}{{ end }} -{{- if .Networks }} -Networks: -{{- range $network := .Networks }} {{ $network }}{{ end }} {{ end }} -Endpoint Mode: {{ .EndpointMode }} -{{- if .Ports }} -Ports: -{{- range $port := .Ports }} - PublishedPort = {{ $port.PublishedPort }} - Protocol = {{ $port.Protocol }} - TargetPort = {{ $port.TargetPort }} - PublishMode = {{ $port.PublishMode }} -{{- end }} {{ end -}} -` - -// NewServiceFormat returns a Format for rendering using a Context -func NewServiceFormat(source string) Format { - switch source { - case PrettyFormatKey: - return serviceInspectPrettyTemplate - default: - return Format(strings.TrimPrefix(source, RawFormatKey)) - } -} - -func resolveNetworks(service swarm.Service, getNetwork inspect.GetRefFunc) map[string]string { - networkNames := make(map[string]string) - for _, network := range service.Spec.TaskTemplate.Networks { - if resolved, _, err := getNetwork(network.Target); err == nil { - if resolvedNetwork, ok := resolved.(types.NetworkResource); ok { - networkNames[resolvedNetwork.ID] = resolvedNetwork.Name - } - } - } - return networkNames -} - -// ServiceInspectWrite renders the context for a list of services -func ServiceInspectWrite(ctx Context, refs []string, getRef, getNetwork inspect.GetRefFunc) error { - if ctx.Format != serviceInspectPrettyTemplate { - return inspect.Inspect(ctx.Output, refs, string(ctx.Format), getRef) - } - render := func(format func(subContext subContext) error) error { - for _, ref := range refs { - serviceI, _, err := getRef(ref) - if err != nil { - return err - } - service, ok := serviceI.(swarm.Service) - if !ok { - return errors.Errorf("got wrong object to inspect") - } - if err := format(&serviceInspectContext{Service: service, networkNames: resolveNetworks(service, getNetwork)}); err != nil { - return err - } - } - return nil - } - return ctx.Write(&serviceInspectContext{}, render) -} - -type serviceInspectContext struct { - swarm.Service - subContext - - // networkNames is a map 
from network IDs (as found in - // Networks[x].Target) to network names. - networkNames map[string]string -} - -func (ctx *serviceInspectContext) MarshalJSON() ([]byte, error) { - return marshalJSON(ctx) -} - -func (ctx *serviceInspectContext) ID() string { - return ctx.Service.ID -} - -func (ctx *serviceInspectContext) Name() string { - return ctx.Service.Spec.Name -} - -func (ctx *serviceInspectContext) Labels() map[string]string { - return ctx.Service.Spec.Labels -} - -func (ctx *serviceInspectContext) IsModeGlobal() bool { - return ctx.Service.Spec.Mode.Global != nil -} - -func (ctx *serviceInspectContext) IsModeReplicated() bool { - return ctx.Service.Spec.Mode.Replicated != nil -} - -func (ctx *serviceInspectContext) ModeReplicatedReplicas() *uint64 { - return ctx.Service.Spec.Mode.Replicated.Replicas -} - -func (ctx *serviceInspectContext) HasUpdateStatus() bool { - return ctx.Service.UpdateStatus != nil && ctx.Service.UpdateStatus.State != "" -} - -func (ctx *serviceInspectContext) UpdateStatusState() swarm.UpdateState { - return ctx.Service.UpdateStatus.State -} - -func (ctx *serviceInspectContext) HasUpdateStatusStarted() bool { - return ctx.Service.UpdateStatus.StartedAt != nil -} - -func (ctx *serviceInspectContext) UpdateStatusStarted() string { - return units.HumanDuration(time.Since(*ctx.Service.UpdateStatus.StartedAt)) -} - -func (ctx *serviceInspectContext) UpdateIsCompleted() bool { - return ctx.Service.UpdateStatus.State == swarm.UpdateStateCompleted && ctx.Service.UpdateStatus.CompletedAt != nil -} - -func (ctx *serviceInspectContext) UpdateStatusCompleted() string { - return units.HumanDuration(time.Since(*ctx.Service.UpdateStatus.CompletedAt)) -} - -func (ctx *serviceInspectContext) UpdateStatusMessage() string { - return ctx.Service.UpdateStatus.Message -} - -func (ctx *serviceInspectContext) TaskPlacementConstraints() []string { - if ctx.Service.Spec.TaskTemplate.Placement != nil { - return ctx.Service.Spec.TaskTemplate.Placement.Constraints 
- } - return nil -} - -func (ctx *serviceInspectContext) TaskPlacementPreferences() []string { - if ctx.Service.Spec.TaskTemplate.Placement == nil { - return nil - } - var strings []string - for _, pref := range ctx.Service.Spec.TaskTemplate.Placement.Preferences { - if pref.Spread != nil { - strings = append(strings, "spread="+pref.Spread.SpreadDescriptor) - } - } - return strings -} - -func (ctx *serviceInspectContext) HasUpdateConfig() bool { - return ctx.Service.Spec.UpdateConfig != nil -} - -func (ctx *serviceInspectContext) UpdateParallelism() uint64 { - return ctx.Service.Spec.UpdateConfig.Parallelism -} - -func (ctx *serviceInspectContext) HasUpdateDelay() bool { - return ctx.Service.Spec.UpdateConfig.Delay.Nanoseconds() > 0 -} - -func (ctx *serviceInspectContext) UpdateDelay() time.Duration { - return ctx.Service.Spec.UpdateConfig.Delay -} - -func (ctx *serviceInspectContext) UpdateOnFailure() string { - return ctx.Service.Spec.UpdateConfig.FailureAction -} - -func (ctx *serviceInspectContext) UpdateOrder() string { - return ctx.Service.Spec.UpdateConfig.Order -} - -func (ctx *serviceInspectContext) HasUpdateMonitor() bool { - return ctx.Service.Spec.UpdateConfig.Monitor.Nanoseconds() > 0 -} - -func (ctx *serviceInspectContext) UpdateMonitor() time.Duration { - return ctx.Service.Spec.UpdateConfig.Monitor -} - -func (ctx *serviceInspectContext) UpdateMaxFailureRatio() float32 { - return ctx.Service.Spec.UpdateConfig.MaxFailureRatio -} - -func (ctx *serviceInspectContext) HasRollbackConfig() bool { - return ctx.Service.Spec.RollbackConfig != nil -} - -func (ctx *serviceInspectContext) RollbackParallelism() uint64 { - return ctx.Service.Spec.RollbackConfig.Parallelism -} - -func (ctx *serviceInspectContext) HasRollbackDelay() bool { - return ctx.Service.Spec.RollbackConfig.Delay.Nanoseconds() > 0 -} - -func (ctx *serviceInspectContext) RollbackDelay() time.Duration { - return ctx.Service.Spec.RollbackConfig.Delay -} - -func (ctx *serviceInspectContext) 
RollbackOnFailure() string { - return ctx.Service.Spec.RollbackConfig.FailureAction -} - -func (ctx *serviceInspectContext) HasRollbackMonitor() bool { - return ctx.Service.Spec.RollbackConfig.Monitor.Nanoseconds() > 0 -} - -func (ctx *serviceInspectContext) RollbackMonitor() time.Duration { - return ctx.Service.Spec.RollbackConfig.Monitor -} - -func (ctx *serviceInspectContext) RollbackMaxFailureRatio() float32 { - return ctx.Service.Spec.RollbackConfig.MaxFailureRatio -} - -func (ctx *serviceInspectContext) RollbackOrder() string { - return ctx.Service.Spec.RollbackConfig.Order -} - -func (ctx *serviceInspectContext) ContainerImage() string { - return ctx.Service.Spec.TaskTemplate.ContainerSpec.Image -} - -func (ctx *serviceInspectContext) ContainerArgs() []string { - return ctx.Service.Spec.TaskTemplate.ContainerSpec.Args -} - -func (ctx *serviceInspectContext) ContainerEnv() []string { - return ctx.Service.Spec.TaskTemplate.ContainerSpec.Env -} - -func (ctx *serviceInspectContext) ContainerWorkDir() string { - return ctx.Service.Spec.TaskTemplate.ContainerSpec.Dir -} - -func (ctx *serviceInspectContext) ContainerUser() string { - return ctx.Service.Spec.TaskTemplate.ContainerSpec.User -} - -func (ctx *serviceInspectContext) ContainerMounts() []mounttypes.Mount { - return ctx.Service.Spec.TaskTemplate.ContainerSpec.Mounts -} - -func (ctx *serviceInspectContext) HasResources() bool { - return ctx.Service.Spec.TaskTemplate.Resources != nil -} - -func (ctx *serviceInspectContext) HasResourceReservations() bool { - if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Reservations == nil { - return false - } - return ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes > 0 -} - -func (ctx *serviceInspectContext) ResourceReservationNanoCPUs() float64 { - if ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs == 0 { - return float64(0) - } 
- return float64(ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs) / 1e9 -} - -func (ctx *serviceInspectContext) ResourceReservationMemory() string { - if ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes == 0 { - return "" - } - return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes)) -} - -func (ctx *serviceInspectContext) HasResourceLimits() bool { - if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Limits == nil { - return false - } - return ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes > 0 -} - -func (ctx *serviceInspectContext) ResourceLimitsNanoCPUs() float64 { - return float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs) / 1e9 -} - -func (ctx *serviceInspectContext) ResourceLimitMemory() string { - if ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes == 0 { - return "" - } - return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes)) -} - -func (ctx *serviceInspectContext) Networks() []string { - var out []string - for _, n := range ctx.Service.Spec.TaskTemplate.Networks { - if name, ok := ctx.networkNames[n.Target]; ok { - out = append(out, name) - } else { - out = append(out, n.Target) - } - } - return out -} - -func (ctx *serviceInspectContext) EndpointMode() string { - if ctx.Service.Spec.EndpointSpec == nil { - return "" - } - - return string(ctx.Service.Spec.EndpointSpec.Mode) -} - -func (ctx *serviceInspectContext) Ports() []swarm.PortConfig { - return ctx.Service.Endpoint.Ports -} - -const ( - defaultServiceTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Mode}}\t{{.Replicas}}\t{{.Image}}\t{{.Ports}}" - - serviceIDHeader = "ID" - modeHeader = "MODE" - replicasHeader = "REPLICAS" -) - -// NewServiceListFormat returns a Format for rendering using a service Context -func NewServiceListFormat(source string, quiet bool) 
Format { - switch source { - case TableFormatKey: - if quiet { - return defaultQuietFormat - } - return defaultServiceTableFormat - case RawFormatKey: - if quiet { - return `id: {{.ID}}` - } - return `id: {{.ID}}\nname: {{.Name}}\nmode: {{.Mode}}\nreplicas: {{.Replicas}}\nimage: {{.Image}}\nports: {{.Ports}}\n` - } - return Format(source) -} - -// ServiceListInfo stores the information about mode and replicas to be used by template -type ServiceListInfo struct { - Mode string - Replicas string -} - -// ServiceListWrite writes the context -func ServiceListWrite(ctx Context, services []swarm.Service, info map[string]ServiceListInfo) error { - render := func(format func(subContext subContext) error) error { - for _, service := range services { - serviceCtx := &serviceContext{service: service, mode: info[service.ID].Mode, replicas: info[service.ID].Replicas} - if err := format(serviceCtx); err != nil { - return err - } - } - return nil - } - serviceCtx := serviceContext{} - serviceCtx.header = map[string]string{ - "ID": serviceIDHeader, - "Name": nameHeader, - "Mode": modeHeader, - "Replicas": replicasHeader, - "Image": imageHeader, - "Ports": portsHeader, - } - return ctx.Write(&serviceCtx, render) -} - -type serviceContext struct { - HeaderContext - service swarm.Service - mode string - replicas string -} - -func (c *serviceContext) MarshalJSON() ([]byte, error) { - return marshalJSON(c) -} - -func (c *serviceContext) ID() string { - return stringid.TruncateID(c.service.ID) -} - -func (c *serviceContext) Name() string { - return c.service.Spec.Name -} - -func (c *serviceContext) Mode() string { - return c.mode -} - -func (c *serviceContext) Replicas() string { - return c.replicas -} - -func (c *serviceContext) Image() string { - image := c.service.Spec.TaskTemplate.ContainerSpec.Image - if ref, err := reference.ParseNormalizedNamed(image); err == nil { - // update image string for display, (strips any digest) - if nt, ok := ref.(reference.NamedTagged); ok { - if 
namedTagged, err := reference.WithTag(reference.TrimNamed(nt), nt.Tag()); err == nil { - image = reference.FamiliarString(namedTagged) - } - } - } - - return image -} - -func (c *serviceContext) Ports() string { - if c.service.Spec.EndpointSpec == nil || c.service.Spec.EndpointSpec.Ports == nil { - return "" - } - ports := []string{} - for _, pConfig := range c.service.Spec.EndpointSpec.Ports { - if pConfig.PublishMode == swarm.PortConfigPublishModeIngress { - ports = append(ports, fmt.Sprintf("*:%d->%d/%s", - pConfig.PublishedPort, - pConfig.TargetPort, - pConfig.Protocol, - )) - } - } - return strings.Join(ports, ",") -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/service_test.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/service_test.go deleted file mode 100644 index 93ffc92a3..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/service_test.go +++ /dev/null @@ -1,239 +0,0 @@ -package formatter - -import ( - "bytes" - "encoding/json" - "strings" - "testing" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestServiceContextWrite(t *testing.T) { - cases := []struct { - context Context - expected string - }{ - // Errors - { - Context{Format: "{{InvalidFunction}}"}, - `Template parsing error: template: :1: function "InvalidFunction" not defined -`, - }, - { - Context{Format: "{{nil}}"}, - `Template parsing error: template: :1:2: executing "" at : nil is not a command -`, - }, - // Table format - { - Context{Format: NewServiceListFormat("table", false)}, - `ID NAME MODE REPLICAS IMAGE PORTS -id_baz baz global 2/4 *:80->8080/tcp -id_bar bar replicated 2/4 *:80->8080/tcp -`, - }, - { - Context{Format: NewServiceListFormat("table", true)}, - `id_baz -id_bar -`, - }, - { - Context{Format: NewServiceListFormat("table {{.Name}}", false)}, - `NAME -baz -bar -`, - }, - { - Context{Format: NewServiceListFormat("table {{.Name}}", true)}, - `NAME -baz -bar 
-`, - }, - // Raw Format - { - Context{Format: NewServiceListFormat("raw", false)}, - `id: id_baz -name: baz -mode: global -replicas: 2/4 -image: -ports: *:80->8080/tcp - -id: id_bar -name: bar -mode: replicated -replicas: 2/4 -image: -ports: *:80->8080/tcp - -`, - }, - { - Context{Format: NewServiceListFormat("raw", true)}, - `id: id_baz -id: id_bar -`, - }, - // Custom Format - { - Context{Format: NewServiceListFormat("{{.Name}}", false)}, - `baz -bar -`, - }, - } - - for _, testcase := range cases { - services := []swarm.Service{ - { - ID: "id_baz", - Spec: swarm.ServiceSpec{ - Annotations: swarm.Annotations{Name: "baz"}, - EndpointSpec: &swarm.EndpointSpec{ - Ports: []swarm.PortConfig{ - { - PublishMode: "ingress", - PublishedPort: 80, - TargetPort: 8080, - Protocol: "tcp", - }, - }, - }, - }, - }, - { - ID: "id_bar", - Spec: swarm.ServiceSpec{ - Annotations: swarm.Annotations{Name: "bar"}, - EndpointSpec: &swarm.EndpointSpec{ - Ports: []swarm.PortConfig{ - { - PublishMode: "ingress", - PublishedPort: 80, - TargetPort: 8080, - Protocol: "tcp", - }, - }, - }, - }, - }, - } - info := map[string]ServiceListInfo{ - "id_baz": { - Mode: "global", - Replicas: "2/4", - }, - "id_bar": { - Mode: "replicated", - Replicas: "2/4", - }, - } - out := bytes.NewBufferString("") - testcase.context.Output = out - err := ServiceListWrite(testcase.context, services, info) - if err != nil { - assert.Error(t, err, testcase.expected) - } else { - assert.Equal(t, out.String(), testcase.expected) - } - } -} - -func TestServiceContextWriteJSON(t *testing.T) { - services := []swarm.Service{ - { - ID: "id_baz", - Spec: swarm.ServiceSpec{ - Annotations: swarm.Annotations{Name: "baz"}, - EndpointSpec: &swarm.EndpointSpec{ - Ports: []swarm.PortConfig{ - { - PublishMode: "ingress", - PublishedPort: 80, - TargetPort: 8080, - Protocol: "tcp", - }, - }, - }, - }, - }, - { - ID: "id_bar", - Spec: swarm.ServiceSpec{ - Annotations: swarm.Annotations{Name: "bar"}, - EndpointSpec: &swarm.EndpointSpec{ 
- Ports: []swarm.PortConfig{ - { - PublishMode: "ingress", - PublishedPort: 80, - TargetPort: 8080, - Protocol: "tcp", - }, - }, - }, - }, - }, - } - info := map[string]ServiceListInfo{ - "id_baz": { - Mode: "global", - Replicas: "2/4", - }, - "id_bar": { - Mode: "replicated", - Replicas: "2/4", - }, - } - expectedJSONs := []map[string]interface{}{ - {"ID": "id_baz", "Name": "baz", "Mode": "global", "Replicas": "2/4", "Image": "", "Ports": "*:80->8080/tcp"}, - {"ID": "id_bar", "Name": "bar", "Mode": "replicated", "Replicas": "2/4", "Image": "", "Ports": "*:80->8080/tcp"}, - } - - out := bytes.NewBufferString("") - err := ServiceListWrite(Context{Format: "{{json .}}", Output: out}, services, info) - if err != nil { - t.Fatal(err) - } - for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { - t.Logf("Output: line %d: %s", i, line) - var m map[string]interface{} - if err := json.Unmarshal([]byte(line), &m); err != nil { - t.Fatal(err) - } - assert.DeepEqual(t, m, expectedJSONs[i]) - } -} -func TestServiceContextWriteJSONField(t *testing.T) { - services := []swarm.Service{ - {ID: "id_baz", Spec: swarm.ServiceSpec{Annotations: swarm.Annotations{Name: "baz"}}}, - {ID: "id_bar", Spec: swarm.ServiceSpec{Annotations: swarm.Annotations{Name: "bar"}}}, - } - info := map[string]ServiceListInfo{ - "id_baz": { - Mode: "global", - Replicas: "2/4", - }, - "id_bar": { - Mode: "replicated", - Replicas: "2/4", - }, - } - out := bytes.NewBufferString("") - err := ServiceListWrite(Context{Format: "{{json .Name}}", Output: out}, services, info) - if err != nil { - t.Fatal(err) - } - for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { - t.Logf("Output: line %d: %s", i, line) - var s string - if err := json.Unmarshal([]byte(line), &s); err != nil { - t.Fatal(err) - } - assert.Equal(t, s, services[i].Spec.Name) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/stats.go 
b/fn/vendor/github.com/docker/docker/cli/command/formatter/stats.go deleted file mode 100644 index c0151101a..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/stats.go +++ /dev/null @@ -1,220 +0,0 @@ -package formatter - -import ( - "fmt" - "sync" - - units "github.com/docker/go-units" -) - -const ( - winOSType = "windows" - defaultStatsTableFormat = "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.MemPerc}}\t{{.NetIO}}\t{{.BlockIO}}\t{{.PIDs}}" - winDefaultStatsTableFormat = "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.NetIO}}\t{{.BlockIO}}" - - containerHeader = "CONTAINER" - cpuPercHeader = "CPU %" - netIOHeader = "NET I/O" - blockIOHeader = "BLOCK I/O" - memPercHeader = "MEM %" // Used only on Linux - winMemUseHeader = "PRIV WORKING SET" // Used only on Windows - memUseHeader = "MEM USAGE / LIMIT" // Used only on Linux - pidsHeader = "PIDS" // Used only on Linux -) - -// StatsEntry represents represents the statistics data collected from a container -type StatsEntry struct { - Container string - Name string - ID string - CPUPercentage float64 - Memory float64 // On Windows this is the private working set - MemoryLimit float64 // Not used on Windows - MemoryPercentage float64 // Not used on Windows - NetworkRx float64 - NetworkTx float64 - BlockRead float64 - BlockWrite float64 - PidsCurrent uint64 // Not used on Windows - IsInvalid bool -} - -// ContainerStats represents an entity to store containers statistics synchronously -type ContainerStats struct { - mutex sync.Mutex - StatsEntry - err error -} - -// GetError returns the container statistics error. -// This is used to determine whether the statistics are valid or not -func (cs *ContainerStats) GetError() error { - cs.mutex.Lock() - defer cs.mutex.Unlock() - return cs.err -} - -// SetErrorAndReset zeroes all the container statistics and store the error. 
-// It is used when receiving time out error during statistics collecting to reduce lock overhead -func (cs *ContainerStats) SetErrorAndReset(err error) { - cs.mutex.Lock() - defer cs.mutex.Unlock() - cs.CPUPercentage = 0 - cs.Memory = 0 - cs.MemoryPercentage = 0 - cs.MemoryLimit = 0 - cs.NetworkRx = 0 - cs.NetworkTx = 0 - cs.BlockRead = 0 - cs.BlockWrite = 0 - cs.PidsCurrent = 0 - cs.err = err - cs.IsInvalid = true -} - -// SetError sets container statistics error -func (cs *ContainerStats) SetError(err error) { - cs.mutex.Lock() - defer cs.mutex.Unlock() - cs.err = err - if err != nil { - cs.IsInvalid = true - } -} - -// SetStatistics set the container statistics -func (cs *ContainerStats) SetStatistics(s StatsEntry) { - cs.mutex.Lock() - defer cs.mutex.Unlock() - s.Container = cs.Container - cs.StatsEntry = s -} - -// GetStatistics returns container statistics with other meta data such as the container name -func (cs *ContainerStats) GetStatistics() StatsEntry { - cs.mutex.Lock() - defer cs.mutex.Unlock() - return cs.StatsEntry -} - -// NewStatsFormat returns a format for rendering an CStatsContext -func NewStatsFormat(source, osType string) Format { - if source == TableFormatKey { - if osType == winOSType { - return Format(winDefaultStatsTableFormat) - } - return Format(defaultStatsTableFormat) - } - return Format(source) -} - -// NewContainerStats returns a new ContainerStats entity and sets in it the given name -func NewContainerStats(container, osType string) *ContainerStats { - return &ContainerStats{ - StatsEntry: StatsEntry{Container: container}, - } -} - -// ContainerStatsWrite renders the context for a list of containers statistics -func ContainerStatsWrite(ctx Context, containerStats []StatsEntry, osType string) error { - render := func(format func(subContext subContext) error) error { - for _, cstats := range containerStats { - containerStatsCtx := &containerStatsContext{ - s: cstats, - os: osType, - } - if err := format(containerStatsCtx); err != nil 
{ - return err - } - } - return nil - } - memUsage := memUseHeader - if osType == winOSType { - memUsage = winMemUseHeader - } - containerStatsCtx := containerStatsContext{} - containerStatsCtx.header = map[string]string{ - "Container": containerHeader, - "Name": nameHeader, - "ID": containerIDHeader, - "CPUPerc": cpuPercHeader, - "MemUsage": memUsage, - "MemPerc": memPercHeader, - "NetIO": netIOHeader, - "BlockIO": blockIOHeader, - "PIDs": pidsHeader, - } - containerStatsCtx.os = osType - return ctx.Write(&containerStatsCtx, render) -} - -type containerStatsContext struct { - HeaderContext - s StatsEntry - os string -} - -func (c *containerStatsContext) MarshalJSON() ([]byte, error) { - return marshalJSON(c) -} - -func (c *containerStatsContext) Container() string { - return c.s.Container -} - -func (c *containerStatsContext) Name() string { - if len(c.s.Name) > 1 { - return c.s.Name[1:] - } - return "--" -} - -func (c *containerStatsContext) ID() string { - return c.s.ID -} - -func (c *containerStatsContext) CPUPerc() string { - if c.s.IsInvalid { - return fmt.Sprintf("--") - } - return fmt.Sprintf("%.2f%%", c.s.CPUPercentage) -} - -func (c *containerStatsContext) MemUsage() string { - if c.s.IsInvalid { - return fmt.Sprintf("-- / --") - } - if c.os == winOSType { - return fmt.Sprintf("%s", units.BytesSize(c.s.Memory)) - } - return fmt.Sprintf("%s / %s", units.BytesSize(c.s.Memory), units.BytesSize(c.s.MemoryLimit)) -} - -func (c *containerStatsContext) MemPerc() string { - if c.s.IsInvalid || c.os == winOSType { - return fmt.Sprintf("--") - } - return fmt.Sprintf("%.2f%%", c.s.MemoryPercentage) -} - -func (c *containerStatsContext) NetIO() string { - if c.s.IsInvalid { - return fmt.Sprintf("--") - } - return fmt.Sprintf("%s / %s", units.HumanSizeWithPrecision(c.s.NetworkRx, 3), units.HumanSizeWithPrecision(c.s.NetworkTx, 3)) -} - -func (c *containerStatsContext) BlockIO() string { - if c.s.IsInvalid { - return fmt.Sprintf("--") - } - return fmt.Sprintf("%s / 
%s", units.HumanSizeWithPrecision(c.s.BlockRead, 3), units.HumanSizeWithPrecision(c.s.BlockWrite, 3)) -} - -func (c *containerStatsContext) PIDs() string { - if c.s.IsInvalid || c.os == winOSType { - return fmt.Sprintf("--") - } - return fmt.Sprintf("%d", c.s.PidsCurrent) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/stats_test.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/stats_test.go deleted file mode 100644 index 5d6a91e7c..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/stats_test.go +++ /dev/null @@ -1,266 +0,0 @@ -package formatter - -import ( - "bytes" - "testing" - - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestContainerStatsContext(t *testing.T) { - containerID := stringid.GenerateRandomID() - - var ctx containerStatsContext - tt := []struct { - stats StatsEntry - osType string - expValue string - expHeader string - call func() string - }{ - {StatsEntry{Container: containerID}, "", containerID, containerHeader, ctx.Container}, - {StatsEntry{CPUPercentage: 5.5}, "", "5.50%", cpuPercHeader, ctx.CPUPerc}, - {StatsEntry{CPUPercentage: 5.5, IsInvalid: true}, "", "--", cpuPercHeader, ctx.CPUPerc}, - {StatsEntry{NetworkRx: 0.31, NetworkTx: 12.3}, "", "0.31B / 12.3B", netIOHeader, ctx.NetIO}, - {StatsEntry{NetworkRx: 0.31, NetworkTx: 12.3, IsInvalid: true}, "", "--", netIOHeader, ctx.NetIO}, - {StatsEntry{BlockRead: 0.1, BlockWrite: 2.3}, "", "0.1B / 2.3B", blockIOHeader, ctx.BlockIO}, - {StatsEntry{BlockRead: 0.1, BlockWrite: 2.3, IsInvalid: true}, "", "--", blockIOHeader, ctx.BlockIO}, - {StatsEntry{MemoryPercentage: 10.2}, "", "10.20%", memPercHeader, ctx.MemPerc}, - {StatsEntry{MemoryPercentage: 10.2, IsInvalid: true}, "", "--", memPercHeader, ctx.MemPerc}, - {StatsEntry{MemoryPercentage: 10.2}, "windows", "--", memPercHeader, ctx.MemPerc}, - {StatsEntry{Memory: 24, MemoryLimit: 30}, "", "24B / 30B", memUseHeader, ctx.MemUsage}, - 
{StatsEntry{Memory: 24, MemoryLimit: 30, IsInvalid: true}, "", "-- / --", memUseHeader, ctx.MemUsage}, - {StatsEntry{Memory: 24, MemoryLimit: 30}, "windows", "24B", winMemUseHeader, ctx.MemUsage}, - {StatsEntry{PidsCurrent: 10}, "", "10", pidsHeader, ctx.PIDs}, - {StatsEntry{PidsCurrent: 10, IsInvalid: true}, "", "--", pidsHeader, ctx.PIDs}, - {StatsEntry{PidsCurrent: 10}, "windows", "--", pidsHeader, ctx.PIDs}, - } - - for _, te := range tt { - ctx = containerStatsContext{s: te.stats, os: te.osType} - if v := te.call(); v != te.expValue { - t.Fatalf("Expected %q, got %q", te.expValue, v) - } - } -} - -func TestContainerStatsContextWrite(t *testing.T) { - tt := []struct { - context Context - expected string - }{ - { - Context{Format: "{{InvalidFunction}}"}, - `Template parsing error: template: :1: function "InvalidFunction" not defined -`, - }, - { - Context{Format: "{{nil}}"}, - `Template parsing error: template: :1:2: executing "" at : nil is not a command -`, - }, - { - Context{Format: "table {{.MemUsage}}"}, - `MEM USAGE / LIMIT -20B / 20B --- / -- -`, - }, - { - Context{Format: "{{.Container}} {{.ID}} {{.Name}}"}, - `container1 abcdef foo -container2 -- -`, - }, - { - Context{Format: "{{.Container}} {{.CPUPerc}}"}, - `container1 20.00% -container2 -- -`, - }, - } - - for _, te := range tt { - stats := []StatsEntry{ - { - Container: "container1", - ID: "abcdef", - Name: "/foo", - CPUPercentage: 20, - Memory: 20, - MemoryLimit: 20, - MemoryPercentage: 20, - NetworkRx: 20, - NetworkTx: 20, - BlockRead: 20, - BlockWrite: 20, - PidsCurrent: 2, - IsInvalid: false, - }, - { - Container: "container2", - CPUPercentage: 30, - Memory: 30, - MemoryLimit: 30, - MemoryPercentage: 30, - NetworkRx: 30, - NetworkTx: 30, - BlockRead: 30, - BlockWrite: 30, - PidsCurrent: 3, - IsInvalid: true, - }, - } - var out bytes.Buffer - te.context.Output = &out - err := ContainerStatsWrite(te.context, stats, "linux") - if err != nil { - assert.Error(t, err, te.expected) - } else { - 
assert.Equal(t, out.String(), te.expected) - } - } -} - -func TestContainerStatsContextWriteWindows(t *testing.T) { - tt := []struct { - context Context - expected string - }{ - { - Context{Format: "table {{.MemUsage}}"}, - `PRIV WORKING SET -20B --- / -- -`, - }, - { - Context{Format: "{{.Container}} {{.CPUPerc}}"}, - `container1 20.00% -container2 -- -`, - }, - { - Context{Format: "{{.Container}} {{.MemPerc}} {{.PIDs}}"}, - `container1 -- -- -container2 -- -- -`, - }, - } - - for _, te := range tt { - stats := []StatsEntry{ - { - Container: "container1", - CPUPercentage: 20, - Memory: 20, - MemoryLimit: 20, - MemoryPercentage: 20, - NetworkRx: 20, - NetworkTx: 20, - BlockRead: 20, - BlockWrite: 20, - PidsCurrent: 2, - IsInvalid: false, - }, - { - Container: "container2", - CPUPercentage: 30, - Memory: 30, - MemoryLimit: 30, - MemoryPercentage: 30, - NetworkRx: 30, - NetworkTx: 30, - BlockRead: 30, - BlockWrite: 30, - PidsCurrent: 3, - IsInvalid: true, - }, - } - var out bytes.Buffer - te.context.Output = &out - err := ContainerStatsWrite(te.context, stats, "windows") - if err != nil { - assert.Error(t, err, te.expected) - } else { - assert.Equal(t, out.String(), te.expected) - } - } -} - -func TestContainerStatsContextWriteWithNoStats(t *testing.T) { - var out bytes.Buffer - - contexts := []struct { - context Context - expected string - }{ - { - Context{ - Format: "{{.Container}}", - Output: &out, - }, - "", - }, - { - Context{ - Format: "table {{.Container}}", - Output: &out, - }, - "CONTAINER\n", - }, - { - Context{ - Format: "table {{.Container}}\t{{.CPUPerc}}", - Output: &out, - }, - "CONTAINER CPU %\n", - }, - } - - for _, context := range contexts { - ContainerStatsWrite(context.context, []StatsEntry{}, "linux") - assert.Equal(t, context.expected, out.String()) - // Clean buffer - out.Reset() - } -} - -func TestContainerStatsContextWriteWithNoStatsWindows(t *testing.T) { - var out bytes.Buffer - - contexts := []struct { - context Context - expected string - 
}{ - { - Context{ - Format: "{{.Container}}", - Output: &out, - }, - "", - }, - { - Context{ - Format: "table {{.Container}}\t{{.MemUsage}}", - Output: &out, - }, - "CONTAINER PRIV WORKING SET\n", - }, - { - Context{ - Format: "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}", - Output: &out, - }, - "CONTAINER CPU % PRIV WORKING SET\n", - }, - } - - for _, context := range contexts { - ContainerStatsWrite(context.context, []StatsEntry{}, "windows") - assert.Equal(t, out.String(), context.expected) - // Clean buffer - out.Reset() - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/task.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/task.go deleted file mode 100644 index 2c6e7bb12..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/task.go +++ /dev/null @@ -1,150 +0,0 @@ -package formatter - -import ( - "fmt" - "strings" - "time" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/go-units" -) - -const ( - defaultTaskTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Image}}\t{{.Node}}\t{{.DesiredState}}\t{{.CurrentState}}\t{{.Error}}\t{{.Ports}}" - - nodeHeader = "NODE" - taskIDHeader = "ID" - desiredStateHeader = "DESIRED STATE" - currentStateHeader = "CURRENT STATE" - errorHeader = "ERROR" - - maxErrLength = 30 -) - -// NewTaskFormat returns a Format for rendering using a task Context -func NewTaskFormat(source string, quiet bool) Format { - switch source { - case TableFormatKey: - if quiet { - return defaultQuietFormat - } - return defaultTaskTableFormat - case RawFormatKey: - if quiet { - return `id: {{.ID}}` - } - return `id: {{.ID}}\nname: {{.Name}}\nimage: {{.Image}}\nnode: {{.Node}}\ndesired_state: {{.DesiredState}}\ncurrent_state: {{.CurrentState}}\nerror: {{.Error}}\nports: {{.Ports}}\n` - } - return Format(source) -} - -// TaskWrite writes the context -func 
TaskWrite(ctx Context, tasks []swarm.Task, names map[string]string, nodes map[string]string) error { - render := func(format func(subContext subContext) error) error { - for _, task := range tasks { - taskCtx := &taskContext{trunc: ctx.Trunc, task: task, name: names[task.ID], node: nodes[task.ID]} - if err := format(taskCtx); err != nil { - return err - } - } - return nil - } - taskCtx := taskContext{} - taskCtx.header = taskHeaderContext{ - "ID": taskIDHeader, - "Name": nameHeader, - "Image": imageHeader, - "Node": nodeHeader, - "DesiredState": desiredStateHeader, - "CurrentState": currentStateHeader, - "Error": errorHeader, - "Ports": portsHeader, - } - return ctx.Write(&taskCtx, render) -} - -type taskHeaderContext map[string]string - -type taskContext struct { - HeaderContext - trunc bool - task swarm.Task - name string - node string -} - -func (c *taskContext) MarshalJSON() ([]byte, error) { - return marshalJSON(c) -} - -func (c *taskContext) ID() string { - if c.trunc { - return stringid.TruncateID(c.task.ID) - } - return c.task.ID -} - -func (c *taskContext) Name() string { - return c.name -} - -func (c *taskContext) Image() string { - image := c.task.Spec.ContainerSpec.Image - if c.trunc { - ref, err := reference.ParseNormalizedNamed(image) - if err == nil { - // update image string for display, (strips any digest) - if nt, ok := ref.(reference.NamedTagged); ok { - if namedTagged, err := reference.WithTag(reference.TrimNamed(nt), nt.Tag()); err == nil { - image = reference.FamiliarString(namedTagged) - } - } - } - } - return image -} - -func (c *taskContext) Node() string { - return c.node -} - -func (c *taskContext) DesiredState() string { - return command.PrettyPrint(c.task.DesiredState) -} - -func (c *taskContext) CurrentState() string { - return fmt.Sprintf("%s %s ago", - command.PrettyPrint(c.task.Status.State), - strings.ToLower(units.HumanDuration(time.Since(c.task.Status.Timestamp))), - ) -} - -func (c *taskContext) Error() string { - // Trim and 
quote the error message. - taskErr := c.task.Status.Err - if c.trunc && len(taskErr) > maxErrLength { - taskErr = fmt.Sprintf("%s…", taskErr[:maxErrLength-1]) - } - if len(taskErr) > 0 { - taskErr = fmt.Sprintf("\"%s\"", taskErr) - } - return taskErr -} - -func (c *taskContext) Ports() string { - if len(c.task.Status.PortStatus.Ports) == 0 { - return "" - } - ports := []string{} - for _, pConfig := range c.task.Status.PortStatus.Ports { - ports = append(ports, fmt.Sprintf("*:%d->%d/%s", - pConfig.PublishedPort, - pConfig.TargetPort, - pConfig.Protocol, - )) - } - return strings.Join(ports, ",") -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/task_test.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/task_test.go deleted file mode 100644 index 8de9d66f5..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/task_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package formatter - -import ( - "bytes" - "encoding/json" - "strings" - "testing" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestTaskContextWrite(t *testing.T) { - cases := []struct { - context Context - expected string - }{ - { - Context{Format: "{{InvalidFunction}}"}, - `Template parsing error: template: :1: function "InvalidFunction" not defined -`, - }, - { - Context{Format: "{{nil}}"}, - `Template parsing error: template: :1:2: executing "" at : nil is not a command -`, - }, - { - Context{Format: NewTaskFormat("table", true)}, - `taskID1 -taskID2 -`, - }, - { - Context{Format: NewTaskFormat("table {{.Name}}\t{{.Node}}\t{{.Ports}}", false)}, - `NAME NODE PORTS -foobar_baz foo1 -foobar_bar foo2 -`, - }, - { - Context{Format: NewTaskFormat("table {{.Name}}", true)}, - `NAME -foobar_baz -foobar_bar -`, - }, - { - Context{Format: NewTaskFormat("raw", true)}, - `id: taskID1 -id: taskID2 -`, - }, - { - Context{Format: NewTaskFormat("{{.Name}} {{.Node}}", false)}, - `foobar_baz foo1 -foobar_bar foo2 
-`, - }, - } - - for _, testcase := range cases { - tasks := []swarm.Task{ - {ID: "taskID1"}, - {ID: "taskID2"}, - } - names := map[string]string{ - "taskID1": "foobar_baz", - "taskID2": "foobar_bar", - } - nodes := map[string]string{ - "taskID1": "foo1", - "taskID2": "foo2", - } - out := bytes.NewBufferString("") - testcase.context.Output = out - err := TaskWrite(testcase.context, tasks, names, nodes) - if err != nil { - assert.Error(t, err, testcase.expected) - } else { - assert.Equal(t, out.String(), testcase.expected) - } - } -} - -func TestTaskContextWriteJSONField(t *testing.T) { - tasks := []swarm.Task{ - {ID: "taskID1"}, - {ID: "taskID2"}, - } - names := map[string]string{ - "taskID1": "foobar_baz", - "taskID2": "foobar_bar", - } - out := bytes.NewBufferString("") - err := TaskWrite(Context{Format: "{{json .ID}}", Output: out}, tasks, names, map[string]string{}) - if err != nil { - t.Fatal(err) - } - for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { - var s string - if err := json.Unmarshal([]byte(line), &s); err != nil { - t.Fatal(err) - } - assert.Equal(t, s, tasks[i].ID) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/volume.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/volume.go deleted file mode 100644 index 342f2fb93..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/volume.go +++ /dev/null @@ -1,131 +0,0 @@ -package formatter - -import ( - "fmt" - "strings" - - "github.com/docker/docker/api/types" - units "github.com/docker/go-units" -) - -const ( - defaultVolumeQuietFormat = "{{.Name}}" - defaultVolumeTableFormat = "table {{.Driver}}\t{{.Name}}" - - volumeNameHeader = "VOLUME NAME" - mountpointHeader = "MOUNTPOINT" - linksHeader = "LINKS" - // Status header ? 
-) - -// NewVolumeFormat returns a format for use with a volume Context -func NewVolumeFormat(source string, quiet bool) Format { - switch source { - case TableFormatKey: - if quiet { - return defaultVolumeQuietFormat - } - return defaultVolumeTableFormat - case RawFormatKey: - if quiet { - return `name: {{.Name}}` - } - return `name: {{.Name}}\ndriver: {{.Driver}}\n` - } - return Format(source) -} - -// VolumeWrite writes formatted volumes using the Context -func VolumeWrite(ctx Context, volumes []*types.Volume) error { - render := func(format func(subContext subContext) error) error { - for _, volume := range volumes { - if err := format(&volumeContext{v: *volume}); err != nil { - return err - } - } - return nil - } - return ctx.Write(newVolumeContext(), render) -} - -type volumeHeaderContext map[string]string - -func (c volumeHeaderContext) Label(name string) string { - n := strings.Split(name, ".") - r := strings.NewReplacer("-", " ", "_", " ") - h := r.Replace(n[len(n)-1]) - - return h -} - -type volumeContext struct { - HeaderContext - v types.Volume -} - -func newVolumeContext() *volumeContext { - volumeCtx := volumeContext{} - volumeCtx.header = volumeHeaderContext{ - "Name": volumeNameHeader, - "Driver": driverHeader, - "Scope": scopeHeader, - "Mountpoint": mountpointHeader, - "Labels": labelsHeader, - "Links": linksHeader, - "Size": sizeHeader, - } - return &volumeCtx -} - -func (c *volumeContext) MarshalJSON() ([]byte, error) { - return marshalJSON(c) -} - -func (c *volumeContext) Name() string { - return c.v.Name -} - -func (c *volumeContext) Driver() string { - return c.v.Driver -} - -func (c *volumeContext) Scope() string { - return c.v.Scope -} - -func (c *volumeContext) Mountpoint() string { - return c.v.Mountpoint -} - -func (c *volumeContext) Labels() string { - if c.v.Labels == nil { - return "" - } - - var joinLabels []string - for k, v := range c.v.Labels { - joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) - } - return 
strings.Join(joinLabels, ",") -} - -func (c *volumeContext) Label(name string) string { - if c.v.Labels == nil { - return "" - } - return c.v.Labels[name] -} - -func (c *volumeContext) Links() string { - if c.v.UsageData == nil { - return "N/A" - } - return fmt.Sprintf("%d", c.v.UsageData.RefCount) -} - -func (c *volumeContext) Size() string { - if c.v.UsageData == nil { - return "N/A" - } - return units.HumanSize(float64(c.v.UsageData.Size)) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/formatter/volume_test.go b/fn/vendor/github.com/docker/docker/cli/command/formatter/volume_test.go deleted file mode 100644 index 9c23ae447..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/formatter/volume_test.go +++ /dev/null @@ -1,183 +0,0 @@ -package formatter - -import ( - "bytes" - "encoding/json" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestVolumeContext(t *testing.T) { - volumeName := stringid.GenerateRandomID() - - var ctx volumeContext - cases := []struct { - volumeCtx volumeContext - expValue string - call func() string - }{ - {volumeContext{ - v: types.Volume{Name: volumeName}, - }, volumeName, ctx.Name}, - {volumeContext{ - v: types.Volume{Driver: "driver_name"}, - }, "driver_name", ctx.Driver}, - {volumeContext{ - v: types.Volume{Scope: "local"}, - }, "local", ctx.Scope}, - {volumeContext{ - v: types.Volume{Mountpoint: "mountpoint"}, - }, "mountpoint", ctx.Mountpoint}, - {volumeContext{ - v: types.Volume{}, - }, "", ctx.Labels}, - {volumeContext{ - v: types.Volume{Labels: map[string]string{"label1": "value1", "label2": "value2"}}, - }, "label1=value1,label2=value2", ctx.Labels}, - } - - for _, c := range cases { - ctx = c.volumeCtx - v := c.call() - if strings.Contains(v, ",") { - compareMultipleValues(t, v, c.expValue) - } else if v != c.expValue { - t.Fatalf("Expected %s, was %s\n", c.expValue, v) - } - } -} - 
-func TestVolumeContextWrite(t *testing.T) { - cases := []struct { - context Context - expected string - }{ - - // Errors - { - Context{Format: "{{InvalidFunction}}"}, - `Template parsing error: template: :1: function "InvalidFunction" not defined -`, - }, - { - Context{Format: "{{nil}}"}, - `Template parsing error: template: :1:2: executing "" at : nil is not a command -`, - }, - // Table format - { - Context{Format: NewVolumeFormat("table", false)}, - `DRIVER VOLUME NAME -foo foobar_baz -bar foobar_bar -`, - }, - { - Context{Format: NewVolumeFormat("table", true)}, - `foobar_baz -foobar_bar -`, - }, - { - Context{Format: NewVolumeFormat("table {{.Name}}", false)}, - `VOLUME NAME -foobar_baz -foobar_bar -`, - }, - { - Context{Format: NewVolumeFormat("table {{.Name}}", true)}, - `VOLUME NAME -foobar_baz -foobar_bar -`, - }, - // Raw Format - { - Context{Format: NewVolumeFormat("raw", false)}, - `name: foobar_baz -driver: foo - -name: foobar_bar -driver: bar - -`, - }, - { - Context{Format: NewVolumeFormat("raw", true)}, - `name: foobar_baz -name: foobar_bar -`, - }, - // Custom Format - { - Context{Format: NewVolumeFormat("{{.Name}}", false)}, - `foobar_baz -foobar_bar -`, - }, - } - - for _, testcase := range cases { - volumes := []*types.Volume{ - {Name: "foobar_baz", Driver: "foo"}, - {Name: "foobar_bar", Driver: "bar"}, - } - out := bytes.NewBufferString("") - testcase.context.Output = out - err := VolumeWrite(testcase.context, volumes) - if err != nil { - assert.Error(t, err, testcase.expected) - } else { - assert.Equal(t, out.String(), testcase.expected) - } - } -} - -func TestVolumeContextWriteJSON(t *testing.T) { - volumes := []*types.Volume{ - {Driver: "foo", Name: "foobar_baz"}, - {Driver: "bar", Name: "foobar_bar"}, - } - expectedJSONs := []map[string]interface{}{ - {"Driver": "foo", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_baz", "Scope": "", "Size": "N/A"}, - {"Driver": "bar", "Labels": "", "Links": "N/A", "Mountpoint": "", 
"Name": "foobar_bar", "Scope": "", "Size": "N/A"}, - } - out := bytes.NewBufferString("") - err := VolumeWrite(Context{Format: "{{json .}}", Output: out}, volumes) - if err != nil { - t.Fatal(err) - } - for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { - t.Logf("Output: line %d: %s", i, line) - var m map[string]interface{} - if err := json.Unmarshal([]byte(line), &m); err != nil { - t.Fatal(err) - } - assert.DeepEqual(t, m, expectedJSONs[i]) - } -} - -func TestVolumeContextWriteJSONField(t *testing.T) { - volumes := []*types.Volume{ - {Driver: "foo", Name: "foobar_baz"}, - {Driver: "bar", Name: "foobar_bar"}, - } - out := bytes.NewBufferString("") - err := VolumeWrite(Context{Format: "{{json .Name}}", Output: out}, volumes) - if err != nil { - t.Fatal(err) - } - for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { - t.Logf("Output: line %d: %s", i, line) - var s string - if err := json.Unmarshal([]byte(line), &s); err != nil { - t.Fatal(err) - } - assert.Equal(t, s, volumes[i].Name) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/idresolver/client_test.go b/fn/vendor/github.com/docker/docker/cli/command/idresolver/client_test.go deleted file mode 100644 index f84683b90..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/idresolver/client_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package idresolver - -import ( - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/client" - "golang.org/x/net/context" -) - -type fakeClient struct { - client.Client - nodeInspectFunc func(string) (swarm.Node, []byte, error) - serviceInspectFunc func(string) (swarm.Service, []byte, error) -} - -func (cli *fakeClient) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { - if cli.nodeInspectFunc != nil { - return cli.nodeInspectFunc(nodeID) - } - return swarm.Node{}, []byte{}, nil -} - -func (cli *fakeClient) 
ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) { - if cli.serviceInspectFunc != nil { - return cli.serviceInspectFunc(serviceID) - } - return swarm.Service{}, []byte{}, nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/idresolver/idresolver.go b/fn/vendor/github.com/docker/docker/cli/command/idresolver/idresolver.go deleted file mode 100644 index 6088b64b5..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/idresolver/idresolver.go +++ /dev/null @@ -1,70 +0,0 @@ -package idresolver - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/client" - "github.com/pkg/errors" -) - -// IDResolver provides ID to Name resolution. -type IDResolver struct { - client client.APIClient - noResolve bool - cache map[string]string -} - -// New creates a new IDResolver. -func New(client client.APIClient, noResolve bool) *IDResolver { - return &IDResolver{ - client: client, - noResolve: noResolve, - cache: make(map[string]string), - } -} - -func (r *IDResolver) get(ctx context.Context, t interface{}, id string) (string, error) { - switch t.(type) { - case swarm.Node: - node, _, err := r.client.NodeInspectWithRaw(ctx, id) - if err != nil { - return id, nil - } - if node.Spec.Annotations.Name != "" { - return node.Spec.Annotations.Name, nil - } - if node.Description.Hostname != "" { - return node.Description.Hostname, nil - } - return id, nil - case swarm.Service: - service, _, err := r.client.ServiceInspectWithRaw(ctx, id, types.ServiceInspectOptions{}) - if err != nil { - return id, nil - } - return service.Spec.Annotations.Name, nil - default: - return "", errors.Errorf("unsupported type") - } - -} - -// Resolve will attempt to resolve an ID to a Name by querying the manager. -// Results are stored into a cache. 
-// If the `-n` flag is used in the command-line, resolution is disabled. -func (r *IDResolver) Resolve(ctx context.Context, t interface{}, id string) (string, error) { - if r.noResolve { - return id, nil - } - if name, ok := r.cache[id]; ok { - return name, nil - } - name, err := r.get(ctx, t, id) - if err != nil { - return "", err - } - r.cache[id] = name - return name, nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/idresolver/idresolver_test.go b/fn/vendor/github.com/docker/docker/cli/command/idresolver/idresolver_test.go deleted file mode 100644 index 720667daa..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/idresolver/idresolver_test.go +++ /dev/null @@ -1,144 +0,0 @@ -package idresolver - -import ( - "testing" - - "github.com/docker/docker/api/types/swarm" - // Import builders to get the builder function as package function - . "github.com/docker/docker/cli/internal/test/builders" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -func TestResolveError(t *testing.T) { - cli := &fakeClient{ - nodeInspectFunc: func(nodeID string) (swarm.Node, []byte, error) { - return swarm.Node{}, []byte{}, errors.Errorf("error inspecting node") - }, - } - - idResolver := New(cli, false) - _, err := idResolver.Resolve(context.Background(), struct{}{}, "nodeID") - - assert.Error(t, err, "unsupported type") -} - -func TestResolveWithNoResolveOption(t *testing.T) { - resolved := false - cli := &fakeClient{ - nodeInspectFunc: func(nodeID string) (swarm.Node, []byte, error) { - resolved = true - return swarm.Node{}, []byte{}, nil - }, - serviceInspectFunc: func(serviceID string) (swarm.Service, []byte, error) { - resolved = true - return swarm.Service{}, []byte{}, nil - }, - } - - idResolver := New(cli, true) - id, err := idResolver.Resolve(context.Background(), swarm.Node{}, "nodeID") - - assert.NilError(t, err) - assert.Equal(t, id, "nodeID") - assert.Equal(t, resolved, false) -} - 
-func TestResolveWithCache(t *testing.T) { - inspectCounter := 0 - cli := &fakeClient{ - nodeInspectFunc: func(nodeID string) (swarm.Node, []byte, error) { - inspectCounter++ - return *Node(NodeName("node-foo")), []byte{}, nil - }, - } - - idResolver := New(cli, false) - - ctx := context.Background() - for i := 0; i < 2; i++ { - id, err := idResolver.Resolve(ctx, swarm.Node{}, "nodeID") - assert.NilError(t, err) - assert.Equal(t, id, "node-foo") - } - - assert.Equal(t, inspectCounter, 1) -} - -func TestResolveNode(t *testing.T) { - testCases := []struct { - nodeID string - nodeInspectFunc func(string) (swarm.Node, []byte, error) - expectedID string - }{ - { - nodeID: "nodeID", - nodeInspectFunc: func(string) (swarm.Node, []byte, error) { - return swarm.Node{}, []byte{}, errors.Errorf("error inspecting node") - }, - expectedID: "nodeID", - }, - { - nodeID: "nodeID", - nodeInspectFunc: func(string) (swarm.Node, []byte, error) { - return *Node(NodeName("node-foo")), []byte{}, nil - }, - expectedID: "node-foo", - }, - { - nodeID: "nodeID", - nodeInspectFunc: func(string) (swarm.Node, []byte, error) { - return *Node(NodeName(""), Hostname("node-hostname")), []byte{}, nil - }, - expectedID: "node-hostname", - }, - } - - ctx := context.Background() - for _, tc := range testCases { - cli := &fakeClient{ - nodeInspectFunc: tc.nodeInspectFunc, - } - idResolver := New(cli, false) - id, err := idResolver.Resolve(ctx, swarm.Node{}, tc.nodeID) - - assert.NilError(t, err) - assert.Equal(t, id, tc.expectedID) - } -} - -func TestResolveService(t *testing.T) { - testCases := []struct { - serviceID string - serviceInspectFunc func(string) (swarm.Service, []byte, error) - expectedID string - }{ - { - serviceID: "serviceID", - serviceInspectFunc: func(string) (swarm.Service, []byte, error) { - return swarm.Service{}, []byte{}, errors.Errorf("error inspecting service") - }, - expectedID: "serviceID", - }, - { - serviceID: "serviceID", - serviceInspectFunc: func(string) (swarm.Service, 
[]byte, error) { - return *Service(ServiceName("service-foo")), []byte{}, nil - }, - expectedID: "service-foo", - }, - } - - ctx := context.Background() - for _, tc := range testCases { - cli := &fakeClient{ - serviceInspectFunc: tc.serviceInspectFunc, - } - idResolver := New(cli, false) - id, err := idResolver.Resolve(ctx, swarm.Service{}, tc.serviceID) - - assert.NilError(t, err) - assert.Equal(t, id, tc.expectedID) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/image/build.go b/fn/vendor/github.com/docker/docker/cli/command/image/build.go deleted file mode 100644 index 27fe83c52..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/image/build.go +++ /dev/null @@ -1,530 +0,0 @@ -package image - -import ( - "archive/tar" - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "runtime" - "time" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/builder/dockerignore" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/image/build" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/urlutil" - runconfigopts "github.com/docker/docker/runconfig/opts" - units "github.com/docker/go-units" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type buildOptions struct { - context string - dockerfileName string - tags opts.ListOpts - labels opts.ListOpts - buildArgs opts.ListOpts - extraHosts opts.ListOpts - ulimits *opts.UlimitOpt - memory opts.MemBytes - memorySwap opts.MemSwapBytes - shmSize 
opts.MemBytes - cpuShares int64 - cpuPeriod int64 - cpuQuota int64 - cpuSetCpus string - cpuSetMems string - cgroupParent string - isolation string - quiet bool - noCache bool - rm bool - forceRm bool - pull bool - cacheFrom []string - compress bool - securityOpt []string - networkMode string - squash bool - target string -} - -// NewBuildCommand creates a new `docker build` command -func NewBuildCommand(dockerCli *command.DockerCli) *cobra.Command { - ulimits := make(map[string]*units.Ulimit) - options := buildOptions{ - tags: opts.NewListOpts(validateTag), - buildArgs: opts.NewListOpts(opts.ValidateEnv), - ulimits: opts.NewUlimitOpt(&ulimits), - labels: opts.NewListOpts(opts.ValidateEnv), - extraHosts: opts.NewListOpts(opts.ValidateExtraHost), - } - - cmd := &cobra.Command{ - Use: "build [OPTIONS] PATH | URL | -", - Short: "Build an image from a Dockerfile", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - options.context = args[0] - return runBuild(dockerCli, options) - }, - } - - flags := cmd.Flags() - - flags.VarP(&options.tags, "tag", "t", "Name and optionally a tag in the 'name:tag' format") - flags.Var(&options.buildArgs, "build-arg", "Set build-time variables") - flags.Var(options.ulimits, "ulimit", "Ulimit options") - flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')") - flags.VarP(&options.memory, "memory", "m", "Memory limit") - flags.Var(&options.memorySwap, "memory-swap", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") - flags.Var(&options.shmSize, "shm-size", "Size of /dev/shm") - flags.Int64VarP(&options.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") - flags.Int64Var(&options.cpuPeriod, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period") - flags.Int64Var(&options.cpuQuota, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota") - flags.StringVar(&options.cpuSetCpus, 
"cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") - flags.StringVar(&options.cpuSetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") - flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") - flags.StringVar(&options.isolation, "isolation", "", "Container isolation technology") - flags.Var(&options.labels, "label", "Set metadata for an image") - flags.BoolVar(&options.noCache, "no-cache", false, "Do not use cache when building the image") - flags.BoolVar(&options.rm, "rm", true, "Remove intermediate containers after a successful build") - flags.BoolVar(&options.forceRm, "force-rm", false, "Always remove intermediate containers") - flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success") - flags.BoolVar(&options.pull, "pull", false, "Always attempt to pull a newer version of the image") - flags.StringSliceVar(&options.cacheFrom, "cache-from", []string{}, "Images to consider as cache sources") - flags.BoolVar(&options.compress, "compress", false, "Compress the build context using gzip") - flags.StringSliceVar(&options.securityOpt, "security-opt", []string{}, "Security options") - flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build") - flags.SetAnnotation("network", "version", []string{"1.25"}) - flags.Var(&options.extraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)") - flags.StringVar(&options.target, "target", "", "Set the target build stage to build.") - - command.AddTrustVerificationFlags(flags) - - flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer") - flags.SetAnnotation("squash", "experimental", nil) - flags.SetAnnotation("squash", "version", []string{"1.25"}) - - return cmd -} - -// lastProgressOutput is the same as progress.Output except -// that it only output with the last update. 
It is used in -// non terminal scenarios to suppress verbose messages -type lastProgressOutput struct { - output progress.Output -} - -// WriteProgress formats progress information from a ProgressReader. -func (out *lastProgressOutput) WriteProgress(prog progress.Progress) error { - if !prog.LastUpdate { - return nil - } - - return out.output.WriteProgress(prog) -} - -func runBuild(dockerCli *command.DockerCli, options buildOptions) error { - var ( - buildCtx io.ReadCloser - dockerfileCtx io.ReadCloser - err error - contextDir string - tempDir string - relDockerfile string - progBuff io.Writer - buildBuff io.Writer - ) - - specifiedContext := options.context - progBuff = dockerCli.Out() - buildBuff = dockerCli.Out() - if options.quiet { - progBuff = bytes.NewBuffer(nil) - buildBuff = bytes.NewBuffer(nil) - } - - if options.dockerfileName == "-" { - if specifiedContext == "-" { - return errors.New("invalid argument: can't use stdin for both build context and dockerfile") - } - dockerfileCtx = dockerCli.In() - } - - switch { - case specifiedContext == "-": - buildCtx, relDockerfile, err = build.GetContextFromReader(dockerCli.In(), options.dockerfileName) - case isLocalDir(specifiedContext): - contextDir, relDockerfile, err = build.GetContextFromLocalDir(specifiedContext, options.dockerfileName) - case urlutil.IsGitURL(specifiedContext): - tempDir, relDockerfile, err = build.GetContextFromGitURL(specifiedContext, options.dockerfileName) - case urlutil.IsURL(specifiedContext): - buildCtx, relDockerfile, err = build.GetContextFromURL(progBuff, specifiedContext, options.dockerfileName) - default: - return errors.Errorf("unable to prepare context: path %q not found", specifiedContext) - } - - if err != nil { - if options.quiet && urlutil.IsURL(specifiedContext) { - fmt.Fprintln(dockerCli.Err(), progBuff) - } - return errors.Errorf("unable to prepare context: %s", err) - } - - if tempDir != "" { - defer os.RemoveAll(tempDir) - contextDir = tempDir - } - - if buildCtx == 
nil { - // And canonicalize dockerfile name to a platform-independent one - relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile) - if err != nil { - return errors.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err) - } - - f, err := os.Open(filepath.Join(contextDir, ".dockerignore")) - if err != nil && !os.IsNotExist(err) { - return err - } - defer f.Close() - - var excludes []string - if err == nil { - excludes, err = dockerignore.ReadAll(f) - if err != nil { - return err - } - } - - if err := build.ValidateContextDirectory(contextDir, excludes); err != nil { - return errors.Errorf("Error checking context: '%s'.", err) - } - - // If .dockerignore mentions .dockerignore or the Dockerfile then make - // sure we send both files over to the daemon because Dockerfile is, - // obviously, needed no matter what, and .dockerignore is needed to know - // if either one needs to be removed. The daemon will remove them - // if necessary, after it parses the Dockerfile. Ignore errors here, as - // they will have been caught by validateContextDirectory above. - // Excludes are used instead of includes to maintain the order of files - // in the archive. 
- if keep, _ := fileutils.Matches(".dockerignore", excludes); keep { - excludes = append(excludes, "!.dockerignore") - } - if keep, _ := fileutils.Matches(relDockerfile, excludes); keep && dockerfileCtx == nil { - excludes = append(excludes, "!"+relDockerfile) - } - - compression := archive.Uncompressed - if options.compress { - compression = archive.Gzip - } - buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{ - Compression: compression, - ExcludePatterns: excludes, - }) - if err != nil { - return err - } - } - - // replace Dockerfile if added dynamically - if dockerfileCtx != nil { - buildCtx, relDockerfile, err = addDockerfileToBuildContext(dockerfileCtx, buildCtx) - if err != nil { - return err - } - } - - ctx := context.Background() - - var resolvedTags []*resolvedTag - if command.IsTrusted() { - translator := func(ctx context.Context, ref reference.NamedTagged) (reference.Canonical, error) { - return TrustedReference(ctx, dockerCli, ref, nil) - } - // Wrap the tar archive to replace the Dockerfile entry with the rewritten - // Dockerfile which uses trusted pulls. 
- buildCtx = replaceDockerfileTarWrapper(ctx, buildCtx, relDockerfile, translator, &resolvedTags) - } - - // Setup an upload progress bar - progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true) - if !dockerCli.Out().IsTerminal() { - progressOutput = &lastProgressOutput{output: progressOutput} - } - - var body io.Reader = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon") - - authConfigs, _ := dockerCli.GetAllCredentials() - buildOptions := types.ImageBuildOptions{ - Memory: options.memory.Value(), - MemorySwap: options.memorySwap.Value(), - Tags: options.tags.GetAll(), - SuppressOutput: options.quiet, - NoCache: options.noCache, - Remove: options.rm, - ForceRemove: options.forceRm, - PullParent: options.pull, - Isolation: container.Isolation(options.isolation), - CPUSetCPUs: options.cpuSetCpus, - CPUSetMems: options.cpuSetMems, - CPUShares: options.cpuShares, - CPUQuota: options.cpuQuota, - CPUPeriod: options.cpuPeriod, - CgroupParent: options.cgroupParent, - Dockerfile: relDockerfile, - ShmSize: options.shmSize.Value(), - Ulimits: options.ulimits.GetList(), - BuildArgs: runconfigopts.ConvertKVStringsToMapWithNil(options.buildArgs.GetAll()), - AuthConfigs: authConfigs, - Labels: runconfigopts.ConvertKVStringsToMap(options.labels.GetAll()), - CacheFrom: options.cacheFrom, - SecurityOpt: options.securityOpt, - NetworkMode: options.networkMode, - Squash: options.squash, - ExtraHosts: options.extraHosts.GetAll(), - Target: options.target, - } - - response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions) - if err != nil { - if options.quiet { - fmt.Fprintf(dockerCli.Err(), "%s", progBuff) - } - return err - } - defer response.Body.Close() - - err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), nil) - if err != nil { - if jerr, ok := err.(*jsonmessage.JSONError); ok { - // If no error code is set, 
default to 1 - if jerr.Code == 0 { - jerr.Code = 1 - } - if options.quiet { - fmt.Fprintf(dockerCli.Err(), "%s%s", progBuff, buildBuff) - } - return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code} - } - return err - } - - // Windows: show error message about modified file permissions if the - // daemon isn't running Windows. - if response.OSType != "windows" && runtime.GOOS == "windows" && !options.quiet { - fmt.Fprintln(dockerCli.Out(), "SECURITY WARNING: You are building a Docker "+ - "image from Windows against a non-Windows Docker host. All files and "+ - "directories added to build context will have '-rwxr-xr-x' permissions. "+ - "It is recommended to double check and reset permissions for sensitive "+ - "files and directories.") - } - - // Everything worked so if -q was provided the output from the daemon - // should be just the image ID and we'll print that to stdout. - if options.quiet { - fmt.Fprintf(dockerCli.Out(), "%s", buildBuff) - } - - if command.IsTrusted() { - // Since the build was successful, now we must tag any of the resolved - // images from the above Dockerfile rewrite. - for _, resolved := range resolvedTags { - if err := TagTrusted(ctx, dockerCli, resolved.digestRef, resolved.tagRef); err != nil { - return err - } - } - } - - return nil -} - -func addDockerfileToBuildContext(dockerfileCtx io.ReadCloser, buildCtx io.ReadCloser) (io.ReadCloser, string, error) { - file, err := ioutil.ReadAll(dockerfileCtx) - dockerfileCtx.Close() - if err != nil { - return nil, "", err - } - now := time.Now() - hdrTmpl := &tar.Header{ - Mode: 0600, - Uid: 0, - Gid: 0, - ModTime: now, - Typeflag: tar.TypeReg, - AccessTime: now, - ChangeTime: now, - } - randomName := ".dockerfile." 
+ stringid.GenerateRandomID()[:20] - - buildCtx = archive.ReplaceFileTarWrapper(buildCtx, map[string]archive.TarModifierFunc{ - // Add the dockerfile with a random filename - randomName: func(_ string, h *tar.Header, content io.Reader) (*tar.Header, []byte, error) { - return hdrTmpl, file, nil - }, - // Update .dockerignore to include the random filename - ".dockerignore": func(_ string, h *tar.Header, content io.Reader) (*tar.Header, []byte, error) { - if h == nil { - h = hdrTmpl - } - - b := &bytes.Buffer{} - if content != nil { - if _, err := b.ReadFrom(content); err != nil { - return nil, nil, err - } - } else { - b.WriteString(".dockerignore") - } - b.WriteString("\n" + randomName + "\n") - return h, b.Bytes(), nil - }, - }) - return buildCtx, randomName, nil -} - -func isLocalDir(c string) bool { - _, err := os.Stat(c) - return err == nil -} - -type translatorFunc func(context.Context, reference.NamedTagged) (reference.Canonical, error) - -// validateTag checks if the given image name can be resolved. -func validateTag(rawRepo string) (string, error) { - _, err := reference.ParseNormalizedNamed(rawRepo) - if err != nil { - return "", err - } - - return rawRepo, nil -} - -var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P[^ \f\r\t\v\n#]+)`) - -// resolvedTag records the repository, tag, and resolved digest reference -// from a Dockerfile rewrite. -type resolvedTag struct { - digestRef reference.Canonical - tagRef reference.NamedTagged -} - -// rewriteDockerfileFrom rewrites the given Dockerfile by resolving images in -// "FROM " instructions to a digest reference. `translator` is a -// function that takes a repository name and tag reference and returns a -// trusted digest reference. 
-func rewriteDockerfileFrom(ctx context.Context, dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) { - scanner := bufio.NewScanner(dockerfile) - buf := bytes.NewBuffer(nil) - - // Scan the lines of the Dockerfile, looking for a "FROM" line. - for scanner.Scan() { - line := scanner.Text() - - matches := dockerfileFromLinePattern.FindStringSubmatch(line) - if matches != nil && matches[1] != api.NoBaseImageSpecifier { - // Replace the line with a resolved "FROM repo@digest" - var ref reference.Named - ref, err = reference.ParseNormalizedNamed(matches[1]) - if err != nil { - return nil, nil, err - } - ref = reference.TagNameOnly(ref) - if ref, ok := ref.(reference.NamedTagged); ok && command.IsTrusted() { - trustedRef, err := translator(ctx, ref) - if err != nil { - return nil, nil, err - } - - line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", reference.FamiliarString(trustedRef))) - resolvedTags = append(resolvedTags, &resolvedTag{ - digestRef: trustedRef, - tagRef: ref, - }) - } - } - - _, err := fmt.Fprintln(buf, line) - if err != nil { - return nil, nil, err - } - } - - return buf.Bytes(), resolvedTags, scanner.Err() -} - -// replaceDockerfileTarWrapper wraps the given input tar archive stream and -// replaces the entry with the given Dockerfile name with the contents of the -// new Dockerfile. Returns a new tar archive stream with the replaced -// Dockerfile. -func replaceDockerfileTarWrapper(ctx context.Context, inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser { - pipeReader, pipeWriter := io.Pipe() - go func() { - tarReader := tar.NewReader(inputTarStream) - tarWriter := tar.NewWriter(pipeWriter) - - defer inputTarStream.Close() - - for { - hdr, err := tarReader.Next() - if err == io.EOF { - // Signals end of archive. 
- tarWriter.Close() - pipeWriter.Close() - return - } - if err != nil { - pipeWriter.CloseWithError(err) - return - } - - content := io.Reader(tarReader) - if hdr.Name == dockerfileName { - // This entry is the Dockerfile. Since the tar archive was - // generated from a directory on the local filesystem, the - // Dockerfile will only appear once in the archive. - var newDockerfile []byte - newDockerfile, *resolvedTags, err = rewriteDockerfileFrom(ctx, content, translator) - if err != nil { - pipeWriter.CloseWithError(err) - return - } - hdr.Size = int64(len(newDockerfile)) - content = bytes.NewBuffer(newDockerfile) - } - - if err := tarWriter.WriteHeader(hdr); err != nil { - pipeWriter.CloseWithError(err) - return - } - - if _, err := io.Copy(tarWriter, content); err != nil { - pipeWriter.CloseWithError(err) - return - } - } - }() - - return pipeReader -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/image/build/context.go b/fn/vendor/github.com/docker/docker/cli/command/image/build/context.go deleted file mode 100644 index 348c72193..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/image/build/context.go +++ /dev/null @@ -1,275 +0,0 @@ -package build - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/gitutils" - "github.com/docker/docker/pkg/httputils" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" - "github.com/pkg/errors" -) - -const ( - // DefaultDockerfileName is the Default filename with Docker commands, read by docker build - DefaultDockerfileName string = "Dockerfile" -) - -// ValidateContextDirectory checks if all the contents of the directory -// can be read and returns an error if some files can't be read -// symlinks which point to non-existing files don't trigger an 
error -func ValidateContextDirectory(srcPath string, excludes []string) error { - contextRoot, err := getContextRoot(srcPath) - if err != nil { - return err - } - return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error { - if err != nil { - if os.IsPermission(err) { - return errors.Errorf("can't stat '%s'", filePath) - } - if os.IsNotExist(err) { - return nil - } - return err - } - - // skip this directory/file if it's not in the path, it won't get added to the context - if relFilePath, err := filepath.Rel(contextRoot, filePath); err != nil { - return err - } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil { - return err - } else if skip { - if f.IsDir() { - return filepath.SkipDir - } - return nil - } - - // skip checking if symlinks point to non-existing files, such symlinks can be useful - // also skip named pipes, because they hanging on open - if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { - return nil - } - - if !f.IsDir() { - currentFile, err := os.Open(filePath) - if err != nil && os.IsPermission(err) { - return errors.Errorf("no permission to read from '%s'", filePath) - } - currentFile.Close() - } - return nil - }) -} - -// GetContextFromReader will read the contents of the given reader as either a -// Dockerfile or tar archive. Returns a tar archive used as a context and a -// path to the Dockerfile inside the tar. 
-func GetContextFromReader(r io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) { - buf := bufio.NewReader(r) - - magic, err := buf.Peek(archive.HeaderSize) - if err != nil && err != io.EOF { - return nil, "", errors.Errorf("failed to peek context header from STDIN: %v", err) - } - - if archive.IsArchive(magic) { - return ioutils.NewReadCloserWrapper(buf, func() error { return r.Close() }), dockerfileName, nil - } - - if dockerfileName == "-" { - return nil, "", errors.New("build context is not an archive") - } - - // Input should be read as a Dockerfile. - tmpDir, err := ioutil.TempDir("", "docker-build-context-") - if err != nil { - return nil, "", errors.Errorf("unable to create temporary context directory: %v", err) - } - - f, err := os.Create(filepath.Join(tmpDir, DefaultDockerfileName)) - if err != nil { - return nil, "", err - } - _, err = io.Copy(f, buf) - if err != nil { - f.Close() - return nil, "", err - } - - if err := f.Close(); err != nil { - return nil, "", err - } - if err := r.Close(); err != nil { - return nil, "", err - } - - tar, err := archive.Tar(tmpDir, archive.Uncompressed) - if err != nil { - return nil, "", err - } - - return ioutils.NewReadCloserWrapper(tar, func() error { - err := tar.Close() - os.RemoveAll(tmpDir) - return err - }), DefaultDockerfileName, nil - -} - -// GetContextFromGitURL uses a Git URL as context for a `docker build`. The -// git repo is cloned into a temporary directory used as the context directory. -// Returns the absolute path to the temporary context directory, the relative -// path of the dockerfile in that context directory, and a non-nil error on -// success. 
-func GetContextFromGitURL(gitURL, dockerfileName string) (absContextDir, relDockerfile string, err error) { - if _, err := exec.LookPath("git"); err != nil { - return "", "", errors.Errorf("unable to find 'git': %v", err) - } - if absContextDir, err = gitutils.Clone(gitURL); err != nil { - return "", "", errors.Errorf("unable to 'git clone' to temporary context directory: %v", err) - } - - return getDockerfileRelPath(absContextDir, dockerfileName) -} - -// GetContextFromURL uses a remote URL as context for a `docker build`. The -// remote resource is downloaded as either a Dockerfile or a tar archive. -// Returns the tar archive used for the context and a path of the -// dockerfile inside the tar. -func GetContextFromURL(out io.Writer, remoteURL, dockerfileName string) (io.ReadCloser, string, error) { - response, err := httputils.Download(remoteURL) - if err != nil { - return nil, "", errors.Errorf("unable to download remote context %s: %v", remoteURL, err) - } - progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(out, true) - - // Pass the response body through a progress reader. - progReader := progress.NewProgressReader(response.Body, progressOutput, response.ContentLength, "", fmt.Sprintf("Downloading build context from remote url: %s", remoteURL)) - - return GetContextFromReader(ioutils.NewReadCloserWrapper(progReader, func() error { return response.Body.Close() }), dockerfileName) -} - -// GetContextFromLocalDir uses the given local directory as context for a -// `docker build`. Returns the absolute path to the local context directory, -// the relative path of the dockerfile in that context directory, and a non-nil -// error on success. 
-func GetContextFromLocalDir(localDir, dockerfileName string) (absContextDir, relDockerfile string, err error) { - // When using a local context directory, when the Dockerfile is specified - // with the `-f/--file` option then it is considered relative to the - // current directory and not the context directory. - if dockerfileName != "" && dockerfileName != "-" { - if dockerfileName, err = filepath.Abs(dockerfileName); err != nil { - return "", "", errors.Errorf("unable to get absolute path to Dockerfile: %v", err) - } - } - - return getDockerfileRelPath(localDir, dockerfileName) -} - -// getDockerfileRelPath uses the given context directory for a `docker build` -// and returns the absolute path to the context directory, the relative path of -// the dockerfile in that context directory, and a non-nil error on success. -func getDockerfileRelPath(givenContextDir, givenDockerfile string) (absContextDir, relDockerfile string, err error) { - if absContextDir, err = filepath.Abs(givenContextDir); err != nil { - return "", "", errors.Errorf("unable to get absolute context directory of given context directory %q: %v", givenContextDir, err) - } - - // The context dir might be a symbolic link, so follow it to the actual - // target directory. - // - // FIXME. We use isUNC (always false on non-Windows platforms) to workaround - // an issue in golang. On Windows, EvalSymLinks does not work on UNC file - // paths (those starting with \\). This hack means that when using links - // on UNC paths, they will not be followed. 
- if !isUNC(absContextDir) { - absContextDir, err = filepath.EvalSymlinks(absContextDir) - if err != nil { - return "", "", errors.Errorf("unable to evaluate symlinks in context path: %v", err) - } - } - - stat, err := os.Lstat(absContextDir) - if err != nil { - return "", "", errors.Errorf("unable to stat context directory %q: %v", absContextDir, err) - } - - if !stat.IsDir() { - return "", "", errors.Errorf("context must be a directory: %s", absContextDir) - } - - absDockerfile := givenDockerfile - if absDockerfile == "" { - // No -f/--file was specified so use the default relative to the - // context directory. - absDockerfile = filepath.Join(absContextDir, DefaultDockerfileName) - - // Just to be nice ;-) look for 'dockerfile' too but only - // use it if we found it, otherwise ignore this check - if _, err = os.Lstat(absDockerfile); os.IsNotExist(err) { - altPath := filepath.Join(absContextDir, strings.ToLower(DefaultDockerfileName)) - if _, err = os.Lstat(altPath); err == nil { - absDockerfile = altPath - } - } - } else if absDockerfile == "-" { - absDockerfile = filepath.Join(absContextDir, DefaultDockerfileName) - } - - // If not already an absolute path, the Dockerfile path should be joined to - // the base directory. - if !filepath.IsAbs(absDockerfile) { - absDockerfile = filepath.Join(absContextDir, absDockerfile) - } - - // Evaluate symlinks in the path to the Dockerfile too. - // - // FIXME. We use isUNC (always false on non-Windows platforms) to workaround - // an issue in golang. On Windows, EvalSymLinks does not work on UNC file - // paths (those starting with \\). This hack means that when using links - // on UNC paths, they will not be followed. 
- if givenDockerfile != "-" { - if !isUNC(absDockerfile) { - absDockerfile, err = filepath.EvalSymlinks(absDockerfile) - if err != nil { - return "", "", errors.Errorf("unable to evaluate symlinks in Dockerfile path: %v", err) - - } - } - - if _, err := os.Lstat(absDockerfile); err != nil { - if os.IsNotExist(err) { - return "", "", errors.Errorf("Cannot locate Dockerfile: %q", absDockerfile) - } - return "", "", errors.Errorf("unable to stat Dockerfile: %v", err) - } - } - - if relDockerfile, err = filepath.Rel(absContextDir, absDockerfile); err != nil { - return "", "", errors.Errorf("unable to get relative Dockerfile path: %v", err) - } - - if strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) { - return "", "", errors.Errorf("The Dockerfile (%s) must be within the build context (%s)", givenDockerfile, givenContextDir) - } - - return absContextDir, relDockerfile, nil -} - -// isUNC returns true if the path is UNC (one starting \\). It always returns -// false on Linux. 
-func isUNC(path string) bool { - return runtime.GOOS == "windows" && strings.HasPrefix(path, `\\`) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/image/build/context_test.go b/fn/vendor/github.com/docker/docker/cli/command/image/build/context_test.go deleted file mode 100644 index afa04a4fc..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/image/build/context_test.go +++ /dev/null @@ -1,383 +0,0 @@ -package build - -import ( - "archive/tar" - "bytes" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strings" - "testing" - - "github.com/docker/docker/pkg/archive" -) - -const dockerfileContents = "FROM busybox" - -var prepareEmpty = func(t *testing.T) (string, func()) { - return "", func() {} -} - -var prepareNoFiles = func(t *testing.T) (string, func()) { - return createTestTempDir(t, "", "builder-context-test") -} - -var prepareOneFile = func(t *testing.T) (string, func()) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - return contextDir, cleanup -} - -func testValidateContextDirectory(t *testing.T, prepare func(t *testing.T) (string, func()), excludes []string) { - contextDir, cleanup := prepare(t) - defer cleanup() - - err := ValidateContextDirectory(contextDir, excludes) - - if err != nil { - t.Fatalf("Error should be nil, got: %s", err) - } -} - -func TestGetContextFromLocalDirNoDockerfile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - defer cleanup() - - absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "") - - if err == nil { - t.Fatalf("Error should not be nil") - } - - if absContextDir != "" { - t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) - } - - if relDockerfile != "" { - t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) - } -} - -func 
TestGetContextFromLocalDirNotExistingDir(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - defer cleanup() - - fakePath := filepath.Join(contextDir, "fake") - - absContextDir, relDockerfile, err := GetContextFromLocalDir(fakePath, "") - - if err == nil { - t.Fatalf("Error should not be nil") - } - - if absContextDir != "" { - t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) - } - - if relDockerfile != "" { - t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) - } -} - -func TestGetContextFromLocalDirNotExistingDockerfile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - defer cleanup() - - fakePath := filepath.Join(contextDir, "fake") - - absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, fakePath) - - if err == nil { - t.Fatalf("Error should not be nil") - } - - if absContextDir != "" { - t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) - } - - if relDockerfile != "" { - t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) - } -} - -func TestGetContextFromLocalDirWithNoDirectory(t *testing.T) { - contextDir, dirCleanup := createTestTempDir(t, "", "builder-context-test") - defer dirCleanup() - - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - - chdirCleanup := chdir(t, contextDir) - defer chdirCleanup() - - absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "") - - if err != nil { - t.Fatalf("Error when getting context from local dir: %s", err) - } - - if absContextDir != contextDir { - t.Fatalf("Absolute directory path should be equal to %s, got: %s", contextDir, absContextDir) - } - - if relDockerfile != DefaultDockerfileName { - t.Fatalf("Relative path to dockerfile should be equal to %s, got: %s", DefaultDockerfileName, relDockerfile) - } -} - -func TestGetContextFromLocalDirWithDockerfile(t 
*testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - defer cleanup() - - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - - absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "") - - if err != nil { - t.Fatalf("Error when getting context from local dir: %s", err) - } - - if absContextDir != contextDir { - t.Fatalf("Absolute directory path should be equal to %s, got: %s", contextDir, absContextDir) - } - - if relDockerfile != DefaultDockerfileName { - t.Fatalf("Relative path to dockerfile should be equal to %s, got: %s", DefaultDockerfileName, relDockerfile) - } -} - -func TestGetContextFromLocalDirLocalFile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - defer cleanup() - - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - testFilename := createTestTempFile(t, contextDir, "tmpTest", "test", 0777) - - absContextDir, relDockerfile, err := GetContextFromLocalDir(testFilename, "") - - if err == nil { - t.Fatalf("Error should not be nil") - } - - if absContextDir != "" { - t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) - } - - if relDockerfile != "" { - t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) - } -} - -func TestGetContextFromLocalDirWithCustomDockerfile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - defer cleanup() - - chdirCleanup := chdir(t, contextDir) - defer chdirCleanup() - - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - - absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, DefaultDockerfileName) - - if err != nil { - t.Fatalf("Error when getting context from local dir: %s", err) - } - - if absContextDir != contextDir { - t.Fatalf("Absolute directory path should be equal to %s, got: %s", contextDir, absContextDir) - } - - if 
relDockerfile != DefaultDockerfileName { - t.Fatalf("Relative path to dockerfile should be equal to %s, got: %s", DefaultDockerfileName, relDockerfile) - } - -} - -func TestGetContextFromReaderString(t *testing.T) { - tarArchive, relDockerfile, err := GetContextFromReader(ioutil.NopCloser(strings.NewReader(dockerfileContents)), "") - - if err != nil { - t.Fatalf("Error when executing GetContextFromReader: %s", err) - } - - tarReader := tar.NewReader(tarArchive) - - _, err = tarReader.Next() - - if err != nil { - t.Fatalf("Error when reading tar archive: %s", err) - } - - buff := new(bytes.Buffer) - buff.ReadFrom(tarReader) - contents := buff.String() - - _, err = tarReader.Next() - - if err != io.EOF { - t.Fatalf("Tar stream too long: %s", err) - } - - if err = tarArchive.Close(); err != nil { - t.Fatalf("Error when closing tar stream: %s", err) - } - - if dockerfileContents != contents { - t.Fatalf("Uncompressed tar archive does not equal: %s, got: %s", dockerfileContents, contents) - } - - if relDockerfile != DefaultDockerfileName { - t.Fatalf("Relative path not equals %s, got: %s", DefaultDockerfileName, relDockerfile) - } -} - -func TestGetContextFromReaderTar(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - defer cleanup() - - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - - tarStream, err := archive.Tar(contextDir, archive.Uncompressed) - - if err != nil { - t.Fatalf("Error when creating tar: %s", err) - } - - tarArchive, relDockerfile, err := GetContextFromReader(tarStream, DefaultDockerfileName) - - if err != nil { - t.Fatalf("Error when executing GetContextFromReader: %s", err) - } - - tarReader := tar.NewReader(tarArchive) - - header, err := tarReader.Next() - - if err != nil { - t.Fatalf("Error when reading tar archive: %s", err) - } - - if header.Name != DefaultDockerfileName { - t.Fatalf("Dockerfile name should be: %s, got: %s", DefaultDockerfileName, header.Name) - } - - 
buff := new(bytes.Buffer) - buff.ReadFrom(tarReader) - contents := buff.String() - - _, err = tarReader.Next() - - if err != io.EOF { - t.Fatalf("Tar stream too long: %s", err) - } - - if err = tarArchive.Close(); err != nil { - t.Fatalf("Error when closing tar stream: %s", err) - } - - if dockerfileContents != contents { - t.Fatalf("Uncompressed tar archive does not equal: %s, got: %s", dockerfileContents, contents) - } - - if relDockerfile != DefaultDockerfileName { - t.Fatalf("Relative path not equals %s, got: %s", DefaultDockerfileName, relDockerfile) - } -} - -func TestValidateContextDirectoryEmptyContext(t *testing.T) { - // This isn't a valid test on Windows. See https://play.golang.org/p/RR6z6jxR81. - // The test will ultimately end up calling filepath.Abs(""). On Windows, - // golang will error. On Linux, golang will return /. Due to there being - // drive letters on Windows, this is probably the correct behaviour for - // Windows. - if runtime.GOOS == "windows" { - t.Skip("Invalid test on Windows") - } - testValidateContextDirectory(t, prepareEmpty, []string{}) -} - -func TestValidateContextDirectoryContextWithNoFiles(t *testing.T) { - testValidateContextDirectory(t, prepareNoFiles, []string{}) -} - -func TestValidateContextDirectoryWithOneFile(t *testing.T) { - testValidateContextDirectory(t, prepareOneFile, []string{}) -} - -func TestValidateContextDirectoryWithOneFileExcludes(t *testing.T) { - testValidateContextDirectory(t, prepareOneFile, []string{DefaultDockerfileName}) -} - -// createTestTempDir creates a temporary directory for testing. -// It returns the created path and a cleanup function which is meant to be used as deferred call. -// When an error occurs, it terminates the test. 
-func createTestTempDir(t *testing.T, dir, prefix string) (string, func()) { - path, err := ioutil.TempDir(dir, prefix) - - if err != nil { - t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) - } - - return path, func() { - err = os.RemoveAll(path) - - if err != nil { - t.Fatalf("Error when removing directory %s: %s", path, err) - } - } -} - -// createTestTempSubdir creates a temporary directory for testing. -// It returns the created path but doesn't provide a cleanup function, -// so createTestTempSubdir should be used only for creating temporary subdirectories -// whose parent directories are properly cleaned up. -// When an error occurs, it terminates the test. -func createTestTempSubdir(t *testing.T, dir, prefix string) string { - path, err := ioutil.TempDir(dir, prefix) - - if err != nil { - t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) - } - - return path -} - -// createTestTempFile creates a temporary file within dir with specific contents and permissions. -// When an error occurs, it terminates the test -func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.FileMode) string { - filePath := filepath.Join(dir, filename) - err := ioutil.WriteFile(filePath, []byte(contents), perm) - - if err != nil { - t.Fatalf("Error when creating %s file: %s", filename, err) - } - - return filePath -} - -// chdir changes current working directory to dir. -// It returns a function which changes working directory back to the previous one. -// This function is meant to be executed as a deferred call. -// When an error occurs, it terminates the test. 
-func chdir(t *testing.T, dir string) func() { - workingDirectory, err := os.Getwd() - - if err != nil { - t.Fatalf("Error when retrieving working directory: %s", err) - } - - err = os.Chdir(dir) - - if err != nil { - t.Fatalf("Error when changing directory to %s: %s", dir, err) - } - - return func() { - err = os.Chdir(workingDirectory) - - if err != nil { - t.Fatalf("Error when changing back to working directory (%s): %s", workingDirectory, err) - } - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/image/build/context_unix.go b/fn/vendor/github.com/docker/docker/cli/command/image/build/context_unix.go deleted file mode 100644 index cb2634f07..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/image/build/context_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package build - -import ( - "path/filepath" -) - -func getContextRoot(srcPath string) (string, error) { - return filepath.Join(srcPath, "."), nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/image/build/context_windows.go b/fn/vendor/github.com/docker/docker/cli/command/image/build/context_windows.go deleted file mode 100644 index c577cfa7b..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/image/build/context_windows.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build windows - -package build - -import ( - "path/filepath" - - "github.com/docker/docker/pkg/longpath" -) - -func getContextRoot(srcPath string) (string, error) { - cr, err := filepath.Abs(srcPath) - if err != nil { - return "", err - } - return longpath.AddPrefix(cr), nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/image/cmd.go b/fn/vendor/github.com/docker/docker/cli/command/image/cmd.go deleted file mode 100644 index c3ca61f85..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/image/cmd.go +++ /dev/null @@ -1,33 +0,0 @@ -package image - -import ( - "github.com/spf13/cobra" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" 
-) - -// NewImageCommand returns a cobra command for `image` subcommands -func NewImageCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "image", - Short: "Manage images", - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - } - cmd.AddCommand( - NewBuildCommand(dockerCli), - NewHistoryCommand(dockerCli), - NewImportCommand(dockerCli), - NewLoadCommand(dockerCli), - NewPullCommand(dockerCli), - NewPushCommand(dockerCli), - NewSaveCommand(dockerCli), - NewTagCommand(dockerCli), - newListCommand(dockerCli), - newRemoveCommand(dockerCli), - newInspectCommand(dockerCli), - NewPruneCommand(dockerCli), - ) - return cmd -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/image/history.go b/fn/vendor/github.com/docker/docker/cli/command/image/history.go deleted file mode 100644 index 91c8f75a6..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/image/history.go +++ /dev/null @@ -1,99 +0,0 @@ -package image - -import ( - "fmt" - "strconv" - "strings" - "text/tabwriter" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/stringutils" - "github.com/docker/go-units" - "github.com/spf13/cobra" -) - -type historyOptions struct { - image string - - human bool - quiet bool - noTrunc bool -} - -// NewHistoryCommand creates a new `docker history` command -func NewHistoryCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts historyOptions - - cmd := &cobra.Command{ - Use: "history [OPTIONS] IMAGE", - Short: "Show the history of an image", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.image = args[0] - return runHistory(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.human, "human", "H", true, "Print sizes and dates in human readable format") - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only show numeric 
IDs") - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") - - return cmd -} - -func runHistory(dockerCli *command.DockerCli, opts historyOptions) error { - ctx := context.Background() - - history, err := dockerCli.Client().ImageHistory(ctx, opts.image) - if err != nil { - return err - } - - w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) - - if opts.quiet { - for _, entry := range history { - if opts.noTrunc { - fmt.Fprintf(w, "%s\n", entry.ID) - } else { - fmt.Fprintf(w, "%s\n", stringid.TruncateID(entry.ID)) - } - } - w.Flush() - return nil - } - - var imageID string - var createdBy string - var created string - var size string - - fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT") - for _, entry := range history { - imageID = entry.ID - createdBy = strings.Replace(entry.CreatedBy, "\t", " ", -1) - if !opts.noTrunc { - createdBy = stringutils.Ellipsis(createdBy, 45) - imageID = stringid.TruncateID(entry.ID) - } - - if opts.human { - created = units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0))) + " ago" - size = units.HumanSizeWithPrecision(float64(entry.Size), 3) - } else { - created = time.Unix(entry.Created, 0).Format(time.RFC3339) - size = strconv.FormatInt(entry.Size, 10) - } - - fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", imageID, created, createdBy, size, entry.Comment) - } - w.Flush() - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/image/import.go b/fn/vendor/github.com/docker/docker/cli/command/image/import.go deleted file mode 100644 index 60024fb53..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/image/import.go +++ /dev/null @@ -1,88 +0,0 @@ -package image - -import ( - "io" - "os" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - dockeropts "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/jsonmessage" - 
"github.com/docker/docker/pkg/urlutil" - "github.com/spf13/cobra" -) - -type importOptions struct { - source string - reference string - changes dockeropts.ListOpts - message string -} - -// NewImportCommand creates a new `docker import` command -func NewImportCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts importOptions - - cmd := &cobra.Command{ - Use: "import [OPTIONS] file|URL|- [REPOSITORY[:TAG]]", - Short: "Import the contents from a tarball to create a filesystem image", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.source = args[0] - if len(args) > 1 { - opts.reference = args[1] - } - return runImport(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - opts.changes = dockeropts.NewListOpts(nil) - flags.VarP(&opts.changes, "change", "c", "Apply Dockerfile instruction to the created image") - flags.StringVarP(&opts.message, "message", "m", "", "Set commit message for imported image") - - return cmd -} - -func runImport(dockerCli *command.DockerCli, opts importOptions) error { - var ( - in io.Reader - srcName = opts.source - ) - - if opts.source == "-" { - in = dockerCli.In() - } else if !urlutil.IsURL(opts.source) { - srcName = "-" - file, err := os.Open(opts.source) - if err != nil { - return err - } - defer file.Close() - in = file - } - - source := types.ImageImportSource{ - Source: in, - SourceName: srcName, - } - - options := types.ImageImportOptions{ - Message: opts.message, - Changes: opts.changes.GetAll(), - } - - clnt := dockerCli.Client() - - responseBody, err := clnt.ImageImport(context.Background(), source, opts.reference, options) - if err != nil { - return err - } - defer responseBody.Close() - - return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/image/inspect.go b/fn/vendor/github.com/docker/docker/cli/command/image/inspect.go deleted file mode 100644 index 217863c77..000000000 --- 
a/fn/vendor/github.com/docker/docker/cli/command/image/inspect.go +++ /dev/null @@ -1,44 +0,0 @@ -package image - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/inspect" - "github.com/spf13/cobra" -) - -type inspectOptions struct { - format string - refs []string -} - -// newInspectCommand creates a new cobra.Command for `docker image inspect` -func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] IMAGE [IMAGE...]", - Short: "Display detailed information on one or more images", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.refs = args - return runInspect(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - return cmd -} - -func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - getRefFunc := func(ref string) (interface{}, []byte, error) { - return client.ImageInspectWithRaw(ctx, ref) - } - return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/image/list.go b/fn/vendor/github.com/docker/docker/cli/command/image/list.go deleted file mode 100644 index 679604fc0..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/image/list.go +++ /dev/null @@ -1,96 +0,0 @@ -package image - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - "github.com/docker/docker/opts" - "github.com/spf13/cobra" -) - -type imagesOptions struct { - matchName string - - quiet bool - all bool - noTrunc bool - showDigests bool 
- format string - filter opts.FilterOpt -} - -// NewImagesCommand creates a new `docker images` command -func NewImagesCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := imagesOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "images [OPTIONS] [REPOSITORY[:TAG]]", - Short: "List images", - Args: cli.RequiresMaxArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - if len(args) > 0 { - opts.matchName = args[0] - } - return runImages(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only show numeric IDs") - flags.BoolVarP(&opts.all, "all", "a", false, "Show all images (default hides intermediate images)") - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") - flags.BoolVar(&opts.showDigests, "digests", false, "Show digests") - flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func newListCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := *NewImagesCommand(dockerCli) - cmd.Aliases = []string{"images", "list"} - cmd.Use = "ls [OPTIONS] [REPOSITORY[:TAG]]" - return &cmd -} - -func runImages(dockerCli *command.DockerCli, opts imagesOptions) error { - ctx := context.Background() - - filters := opts.filter.Value() - if opts.matchName != "" { - filters.Add("reference", opts.matchName) - } - - options := types.ImageListOptions{ - All: opts.all, - Filters: filters, - } - - images, err := dockerCli.Client().ImageList(ctx, options) - if err != nil { - return err - } - - format := opts.format - if len(format) == 0 { - if len(dockerCli.ConfigFile().ImagesFormat) > 0 && !opts.quiet { - format = dockerCli.ConfigFile().ImagesFormat - } else { - format = formatter.TableFormatKey - } - } - - imageCtx := formatter.ImageContext{ - Context: formatter.Context{ - Output: dockerCli.Out(), - Format: 
formatter.NewImageFormat(format, opts.quiet, opts.showDigests), - Trunc: !opts.noTrunc, - }, - Digest: opts.showDigests, - } - return formatter.ImageWrite(imageCtx, images) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/image/load.go b/fn/vendor/github.com/docker/docker/cli/command/image/load.go deleted file mode 100644 index 24346f126..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/image/load.go +++ /dev/null @@ -1,77 +0,0 @@ -package image - -import ( - "io" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/system" - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -type loadOptions struct { - input string - quiet bool -} - -// NewLoadCommand creates a new `docker load` command -func NewLoadCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts loadOptions - - cmd := &cobra.Command{ - Use: "load [OPTIONS]", - Short: "Load an image from a tar archive or STDIN", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runLoad(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.input, "input", "i", "", "Read from tar archive file, instead of STDIN") - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress the load output") - - return cmd -} - -func runLoad(dockerCli *command.DockerCli, opts loadOptions) error { - - var input io.Reader = dockerCli.In() - if opts.input != "" { - // We use system.OpenSequential to use sequential file access on Windows, avoiding - // depleting the standby list un-necessarily. On Linux, this equates to a regular os.Open. - file, err := system.OpenSequential(opts.input) - if err != nil { - return err - } - defer file.Close() - input = file - } - - // To avoid getting stuck, verify that a tar file is given either in - // the input flag or through stdin and if not display an error message and exit. 
- if opts.input == "" && dockerCli.In().IsTerminal() { - return errors.Errorf("requested load from stdin, but stdin is empty") - } - - if !dockerCli.Out().IsTerminal() { - opts.quiet = true - } - response, err := dockerCli.Client().ImageLoad(context.Background(), input, opts.quiet) - if err != nil { - return err - } - defer response.Body.Close() - - if response.Body != nil && response.JSON { - return jsonmessage.DisplayJSONMessagesToStream(response.Body, dockerCli.Out(), nil) - } - - _, err = io.Copy(dockerCli.Out(), response.Body) - return err -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/image/prune.go b/fn/vendor/github.com/docker/docker/cli/command/image/prune.go deleted file mode 100644 index f86bae39c..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/image/prune.go +++ /dev/null @@ -1,95 +0,0 @@ -package image - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/opts" - units "github.com/docker/go-units" - "github.com/spf13/cobra" -) - -type pruneOptions struct { - force bool - all bool - filter opts.FilterOpt -} - -// NewPruneCommand returns a new cobra prune command for images -func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := pruneOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "prune [OPTIONS]", - Short: "Remove unused images", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - spaceReclaimed, output, err := runPrune(dockerCli, opts) - if err != nil { - return err - } - if output != "" { - fmt.Fprintln(dockerCli.Out(), output) - } - fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) - return nil - }, - Tags: map[string]string{"version": "1.25"}, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation") - flags.BoolVarP(&opts.all, "all", "a", 
false, "Remove all unused images, not just dangling ones") - flags.Var(&opts.filter, "filter", "Provide filter values (e.g. 'until=')") - - return cmd -} - -const ( - allImageWarning = `WARNING! This will remove all images without at least one container associated to them. -Are you sure you want to continue?` - danglingWarning = `WARNING! This will remove all dangling images. -Are you sure you want to continue?` -) - -func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed uint64, output string, err error) { - pruneFilters := opts.filter.Value() - pruneFilters.Add("dangling", fmt.Sprintf("%v", !opts.all)) - pruneFilters = command.PruneFilters(dockerCli, pruneFilters) - - warning := danglingWarning - if opts.all { - warning = allImageWarning - } - if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { - return - } - - report, err := dockerCli.Client().ImagesPrune(context.Background(), pruneFilters) - if err != nil { - return - } - - if len(report.ImagesDeleted) > 0 { - output = "Deleted Images:\n" - for _, st := range report.ImagesDeleted { - if st.Untagged != "" { - output += fmt.Sprintln("untagged:", st.Untagged) - } else { - output += fmt.Sprintln("deleted:", st.Deleted) - } - } - spaceReclaimed = report.SpaceReclaimed - } - - return -} - -// RunPrune calls the Image Prune API -// This returns the amount of space reclaimed and a detailed output string -func RunPrune(dockerCli *command.DockerCli, all bool, filter opts.FilterOpt) (uint64, string, error) { - return runPrune(dockerCli, pruneOptions{force: true, all: all, filter: filter}) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/image/pull.go b/fn/vendor/github.com/docker/docker/cli/command/image/pull.go deleted file mode 100644 index 5dd523c6d..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/image/pull.go +++ /dev/null @@ -1,85 +0,0 @@ -package image - -import ( - "fmt" - "strings" - - 
"github.com/docker/distribution/reference" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/registry" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type pullOptions struct { - remote string - all bool -} - -// NewPullCommand creates a new `docker pull` command -func NewPullCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts pullOptions - - cmd := &cobra.Command{ - Use: "pull [OPTIONS] NAME[:TAG|@DIGEST]", - Short: "Pull an image or a repository from a registry", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.remote = args[0] - return runPull(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.all, "all-tags", "a", false, "Download all tagged images in the repository") - command.AddTrustVerificationFlags(flags) - - return cmd -} - -func runPull(dockerCli *command.DockerCli, opts pullOptions) error { - distributionRef, err := reference.ParseNormalizedNamed(opts.remote) - if err != nil { - return err - } - if opts.all && !reference.IsNameOnly(distributionRef) { - return errors.New("tag can't be used with --all-tags/-a") - } - - if !opts.all && reference.IsNameOnly(distributionRef) { - distributionRef = reference.TagNameOnly(distributionRef) - if tagged, ok := distributionRef.(reference.Tagged); ok { - fmt.Fprintf(dockerCli.Out(), "Using default tag: %s\n", tagged.Tag()) - } - } - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := registry.ParseRepositoryInfo(distributionRef) - if err != nil { - return err - } - - ctx := context.Background() - - authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) - requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "pull") - - // Check if reference has a digest - _, isCanonical := distributionRef.(reference.Canonical) - if command.IsTrusted() && !isCanonical { - err = 
trustedPull(ctx, dockerCli, repoInfo, distributionRef, authConfig, requestPrivilege) - } else { - err = imagePullPrivileged(ctx, dockerCli, authConfig, reference.FamiliarString(distributionRef), requestPrivilege, opts.all) - } - if err != nil { - if strings.Contains(err.Error(), "when fetching 'plugin'") { - return errors.New(err.Error() + " - Use `docker plugin install`") - } - return err - } - - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/image/push.go b/fn/vendor/github.com/docker/docker/cli/command/image/push.go deleted file mode 100644 index 3879d849d..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/image/push.go +++ /dev/null @@ -1,61 +0,0 @@ -package image - -import ( - "golang.org/x/net/context" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/registry" - "github.com/spf13/cobra" -) - -// NewPushCommand creates a new `docker push` command -func NewPushCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "push [OPTIONS] NAME[:TAG]", - Short: "Push an image or a repository to a registry", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runPush(dockerCli, args[0]) - }, - } - - flags := cmd.Flags() - - command.AddTrustSigningFlags(flags) - - return cmd -} - -func runPush(dockerCli *command.DockerCli, remote string) error { - ref, err := reference.ParseNormalizedNamed(remote) - if err != nil { - return err - } - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := registry.ParseRepositoryInfo(ref) - if err != nil { - return err - } - - ctx := context.Background() - - // Resolve the Auth config relevant for this server - authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) - requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, 
repoInfo.Index, "push") - - if command.IsTrusted() { - return trustedPush(ctx, dockerCli, repoInfo, ref, authConfig, requestPrivilege) - } - - responseBody, err := imagePushPrivileged(ctx, dockerCli, authConfig, ref, requestPrivilege) - if err != nil { - return err - } - - defer responseBody.Close() - return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/image/remove.go b/fn/vendor/github.com/docker/docker/cli/command/image/remove.go deleted file mode 100644 index 48e8d2c2a..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/image/remove.go +++ /dev/null @@ -1,78 +0,0 @@ -package image - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -type removeOptions struct { - force bool - noPrune bool -} - -// NewRemoveCommand creates a new `docker remove` command -func NewRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts removeOptions - - cmd := &cobra.Command{ - Use: "rmi [OPTIONS] IMAGE [IMAGE...]", - Short: "Remove one or more images", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runRemove(dockerCli, opts, args) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.force, "force", "f", false, "Force removal of the image") - flags.BoolVar(&opts.noPrune, "no-prune", false, "Do not delete untagged parents") - - return cmd -} - -func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := *NewRemoveCommand(dockerCli) - cmd.Aliases = []string{"rmi", "remove"} - cmd.Use = "rm [OPTIONS] IMAGE [IMAGE...]" - return &cmd -} - -func runRemove(dockerCli *command.DockerCli, opts removeOptions, images []string) error { - client := dockerCli.Client() - ctx := context.Background() - - options := 
types.ImageRemoveOptions{ - Force: opts.force, - PruneChildren: !opts.noPrune, - } - - var errs []string - for _, image := range images { - dels, err := client.ImageRemove(ctx, image, options) - if err != nil { - errs = append(errs, err.Error()) - } else { - for _, del := range dels { - if del.Deleted != "" { - fmt.Fprintf(dockerCli.Out(), "Deleted: %s\n", del.Deleted) - } else { - fmt.Fprintf(dockerCli.Out(), "Untagged: %s\n", del.Untagged) - } - } - } - } - - if len(errs) > 0 { - return errors.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/image/save.go b/fn/vendor/github.com/docker/docker/cli/command/image/save.go deleted file mode 100644 index e01d2c730..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/image/save.go +++ /dev/null @@ -1,56 +0,0 @@ -package image - -import ( - "io" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type saveOptions struct { - images []string - output string -} - -// NewSaveCommand creates a new `docker save` command -func NewSaveCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts saveOptions - - cmd := &cobra.Command{ - Use: "save [OPTIONS] IMAGE [IMAGE...]", - Short: "Save one or more images to a tar archive (streamed to STDOUT by default)", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.images = args - return runSave(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT") - - return cmd -} - -func runSave(dockerCli *command.DockerCli, opts saveOptions) error { - if opts.output == "" && dockerCli.Out().IsTerminal() { - return errors.New("Cowardly refusing to save to a terminal. 
Use the -o flag or redirect.") - } - - responseBody, err := dockerCli.Client().ImageSave(context.Background(), opts.images) - if err != nil { - return err - } - defer responseBody.Close() - - if opts.output == "" { - _, err := io.Copy(dockerCli.Out(), responseBody) - return err - } - - return command.CopyToFile(opts.output, responseBody) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/image/tag.go b/fn/vendor/github.com/docker/docker/cli/command/image/tag.go deleted file mode 100644 index fb2b70385..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/image/tag.go +++ /dev/null @@ -1,41 +0,0 @@ -package image - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type tagOptions struct { - image string - name string -} - -// NewTagCommand creates a new `docker tag` command -func NewTagCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts tagOptions - - cmd := &cobra.Command{ - Use: "tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG]", - Short: "Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE", - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.image = args[0] - opts.name = args[1] - return runTag(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.SetInterspersed(false) - - return cmd -} - -func runTag(dockerCli *command.DockerCli, opts tagOptions) error { - ctx := context.Background() - - return dockerCli.Client().ImageTag(ctx, opts.image, opts.name) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/image/trust.go b/fn/vendor/github.com/docker/docker/cli/command/image/trust.go deleted file mode 100644 index 75bae2eb5..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/image/trust.go +++ /dev/null @@ -1,382 +0,0 @@ -package image - -import ( - "encoding/hex" - "encoding/json" - "fmt" - "io" - "path" - "sort" - - "github.com/Sirupsen/logrus" - 
"github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/trust" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/registry" - "github.com/docker/notary/client" - "github.com/docker/notary/tuf/data" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -type target struct { - name string - digest digest.Digest - size int64 -} - -// trustedPush handles content trust pushing of an image -func trustedPush(ctx context.Context, cli *command.DockerCli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { - responseBody, err := imagePushPrivileged(ctx, cli, authConfig, ref, requestPrivilege) - if err != nil { - return err - } - - defer responseBody.Close() - - return PushTrustedReference(cli, repoInfo, ref, authConfig, responseBody) -} - -// PushTrustedReference pushes a canonical reference to the trust server. -func PushTrustedReference(cli *command.DockerCli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, in io.Reader) error { - // If it is a trusted push we would like to find the target entry which match the - // tag provided in the function and then do an AddTarget later. - target := &client.Target{} - // Count the times of calling for handleTarget, - // if it is called more that once, that should be considered an error in a trusted push. - cnt := 0 - handleTarget := func(aux *json.RawMessage) { - cnt++ - if cnt > 1 { - // handleTarget should only be called one. This will be treated as an error. 
- return - } - - var pushResult types.PushResult - err := json.Unmarshal(*aux, &pushResult) - if err == nil && pushResult.Tag != "" { - if dgst, err := digest.Parse(pushResult.Digest); err == nil { - h, err := hex.DecodeString(dgst.Hex()) - if err != nil { - target = nil - return - } - target.Name = pushResult.Tag - target.Hashes = data.Hashes{string(dgst.Algorithm()): h} - target.Length = int64(pushResult.Size) - } - } - } - - var tag string - switch x := ref.(type) { - case reference.Canonical: - return errors.New("cannot push a digest reference") - case reference.NamedTagged: - tag = x.Tag() - default: - // We want trust signatures to always take an explicit tag, - // otherwise it will act as an untrusted push. - if err := jsonmessage.DisplayJSONMessagesToStream(in, cli.Out(), nil); err != nil { - return err - } - fmt.Fprintln(cli.Out(), "No tag specified, skipping trust metadata push") - return nil - } - - if err := jsonmessage.DisplayJSONMessagesToStream(in, cli.Out(), handleTarget); err != nil { - return err - } - - if cnt > 1 { - return errors.Errorf("internal error: only one call to handleTarget expected") - } - - if target == nil { - fmt.Fprintln(cli.Out(), "No targets found, please provide a specific tag in order to sign it") - return nil - } - - fmt.Fprintln(cli.Out(), "Signing and pushing trust metadata") - - repo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "push", "pull") - if err != nil { - fmt.Fprintf(cli.Out(), "Error establishing connection to notary repository: %s\n", err) - return err - } - - // get the latest repository metadata so we can figure out which roles to sign - err = repo.Update(false) - - switch err.(type) { - case client.ErrRepoNotInitialized, client.ErrRepositoryNotExist: - keys := repo.CryptoService.ListKeys(data.CanonicalRootRole) - var rootKeyID string - // always select the first root key - if len(keys) > 0 { - sort.Strings(keys) - rootKeyID = keys[0] - } else { - rootPublicKey, err := 
repo.CryptoService.Create(data.CanonicalRootRole, "", data.ECDSAKey) - if err != nil { - return err - } - rootKeyID = rootPublicKey.ID() - } - - // Initialize the notary repository with a remotely managed snapshot key - if err := repo.Initialize([]string{rootKeyID}, data.CanonicalSnapshotRole); err != nil { - return trust.NotaryError(repoInfo.Name.Name(), err) - } - fmt.Fprintf(cli.Out(), "Finished initializing %q\n", repoInfo.Name.Name()) - err = repo.AddTarget(target, data.CanonicalTargetsRole) - case nil: - // already initialized and we have successfully downloaded the latest metadata - err = addTargetToAllSignableRoles(repo, target) - default: - return trust.NotaryError(repoInfo.Name.Name(), err) - } - - if err == nil { - err = repo.Publish() - } - - if err != nil { - fmt.Fprintf(cli.Out(), "Failed to sign %q:%s - %s\n", repoInfo.Name.Name(), tag, err.Error()) - return trust.NotaryError(repoInfo.Name.Name(), err) - } - - fmt.Fprintf(cli.Out(), "Successfully signed %q:%s\n", repoInfo.Name.Name(), tag) - return nil -} - -// Attempt to add the image target to all the top level delegation roles we can -// (based on whether we have the signing key and whether the role's path allows -// us to). -// If there are no delegation roles, we add to the targets role. 
-func addTargetToAllSignableRoles(repo *client.NotaryRepository, target *client.Target) error { - var signableRoles []string - - // translate the full key names, which includes the GUN, into just the key IDs - allCanonicalKeyIDs := make(map[string]struct{}) - for fullKeyID := range repo.CryptoService.ListAllKeys() { - allCanonicalKeyIDs[path.Base(fullKeyID)] = struct{}{} - } - - allDelegationRoles, err := repo.GetDelegationRoles() - if err != nil { - return err - } - - // if there are no delegation roles, then just try to sign it into the targets role - if len(allDelegationRoles) == 0 { - return repo.AddTarget(target, data.CanonicalTargetsRole) - } - - // there are delegation roles, find every delegation role we have a key for, and - // attempt to sign into into all those roles. - for _, delegationRole := range allDelegationRoles { - // We do not support signing any delegation role that isn't a direct child of the targets role. - // Also don't bother checking the keys if we can't add the target - // to this role due to path restrictions - if path.Dir(delegationRole.Name) != data.CanonicalTargetsRole || !delegationRole.CheckPaths(target.Name) { - continue - } - - for _, canonicalKeyID := range delegationRole.KeyIDs { - if _, ok := allCanonicalKeyIDs[canonicalKeyID]; ok { - signableRoles = append(signableRoles, delegationRole.Name) - break - } - } - } - - if len(signableRoles) == 0 { - return errors.Errorf("no valid signing keys for delegation roles") - } - - return repo.AddTarget(target, signableRoles...) 
-} - -// imagePushPrivileged push the image -func imagePushPrivileged(ctx context.Context, cli *command.DockerCli, authConfig types.AuthConfig, ref reference.Named, requestPrivilege types.RequestPrivilegeFunc) (io.ReadCloser, error) { - encodedAuth, err := command.EncodeAuthToBase64(authConfig) - if err != nil { - return nil, err - } - options := types.ImagePushOptions{ - RegistryAuth: encodedAuth, - PrivilegeFunc: requestPrivilege, - } - - return cli.Client().ImagePush(ctx, reference.FamiliarString(ref), options) -} - -// trustedPull handles content trust pulling of an image -func trustedPull(ctx context.Context, cli *command.DockerCli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { - var refs []target - - notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull") - if err != nil { - fmt.Fprintf(cli.Out(), "Error establishing connection to trust repository: %s\n", err) - return err - } - - if tagged, isTagged := ref.(reference.NamedTagged); !isTagged { - // List all targets - targets, err := notaryRepo.ListTargets(trust.ReleasesRole, data.CanonicalTargetsRole) - if err != nil { - return trust.NotaryError(ref.Name(), err) - } - for _, tgt := range targets { - t, err := convertTarget(tgt.Target) - if err != nil { - fmt.Fprintf(cli.Out(), "Skipping target for %q\n", reference.FamiliarName(ref)) - continue - } - // Only list tags in the top level targets role or the releases delegation role - ignore - // all other delegation roles - if tgt.Role != trust.ReleasesRole && tgt.Role != data.CanonicalTargetsRole { - continue - } - refs = append(refs, t) - } - if len(refs) == 0 { - return trust.NotaryError(ref.Name(), errors.Errorf("No trusted tags for %s", ref.Name())) - } - } else { - t, err := notaryRepo.GetTargetByName(tagged.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) - if err != nil { - return trust.NotaryError(ref.Name(), err) - } - // Only get the 
tag if it's in the top level targets role or the releases delegation role - // ignore it if it's in any other delegation roles - if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { - return trust.NotaryError(ref.Name(), errors.Errorf("No trust data for %s", tagged.Tag())) - } - - logrus.Debugf("retrieving target for %s role\n", t.Role) - r, err := convertTarget(t.Target) - if err != nil { - return err - - } - refs = append(refs, r) - } - - for i, r := range refs { - displayTag := r.name - if displayTag != "" { - displayTag = ":" + displayTag - } - fmt.Fprintf(cli.Out(), "Pull (%d of %d): %s%s@%s\n", i+1, len(refs), reference.FamiliarName(ref), displayTag, r.digest) - - trustedRef, err := reference.WithDigest(reference.TrimNamed(ref), r.digest) - if err != nil { - return err - } - if err := imagePullPrivileged(ctx, cli, authConfig, reference.FamiliarString(trustedRef), requestPrivilege, false); err != nil { - return err - } - - tagged, err := reference.WithTag(reference.TrimNamed(ref), r.name) - if err != nil { - return err - } - - if err := TagTrusted(ctx, cli, trustedRef, tagged); err != nil { - return err - } - } - return nil -} - -// imagePullPrivileged pulls the image and displays it to the output -func imagePullPrivileged(ctx context.Context, cli *command.DockerCli, authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc, all bool) error { - - encodedAuth, err := command.EncodeAuthToBase64(authConfig) - if err != nil { - return err - } - options := types.ImagePullOptions{ - RegistryAuth: encodedAuth, - PrivilegeFunc: requestPrivilege, - All: all, - } - - responseBody, err := cli.Client().ImagePull(ctx, ref, options) - if err != nil { - return err - } - defer responseBody.Close() - - return jsonmessage.DisplayJSONMessagesToStream(responseBody, cli.Out(), nil) -} - -// TrustedReference returns the canonical trusted reference for an image reference -func TrustedReference(ctx context.Context, cli 
*command.DockerCli, ref reference.NamedTagged, rs registry.Service) (reference.Canonical, error) { - var ( - repoInfo *registry.RepositoryInfo - err error - ) - if rs != nil { - repoInfo, err = rs.ResolveRepository(ref) - } else { - repoInfo, err = registry.ParseRepositoryInfo(ref) - } - if err != nil { - return nil, err - } - - // Resolve the Auth config relevant for this server - authConfig := command.ResolveAuthConfig(ctx, cli, repoInfo.Index) - - notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull") - if err != nil { - fmt.Fprintf(cli.Out(), "Error establishing connection to trust repository: %s\n", err) - return nil, err - } - - t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) - if err != nil { - return nil, trust.NotaryError(repoInfo.Name.Name(), err) - } - // Only list tags in the top level targets role or the releases delegation role - ignore - // all other delegation roles - if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { - return nil, trust.NotaryError(repoInfo.Name.Name(), errors.Errorf("No trust data for %s", ref.Tag())) - } - r, err := convertTarget(t.Target) - if err != nil { - return nil, err - - } - - return reference.WithDigest(reference.TrimNamed(ref), r.digest) -} - -func convertTarget(t client.Target) (target, error) { - h, ok := t.Hashes["sha256"] - if !ok { - return target{}, errors.New("no valid hash, expecting sha256") - } - return target{ - name: t.Name, - digest: digest.NewDigestFromHex("sha256", hex.EncodeToString(h)), - size: t.Length, - }, nil -} - -// TagTrusted tags a trusted ref -func TagTrusted(ctx context.Context, cli *command.DockerCli, trustedRef reference.Canonical, ref reference.NamedTagged) error { - // Use familiar references when interacting with client and output - familiarRef := reference.FamiliarString(ref) - trustedFamiliarRef := reference.FamiliarString(trustedRef) - - fmt.Fprintf(cli.Out(), "Tagging %s as %s\n", 
trustedFamiliarRef, familiarRef) - - return cli.Client().ImageTag(ctx, trustedFamiliarRef, familiarRef) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/image/trust_test.go b/fn/vendor/github.com/docker/docker/cli/command/image/trust_test.go deleted file mode 100644 index 78146465e..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/image/trust_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package image - -import ( - "os" - "testing" - - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/cli/trust" - "github.com/docker/docker/registry" -) - -func unsetENV() { - os.Unsetenv("DOCKER_CONTENT_TRUST") - os.Unsetenv("DOCKER_CONTENT_TRUST_SERVER") -} - -func TestENVTrustServer(t *testing.T) { - defer unsetENV() - indexInfo := ®istrytypes.IndexInfo{Name: "testserver"} - if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "https://notary-test.com:5000"); err != nil { - t.Fatal("Failed to set ENV variable") - } - output, err := trust.Server(indexInfo) - expectedStr := "https://notary-test.com:5000" - if err != nil || output != expectedStr { - t.Fatalf("Expected server to be %s, got %s", expectedStr, output) - } -} - -func TestHTTPENVTrustServer(t *testing.T) { - defer unsetENV() - indexInfo := ®istrytypes.IndexInfo{Name: "testserver"} - if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "http://notary-test.com:5000"); err != nil { - t.Fatal("Failed to set ENV variable") - } - _, err := trust.Server(indexInfo) - if err == nil { - t.Fatal("Expected error with invalid scheme") - } -} - -func TestOfficialTrustServer(t *testing.T) { - indexInfo := ®istrytypes.IndexInfo{Name: "testserver", Official: true} - output, err := trust.Server(indexInfo) - if err != nil || output != registry.NotaryServer { - t.Fatalf("Expected server to be %s, got %s", registry.NotaryServer, output) - } -} - -func TestNonOfficialTrustServer(t *testing.T) { - indexInfo := ®istrytypes.IndexInfo{Name: "testserver", Official: false} - output, err := 
trust.Server(indexInfo) - expectedStr := "https://" + indexInfo.Name - if err != nil || output != expectedStr { - t.Fatalf("Expected server to be %s, got %s", expectedStr, output) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/in.go b/fn/vendor/github.com/docker/docker/cli/command/in.go deleted file mode 100644 index 50de77ee9..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/in.go +++ /dev/null @@ -1,75 +0,0 @@ -package command - -import ( - "io" - "os" - "runtime" - - "github.com/docker/docker/pkg/term" - "github.com/pkg/errors" -) - -// InStream is an input stream used by the DockerCli to read user input -type InStream struct { - in io.ReadCloser - fd uintptr - isTerminal bool - state *term.State -} - -func (i *InStream) Read(p []byte) (int, error) { - return i.in.Read(p) -} - -// Close implements the Closer interface -func (i *InStream) Close() error { - return i.in.Close() -} - -// FD returns the file descriptor number for this stream -func (i *InStream) FD() uintptr { - return i.fd -} - -// IsTerminal returns true if this stream is connected to a terminal -func (i *InStream) IsTerminal() bool { - return i.isTerminal -} - -// SetRawTerminal sets raw mode on the input terminal -func (i *InStream) SetRawTerminal() (err error) { - if os.Getenv("NORAW") != "" || !i.isTerminal { - return nil - } - i.state, err = term.SetRawTerminal(i.fd) - return err -} - -// RestoreTerminal restores normal mode to the terminal -func (i *InStream) RestoreTerminal() { - if i.state != nil { - term.RestoreTerminal(i.fd, i.state) - } -} - -// CheckTty checks if we are trying to attach to a container tty -// from a non-tty client input stream, and if so, returns an error. 
-func (i *InStream) CheckTty(attachStdin, ttyMode bool) error { - // In order to attach to a container tty, input stream for the client must - // be a tty itself: redirecting or piping the client standard input is - // incompatible with `docker run -t`, `docker exec -t` or `docker attach`. - if ttyMode && attachStdin && !i.isTerminal { - eText := "the input device is not a TTY" - if runtime.GOOS == "windows" { - return errors.New(eText + ". If you are using mintty, try prefixing the command with 'winpty'") - } - return errors.New(eText) - } - return nil -} - -// NewInStream returns a new InStream object from a ReadCloser -func NewInStream(in io.ReadCloser) *InStream { - fd, isTerminal := term.GetFdInfo(in) - return &InStream{in: in, fd: fd, isTerminal: isTerminal} -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/inspect/inspector.go b/fn/vendor/github.com/docker/docker/cli/command/inspect/inspector.go deleted file mode 100644 index 13e584ab4..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/inspect/inspector.go +++ /dev/null @@ -1,198 +0,0 @@ -package inspect - -import ( - "bytes" - "encoding/json" - "io" - "strings" - "text/template" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/templates" - "github.com/pkg/errors" -) - -// Inspector defines an interface to implement to process elements -type Inspector interface { - Inspect(typedElement interface{}, rawElement []byte) error - Flush() error -} - -// TemplateInspector uses a text template to inspect elements. -type TemplateInspector struct { - outputStream io.Writer - buffer *bytes.Buffer - tmpl *template.Template -} - -// NewTemplateInspector creates a new inspector with a template. 
-func NewTemplateInspector(outputStream io.Writer, tmpl *template.Template) Inspector { - return &TemplateInspector{ - outputStream: outputStream, - buffer: new(bytes.Buffer), - tmpl: tmpl, - } -} - -// NewTemplateInspectorFromString creates a new TemplateInspector from a string -// which is compiled into a template. -func NewTemplateInspectorFromString(out io.Writer, tmplStr string) (Inspector, error) { - if tmplStr == "" { - return NewIndentedInspector(out), nil - } - - tmpl, err := templates.Parse(tmplStr) - if err != nil { - return nil, errors.Errorf("Template parsing error: %s", err) - } - return NewTemplateInspector(out, tmpl), nil -} - -// GetRefFunc is a function which used by Inspect to fetch an object from a -// reference -type GetRefFunc func(ref string) (interface{}, []byte, error) - -// Inspect fetches objects by reference using GetRefFunc and writes the json -// representation to the output writer. -func Inspect(out io.Writer, references []string, tmplStr string, getRef GetRefFunc) error { - inspector, err := NewTemplateInspectorFromString(out, tmplStr) - if err != nil { - return cli.StatusError{StatusCode: 64, Status: err.Error()} - } - - var inspectErrs []string - for _, ref := range references { - element, raw, err := getRef(ref) - if err != nil { - inspectErrs = append(inspectErrs, err.Error()) - continue - } - - if err := inspector.Inspect(element, raw); err != nil { - inspectErrs = append(inspectErrs, err.Error()) - } - } - - if err := inspector.Flush(); err != nil { - logrus.Errorf("%s\n", err) - } - - if len(inspectErrs) != 0 { - return cli.StatusError{ - StatusCode: 1, - Status: strings.Join(inspectErrs, "\n"), - } - } - return nil -} - -// Inspect executes the inspect template. -// It decodes the raw element into a map if the initial execution fails. -// This allows docker cli to parse inspect structs injected with Swarm fields. 
-func (i *TemplateInspector) Inspect(typedElement interface{}, rawElement []byte) error { - buffer := new(bytes.Buffer) - if err := i.tmpl.Execute(buffer, typedElement); err != nil { - if rawElement == nil { - return errors.Errorf("Template parsing error: %v", err) - } - return i.tryRawInspectFallback(rawElement) - } - i.buffer.Write(buffer.Bytes()) - i.buffer.WriteByte('\n') - return nil -} - -// tryRawInspectFallback executes the inspect template with a raw interface. -// This allows docker cli to parse inspect structs injected with Swarm fields. -func (i *TemplateInspector) tryRawInspectFallback(rawElement []byte) error { - var raw interface{} - buffer := new(bytes.Buffer) - rdr := bytes.NewReader(rawElement) - dec := json.NewDecoder(rdr) - - if rawErr := dec.Decode(&raw); rawErr != nil { - return errors.Errorf("unable to read inspect data: %v", rawErr) - } - - tmplMissingKey := i.tmpl.Option("missingkey=error") - if rawErr := tmplMissingKey.Execute(buffer, raw); rawErr != nil { - return errors.Errorf("Template parsing error: %v", rawErr) - } - - i.buffer.Write(buffer.Bytes()) - i.buffer.WriteByte('\n') - return nil -} - -// Flush writes the result of inspecting all elements into the output stream. -func (i *TemplateInspector) Flush() error { - if i.buffer.Len() == 0 { - _, err := io.WriteString(i.outputStream, "\n") - return err - } - _, err := io.Copy(i.outputStream, i.buffer) - return err -} - -// IndentedInspector uses a buffer to stop the indented representation of an element. -type IndentedInspector struct { - outputStream io.Writer - elements []interface{} - rawElements [][]byte -} - -// NewIndentedInspector generates a new IndentedInspector. -func NewIndentedInspector(outputStream io.Writer) Inspector { - return &IndentedInspector{ - outputStream: outputStream, - } -} - -// Inspect writes the raw element with an indented json format. 
-func (i *IndentedInspector) Inspect(typedElement interface{}, rawElement []byte) error { - if rawElement != nil { - i.rawElements = append(i.rawElements, rawElement) - } else { - i.elements = append(i.elements, typedElement) - } - return nil -} - -// Flush writes the result of inspecting all elements into the output stream. -func (i *IndentedInspector) Flush() error { - if len(i.elements) == 0 && len(i.rawElements) == 0 { - _, err := io.WriteString(i.outputStream, "[]\n") - return err - } - - var buffer io.Reader - if len(i.rawElements) > 0 { - bytesBuffer := new(bytes.Buffer) - bytesBuffer.WriteString("[") - for idx, r := range i.rawElements { - bytesBuffer.Write(r) - if idx < len(i.rawElements)-1 { - bytesBuffer.WriteString(",") - } - } - bytesBuffer.WriteString("]") - indented := new(bytes.Buffer) - if err := json.Indent(indented, bytesBuffer.Bytes(), "", " "); err != nil { - return err - } - buffer = indented - } else { - b, err := json.MarshalIndent(i.elements, "", " ") - if err != nil { - return err - } - buffer = bytes.NewReader(b) - } - - if _, err := io.Copy(i.outputStream, buffer); err != nil { - return err - } - _, err := io.WriteString(i.outputStream, "\n") - return err -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/inspect/inspector_test.go b/fn/vendor/github.com/docker/docker/cli/command/inspect/inspector_test.go deleted file mode 100644 index 9085230ac..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/inspect/inspector_test.go +++ /dev/null @@ -1,221 +0,0 @@ -package inspect - -import ( - "bytes" - "strings" - "testing" - - "github.com/docker/docker/pkg/templates" -) - -type testElement struct { - DNS string `json:"Dns"` -} - -func TestTemplateInspectorDefault(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.DNS}}") - if err != nil { - t.Fatal(err) - } - i := NewTemplateInspector(b, tmpl) - if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { - t.Fatal(err) - } - - if err := 
i.Flush(); err != nil { - t.Fatal(err) - } - if b.String() != "0.0.0.0\n" { - t.Fatalf("Expected `0.0.0.0\\n`, got `%s`", b.String()) - } -} - -func TestTemplateInspectorEmpty(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.DNS}}") - if err != nil { - t.Fatal(err) - } - i := NewTemplateInspector(b, tmpl) - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - if b.String() != "\n" { - t.Fatalf("Expected `\\n`, got `%s`", b.String()) - } -} - -func TestTemplateInspectorTemplateError(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.Foo}}") - if err != nil { - t.Fatal(err) - } - i := NewTemplateInspector(b, tmpl) - - err = i.Inspect(testElement{"0.0.0.0"}, nil) - if err == nil { - t.Fatal("Expected error got nil") - } - - if !strings.HasPrefix(err.Error(), "Template parsing error") { - t.Fatalf("Expected template error, got %v", err) - } -} - -func TestTemplateInspectorRawFallback(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.Dns}}") - if err != nil { - t.Fatal(err) - } - i := NewTemplateInspector(b, tmpl) - if err := i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Dns": "0.0.0.0"}`)); err != nil { - t.Fatal(err) - } - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - if b.String() != "0.0.0.0\n" { - t.Fatalf("Expected `0.0.0.0\\n`, got `%s`", b.String()) - } -} - -func TestTemplateInspectorRawFallbackError(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.Dns}}") - if err != nil { - t.Fatal(err) - } - i := NewTemplateInspector(b, tmpl) - err = i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Foo": "0.0.0.0"}`)) - if err == nil { - t.Fatal("Expected error got nil") - } - - if !strings.HasPrefix(err.Error(), "Template parsing error") { - t.Fatalf("Expected template error, got %v", err) - } -} - -func TestTemplateInspectorMultiple(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.DNS}}") - if err != nil { - t.Fatal(err) - } - i := 
NewTemplateInspector(b, tmpl) - - if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { - t.Fatal(err) - } - if err := i.Inspect(testElement{"1.1.1.1"}, nil); err != nil { - t.Fatal(err) - } - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - if b.String() != "0.0.0.0\n1.1.1.1\n" { - t.Fatalf("Expected `0.0.0.0\\n1.1.1.1\\n`, got `%s`", b.String()) - } -} - -func TestIndentedInspectorDefault(t *testing.T) { - b := new(bytes.Buffer) - i := NewIndentedInspector(b) - if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { - t.Fatal(err) - } - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - - expected := `[ - { - "Dns": "0.0.0.0" - } -] -` - if b.String() != expected { - t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) - } -} - -func TestIndentedInspectorMultiple(t *testing.T) { - b := new(bytes.Buffer) - i := NewIndentedInspector(b) - if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { - t.Fatal(err) - } - - if err := i.Inspect(testElement{"1.1.1.1"}, nil); err != nil { - t.Fatal(err) - } - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - - expected := `[ - { - "Dns": "0.0.0.0" - }, - { - "Dns": "1.1.1.1" - } -] -` - if b.String() != expected { - t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) - } -} - -func TestIndentedInspectorEmpty(t *testing.T) { - b := new(bytes.Buffer) - i := NewIndentedInspector(b) - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - - expected := "[]\n" - if b.String() != expected { - t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) - } -} - -func TestIndentedInspectorRawElements(t *testing.T) { - b := new(bytes.Buffer) - i := NewIndentedInspector(b) - if err := i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Dns": "0.0.0.0", "Node": "0"}`)); err != nil { - t.Fatal(err) - } - - if err := i.Inspect(testElement{"1.1.1.1"}, []byte(`{"Dns": "1.1.1.1", "Node": "1"}`)); err != nil { - t.Fatal(err) - } - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - - expected 
:= `[ - { - "Dns": "0.0.0.0", - "Node": "0" - }, - { - "Dns": "1.1.1.1", - "Node": "1" - } -] -` - if b.String() != expected { - t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/network/cmd.go b/fn/vendor/github.com/docker/docker/cli/command/network/cmd.go deleted file mode 100644 index ab8393cde..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/network/cmd.go +++ /dev/null @@ -1,28 +0,0 @@ -package network - -import ( - "github.com/spf13/cobra" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" -) - -// NewNetworkCommand returns a cobra command for `network` subcommands -func NewNetworkCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "network", - Short: "Manage networks", - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - } - cmd.AddCommand( - newConnectCommand(dockerCli), - newCreateCommand(dockerCli), - newDisconnectCommand(dockerCli), - newInspectCommand(dockerCli), - newListCommand(dockerCli), - newRemoveCommand(dockerCli), - NewPruneCommand(dockerCli), - ) - return cmd -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/network/connect.go b/fn/vendor/github.com/docker/docker/cli/command/network/connect.go deleted file mode 100644 index bc90ddaba..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/network/connect.go +++ /dev/null @@ -1,63 +0,0 @@ -package network - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/opts" - "github.com/spf13/cobra" -) - -type connectOptions struct { - network string - container string - ipaddress string - ipv6address string - links opts.ListOpts - aliases []string - linklocalips []string -} - -func newConnectCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := connectOptions{ - links: 
opts.NewListOpts(opts.ValidateLink), - } - - cmd := &cobra.Command{ - Use: "connect [OPTIONS] NETWORK CONTAINER", - Short: "Connect a container to a network", - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.network = args[0] - opts.container = args[1] - return runConnect(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVar(&opts.ipaddress, "ip", "", "IPv4 address (e.g., 172.30.100.104)") - flags.StringVar(&opts.ipv6address, "ip6", "", "IPv6 address (e.g., 2001:db8::33)") - flags.Var(&opts.links, "link", "Add link to another container") - flags.StringSliceVar(&opts.aliases, "alias", []string{}, "Add network-scoped alias for the container") - flags.StringSliceVar(&opts.linklocalips, "link-local-ip", []string{}, "Add a link-local address for the container") - - return cmd -} - -func runConnect(dockerCli *command.DockerCli, opts connectOptions) error { - client := dockerCli.Client() - - epConfig := &network.EndpointSettings{ - IPAMConfig: &network.EndpointIPAMConfig{ - IPv4Address: opts.ipaddress, - IPv6Address: opts.ipv6address, - LinkLocalIPs: opts.linklocalips, - }, - Links: opts.links.GetAll(), - Aliases: opts.aliases, - } - - return client.NetworkConnect(context.Background(), opts.network, opts.container, epConfig) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/network/create.go b/fn/vendor/github.com/docker/docker/cli/command/network/create.go deleted file mode 100644 index 90119af91..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/network/create.go +++ /dev/null @@ -1,232 +0,0 @@ -package network - -import ( - "fmt" - "net" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - 
-type createOptions struct { - name string - driver string - driverOpts opts.MapOpts - labels opts.ListOpts - internal bool - ipv6 bool - attachable bool - ingress bool - - ipamDriver string - ipamSubnet []string - ipamIPRange []string - ipamGateway []string - ipamAux opts.MapOpts - ipamOpt opts.MapOpts -} - -func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := createOptions{ - driverOpts: *opts.NewMapOpts(nil, nil), - labels: opts.NewListOpts(opts.ValidateEnv), - ipamAux: *opts.NewMapOpts(nil, nil), - ipamOpt: *opts.NewMapOpts(nil, nil), - } - - cmd := &cobra.Command{ - Use: "create [OPTIONS] NETWORK", - Short: "Create a network", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.name = args[0] - return runCreate(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.driver, "driver", "d", "bridge", "Driver to manage the Network") - flags.VarP(&opts.driverOpts, "opt", "o", "Set driver specific options") - flags.Var(&opts.labels, "label", "Set metadata on a network") - flags.BoolVar(&opts.internal, "internal", false, "Restrict external access to the network") - flags.BoolVar(&opts.ipv6, "ipv6", false, "Enable IPv6 networking") - flags.BoolVar(&opts.attachable, "attachable", false, "Enable manual container attachment") - flags.SetAnnotation("attachable", "version", []string{"1.25"}) - flags.BoolVar(&opts.ingress, "ingress", false, "Create swarm routing-mesh network") - flags.SetAnnotation("ingress", "version", []string{"1.29"}) - - flags.StringVar(&opts.ipamDriver, "ipam-driver", "default", "IP Address Management Driver") - flags.StringSliceVar(&opts.ipamSubnet, "subnet", []string{}, "Subnet in CIDR format that represents a network segment") - flags.StringSliceVar(&opts.ipamIPRange, "ip-range", []string{}, "Allocate container ip from a sub-range") - flags.StringSliceVar(&opts.ipamGateway, "gateway", []string{}, "IPv4 or IPv6 Gateway for the master subnet") - - 
flags.Var(&opts.ipamAux, "aux-address", "Auxiliary IPv4 or IPv6 addresses used by Network driver") - flags.Var(&opts.ipamOpt, "ipam-opt", "Set IPAM driver specific options") - - return cmd -} - -func runCreate(dockerCli *command.DockerCli, opts createOptions) error { - client := dockerCli.Client() - - ipamCfg, err := consolidateIpam(opts.ipamSubnet, opts.ipamIPRange, opts.ipamGateway, opts.ipamAux.GetAll()) - if err != nil { - return err - } - - // Construct network create request body - nc := types.NetworkCreate{ - Driver: opts.driver, - Options: opts.driverOpts.GetAll(), - IPAM: &network.IPAM{ - Driver: opts.ipamDriver, - Config: ipamCfg, - Options: opts.ipamOpt.GetAll(), - }, - CheckDuplicate: true, - Internal: opts.internal, - EnableIPv6: opts.ipv6, - Attachable: opts.attachable, - Ingress: opts.ingress, - Labels: runconfigopts.ConvertKVStringsToMap(opts.labels.GetAll()), - } - - resp, err := client.NetworkCreate(context.Background(), opts.name, nc) - if err != nil { - return err - } - fmt.Fprintf(dockerCli.Out(), "%s\n", resp.ID) - return nil -} - -// Consolidates the ipam configuration as a group from different related configurations -// user can configure network with multiple non-overlapping subnets and hence it is -// possible to correlate the various related parameters and consolidate them. -// consolidateIpam consolidates subnets, ip-ranges, gateways and auxiliary addresses into -// structured ipam data. 
-func consolidateIpam(subnets, ranges, gateways []string, auxaddrs map[string]string) ([]network.IPAMConfig, error) { - if len(subnets) < len(ranges) || len(subnets) < len(gateways) { - return nil, errors.Errorf("every ip-range or gateway must have a corresponding subnet") - } - iData := map[string]*network.IPAMConfig{} - - // Populate non-overlapping subnets into consolidation map - for _, s := range subnets { - for k := range iData { - ok1, err := subnetMatches(s, k) - if err != nil { - return nil, err - } - ok2, err := subnetMatches(k, s) - if err != nil { - return nil, err - } - if ok1 || ok2 { - return nil, errors.Errorf("multiple overlapping subnet configuration is not supported") - } - } - iData[s] = &network.IPAMConfig{Subnet: s, AuxAddress: map[string]string{}} - } - - // Validate and add valid ip ranges - for _, r := range ranges { - match := false - for _, s := range subnets { - ok, err := subnetMatches(s, r) - if err != nil { - return nil, err - } - if !ok { - continue - } - if iData[s].IPRange != "" { - return nil, errors.Errorf("cannot configure multiple ranges (%s, %s) on the same subnet (%s)", r, iData[s].IPRange, s) - } - d := iData[s] - d.IPRange = r - match = true - } - if !match { - return nil, errors.Errorf("no matching subnet for range %s", r) - } - } - - // Validate and add valid gateways - for _, g := range gateways { - match := false - for _, s := range subnets { - ok, err := subnetMatches(s, g) - if err != nil { - return nil, err - } - if !ok { - continue - } - if iData[s].Gateway != "" { - return nil, errors.Errorf("cannot configure multiple gateways (%s, %s) for the same subnet (%s)", g, iData[s].Gateway, s) - } - d := iData[s] - d.Gateway = g - match = true - } - if !match { - return nil, errors.Errorf("no matching subnet for gateway %s", g) - } - } - - // Validate and add aux-addresses - for key, aa := range auxaddrs { - match := false - for _, s := range subnets { - ok, err := subnetMatches(s, aa) - if err != nil { - return nil, err - 
} - if !ok { - continue - } - iData[s].AuxAddress[key] = aa - match = true - } - if !match { - return nil, errors.Errorf("no matching subnet for aux-address %s", aa) - } - } - - idl := []network.IPAMConfig{} - for _, v := range iData { - idl = append(idl, *v) - } - return idl, nil -} - -func subnetMatches(subnet, data string) (bool, error) { - var ( - ip net.IP - ) - - _, s, err := net.ParseCIDR(subnet) - if err != nil { - return false, errors.Errorf("Invalid subnet %s : %v", s, err) - } - - if strings.Contains(data, "/") { - ip, _, err = net.ParseCIDR(data) - if err != nil { - return false, errors.Errorf("Invalid cidr %s : %v", data, err) - } - } else { - ip = net.ParseIP(data) - } - - return s.Contains(ip), nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/network/disconnect.go b/fn/vendor/github.com/docker/docker/cli/command/network/disconnect.go deleted file mode 100644 index c9d9c14a1..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/network/disconnect.go +++ /dev/null @@ -1,41 +0,0 @@ -package network - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type disconnectOptions struct { - network string - container string - force bool -} - -func newDisconnectCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := disconnectOptions{} - - cmd := &cobra.Command{ - Use: "disconnect [OPTIONS] NETWORK CONTAINER", - Short: "Disconnect a container from a network", - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.network = args[0] - opts.container = args[1] - return runDisconnect(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.force, "force", "f", false, "Force the container to disconnect from a network") - - return cmd -} - -func runDisconnect(dockerCli *command.DockerCli, opts disconnectOptions) error { - client := dockerCli.Client() - - return 
client.NetworkDisconnect(context.Background(), opts.network, opts.container, opts.force) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/network/inspect.go b/fn/vendor/github.com/docker/docker/cli/command/network/inspect.go deleted file mode 100644 index e58d66b77..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/network/inspect.go +++ /dev/null @@ -1,47 +0,0 @@ -package network - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/inspect" - "github.com/spf13/cobra" -) - -type inspectOptions struct { - format string - names []string - verbose bool -} - -func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] NETWORK [NETWORK...]", - Short: "Display detailed information on one or more networks", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.names = args - return runInspect(dockerCli, opts) - }, - } - - cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - cmd.Flags().BoolVarP(&opts.verbose, "verbose", "v", false, "Verbose output for diagnostics") - - return cmd -} - -func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { - client := dockerCli.Client() - - ctx := context.Background() - - getNetFunc := func(name string) (interface{}, []byte, error) { - return client.NetworkInspectWithRaw(ctx, name, opts.verbose) - } - - return inspect.Inspect(dockerCli.Out(), opts.names, opts.format, getNetFunc) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/network/list.go b/fn/vendor/github.com/docker/docker/cli/command/network/list.go deleted file mode 100644 index 1a5d28510..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/network/list.go +++ /dev/null @@ -1,76 +0,0 @@ -package network - -import ( - "sort" - - 
"golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - "github.com/docker/docker/opts" - "github.com/spf13/cobra" -) - -type byNetworkName []types.NetworkResource - -func (r byNetworkName) Len() int { return len(r) } -func (r byNetworkName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r byNetworkName) Less(i, j int) bool { return r[i].Name < r[j].Name } - -type listOptions struct { - quiet bool - noTrunc bool - format string - filter opts.FilterOpt -} - -func newListCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := listOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ls [OPTIONS]", - Aliases: []string{"list"}, - Short: "List networks", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runList(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display network IDs") - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate the output") - flags.StringVar(&opts.format, "format", "", "Pretty-print networks using a Go template") - flags.VarP(&opts.filter, "filter", "f", "Provide filter values (e.g. 
'driver=bridge')") - - return cmd -} - -func runList(dockerCli *command.DockerCli, opts listOptions) error { - client := dockerCli.Client() - options := types.NetworkListOptions{Filters: opts.filter.Value()} - networkResources, err := client.NetworkList(context.Background(), options) - if err != nil { - return err - } - - format := opts.format - if len(format) == 0 { - if len(dockerCli.ConfigFile().NetworksFormat) > 0 && !opts.quiet { - format = dockerCli.ConfigFile().NetworksFormat - } else { - format = formatter.TableFormatKey - } - } - - sort.Sort(byNetworkName(networkResources)) - - networksCtx := formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewNetworkFormat(format, opts.quiet), - Trunc: !opts.noTrunc, - } - return formatter.NetworkWrite(networksCtx, networkResources) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/network/prune.go b/fn/vendor/github.com/docker/docker/cli/command/network/prune.go deleted file mode 100644 index ec363ab91..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/network/prune.go +++ /dev/null @@ -1,77 +0,0 @@ -package network - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/opts" - "github.com/spf13/cobra" -) - -type pruneOptions struct { - force bool - filter opts.FilterOpt -} - -// NewPruneCommand returns a new cobra prune command for networks -func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := pruneOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "prune [OPTIONS]", - Short: "Remove all unused networks", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - output, err := runPrune(dockerCli, opts) - if err != nil { - return err - } - if output != "" { - fmt.Fprintln(dockerCli.Out(), output) - } - return nil - }, - Tags: map[string]string{"version": "1.25"}, - } - - flags := cmd.Flags() - 
flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation") - flags.Var(&opts.filter, "filter", "Provide filter values (e.g. 'until=')") - - return cmd -} - -const warning = `WARNING! This will remove all networks not used by at least one container. -Are you sure you want to continue?` - -func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (output string, err error) { - pruneFilters := command.PruneFilters(dockerCli, opts.filter.Value()) - - if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { - return - } - - report, err := dockerCli.Client().NetworksPrune(context.Background(), pruneFilters) - if err != nil { - return - } - - if len(report.NetworksDeleted) > 0 { - output = "Deleted Networks:\n" - for _, id := range report.NetworksDeleted { - output += id + "\n" - } - } - - return -} - -// RunPrune calls the Network Prune API -// This returns the amount of space reclaimed and a detailed output string -func RunPrune(dockerCli *command.DockerCli, filter opts.FilterOpt) (uint64, string, error) { - output, err := runPrune(dockerCli, pruneOptions{force: true, filter: filter}) - return 0, output, err -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/network/remove.go b/fn/vendor/github.com/docker/docker/cli/command/network/remove.go deleted file mode 100644 index b5f074a98..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/network/remove.go +++ /dev/null @@ -1,53 +0,0 @@ -package network - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { - return &cobra.Command{ - Use: "rm NETWORK [NETWORK...]", - Aliases: []string{"remove"}, - Short: "Remove one or more networks", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runRemove(dockerCli, args) - }, - } -} 
- -const ingressWarning = "WARNING! Before removing the routing-mesh network, " + - "make sure all the nodes in your swarm run the same docker engine version. " + - "Otherwise, removal may not be effective and functionality of newly create " + - "ingress networks will be impaired.\nAre you sure you want to continue?" - -func runRemove(dockerCli *command.DockerCli, networks []string) error { - client := dockerCli.Client() - ctx := context.Background() - status := 0 - - for _, name := range networks { - if nw, _, err := client.NetworkInspectWithRaw(ctx, name, false); err == nil && - nw.Ingress && - !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), ingressWarning) { - continue - } - if err := client.NetworkRemove(ctx, name); err != nil { - fmt.Fprintf(dockerCli.Err(), "%s\n", err) - status = 1 - continue - } - fmt.Fprintf(dockerCli.Out(), "%s\n", name) - } - - if status != 0 { - return cli.StatusError{StatusCode: status} - } - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/node/client_test.go b/fn/vendor/github.com/docker/docker/cli/command/node/client_test.go deleted file mode 100644 index 1f5cdc7ce..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/node/client_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package node - -import ( - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/client" - "golang.org/x/net/context" -) - -type fakeClient struct { - client.Client - infoFunc func() (types.Info, error) - nodeInspectFunc func() (swarm.Node, []byte, error) - nodeListFunc func() ([]swarm.Node, error) - nodeRemoveFunc func() error - nodeUpdateFunc func(nodeID string, version swarm.Version, node swarm.NodeSpec) error - taskInspectFunc func(taskID string) (swarm.Task, []byte, error) - taskListFunc func(options types.TaskListOptions) ([]swarm.Task, error) -} - -func (cli *fakeClient) NodeInspectWithRaw(ctx context.Context, ref string) (swarm.Node, []byte, error) { - if 
cli.nodeInspectFunc != nil { - return cli.nodeInspectFunc() - } - return swarm.Node{}, []byte{}, nil -} - -func (cli *fakeClient) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { - if cli.nodeListFunc != nil { - return cli.nodeListFunc() - } - return []swarm.Node{}, nil -} - -func (cli *fakeClient) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { - if cli.nodeRemoveFunc != nil { - return cli.nodeRemoveFunc() - } - return nil -} - -func (cli *fakeClient) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { - if cli.nodeUpdateFunc != nil { - return cli.nodeUpdateFunc(nodeID, version, node) - } - return nil -} - -func (cli *fakeClient) Info(ctx context.Context) (types.Info, error) { - if cli.infoFunc != nil { - return cli.infoFunc() - } - return types.Info{}, nil -} - -func (cli *fakeClient) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { - if cli.taskInspectFunc != nil { - return cli.taskInspectFunc(taskID) - } - return swarm.Task{}, []byte{}, nil -} - -func (cli *fakeClient) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { - if cli.taskListFunc != nil { - return cli.taskListFunc(options) - } - return []swarm.Task{}, nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/node/cmd.go b/fn/vendor/github.com/docker/docker/cli/command/node/cmd.go deleted file mode 100644 index ea8b40a9a..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/node/cmd.go +++ /dev/null @@ -1,57 +0,0 @@ -package node - -import ( - "errors" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - apiclient "github.com/docker/docker/client" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -// NewNodeCommand returns a cobra command for `node` subcommands -func NewNodeCommand(dockerCli *command.DockerCli) 
*cobra.Command { - cmd := &cobra.Command{ - Use: "node", - Short: "Manage Swarm nodes", - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - Tags: map[string]string{"version": "1.24"}, - } - cmd.AddCommand( - newDemoteCommand(dockerCli), - newInspectCommand(dockerCli), - newListCommand(dockerCli), - newPromoteCommand(dockerCli), - newRemoveCommand(dockerCli), - newPsCommand(dockerCli), - newUpdateCommand(dockerCli), - ) - return cmd -} - -// Reference returns the reference of a node. The special value "self" for a node -// reference is mapped to the current node, hence the node ID is retrieved using -// the `/info` endpoint. -func Reference(ctx context.Context, client apiclient.APIClient, ref string) (string, error) { - if ref == "self" { - info, err := client.Info(ctx) - if err != nil { - return "", err - } - if info.Swarm.NodeID == "" { - // If there's no node ID in /info, the node probably - // isn't a manager. Call a swarm-specific endpoint to - // get a more specific error message. - _, err = client.NodeList(ctx, types.NodeListOptions{}) - if err != nil { - return "", err - } - return "", errors.New("node ID not found in /info") - } - return info.Swarm.NodeID, nil - } - return ref, nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/node/demote.go b/fn/vendor/github.com/docker/docker/cli/command/node/demote.go deleted file mode 100644 index 72ed3ea63..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/node/demote.go +++ /dev/null @@ -1,36 +0,0 @@ -package node - -import ( - "fmt" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -func newDemoteCommand(dockerCli command.Cli) *cobra.Command { - return &cobra.Command{ - Use: "demote NODE [NODE...]", - Short: "Demote one or more nodes from manager in the swarm", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runDemote(dockerCli, args) - }, 
- } -} - -func runDemote(dockerCli command.Cli, nodes []string) error { - demote := func(node *swarm.Node) error { - if node.Spec.Role == swarm.NodeRoleWorker { - fmt.Fprintf(dockerCli.Out(), "Node %s is already a worker.\n", node.ID) - return errNoRoleChange - } - node.Spec.Role = swarm.NodeRoleWorker - return nil - } - success := func(nodeID string) { - fmt.Fprintf(dockerCli.Out(), "Manager %s demoted in the swarm.\n", nodeID) - } - return updateNodes(dockerCli, nodes, demote, success) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/node/demote_test.go b/fn/vendor/github.com/docker/docker/cli/command/node/demote_test.go deleted file mode 100644 index 710455ff5..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/node/demote_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package node - -import ( - "bytes" - "io/ioutil" - "testing" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/internal/test" - "github.com/pkg/errors" - // Import builders to get the builder function as package function - . 
"github.com/docker/docker/cli/internal/test/builders" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestNodeDemoteErrors(t *testing.T) { - testCases := []struct { - args []string - nodeInspectFunc func() (swarm.Node, []byte, error) - nodeUpdateFunc func(nodeID string, version swarm.Version, node swarm.NodeSpec) error - expectedError string - }{ - { - expectedError: "requires at least 1 argument", - }, - { - args: []string{"nodeID"}, - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return swarm.Node{}, []byte{}, errors.Errorf("error inspecting the node") - }, - expectedError: "error inspecting the node", - }, - { - args: []string{"nodeID"}, - nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { - return errors.Errorf("error updating the node") - }, - expectedError: "error updating the node", - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newDemoteCommand( - test.NewFakeCli(&fakeClient{ - nodeInspectFunc: tc.nodeInspectFunc, - nodeUpdateFunc: tc.nodeUpdateFunc, - }, buf)) - cmd.SetArgs(tc.args) - cmd.SetOutput(ioutil.Discard) - assert.Error(t, cmd.Execute(), tc.expectedError) - } -} - -func TestNodeDemoteNoChange(t *testing.T) { - buf := new(bytes.Buffer) - cmd := newDemoteCommand( - test.NewFakeCli(&fakeClient{ - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return *Node(), []byte{}, nil - }, - nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { - if node.Role != swarm.NodeRoleWorker { - return errors.Errorf("expected role worker, got %s", node.Role) - } - return nil - }, - }, buf)) - cmd.SetArgs([]string{"nodeID"}) - assert.NilError(t, cmd.Execute()) -} - -func TestNodeDemoteMultipleNode(t *testing.T) { - buf := new(bytes.Buffer) - cmd := newDemoteCommand( - test.NewFakeCli(&fakeClient{ - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return *Node(Manager()), []byte{}, nil - }, - nodeUpdateFunc: func(nodeID string, version 
swarm.Version, node swarm.NodeSpec) error { - if node.Role != swarm.NodeRoleWorker { - return errors.Errorf("expected role worker, got %s", node.Role) - } - return nil - }, - }, buf)) - cmd.SetArgs([]string{"nodeID1", "nodeID2"}) - assert.NilError(t, cmd.Execute()) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/node/inspect.go b/fn/vendor/github.com/docker/docker/cli/command/node/inspect.go deleted file mode 100644 index 39b90bb72..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/node/inspect.go +++ /dev/null @@ -1,72 +0,0 @@ -package node - -import ( - "fmt" - "strings" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type inspectOptions struct { - nodeIds []string - format string - pretty bool -} - -func newInspectCommand(dockerCli command.Cli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] self|NODE [NODE...]", - Short: "Display detailed information on one or more nodes", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.nodeIds = args - return runInspect(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - flags.BoolVar(&opts.pretty, "pretty", false, "Print the information in a human friendly format") - return cmd -} - -func runInspect(dockerCli command.Cli, opts inspectOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - if opts.pretty { - opts.format = "pretty" - } - - getRef := func(ref string) (interface{}, []byte, error) { - nodeRef, err := Reference(ctx, client, ref) - if err != nil { - return nil, nil, err - } - node, _, err := client.NodeInspectWithRaw(ctx, nodeRef) - return node, nil, err - } - f := opts.format - - // check if the user is trying to apply a template 
to the pretty format, which - // is not supported - if strings.HasPrefix(f, "pretty") && f != "pretty" { - return fmt.Errorf("Cannot supply extra formatting options to the pretty template") - } - - nodeCtx := formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewNodeFormat(f, false), - } - - if err := formatter.NodeInspectWrite(nodeCtx, opts.nodeIds, getRef); err != nil { - return cli.StatusError{StatusCode: 1, Status: err.Error()} - } - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/node/inspect_test.go b/fn/vendor/github.com/docker/docker/cli/command/node/inspect_test.go deleted file mode 100644 index 9b6a04f11..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/node/inspect_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package node - -import ( - "bytes" - "fmt" - "io/ioutil" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/internal/test" - "github.com/pkg/errors" - // Import builders to get the builder function as package function - . 
"github.com/docker/docker/cli/internal/test/builders" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/docker/docker/pkg/testutil/golden" -) - -func TestNodeInspectErrors(t *testing.T) { - testCases := []struct { - args []string - flags map[string]string - nodeInspectFunc func() (swarm.Node, []byte, error) - infoFunc func() (types.Info, error) - expectedError string - }{ - { - expectedError: "requires at least 1 argument", - }, - { - args: []string{"self"}, - infoFunc: func() (types.Info, error) { - return types.Info{}, errors.Errorf("error asking for node info") - }, - expectedError: "error asking for node info", - }, - { - args: []string{"nodeID"}, - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return swarm.Node{}, []byte{}, errors.Errorf("error inspecting the node") - }, - infoFunc: func() (types.Info, error) { - return types.Info{}, errors.Errorf("error asking for node info") - }, - expectedError: "error inspecting the node", - }, - { - args: []string{"self"}, - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return swarm.Node{}, []byte{}, errors.Errorf("error inspecting the node") - }, - infoFunc: func() (types.Info, error) { - return types.Info{Swarm: swarm.Info{NodeID: "abc"}}, nil - }, - expectedError: "error inspecting the node", - }, - { - args: []string{"self"}, - flags: map[string]string{ - "pretty": "true", - }, - infoFunc: func() (types.Info, error) { - return types.Info{}, errors.Errorf("error asking for node info") - }, - expectedError: "error asking for node info", - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newInspectCommand( - test.NewFakeCli(&fakeClient{ - nodeInspectFunc: tc.nodeInspectFunc, - infoFunc: tc.infoFunc, - }, buf)) - cmd.SetArgs(tc.args) - for key, value := range tc.flags { - cmd.Flags().Set(key, value) - } - cmd.SetOutput(ioutil.Discard) - assert.Error(t, cmd.Execute(), tc.expectedError) - } -} - -func TestNodeInspectPretty(t *testing.T) { - testCases := []struct { 
- name string - nodeInspectFunc func() (swarm.Node, []byte, error) - }{ - { - name: "simple", - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return *Node(NodeLabels(map[string]string{ - "lbl1": "value1", - })), []byte{}, nil - }, - }, - { - name: "manager", - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return *Node(Manager()), []byte{}, nil - }, - }, - { - name: "manager-leader", - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return *Node(Manager(Leader())), []byte{}, nil - }, - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newInspectCommand( - test.NewFakeCli(&fakeClient{ - nodeInspectFunc: tc.nodeInspectFunc, - }, buf)) - cmd.SetArgs([]string{"nodeID"}) - cmd.Flags().Set("pretty", "true") - assert.NilError(t, cmd.Execute()) - actual := buf.String() - expected := golden.Get(t, []byte(actual), fmt.Sprintf("node-inspect-pretty.%s.golden", tc.name)) - assert.EqualNormalizedString(t, assert.RemoveSpace, actual, string(expected)) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/node/list.go b/fn/vendor/github.com/docker/docker/cli/command/node/list.go deleted file mode 100644 index 9c6224dd1..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/node/list.go +++ /dev/null @@ -1,73 +0,0 @@ -package node - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - "github.com/docker/docker/opts" - "github.com/spf13/cobra" -) - -type listOptions struct { - quiet bool - format string - filter opts.FilterOpt -} - -func newListCommand(dockerCli command.Cli) *cobra.Command { - opts := listOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ls [OPTIONS]", - Aliases: []string{"list"}, - Short: "List nodes in the swarm", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runList(dockerCli, 
opts) - }, - } - flags := cmd.Flags() - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") - flags.StringVar(&opts.format, "format", "", "Pretty-print nodes using a Go template") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func runList(dockerCli command.Cli, opts listOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - nodes, err := client.NodeList( - ctx, - types.NodeListOptions{Filters: opts.filter.Value()}) - if err != nil { - return err - } - - info := types.Info{} - if len(nodes) > 0 && !opts.quiet { - // only non-empty nodes and not quiet, should we call /info api - info, err = client.Info(ctx) - if err != nil { - return err - } - } - - format := opts.format - if len(format) == 0 { - format = formatter.TableFormatKey - if len(dockerCli.ConfigFile().NodesFormat) > 0 && !opts.quiet { - format = dockerCli.ConfigFile().NodesFormat - } - } - - nodesCtx := formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewNodeFormat(format, opts.quiet), - } - return formatter.NodeWrite(nodesCtx, nodes, info) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/node/list_test.go b/fn/vendor/github.com/docker/docker/cli/command/node/list_test.go deleted file mode 100644 index 4b8d906c3..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/node/list_test.go +++ /dev/null @@ -1,162 +0,0 @@ -package node - -import ( - "bytes" - "io/ioutil" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/config/configfile" - "github.com/docker/docker/cli/internal/test" - "github.com/pkg/errors" - // Import builders to get the builder function as package function - . 
"github.com/docker/docker/cli/internal/test/builders" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestNodeListErrorOnAPIFailure(t *testing.T) { - testCases := []struct { - nodeListFunc func() ([]swarm.Node, error) - infoFunc func() (types.Info, error) - expectedError string - }{ - { - nodeListFunc: func() ([]swarm.Node, error) { - return []swarm.Node{}, errors.Errorf("error listing nodes") - }, - expectedError: "error listing nodes", - }, - { - nodeListFunc: func() ([]swarm.Node, error) { - return []swarm.Node{ - { - ID: "nodeID", - }, - }, nil - }, - infoFunc: func() (types.Info, error) { - return types.Info{}, errors.Errorf("error asking for node info") - }, - expectedError: "error asking for node info", - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cli := test.NewFakeCli(&fakeClient{ - nodeListFunc: tc.nodeListFunc, - infoFunc: tc.infoFunc, - }, buf) - cli.SetConfigfile(&configfile.ConfigFile{}) - cmd := newListCommand(cli) - cmd.SetOutput(ioutil.Discard) - assert.Error(t, cmd.Execute(), tc.expectedError) - } -} - -func TestNodeList(t *testing.T) { - buf := new(bytes.Buffer) - cli := test.NewFakeCli(&fakeClient{ - nodeListFunc: func() ([]swarm.Node, error) { - return []swarm.Node{ - *Node(NodeID("nodeID1"), Hostname("nodeHostname1"), Manager(Leader())), - *Node(NodeID("nodeID2"), Hostname("nodeHostname2"), Manager()), - *Node(NodeID("nodeID3"), Hostname("nodeHostname3")), - }, nil - }, - infoFunc: func() (types.Info, error) { - return types.Info{ - Swarm: swarm.Info{ - NodeID: "nodeID1", - }, - }, nil - }, - }, buf) - cli.SetConfigfile(&configfile.ConfigFile{}) - cmd := newListCommand(cli) - assert.NilError(t, cmd.Execute()) - assert.Contains(t, buf.String(), `nodeID1 * nodeHostname1 Ready Active Leader`) - assert.Contains(t, buf.String(), `nodeID2 nodeHostname2 Ready Active Reachable`) - assert.Contains(t, buf.String(), `nodeID3 nodeHostname3 Ready Active`) -} - -func TestNodeListQuietShouldOnlyPrintIDs(t *testing.T) { - 
buf := new(bytes.Buffer) - cli := test.NewFakeCli(&fakeClient{ - nodeListFunc: func() ([]swarm.Node, error) { - return []swarm.Node{ - *Node(), - }, nil - }, - }, buf) - cli.SetConfigfile(&configfile.ConfigFile{}) - cmd := newListCommand(cli) - cmd.Flags().Set("quiet", "true") - assert.NilError(t, cmd.Execute()) - assert.Contains(t, buf.String(), "nodeID") -} - -// Test case for #24090 -func TestNodeListContainsHostname(t *testing.T) { - buf := new(bytes.Buffer) - cli := test.NewFakeCli(&fakeClient{}, buf) - cli.SetConfigfile(&configfile.ConfigFile{}) - cmd := newListCommand(cli) - assert.NilError(t, cmd.Execute()) - assert.Contains(t, buf.String(), "HOSTNAME") -} - -func TestNodeListDefaultFormat(t *testing.T) { - buf := new(bytes.Buffer) - cli := test.NewFakeCli(&fakeClient{ - nodeListFunc: func() ([]swarm.Node, error) { - return []swarm.Node{ - *Node(NodeID("nodeID1"), Hostname("nodeHostname1"), Manager(Leader())), - *Node(NodeID("nodeID2"), Hostname("nodeHostname2"), Manager()), - *Node(NodeID("nodeID3"), Hostname("nodeHostname3")), - }, nil - }, - infoFunc: func() (types.Info, error) { - return types.Info{ - Swarm: swarm.Info{ - NodeID: "nodeID1", - }, - }, nil - }, - }, buf) - cli.SetConfigfile(&configfile.ConfigFile{ - NodesFormat: "{{.ID}}: {{.Hostname}} {{.Status}}/{{.ManagerStatus}}", - }) - cmd := newListCommand(cli) - assert.NilError(t, cmd.Execute()) - assert.Contains(t, buf.String(), `nodeID1: nodeHostname1 Ready/Leader`) - assert.Contains(t, buf.String(), `nodeID2: nodeHostname2 Ready/Reachable`) - assert.Contains(t, buf.String(), `nodeID3: nodeHostname3 Ready`) -} - -func TestNodeListFormat(t *testing.T) { - buf := new(bytes.Buffer) - cli := test.NewFakeCli(&fakeClient{ - nodeListFunc: func() ([]swarm.Node, error) { - return []swarm.Node{ - *Node(NodeID("nodeID1"), Hostname("nodeHostname1"), Manager(Leader())), - *Node(NodeID("nodeID2"), Hostname("nodeHostname2"), Manager()), - }, nil - }, - infoFunc: func() (types.Info, error) { - return 
types.Info{ - Swarm: swarm.Info{ - NodeID: "nodeID1", - }, - }, nil - }, - }, buf) - cli.SetConfigfile(&configfile.ConfigFile{ - NodesFormat: "{{.ID}}: {{.Hostname}} {{.Status}}/{{.ManagerStatus}}", - }) - cmd := newListCommand(cli) - cmd.Flags().Set("format", "{{.Hostname}}: {{.ManagerStatus}}") - assert.NilError(t, cmd.Execute()) - assert.Contains(t, buf.String(), `nodeHostname1: Leader`) - assert.Contains(t, buf.String(), `nodeHostname2: Reachable`) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/node/opts.go b/fn/vendor/github.com/docker/docker/cli/command/node/opts.go deleted file mode 100644 index 0ad365f0c..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/node/opts.go +++ /dev/null @@ -1,24 +0,0 @@ -package node - -import ( - "github.com/docker/docker/opts" -) - -type nodeOptions struct { - annotations - role string - availability string -} - -type annotations struct { - name string - labels opts.ListOpts -} - -func newNodeOptions() *nodeOptions { - return &nodeOptions{ - annotations: annotations{ - labels: opts.NewListOpts(nil), - }, - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/node/promote.go b/fn/vendor/github.com/docker/docker/cli/command/node/promote.go deleted file mode 100644 index 94fff6400..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/node/promote.go +++ /dev/null @@ -1,36 +0,0 @@ -package node - -import ( - "fmt" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -func newPromoteCommand(dockerCli command.Cli) *cobra.Command { - return &cobra.Command{ - Use: "promote NODE [NODE...]", - Short: "Promote one or more nodes to manager in the swarm", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runPromote(dockerCli, args) - }, - } -} - -func runPromote(dockerCli command.Cli, nodes []string) error { - promote := func(node *swarm.Node) 
error { - if node.Spec.Role == swarm.NodeRoleManager { - fmt.Fprintf(dockerCli.Out(), "Node %s is already a manager.\n", node.ID) - return errNoRoleChange - } - node.Spec.Role = swarm.NodeRoleManager - return nil - } - success := func(nodeID string) { - fmt.Fprintf(dockerCli.Out(), "Node %s promoted to a manager in the swarm.\n", nodeID) - } - return updateNodes(dockerCli, nodes, promote, success) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/node/promote_test.go b/fn/vendor/github.com/docker/docker/cli/command/node/promote_test.go deleted file mode 100644 index 9b646724d..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/node/promote_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package node - -import ( - "bytes" - "io/ioutil" - "testing" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/internal/test" - "github.com/pkg/errors" - // Import builders to get the builder function as package function - . "github.com/docker/docker/cli/internal/test/builders" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestNodePromoteErrors(t *testing.T) { - testCases := []struct { - args []string - nodeInspectFunc func() (swarm.Node, []byte, error) - nodeUpdateFunc func(nodeID string, version swarm.Version, node swarm.NodeSpec) error - expectedError string - }{ - { - expectedError: "requires at least 1 argument", - }, - { - args: []string{"nodeID"}, - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return swarm.Node{}, []byte{}, errors.Errorf("error inspecting the node") - }, - expectedError: "error inspecting the node", - }, - { - args: []string{"nodeID"}, - nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { - return errors.Errorf("error updating the node") - }, - expectedError: "error updating the node", - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newPromoteCommand( - test.NewFakeCli(&fakeClient{ - nodeInspectFunc: tc.nodeInspectFunc, - 
nodeUpdateFunc: tc.nodeUpdateFunc, - }, buf)) - cmd.SetArgs(tc.args) - cmd.SetOutput(ioutil.Discard) - assert.Error(t, cmd.Execute(), tc.expectedError) - } -} - -func TestNodePromoteNoChange(t *testing.T) { - buf := new(bytes.Buffer) - cmd := newPromoteCommand( - test.NewFakeCli(&fakeClient{ - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return *Node(Manager()), []byte{}, nil - }, - nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { - if node.Role != swarm.NodeRoleManager { - return errors.Errorf("expected role manager, got %s", node.Role) - } - return nil - }, - }, buf)) - cmd.SetArgs([]string{"nodeID"}) - assert.NilError(t, cmd.Execute()) -} - -func TestNodePromoteMultipleNode(t *testing.T) { - buf := new(bytes.Buffer) - cmd := newPromoteCommand( - test.NewFakeCli(&fakeClient{ - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return *Node(), []byte{}, nil - }, - nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { - if node.Role != swarm.NodeRoleManager { - return errors.Errorf("expected role manager, got %s", node.Role) - } - return nil - }, - }, buf)) - cmd.SetArgs([]string{"nodeID1", "nodeID2"}) - assert.NilError(t, cmd.Execute()) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/node/ps.go b/fn/vendor/github.com/docker/docker/cli/command/node/ps.go deleted file mode 100644 index 0ab1c0b9f..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/node/ps.go +++ /dev/null @@ -1,109 +0,0 @@ -package node - -import ( - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - "github.com/docker/docker/cli/command/idresolver" - "github.com/docker/docker/cli/command/task" - "github.com/docker/docker/opts" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - 
-type psOptions struct { - nodeIDs []string - noResolve bool - noTrunc bool - quiet bool - format string - filter opts.FilterOpt -} - -func newPsCommand(dockerCli command.Cli) *cobra.Command { - opts := psOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ps [OPTIONS] [NODE...]", - Short: "List tasks running on one or more nodes, defaults to current node", - Args: cli.RequiresMinArgs(0), - RunE: func(cmd *cobra.Command, args []string) error { - opts.nodeIDs = []string{"self"} - - if len(args) != 0 { - opts.nodeIDs = args - } - - return runPs(dockerCli, opts) - }, - } - flags := cmd.Flags() - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate output") - flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - flags.StringVar(&opts.format, "format", "", "Pretty-print tasks using a Go template") - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display task IDs") - - return cmd -} - -func runPs(dockerCli command.Cli, opts psOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - var ( - errs []string - tasks []swarm.Task - ) - - for _, nodeID := range opts.nodeIDs { - nodeRef, err := Reference(ctx, client, nodeID) - if err != nil { - errs = append(errs, err.Error()) - continue - } - - node, _, err := client.NodeInspectWithRaw(ctx, nodeRef) - if err != nil { - errs = append(errs, err.Error()) - continue - } - - filter := opts.filter.Value() - filter.Add("node", node.ID) - - nodeTasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter}) - if err != nil { - errs = append(errs, err.Error()) - continue - } - - tasks = append(tasks, nodeTasks...) 
- } - - format := opts.format - if len(format) == 0 { - if dockerCli.ConfigFile() != nil && len(dockerCli.ConfigFile().TasksFormat) > 0 && !opts.quiet { - format = dockerCli.ConfigFile().TasksFormat - } else { - format = formatter.TableFormatKey - } - } - - if len(errs) == 0 || len(tasks) != 0 { - if err := task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve), !opts.noTrunc, opts.quiet, format); err != nil { - errs = append(errs, err.Error()) - } - } - - if len(errs) > 0 { - return errors.Errorf("%s", strings.Join(errs, "\n")) - } - - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/node/ps_test.go b/fn/vendor/github.com/docker/docker/cli/command/node/ps_test.go deleted file mode 100644 index de6ff7d57..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/node/ps_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package node - -import ( - "bytes" - "fmt" - "io/ioutil" - "testing" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/internal/test" - "github.com/pkg/errors" - // Import builders to get the builder function as package function - . 
"github.com/docker/docker/cli/internal/test/builders" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/docker/docker/pkg/testutil/golden" -) - -func TestNodePsErrors(t *testing.T) { - testCases := []struct { - args []string - flags map[string]string - infoFunc func() (types.Info, error) - nodeInspectFunc func() (swarm.Node, []byte, error) - taskListFunc func(options types.TaskListOptions) ([]swarm.Task, error) - taskInspectFunc func(taskID string) (swarm.Task, []byte, error) - expectedError string - }{ - { - infoFunc: func() (types.Info, error) { - return types.Info{}, errors.Errorf("error asking for node info") - }, - expectedError: "error asking for node info", - }, - { - args: []string{"nodeID"}, - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return swarm.Node{}, []byte{}, errors.Errorf("error inspecting the node") - }, - expectedError: "error inspecting the node", - }, - { - args: []string{"nodeID"}, - taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { - return []swarm.Task{}, errors.Errorf("error returning the task list") - }, - expectedError: "error returning the task list", - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newPsCommand( - test.NewFakeCli(&fakeClient{ - infoFunc: tc.infoFunc, - nodeInspectFunc: tc.nodeInspectFunc, - taskInspectFunc: tc.taskInspectFunc, - taskListFunc: tc.taskListFunc, - }, buf)) - cmd.SetArgs(tc.args) - for key, value := range tc.flags { - cmd.Flags().Set(key, value) - } - cmd.SetOutput(ioutil.Discard) - assert.Error(t, cmd.Execute(), tc.expectedError) - } -} - -func TestNodePs(t *testing.T) { - testCases := []struct { - name string - args []string - flags map[string]string - infoFunc func() (types.Info, error) - nodeInspectFunc func() (swarm.Node, []byte, error) - taskListFunc func(options types.TaskListOptions) ([]swarm.Task, error) - taskInspectFunc func(taskID string) (swarm.Task, []byte, error) - }{ - { - name: "simple", - args: 
[]string{"nodeID"}, - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return *Node(), []byte{}, nil - }, - taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { - return []swarm.Task{ - *Task(WithStatus(Timestamp(time.Now().Add(-2*time.Hour)), PortStatus([]swarm.PortConfig{ - { - TargetPort: 80, - PublishedPort: 80, - Protocol: "tcp", - }, - }))), - }, nil - }, - }, - { - name: "with-errors", - args: []string{"nodeID"}, - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return *Node(), []byte{}, nil - }, - taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { - return []swarm.Task{ - *Task(TaskID("taskID1"), ServiceID("failure"), - WithStatus(Timestamp(time.Now().Add(-2*time.Hour)), StatusErr("a task error"))), - *Task(TaskID("taskID2"), ServiceID("failure"), - WithStatus(Timestamp(time.Now().Add(-3*time.Hour)), StatusErr("a task error"))), - *Task(TaskID("taskID3"), ServiceID("failure"), - WithStatus(Timestamp(time.Now().Add(-4*time.Hour)), StatusErr("a task error"))), - }, nil - }, - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newPsCommand( - test.NewFakeCli(&fakeClient{ - infoFunc: tc.infoFunc, - nodeInspectFunc: tc.nodeInspectFunc, - taskInspectFunc: tc.taskInspectFunc, - taskListFunc: tc.taskListFunc, - }, buf)) - cmd.SetArgs(tc.args) - for key, value := range tc.flags { - cmd.Flags().Set(key, value) - } - assert.NilError(t, cmd.Execute()) - actual := buf.String() - expected := golden.Get(t, []byte(actual), fmt.Sprintf("node-ps.%s.golden", tc.name)) - assert.EqualNormalizedString(t, assert.RemoveSpace, actual, string(expected)) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/node/remove.go b/fn/vendor/github.com/docker/docker/cli/command/node/remove.go deleted file mode 100644 index bd429ee45..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/node/remove.go +++ /dev/null @@ -1,57 +0,0 @@ -package node - -import ( - "fmt" - "strings" - - 
"golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -type removeOptions struct { - force bool -} - -func newRemoveCommand(dockerCli command.Cli) *cobra.Command { - opts := removeOptions{} - - cmd := &cobra.Command{ - Use: "rm [OPTIONS] NODE [NODE...]", - Aliases: []string{"remove"}, - Short: "Remove one or more nodes from the swarm", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runRemove(dockerCli, args, opts) - }, - } - flags := cmd.Flags() - flags.BoolVarP(&opts.force, "force", "f", false, "Force remove a node from the swarm") - return cmd -} - -func runRemove(dockerCli command.Cli, args []string, opts removeOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - var errs []string - - for _, nodeID := range args { - err := client.NodeRemove(ctx, nodeID, types.NodeRemoveOptions{Force: opts.force}) - if err != nil { - errs = append(errs, err.Error()) - continue - } - fmt.Fprintf(dockerCli.Out(), "%s\n", nodeID) - } - - if len(errs) > 0 { - return errors.Errorf("%s", strings.Join(errs, "\n")) - } - - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/node/remove_test.go b/fn/vendor/github.com/docker/docker/cli/command/node/remove_test.go deleted file mode 100644 index d7e742aa4..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/node/remove_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package node - -import ( - "bytes" - "io/ioutil" - "testing" - - "github.com/docker/docker/cli/internal/test" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/pkg/errors" -) - -func TestNodeRemoveErrors(t *testing.T) { - testCases := []struct { - args []string - nodeRemoveFunc func() error - expectedError string - }{ - { - expectedError: "requires at least 1 argument", - }, - { - args: []string{"nodeID"}, - 
nodeRemoveFunc: func() error { - return errors.Errorf("error removing the node") - }, - expectedError: "error removing the node", - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newRemoveCommand( - test.NewFakeCli(&fakeClient{ - nodeRemoveFunc: tc.nodeRemoveFunc, - }, buf)) - cmd.SetArgs(tc.args) - cmd.SetOutput(ioutil.Discard) - assert.Error(t, cmd.Execute(), tc.expectedError) - } -} - -func TestNodeRemoveMultiple(t *testing.T) { - buf := new(bytes.Buffer) - cmd := newRemoveCommand(test.NewFakeCli(&fakeClient{}, buf)) - cmd.SetArgs([]string{"nodeID1", "nodeID2"}) - assert.NilError(t, cmd.Execute()) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/node/testdata/node-inspect-pretty.manager-leader.golden b/fn/vendor/github.com/docker/docker/cli/command/node/testdata/node-inspect-pretty.manager-leader.golden deleted file mode 100644 index 461fc46ea..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/node/testdata/node-inspect-pretty.manager-leader.golden +++ /dev/null @@ -1,25 +0,0 @@ -ID: nodeID -Name: defaultNodeName -Hostname: defaultNodeHostname -Joined at: 2009-11-10 23:00:00 +0000 utc -Status: - State: Ready - Availability: Active - Address: 127.0.0.1 -Manager Status: - Address: 127.0.0.1 - Raft Status: Reachable - Leader: Yes -Platform: - Operating System: linux - Architecture: x86_64 -Resources: - CPUs: 0 - Memory: 20 MiB -Plugins: - Network: bridge, overlay - Volume: local -Engine Version: 1.13.0 -Engine Labels: - - engine = label - diff --git a/fn/vendor/github.com/docker/docker/cli/command/node/testdata/node-inspect-pretty.manager.golden b/fn/vendor/github.com/docker/docker/cli/command/node/testdata/node-inspect-pretty.manager.golden deleted file mode 100644 index 2c660188d..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/node/testdata/node-inspect-pretty.manager.golden +++ /dev/null @@ -1,25 +0,0 @@ -ID: nodeID -Name: defaultNodeName -Hostname: defaultNodeHostname -Joined at: 2009-11-10 
23:00:00 +0000 utc -Status: - State: Ready - Availability: Active - Address: 127.0.0.1 -Manager Status: - Address: 127.0.0.1 - Raft Status: Reachable - Leader: No -Platform: - Operating System: linux - Architecture: x86_64 -Resources: - CPUs: 0 - Memory: 20 MiB -Plugins: - Network: bridge, overlay - Volume: local -Engine Version: 1.13.0 -Engine Labels: - - engine = label - diff --git a/fn/vendor/github.com/docker/docker/cli/command/node/testdata/node-inspect-pretty.simple.golden b/fn/vendor/github.com/docker/docker/cli/command/node/testdata/node-inspect-pretty.simple.golden deleted file mode 100644 index e63bc1259..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/node/testdata/node-inspect-pretty.simple.golden +++ /dev/null @@ -1,23 +0,0 @@ -ID: nodeID -Name: defaultNodeName -Labels: - - lbl1 = value1 -Hostname: defaultNodeHostname -Joined at: 2009-11-10 23:00:00 +0000 utc -Status: - State: Ready - Availability: Active - Address: 127.0.0.1 -Platform: - Operating System: linux - Architecture: x86_64 -Resources: - CPUs: 0 - Memory: 20 MiB -Plugins: - Network: bridge, overlay - Volume: local -Engine Version: 1.13.0 -Engine Labels: - - engine = label - diff --git a/fn/vendor/github.com/docker/docker/cli/command/node/testdata/node-ps.simple.golden b/fn/vendor/github.com/docker/docker/cli/command/node/testdata/node-ps.simple.golden deleted file mode 100644 index f9555d879..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/node/testdata/node-ps.simple.golden +++ /dev/null @@ -1,2 +0,0 @@ -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -taskID rl02d5gwz6chzu7il5fhtb8be.1 myimage:mytag defaultNodeName Ready Ready 2 hours ago *:80->80/tcp diff --git a/fn/vendor/github.com/docker/docker/cli/command/node/testdata/node-ps.with-errors.golden b/fn/vendor/github.com/docker/docker/cli/command/node/testdata/node-ps.with-errors.golden deleted file mode 100644 index 273b30fa1..000000000 --- 
a/fn/vendor/github.com/docker/docker/cli/command/node/testdata/node-ps.with-errors.golden +++ /dev/null @@ -1,4 +0,0 @@ -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -taskID1 failure.1 myimage:mytag defaultNodeName Ready Ready 2 hours ago "a task error" -taskID2 \_ failure.1 myimage:mytag defaultNodeName Ready Ready 3 hours ago "a task error" -taskID3 \_ failure.1 myimage:mytag defaultNodeName Ready Ready 4 hours ago "a task error" diff --git a/fn/vendor/github.com/docker/docker/cli/command/node/update.go b/fn/vendor/github.com/docker/docker/cli/command/node/update.go deleted file mode 100644 index 82668595a..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/node/update.go +++ /dev/null @@ -1,121 +0,0 @@ -package node - -import ( - "fmt" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "golang.org/x/net/context" -) - -var ( - errNoRoleChange = errors.New("role was already set to the requested value") -) - -func newUpdateCommand(dockerCli command.Cli) *cobra.Command { - nodeOpts := newNodeOptions() - - cmd := &cobra.Command{ - Use: "update [OPTIONS] NODE", - Short: "Update a node", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runUpdate(dockerCli, cmd.Flags(), args[0]) - }, - } - - flags := cmd.Flags() - flags.StringVar(&nodeOpts.role, flagRole, "", `Role of the node ("worker"|"manager")`) - flags.StringVar(&nodeOpts.availability, flagAvailability, "", `Availability of the node ("active"|"pause"|"drain")`) - flags.Var(&nodeOpts.annotations.labels, flagLabelAdd, "Add or update a node label (key=value)") - labelKeys := opts.NewListOpts(nil) - flags.Var(&labelKeys, flagLabelRemove, "Remove a node label if exists") - return cmd -} - -func runUpdate(dockerCli 
command.Cli, flags *pflag.FlagSet, nodeID string) error { - success := func(_ string) { - fmt.Fprintln(dockerCli.Out(), nodeID) - } - return updateNodes(dockerCli, []string{nodeID}, mergeNodeUpdate(flags), success) -} - -func updateNodes(dockerCli command.Cli, nodes []string, mergeNode func(node *swarm.Node) error, success func(nodeID string)) error { - client := dockerCli.Client() - ctx := context.Background() - - for _, nodeID := range nodes { - node, _, err := client.NodeInspectWithRaw(ctx, nodeID) - if err != nil { - return err - } - - err = mergeNode(&node) - if err != nil { - if err == errNoRoleChange { - continue - } - return err - } - err = client.NodeUpdate(ctx, node.ID, node.Version, node.Spec) - if err != nil { - return err - } - success(nodeID) - } - return nil -} - -func mergeNodeUpdate(flags *pflag.FlagSet) func(*swarm.Node) error { - return func(node *swarm.Node) error { - spec := &node.Spec - - if flags.Changed(flagRole) { - str, err := flags.GetString(flagRole) - if err != nil { - return err - } - spec.Role = swarm.NodeRole(str) - } - if flags.Changed(flagAvailability) { - str, err := flags.GetString(flagAvailability) - if err != nil { - return err - } - spec.Availability = swarm.NodeAvailability(str) - } - if spec.Annotations.Labels == nil { - spec.Annotations.Labels = make(map[string]string) - } - if flags.Changed(flagLabelAdd) { - labels := flags.Lookup(flagLabelAdd).Value.(*opts.ListOpts).GetAll() - for k, v := range runconfigopts.ConvertKVStringsToMap(labels) { - spec.Annotations.Labels[k] = v - } - } - if flags.Changed(flagLabelRemove) { - keys := flags.Lookup(flagLabelRemove).Value.(*opts.ListOpts).GetAll() - for _, k := range keys { - // if a key doesn't exist, fail the command explicitly - if _, exists := spec.Annotations.Labels[k]; !exists { - return errors.Errorf("key %s doesn't exist in node's labels", k) - } - delete(spec.Annotations.Labels, k) - } - } - return nil - } -} - -const ( - flagRole = "role" - flagAvailability = 
"availability" - flagLabelAdd = "label-add" - flagLabelRemove = "label-rm" -) diff --git a/fn/vendor/github.com/docker/docker/cli/command/node/update_test.go b/fn/vendor/github.com/docker/docker/cli/command/node/update_test.go deleted file mode 100644 index 493a38627..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/node/update_test.go +++ /dev/null @@ -1,172 +0,0 @@ -package node - -import ( - "bytes" - "io/ioutil" - "testing" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/internal/test" - "github.com/pkg/errors" - // Import builders to get the builder function as package function - . "github.com/docker/docker/cli/internal/test/builders" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestNodeUpdateErrors(t *testing.T) { - testCases := []struct { - args []string - flags map[string]string - nodeInspectFunc func() (swarm.Node, []byte, error) - nodeUpdateFunc func(nodeID string, version swarm.Version, node swarm.NodeSpec) error - expectedError string - }{ - { - expectedError: "requires exactly 1 argument", - }, - { - args: []string{"node1", "node2"}, - expectedError: "requires exactly 1 argument", - }, - { - args: []string{"nodeID"}, - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return swarm.Node{}, []byte{}, errors.Errorf("error inspecting the node") - }, - expectedError: "error inspecting the node", - }, - { - args: []string{"nodeID"}, - nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { - return errors.Errorf("error updating the node") - }, - expectedError: "error updating the node", - }, - { - args: []string{"nodeID"}, - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return *Node(NodeLabels(map[string]string{ - "key": "value", - })), []byte{}, nil - }, - flags: map[string]string{ - "label-rm": "notpresent", - }, - expectedError: "key notpresent doesn't exist in node's labels", - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - 
cmd := newUpdateCommand( - test.NewFakeCli(&fakeClient{ - nodeInspectFunc: tc.nodeInspectFunc, - nodeUpdateFunc: tc.nodeUpdateFunc, - }, buf)) - cmd.SetArgs(tc.args) - for key, value := range tc.flags { - cmd.Flags().Set(key, value) - } - cmd.SetOutput(ioutil.Discard) - assert.Error(t, cmd.Execute(), tc.expectedError) - } -} - -func TestNodeUpdate(t *testing.T) { - testCases := []struct { - args []string - flags map[string]string - nodeInspectFunc func() (swarm.Node, []byte, error) - nodeUpdateFunc func(nodeID string, version swarm.Version, node swarm.NodeSpec) error - }{ - { - args: []string{"nodeID"}, - flags: map[string]string{ - "role": "manager", - }, - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return *Node(), []byte{}, nil - }, - nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { - if node.Role != swarm.NodeRoleManager { - return errors.Errorf("expected role manager, got %s", node.Role) - } - return nil - }, - }, - { - args: []string{"nodeID"}, - flags: map[string]string{ - "availability": "drain", - }, - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return *Node(), []byte{}, nil - }, - nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { - if node.Availability != swarm.NodeAvailabilityDrain { - return errors.Errorf("expected drain availability, got %s", node.Availability) - } - return nil - }, - }, - { - args: []string{"nodeID"}, - flags: map[string]string{ - "label-add": "lbl", - }, - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return *Node(), []byte{}, nil - }, - nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { - if _, present := node.Annotations.Labels["lbl"]; !present { - return errors.Errorf("expected 'lbl' label, got %v", node.Annotations.Labels) - } - return nil - }, - }, - { - args: []string{"nodeID"}, - flags: map[string]string{ - "label-add": "key=value", - }, - nodeInspectFunc: func() (swarm.Node, 
[]byte, error) { - return *Node(), []byte{}, nil - }, - nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { - if value, present := node.Annotations.Labels["key"]; !present || value != "value" { - return errors.Errorf("expected 'key' label to be 'value', got %v", node.Annotations.Labels) - } - return nil - }, - }, - { - args: []string{"nodeID"}, - flags: map[string]string{ - "label-rm": "key", - }, - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return *Node(NodeLabels(map[string]string{ - "key": "value", - })), []byte{}, nil - }, - nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { - if len(node.Annotations.Labels) > 0 { - return errors.Errorf("expected no labels, got %v", node.Annotations.Labels) - } - return nil - }, - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newUpdateCommand( - test.NewFakeCli(&fakeClient{ - nodeInspectFunc: tc.nodeInspectFunc, - nodeUpdateFunc: tc.nodeUpdateFunc, - }, buf)) - cmd.SetArgs(tc.args) - for key, value := range tc.flags { - cmd.Flags().Set(key, value) - } - assert.NilError(t, cmd.Execute()) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/out.go b/fn/vendor/github.com/docker/docker/cli/command/out.go deleted file mode 100644 index 85718d7ac..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/out.go +++ /dev/null @@ -1,69 +0,0 @@ -package command - -import ( - "io" - "os" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/term" -) - -// OutStream is an output stream used by the DockerCli to write normal program -// output. 
-type OutStream struct { - out io.Writer - fd uintptr - isTerminal bool - state *term.State -} - -func (o *OutStream) Write(p []byte) (int, error) { - return o.out.Write(p) -} - -// FD returns the file descriptor number for this stream -func (o *OutStream) FD() uintptr { - return o.fd -} - -// IsTerminal returns true if this stream is connected to a terminal -func (o *OutStream) IsTerminal() bool { - return o.isTerminal -} - -// SetRawTerminal sets raw mode on the output terminal -func (o *OutStream) SetRawTerminal() (err error) { - if os.Getenv("NORAW") != "" || !o.isTerminal { - return nil - } - o.state, err = term.SetRawTerminalOutput(o.fd) - return err -} - -// RestoreTerminal restores normal mode to the terminal -func (o *OutStream) RestoreTerminal() { - if o.state != nil { - term.RestoreTerminal(o.fd, o.state) - } -} - -// GetTtySize returns the height and width in characters of the tty -func (o *OutStream) GetTtySize() (uint, uint) { - if !o.isTerminal { - return 0, 0 - } - ws, err := term.GetWinsize(o.fd) - if err != nil { - logrus.Debugf("Error getting size: %s", err) - if ws == nil { - return 0, 0 - } - } - return uint(ws.Height), uint(ws.Width) -} - -// NewOutStream returns a new OutStream object from a Writer -func NewOutStream(out io.Writer) *OutStream { - fd, isTerminal := term.GetFdInfo(out) - return &OutStream{out: out, fd: fd, isTerminal: isTerminal} -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/plugin/cmd.go b/fn/vendor/github.com/docker/docker/cli/command/plugin/cmd.go deleted file mode 100644 index 33046d2cb..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/plugin/cmd.go +++ /dev/null @@ -1,32 +0,0 @@ -package plugin - -import ( - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -// NewPluginCommand returns a cobra command for `plugin` subcommands -func NewPluginCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "plugin", - 
Short: "Manage plugins", - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - Tags: map[string]string{"version": "1.25"}, - } - - cmd.AddCommand( - newDisableCommand(dockerCli), - newEnableCommand(dockerCli), - newInspectCommand(dockerCli), - newInstallCommand(dockerCli), - newListCommand(dockerCli), - newRemoveCommand(dockerCli), - newSetCommand(dockerCli), - newPushCommand(dockerCli), - newCreateCommand(dockerCli), - newUpgradeCommand(dockerCli), - ) - return cmd -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/plugin/create.go b/fn/vendor/github.com/docker/docker/cli/command/plugin/create.go deleted file mode 100644 index b51f1933d..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/plugin/create.go +++ /dev/null @@ -1,128 +0,0 @@ -package plugin - -import ( - "encoding/json" - "fmt" - "io" - "os" - "path/filepath" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/archive" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -// validateTag checks if the given repoName can be resolved. -func validateTag(rawRepo string) error { - _, err := reference.ParseNormalizedNamed(rawRepo) - - return err -} - -// validateConfig ensures that a valid config.json is available in the given path -func validateConfig(path string) error { - dt, err := os.Open(filepath.Join(path, "config.json")) - if err != nil { - return err - } - - m := types.PluginConfig{} - err = json.NewDecoder(dt).Decode(&m) - dt.Close() - - return err -} - -// validateContextDir validates the given dir and returns abs path on success. 
-func validateContextDir(contextDir string) (string, error) { - absContextDir, err := filepath.Abs(contextDir) - if err != nil { - return "", err - } - stat, err := os.Lstat(absContextDir) - if err != nil { - return "", err - } - - if !stat.IsDir() { - return "", errors.Errorf("context must be a directory") - } - - return absContextDir, nil -} - -type pluginCreateOptions struct { - repoName string - context string - compress bool -} - -func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { - options := pluginCreateOptions{} - - cmd := &cobra.Command{ - Use: "create [OPTIONS] PLUGIN PLUGIN-DATA-DIR", - Short: "Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory.", - Args: cli.RequiresMinArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - options.repoName = args[0] - options.context = args[1] - return runCreate(dockerCli, options) - }, - } - - flags := cmd.Flags() - - flags.BoolVar(&options.compress, "compress", false, "Compress the context using gzip") - - return cmd -} - -func runCreate(dockerCli *command.DockerCli, options pluginCreateOptions) error { - var ( - createCtx io.ReadCloser - err error - ) - - if err := validateTag(options.repoName); err != nil { - return err - } - - absContextDir, err := validateContextDir(options.context) - if err != nil { - return err - } - - if err := validateConfig(options.context); err != nil { - return err - } - - compression := archive.Uncompressed - if options.compress { - logrus.Debugf("compression enabled") - compression = archive.Gzip - } - - createCtx, err = archive.TarWithOptions(absContextDir, &archive.TarOptions{ - Compression: compression, - }) - - if err != nil { - return err - } - - ctx := context.Background() - - createOptions := types.PluginCreateOptions{RepoName: options.repoName} - if err = dockerCli.Client().PluginCreate(ctx, createCtx, createOptions); err != nil { - return err - } - fmt.Fprintln(dockerCli.Out(), 
options.repoName) - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/plugin/disable.go b/fn/vendor/github.com/docker/docker/cli/command/plugin/disable.go deleted file mode 100644 index 07b0ec228..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/plugin/disable.go +++ /dev/null @@ -1,36 +0,0 @@ -package plugin - -import ( - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -func newDisableCommand(dockerCli *command.DockerCli) *cobra.Command { - var force bool - - cmd := &cobra.Command{ - Use: "disable [OPTIONS] PLUGIN", - Short: "Disable a plugin", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runDisable(dockerCli, args[0], force) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&force, "force", "f", false, "Force the disable of an active plugin") - return cmd -} - -func runDisable(dockerCli *command.DockerCli, name string, force bool) error { - if err := dockerCli.Client().PluginDisable(context.Background(), name, types.PluginDisableOptions{Force: force}); err != nil { - return err - } - fmt.Fprintln(dockerCli.Out(), name) - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/plugin/enable.go b/fn/vendor/github.com/docker/docker/cli/command/plugin/enable.go deleted file mode 100644 index b1ca48f8f..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/plugin/enable.go +++ /dev/null @@ -1,48 +0,0 @@ -package plugin - -import ( - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type enableOpts struct { - timeout int - name string -} - -func newEnableCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts enableOpts - - cmd := &cobra.Command{ - Use: "enable 
[OPTIONS] PLUGIN", - Short: "Enable a plugin", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.name = args[0] - return runEnable(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.IntVar(&opts.timeout, "timeout", 0, "HTTP client timeout (in seconds)") - return cmd -} - -func runEnable(dockerCli *command.DockerCli, opts *enableOpts) error { - name := opts.name - if opts.timeout < 0 { - return errors.Errorf("negative timeout %d is invalid", opts.timeout) - } - - if err := dockerCli.Client().PluginEnable(context.Background(), name, types.PluginEnableOptions{Timeout: opts.timeout}); err != nil { - return err - } - fmt.Fprintln(dockerCli.Out(), name) - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/plugin/inspect.go b/fn/vendor/github.com/docker/docker/cli/command/plugin/inspect.go deleted file mode 100644 index c2c7a0d6b..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/plugin/inspect.go +++ /dev/null @@ -1,42 +0,0 @@ -package plugin - -import ( - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/inspect" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type inspectOptions struct { - pluginNames []string - format string -} - -func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] PLUGIN [PLUGIN...]", - Short: "Display detailed information on one or more plugins", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.pluginNames = args - return runInspect(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - return cmd -} - -func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { - client := dockerCli.Client() - ctx := context.Background() - getRef := 
func(ref string) (interface{}, []byte, error) { - return client.PluginInspectWithRaw(ctx, ref) - } - - return inspect.Inspect(dockerCli.Out(), opts.pluginNames, opts.format, getRef) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/plugin/install.go b/fn/vendor/github.com/docker/docker/cli/command/plugin/install.go deleted file mode 100644 index 18b3fa373..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/plugin/install.go +++ /dev/null @@ -1,168 +0,0 @@ -package plugin - -import ( - "fmt" - "strings" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/image" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/registry" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "golang.org/x/net/context" -) - -type pluginOptions struct { - remote string - localName string - grantPerms bool - disable bool - args []string - skipRemoteCheck bool -} - -func loadPullFlags(opts *pluginOptions, flags *pflag.FlagSet) { - flags.BoolVar(&opts.grantPerms, "grant-all-permissions", false, "Grant all permissions necessary to run the plugin") - command.AddTrustVerificationFlags(flags) -} - -func newInstallCommand(dockerCli *command.DockerCli) *cobra.Command { - var options pluginOptions - cmd := &cobra.Command{ - Use: "install [OPTIONS] PLUGIN [KEY=VALUE...]", - Short: "Install a plugin", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - options.remote = args[0] - if len(args) > 1 { - options.args = args[1:] - } - return runInstall(dockerCli, options) - }, - } - - flags := cmd.Flags() - loadPullFlags(&options, flags) - flags.BoolVar(&options.disable, "disable", false, "Do not enable the plugin on install") - flags.StringVar(&options.localName, "alias", "", "Local name for plugin") - return cmd -} - -type pluginRegistryService 
struct { - registry.Service -} - -func (s pluginRegistryService) ResolveRepository(name reference.Named) (repoInfo *registry.RepositoryInfo, err error) { - repoInfo, err = s.Service.ResolveRepository(name) - if repoInfo != nil { - repoInfo.Class = "plugin" - } - return -} - -func newRegistryService() registry.Service { - return pluginRegistryService{ - Service: registry.NewService(registry.ServiceOptions{V2Only: true}), - } -} - -func buildPullConfig(ctx context.Context, dockerCli *command.DockerCli, opts pluginOptions, cmdName string) (types.PluginInstallOptions, error) { - // Names with both tag and digest will be treated by the daemon - // as a pull by digest with a local name for the tag - // (if no local name is provided). - ref, err := reference.ParseNormalizedNamed(opts.remote) - if err != nil { - return types.PluginInstallOptions{}, err - } - - repoInfo, err := registry.ParseRepositoryInfo(ref) - if err != nil { - return types.PluginInstallOptions{}, err - } - - remote := ref.String() - - _, isCanonical := ref.(reference.Canonical) - if command.IsTrusted() && !isCanonical { - ref = reference.TagNameOnly(ref) - nt, ok := ref.(reference.NamedTagged) - if !ok { - return types.PluginInstallOptions{}, errors.Errorf("invalid name: %s", ref.String()) - } - - ctx := context.Background() - trusted, err := image.TrustedReference(ctx, dockerCli, nt, newRegistryService()) - if err != nil { - return types.PluginInstallOptions{}, err - } - remote = reference.FamiliarString(trusted) - } - - authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) - - encodedAuth, err := command.EncodeAuthToBase64(authConfig) - if err != nil { - return types.PluginInstallOptions{}, err - } - registryAuthFunc := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, cmdName) - - options := types.PluginInstallOptions{ - RegistryAuth: encodedAuth, - RemoteRef: remote, - Disabled: opts.disable, - AcceptAllPermissions: opts.grantPerms, - AcceptPermissionsFunc: 
acceptPrivileges(dockerCli, opts.remote), - // TODO: Rename PrivilegeFunc, it has nothing to do with privileges - PrivilegeFunc: registryAuthFunc, - Args: opts.args, - } - return options, nil -} - -func runInstall(dockerCli *command.DockerCli, opts pluginOptions) error { - var localName string - if opts.localName != "" { - aref, err := reference.ParseNormalizedNamed(opts.localName) - if err != nil { - return err - } - if _, ok := aref.(reference.Canonical); ok { - return errors.Errorf("invalid name: %s", opts.localName) - } - localName = reference.FamiliarString(reference.TagNameOnly(aref)) - } - - ctx := context.Background() - options, err := buildPullConfig(ctx, dockerCli, opts, "plugin install") - if err != nil { - return err - } - responseBody, err := dockerCli.Client().PluginInstall(ctx, localName, options) - if err != nil { - if strings.Contains(err.Error(), "(image) when fetching") { - return errors.New(err.Error() + " - Use `docker image pull`") - } - return err - } - defer responseBody.Close() - if err := jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil); err != nil { - return err - } - fmt.Fprintf(dockerCli.Out(), "Installed plugin %s\n", opts.remote) // todo: return proper values from the API for this result - return nil -} - -func acceptPrivileges(dockerCli *command.DockerCli, name string) func(privileges types.PluginPrivileges) (bool, error) { - return func(privileges types.PluginPrivileges) (bool, error) { - fmt.Fprintf(dockerCli.Out(), "Plugin %q is requesting the following privileges:\n", name) - for _, privilege := range privileges { - fmt.Fprintf(dockerCli.Out(), " - %s: %v\n", privilege.Name, privilege.Value) - } - return command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), "Do you grant the above permissions?"), nil - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/plugin/list.go b/fn/vendor/github.com/docker/docker/cli/command/plugin/list.go deleted file mode 100644 index a1b231f57..000000000 
--- a/fn/vendor/github.com/docker/docker/cli/command/plugin/list.go +++ /dev/null @@ -1,63 +0,0 @@ -package plugin - -import ( - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - "github.com/docker/docker/opts" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type listOptions struct { - quiet bool - noTrunc bool - format string - filter opts.FilterOpt -} - -func newListCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := listOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ls [OPTIONS]", - Short: "List plugins", - Aliases: []string{"list"}, - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runList(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display plugin IDs") - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") - flags.StringVar(&opts.format, "format", "", "Pretty-print plugins using a Go template") - flags.VarP(&opts.filter, "filter", "f", "Provide filter values (e.g. 
'enabled=true')") - - return cmd -} - -func runList(dockerCli *command.DockerCli, opts listOptions) error { - plugins, err := dockerCli.Client().PluginList(context.Background(), opts.filter.Value()) - if err != nil { - return err - } - - format := opts.format - if len(format) == 0 { - if len(dockerCli.ConfigFile().PluginsFormat) > 0 && !opts.quiet { - format = dockerCli.ConfigFile().PluginsFormat - } else { - format = formatter.TableFormatKey - } - } - - pluginsCtx := formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewPluginFormat(format, opts.quiet), - Trunc: !opts.noTrunc, - } - return formatter.PluginWrite(pluginsCtx, plugins) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/plugin/push.go b/fn/vendor/github.com/docker/docker/cli/command/plugin/push.go deleted file mode 100644 index de4f95cce..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/plugin/push.go +++ /dev/null @@ -1,69 +0,0 @@ -package plugin - -import ( - "golang.org/x/net/context" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/image" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/registry" - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -func newPushCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "push [OPTIONS] PLUGIN[:TAG]", - Short: "Push a plugin to a registry", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runPush(dockerCli, args[0]) - }, - } - - flags := cmd.Flags() - - command.AddTrustSigningFlags(flags) - - return cmd -} - -func runPush(dockerCli *command.DockerCli, name string) error { - named, err := reference.ParseNormalizedNamed(name) - if err != nil { - return err - } - if _, ok := named.(reference.Canonical); ok { - return errors.Errorf("invalid name: %s", name) - } - - named = reference.TagNameOnly(named) - - 
ctx := context.Background() - - repoInfo, err := registry.ParseRepositoryInfo(named) - if err != nil { - return err - } - authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) - - encodedAuth, err := command.EncodeAuthToBase64(authConfig) - if err != nil { - return err - } - - responseBody, err := dockerCli.Client().PluginPush(ctx, reference.FamiliarString(named), encodedAuth) - if err != nil { - return err - } - defer responseBody.Close() - - if command.IsTrusted() { - repoInfo.Class = "plugin" - return image.PushTrustedReference(dockerCli, repoInfo, named, authConfig, responseBody) - } - - return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/plugin/remove.go b/fn/vendor/github.com/docker/docker/cli/command/plugin/remove.go deleted file mode 100644 index 9f3aba9a0..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/plugin/remove.go +++ /dev/null @@ -1,55 +0,0 @@ -package plugin - -import ( - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type rmOptions struct { - force bool - - plugins []string -} - -func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts rmOptions - - cmd := &cobra.Command{ - Use: "rm [OPTIONS] PLUGIN [PLUGIN...]", - Short: "Remove one or more plugins", - Aliases: []string{"remove"}, - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.plugins = args - return runRemove(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.force, "force", "f", false, "Force the removal of an active plugin") - return cmd -} - -func runRemove(dockerCli *command.DockerCli, opts *rmOptions) error { - ctx := context.Background() - - var errs cli.Errors - for _, name := range opts.plugins { - // TODO: pass names to api instead of 
making multiple api calls - if err := dockerCli.Client().PluginRemove(ctx, name, types.PluginRemoveOptions{Force: opts.force}); err != nil { - errs = append(errs, err) - continue - } - fmt.Fprintln(dockerCli.Out(), name) - } - // Do not simplify to `return errs` because even if errs == nil, it is not a nil-error interface value. - if errs != nil { - return errs - } - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/plugin/set.go b/fn/vendor/github.com/docker/docker/cli/command/plugin/set.go deleted file mode 100644 index 52b09fb50..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/plugin/set.go +++ /dev/null @@ -1,22 +0,0 @@ -package plugin - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -func newSetCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "set PLUGIN KEY=VALUE [KEY=VALUE...]", - Short: "Change settings for a plugin", - Args: cli.RequiresMinArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - return dockerCli.Client().PluginSet(context.Background(), args[0], args[1:]) - }, - } - - return cmd -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/plugin/upgrade.go b/fn/vendor/github.com/docker/docker/cli/command/plugin/upgrade.go deleted file mode 100644 index cbcbe17ec..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/plugin/upgrade.go +++ /dev/null @@ -1,90 +0,0 @@ -package plugin - -import ( - "context" - "fmt" - "strings" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -func newUpgradeCommand(dockerCli *command.DockerCli) *cobra.Command { - var options pluginOptions - cmd := &cobra.Command{ - Use: "upgrade [OPTIONS] PLUGIN [REMOTE]", - Short: "Upgrade an existing plugin", 
- Args: cli.RequiresRangeArgs(1, 2), - RunE: func(cmd *cobra.Command, args []string) error { - options.localName = args[0] - if len(args) == 2 { - options.remote = args[1] - } - return runUpgrade(dockerCli, options) - }, - Tags: map[string]string{"version": "1.26"}, - } - - flags := cmd.Flags() - loadPullFlags(&options, flags) - flags.BoolVar(&options.skipRemoteCheck, "skip-remote-check", false, "Do not check if specified remote plugin matches existing plugin image") - return cmd -} - -func runUpgrade(dockerCli *command.DockerCli, opts pluginOptions) error { - ctx := context.Background() - p, _, err := dockerCli.Client().PluginInspectWithRaw(ctx, opts.localName) - if err != nil { - return errors.Errorf("error reading plugin data: %v", err) - } - - if p.Enabled { - return errors.Errorf("the plugin must be disabled before upgrading") - } - - opts.localName = p.Name - if opts.remote == "" { - opts.remote = p.PluginReference - } - remote, err := reference.ParseNormalizedNamed(opts.remote) - if err != nil { - return errors.Wrap(err, "error parsing remote upgrade image reference") - } - remote = reference.TagNameOnly(remote) - - old, err := reference.ParseNormalizedNamed(p.PluginReference) - if err != nil { - return errors.Wrap(err, "error parsing current image reference") - } - old = reference.TagNameOnly(old) - - fmt.Fprintf(dockerCli.Out(), "Upgrading plugin %s from %s to %s\n", p.Name, reference.FamiliarString(old), reference.FamiliarString(remote)) - if !opts.skipRemoteCheck && remote.String() != old.String() { - if !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), "Plugin images do not match, are you sure?") { - return errors.New("canceling upgrade request") - } - } - - options, err := buildPullConfig(ctx, dockerCli, opts, "plugin upgrade") - if err != nil { - return err - } - - responseBody, err := dockerCli.Client().PluginUpgrade(ctx, opts.localName, options) - if err != nil { - if strings.Contains(err.Error(), "target is image") { - return 
errors.New(err.Error() + " - Use `docker image pull`") - } - return err - } - defer responseBody.Close() - if err := jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil); err != nil { - return err - } - fmt.Fprintf(dockerCli.Out(), "Upgraded plugin %s to %s\n", opts.localName, opts.remote) // todo: return proper values from the API for this result - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/prune/prune.go b/fn/vendor/github.com/docker/docker/cli/command/prune/prune.go deleted file mode 100644 index 26153ed7c..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/prune/prune.go +++ /dev/null @@ -1,51 +0,0 @@ -package prune - -import ( - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/container" - "github.com/docker/docker/cli/command/image" - "github.com/docker/docker/cli/command/network" - "github.com/docker/docker/cli/command/volume" - "github.com/docker/docker/opts" - "github.com/spf13/cobra" -) - -// NewContainerPruneCommand returns a cobra prune command for containers -func NewContainerPruneCommand(dockerCli *command.DockerCli) *cobra.Command { - return container.NewPruneCommand(dockerCli) -} - -// NewVolumePruneCommand returns a cobra prune command for volumes -func NewVolumePruneCommand(dockerCli *command.DockerCli) *cobra.Command { - return volume.NewPruneCommand(dockerCli) -} - -// NewImagePruneCommand returns a cobra prune command for images -func NewImagePruneCommand(dockerCli *command.DockerCli) *cobra.Command { - return image.NewPruneCommand(dockerCli) -} - -// NewNetworkPruneCommand returns a cobra prune command for Networks -func NewNetworkPruneCommand(dockerCli *command.DockerCli) *cobra.Command { - return network.NewPruneCommand(dockerCli) -} - -// RunContainerPrune executes a prune command for containers -func RunContainerPrune(dockerCli *command.DockerCli, filter opts.FilterOpt) (uint64, string, error) { - return container.RunPrune(dockerCli, filter) -} 
- -// RunVolumePrune executes a prune command for volumes -func RunVolumePrune(dockerCli *command.DockerCli, filter opts.FilterOpt) (uint64, string, error) { - return volume.RunPrune(dockerCli, filter) -} - -// RunImagePrune executes a prune command for images -func RunImagePrune(dockerCli *command.DockerCli, all bool, filter opts.FilterOpt) (uint64, string, error) { - return image.RunPrune(dockerCli, all, filter) -} - -// RunNetworkPrune executes a prune command for networks -func RunNetworkPrune(dockerCli *command.DockerCli, filter opts.FilterOpt) (uint64, string, error) { - return network.RunPrune(dockerCli, filter) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/registry.go b/fn/vendor/github.com/docker/docker/cli/command/registry.go deleted file mode 100644 index e13bba775..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/registry.go +++ /dev/null @@ -1,187 +0,0 @@ -package command - -import ( - "bufio" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "os" - "runtime" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/pkg/term" - "github.com/docker/docker/registry" - "github.com/pkg/errors" -) - -// ElectAuthServer returns the default registry to use (by asking the daemon) -func ElectAuthServer(ctx context.Context, cli *DockerCli) string { - // The daemon `/info` endpoint informs us of the default registry being - // used. This is essential in cross-platforms environment, where for - // example a Linux client might be interacting with a Windows daemon, hence - // the default registry URL might be Windows specific. - serverAddress := registry.IndexServer - if info, err := cli.Client().Info(ctx); err != nil { - fmt.Fprintf(cli.Out(), "Warning: failed to get default registry endpoint from daemon (%v). 
Using system default: %s\n", err, serverAddress) - } else { - serverAddress = info.IndexServerAddress - } - return serverAddress -} - -// EncodeAuthToBase64 serializes the auth configuration as JSON base64 payload -func EncodeAuthToBase64(authConfig types.AuthConfig) (string, error) { - buf, err := json.Marshal(authConfig) - if err != nil { - return "", err - } - return base64.URLEncoding.EncodeToString(buf), nil -} - -// RegistryAuthenticationPrivilegedFunc returns a RequestPrivilegeFunc from the specified registry index info -// for the given command. -func RegistryAuthenticationPrivilegedFunc(cli *DockerCli, index *registrytypes.IndexInfo, cmdName string) types.RequestPrivilegeFunc { - return func() (string, error) { - fmt.Fprintf(cli.Out(), "\nPlease login prior to %s:\n", cmdName) - indexServer := registry.GetAuthConfigKey(index) - isDefaultRegistry := indexServer == ElectAuthServer(context.Background(), cli) - authConfig, err := ConfigureAuth(cli, "", "", indexServer, isDefaultRegistry) - if err != nil { - return "", err - } - return EncodeAuthToBase64(authConfig) - } -} - -// ResolveAuthConfig is like registry.ResolveAuthConfig, but if using the -// default index, it uses the default index name for the daemon's platform, -// not the client's platform. -func ResolveAuthConfig(ctx context.Context, cli *DockerCli, index *registrytypes.IndexInfo) types.AuthConfig { - configKey := index.Name - if index.Official { - configKey = ElectAuthServer(ctx, cli) - } - - a, _ := cli.CredentialsStore(configKey).Get(configKey) - return a -} - -// ConfigureAuth returns an AuthConfig from the specified user, password and server. -func ConfigureAuth(cli *DockerCli, flUser, flPassword, serverAddress string, isDefaultRegistry bool) (types.AuthConfig, error) { - // On Windows, force the use of the regular OS stdin stream. 
Fixes #14336/#14210 - if runtime.GOOS == "windows" { - cli.in = NewInStream(os.Stdin) - } - - if !isDefaultRegistry { - serverAddress = registry.ConvertToHostname(serverAddress) - } - - authconfig, err := cli.CredentialsStore(serverAddress).Get(serverAddress) - if err != nil { - return authconfig, err - } - - // Some links documenting this: - // - https://code.google.com/archive/p/mintty/issues/56 - // - https://github.com/docker/docker/issues/15272 - // - https://mintty.github.io/ (compatibility) - // Linux will hit this if you attempt `cat | docker login`, and Windows - // will hit this if you attempt docker login from mintty where stdin - // is a pipe, not a character based console. - if flPassword == "" && !cli.In().IsTerminal() { - return authconfig, errors.Errorf("Error: Cannot perform an interactive login from a non TTY device") - } - - authconfig.Username = strings.TrimSpace(authconfig.Username) - - if flUser = strings.TrimSpace(flUser); flUser == "" { - if isDefaultRegistry { - // if this is a default registry (docker hub), then display the following message. - fmt.Fprintln(cli.Out(), "Login with your Docker ID to push and pull images from Docker Hub. 
If you don't have a Docker ID, head over to https://hub.docker.com to create one.") - } - promptWithDefault(cli.Out(), "Username", authconfig.Username) - flUser = readInput(cli.In(), cli.Out()) - flUser = strings.TrimSpace(flUser) - if flUser == "" { - flUser = authconfig.Username - } - } - if flUser == "" { - return authconfig, errors.Errorf("Error: Non-null Username Required") - } - if flPassword == "" { - oldState, err := term.SaveState(cli.In().FD()) - if err != nil { - return authconfig, err - } - fmt.Fprintf(cli.Out(), "Password: ") - term.DisableEcho(cli.In().FD(), oldState) - - flPassword = readInput(cli.In(), cli.Out()) - fmt.Fprint(cli.Out(), "\n") - - term.RestoreTerminal(cli.In().FD(), oldState) - if flPassword == "" { - return authconfig, errors.Errorf("Error: Password Required") - } - } - - authconfig.Username = flUser - authconfig.Password = flPassword - authconfig.ServerAddress = serverAddress - authconfig.IdentityToken = "" - - return authconfig, nil -} - -func readInput(in io.Reader, out io.Writer) string { - reader := bufio.NewReader(in) - line, _, err := reader.ReadLine() - if err != nil { - fmt.Fprintln(out, err.Error()) - os.Exit(1) - } - return string(line) -} - -func promptWithDefault(out io.Writer, prompt string, configDefault string) { - if configDefault == "" { - fmt.Fprintf(out, "%s: ", prompt) - } else { - fmt.Fprintf(out, "%s (%s): ", prompt, configDefault) - } -} - -// RetrieveAuthTokenFromImage retrieves an encoded auth token given a complete image -func RetrieveAuthTokenFromImage(ctx context.Context, cli *DockerCli, image string) (string, error) { - // Retrieve encoded auth token from the image reference - authConfig, err := resolveAuthConfigFromImage(ctx, cli, image) - if err != nil { - return "", err - } - encodedAuth, err := EncodeAuthToBase64(authConfig) - if err != nil { - return "", err - } - return encodedAuth, nil -} - -// resolveAuthConfigFromImage retrieves that AuthConfig using the image string -func 
resolveAuthConfigFromImage(ctx context.Context, cli *DockerCli, image string) (types.AuthConfig, error) { - registryRef, err := reference.ParseNormalizedNamed(image) - if err != nil { - return types.AuthConfig{}, err - } - repoInfo, err := registry.ParseRepositoryInfo(registryRef) - if err != nil { - return types.AuthConfig{}, err - } - return ResolveAuthConfig(ctx, cli, repoInfo.Index), nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/registry/login.go b/fn/vendor/github.com/docker/docker/cli/command/registry/login.go deleted file mode 100644 index 343d107dc..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/registry/login.go +++ /dev/null @@ -1,87 +0,0 @@ -package registry - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/registry" - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -type loginOptions struct { - serverAddress string - user string - password string - email string -} - -// NewLoginCommand creates a new `docker login` command -func NewLoginCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts loginOptions - - cmd := &cobra.Command{ - Use: "login [OPTIONS] [SERVER]", - Short: "Log in to a Docker registry", - Long: "Log in to a Docker registry.\nIf no server is specified, the default is defined by the daemon.", - Args: cli.RequiresMaxArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - if len(args) > 0 { - opts.serverAddress = args[0] - } - return runLogin(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.user, "username", "u", "", "Username") - flags.StringVarP(&opts.password, "password", "p", "", "Password") - - // Deprecated in 1.11: Should be removed in docker 17.06 - flags.StringVarP(&opts.email, "email", "e", "", "Email") - flags.MarkDeprecated("email", "will be removed in 17.06.") - - return cmd -} - -func runLogin(dockerCli *command.DockerCli, opts 
loginOptions) error { - ctx := context.Background() - clnt := dockerCli.Client() - - var ( - serverAddress string - authServer = command.ElectAuthServer(ctx, dockerCli) - ) - if opts.serverAddress != "" && opts.serverAddress != registry.DefaultNamespace { - serverAddress = opts.serverAddress - } else { - serverAddress = authServer - } - - isDefaultRegistry := serverAddress == authServer - - authConfig, err := command.ConfigureAuth(dockerCli, opts.user, opts.password, serverAddress, isDefaultRegistry) - if err != nil { - return err - } - response, err := clnt.RegistryLogin(ctx, authConfig) - if err != nil { - return err - } - if response.IdentityToken != "" { - authConfig.Password = "" - authConfig.IdentityToken = response.IdentityToken - } - if err := dockerCli.CredentialsStore(serverAddress).Store(authConfig); err != nil { - return errors.Errorf("Error saving credentials: %v", err) - } - - if response.Status != "" { - fmt.Fprintln(dockerCli.Out(), response.Status) - } - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/registry/logout.go b/fn/vendor/github.com/docker/docker/cli/command/registry/logout.go deleted file mode 100644 index f1f397fa0..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/registry/logout.go +++ /dev/null @@ -1,77 +0,0 @@ -package registry - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/registry" - "github.com/spf13/cobra" -) - -// NewLogoutCommand creates a new `docker logout` command -func NewLogoutCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "logout [SERVER]", - Short: "Log out from a Docker registry", - Long: "Log out from a Docker registry.\nIf no server is specified, the default is defined by the daemon.", - Args: cli.RequiresMaxArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - var serverAddress string - if len(args) > 0 { - 
serverAddress = args[0] - } - return runLogout(dockerCli, serverAddress) - }, - } - - return cmd -} - -func runLogout(dockerCli *command.DockerCli, serverAddress string) error { - ctx := context.Background() - var isDefaultRegistry bool - - if serverAddress == "" { - serverAddress = command.ElectAuthServer(ctx, dockerCli) - isDefaultRegistry = true - } - - var ( - loggedIn bool - regsToLogout []string - hostnameAddress = serverAddress - regsToTry = []string{serverAddress} - ) - if !isDefaultRegistry { - hostnameAddress = registry.ConvertToHostname(serverAddress) - // the tries below are kept for backward compatibility where a user could have - // saved the registry in one of the following format. - regsToTry = append(regsToTry, hostnameAddress, "http://"+hostnameAddress, "https://"+hostnameAddress) - } - - // check if we're logged in based on the records in the config file - // which means it couldn't have user/pass cause they may be in the creds store - for _, s := range regsToTry { - if _, ok := dockerCli.ConfigFile().AuthConfigs[s]; ok { - loggedIn = true - regsToLogout = append(regsToLogout, s) - } - } - - if !loggedIn { - fmt.Fprintf(dockerCli.Out(), "Not logged in to %s\n", hostnameAddress) - return nil - } - - fmt.Fprintf(dockerCli.Out(), "Removing login credentials for %s\n", hostnameAddress) - for _, r := range regsToLogout { - if err := dockerCli.CredentialsStore(r).Erase(r); err != nil { - fmt.Fprintf(dockerCli.Err(), "WARNING: could not erase credentials: %v\n", err) - } - } - - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/registry/search.go b/fn/vendor/github.com/docker/docker/cli/command/registry/search.go deleted file mode 100644 index f534082d3..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/registry/search.go +++ /dev/null @@ -1,126 +0,0 @@ -package registry - -import ( - "fmt" - "sort" - "strings" - "text/tabwriter" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - registrytypes 
"github.com/docker/docker/api/types/registry" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/stringutils" - "github.com/docker/docker/registry" - "github.com/spf13/cobra" -) - -type searchOptions struct { - term string - noTrunc bool - limit int - filter opts.FilterOpt - - // Deprecated - stars uint - automated bool -} - -// NewSearchCommand creates a new `docker search` command -func NewSearchCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := searchOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "search [OPTIONS] TERM", - Short: "Search the Docker Hub for images", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.term = args[0] - return runSearch(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - flags.IntVar(&opts.limit, "limit", registry.DefaultSearchLimit, "Max number of search results") - - flags.BoolVar(&opts.automated, "automated", false, "Only show automated builds") - flags.UintVarP(&opts.stars, "stars", "s", 0, "Only displays with at least x stars") - - flags.MarkDeprecated("automated", "use --filter=is-automated=true instead") - flags.MarkDeprecated("stars", "use --filter=stars=3 instead") - - return cmd -} - -func runSearch(dockerCli *command.DockerCli, opts searchOptions) error { - indexInfo, err := registry.ParseSearchIndexInfo(opts.term) - if err != nil { - return err - } - - ctx := context.Background() - - authConfig := command.ResolveAuthConfig(ctx, dockerCli, indexInfo) - requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, indexInfo, "search") - - encodedAuth, err := command.EncodeAuthToBase64(authConfig) - if err != nil { - return err - } - - options := types.ImageSearchOptions{ - 
RegistryAuth: encodedAuth, - PrivilegeFunc: requestPrivilege, - Filters: opts.filter.Value(), - Limit: opts.limit, - } - - clnt := dockerCli.Client() - - unorderedResults, err := clnt.ImageSearch(ctx, opts.term, options) - if err != nil { - return err - } - - results := searchResultsByStars(unorderedResults) - sort.Sort(results) - - w := tabwriter.NewWriter(dockerCli.Out(), 10, 1, 3, ' ', 0) - fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n") - for _, res := range results { - // --automated and -s, --stars are deprecated since Docker 1.12 - if (opts.automated && !res.IsAutomated) || (int(opts.stars) > res.StarCount) { - continue - } - desc := strings.Replace(res.Description, "\n", " ", -1) - desc = strings.Replace(desc, "\r", " ", -1) - if !opts.noTrunc { - desc = stringutils.Ellipsis(desc, 45) - } - fmt.Fprintf(w, "%s\t%s\t%d\t", res.Name, desc, res.StarCount) - if res.IsOfficial { - fmt.Fprint(w, "[OK]") - - } - fmt.Fprint(w, "\t") - if res.IsAutomated { - fmt.Fprint(w, "[OK]") - } - fmt.Fprint(w, "\n") - } - w.Flush() - return nil -} - -// searchResultsByStars sorts search results in descending order by number of stars. 
-type searchResultsByStars []registrytypes.SearchResult - -func (r searchResultsByStars) Len() int { return len(r) } -func (r searchResultsByStars) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r searchResultsByStars) Less(i, j int) bool { return r[j].StarCount < r[i].StarCount } diff --git a/fn/vendor/github.com/docker/docker/cli/command/secret/client_test.go b/fn/vendor/github.com/docker/docker/cli/command/secret/client_test.go deleted file mode 100644 index bb4b412fc..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/secret/client_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package secret - -import ( - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/client" - "golang.org/x/net/context" -) - -type fakeClient struct { - client.Client - secretCreateFunc func(swarm.SecretSpec) (types.SecretCreateResponse, error) - secretInspectFunc func(string) (swarm.Secret, []byte, error) - secretListFunc func(types.SecretListOptions) ([]swarm.Secret, error) - secretRemoveFunc func(string) error -} - -func (c *fakeClient) SecretCreate(ctx context.Context, spec swarm.SecretSpec) (types.SecretCreateResponse, error) { - if c.secretCreateFunc != nil { - return c.secretCreateFunc(spec) - } - return types.SecretCreateResponse{}, nil -} - -func (c *fakeClient) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { - if c.secretInspectFunc != nil { - return c.secretInspectFunc(id) - } - return swarm.Secret{}, nil, nil -} - -func (c *fakeClient) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { - if c.secretListFunc != nil { - return c.secretListFunc(options) - } - return []swarm.Secret{}, nil -} - -func (c *fakeClient) SecretRemove(ctx context.Context, name string) error { - if c.secretRemoveFunc != nil { - return c.secretRemoveFunc(name) - } - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/secret/cmd.go 
b/fn/vendor/github.com/docker/docker/cli/command/secret/cmd.go deleted file mode 100644 index acaef4dca..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/secret/cmd.go +++ /dev/null @@ -1,26 +0,0 @@ -package secret - -import ( - "github.com/spf13/cobra" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" -) - -// NewSecretCommand returns a cobra command for `secret` subcommands -func NewSecretCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "secret", - Short: "Manage Docker secrets", - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - Tags: map[string]string{"version": "1.25"}, - } - cmd.AddCommand( - newSecretListCommand(dockerCli), - newSecretCreateCommand(dockerCli), - newSecretInspectCommand(dockerCli), - newSecretRemoveCommand(dockerCli), - ) - return cmd -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/secret/create.go b/fn/vendor/github.com/docker/docker/cli/command/secret/create.go deleted file mode 100644 index 59b079817..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/secret/create.go +++ /dev/null @@ -1,80 +0,0 @@ -package secret - -import ( - "fmt" - "io" - "io/ioutil" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/system" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type createOptions struct { - name string - file string - labels opts.ListOpts -} - -func newSecretCreateCommand(dockerCli command.Cli) *cobra.Command { - createOpts := createOptions{ - labels: opts.NewListOpts(opts.ValidateEnv), - } - - cmd := &cobra.Command{ - Use: "create [OPTIONS] SECRET file|-", - Short: "Create a secret from a file or STDIN as content", - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - 
createOpts.name = args[0] - createOpts.file = args[1] - return runSecretCreate(dockerCli, createOpts) - }, - } - flags := cmd.Flags() - flags.VarP(&createOpts.labels, "label", "l", "Secret labels") - - return cmd -} - -func runSecretCreate(dockerCli command.Cli, options createOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - var in io.Reader = dockerCli.In() - if options.file != "-" { - file, err := system.OpenSequential(options.file) - if err != nil { - return err - } - in = file - defer file.Close() - } - - secretData, err := ioutil.ReadAll(in) - if err != nil { - return errors.Errorf("Error reading content from %q: %v", options.file, err) - } - - spec := swarm.SecretSpec{ - Annotations: swarm.Annotations{ - Name: options.name, - Labels: runconfigopts.ConvertKVStringsToMap(options.labels.GetAll()), - }, - Data: secretData, - } - - r, err := client.SecretCreate(ctx, spec) - if err != nil { - return err - } - - fmt.Fprintln(dockerCli.Out(), r.ID) - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/secret/create_test.go b/fn/vendor/github.com/docker/docker/cli/command/secret/create_test.go deleted file mode 100644 index cbdfd6333..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/secret/create_test.go +++ /dev/null @@ -1,126 +0,0 @@ -package secret - -import ( - "bytes" - "io/ioutil" - "path/filepath" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/internal/test" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/docker/docker/pkg/testutil/golden" - "github.com/pkg/errors" -) - -const secretDataFile = "secret-create-with-name.golden" - -func TestSecretCreateErrors(t *testing.T) { - testCases := []struct { - args []string - secretCreateFunc func(swarm.SecretSpec) (types.SecretCreateResponse, error) - expectedError string - }{ - { - args: []string{"too_few"}, - expectedError: "requires exactly 2 
argument(s)", - }, - {args: []string{"too", "many", "arguments"}, - expectedError: "requires exactly 2 argument(s)", - }, - { - args: []string{"name", filepath.Join("testdata", secretDataFile)}, - secretCreateFunc: func(secretSpec swarm.SecretSpec) (types.SecretCreateResponse, error) { - return types.SecretCreateResponse{}, errors.Errorf("error creating secret") - }, - expectedError: "error creating secret", - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newSecretCreateCommand( - test.NewFakeCli(&fakeClient{ - secretCreateFunc: tc.secretCreateFunc, - }, buf), - ) - cmd.SetArgs(tc.args) - cmd.SetOutput(ioutil.Discard) - assert.Error(t, cmd.Execute(), tc.expectedError) - } -} - -func TestSecretCreateWithName(t *testing.T) { - name := "foo" - buf := new(bytes.Buffer) - var actual []byte - cli := test.NewFakeCli(&fakeClient{ - secretCreateFunc: func(spec swarm.SecretSpec) (types.SecretCreateResponse, error) { - if spec.Name != name { - return types.SecretCreateResponse{}, errors.Errorf("expected name %q, got %q", name, spec.Name) - } - - actual = spec.Data - - return types.SecretCreateResponse{ - ID: "ID-" + spec.Name, - }, nil - }, - }, buf) - - cmd := newSecretCreateCommand(cli) - cmd.SetArgs([]string{name, filepath.Join("testdata", secretDataFile)}) - assert.NilError(t, cmd.Execute()) - expected := golden.Get(t, actual, secretDataFile) - assert.Equal(t, string(actual), string(expected)) - assert.Equal(t, strings.TrimSpace(buf.String()), "ID-"+name) -} - -func TestSecretCreateWithLabels(t *testing.T) { - expectedLabels := map[string]string{ - "lbl1": "Label-foo", - "lbl2": "Label-bar", - } - name := "foo" - - buf := new(bytes.Buffer) - cli := test.NewFakeCli(&fakeClient{ - secretCreateFunc: func(spec swarm.SecretSpec) (types.SecretCreateResponse, error) { - if spec.Name != name { - return types.SecretCreateResponse{}, errors.Errorf("expected name %q, got %q", name, spec.Name) - } - - if !compareMap(spec.Labels, expectedLabels) { - 
return types.SecretCreateResponse{}, errors.Errorf("expected labels %v, got %v", expectedLabels, spec.Labels) - } - - return types.SecretCreateResponse{ - ID: "ID-" + spec.Name, - }, nil - }, - }, buf) - - cmd := newSecretCreateCommand(cli) - cmd.SetArgs([]string{name, filepath.Join("testdata", secretDataFile)}) - cmd.Flags().Set("label", "lbl1=Label-foo") - cmd.Flags().Set("label", "lbl2=Label-bar") - assert.NilError(t, cmd.Execute()) - assert.Equal(t, strings.TrimSpace(buf.String()), "ID-"+name) -} - -func compareMap(actual map[string]string, expected map[string]string) bool { - if len(actual) != len(expected) { - return false - } - for key, value := range actual { - if expectedValue, ok := expected[key]; ok { - if expectedValue != value { - return false - } - } else { - return false - } - } - return true -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/secret/inspect.go b/fn/vendor/github.com/docker/docker/cli/command/secret/inspect.go deleted file mode 100644 index 8b3c3c682..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/secret/inspect.go +++ /dev/null @@ -1,41 +0,0 @@ -package secret - -import ( - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/inspect" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type inspectOptions struct { - names []string - format string -} - -func newSecretInspectCommand(dockerCli command.Cli) *cobra.Command { - opts := inspectOptions{} - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] SECRET [SECRET...]", - Short: "Display detailed information on one or more secrets", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.names = args - return runSecretInspect(dockerCli, opts) - }, - } - - cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - return cmd -} - -func runSecretInspect(dockerCli command.Cli, opts inspectOptions) error { - 
client := dockerCli.Client() - ctx := context.Background() - - getRef := func(id string) (interface{}, []byte, error) { - return client.SecretInspectWithRaw(ctx, id) - } - - return inspect.Inspect(dockerCli.Out(), opts.names, opts.format, getRef) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/secret/inspect_test.go b/fn/vendor/github.com/docker/docker/cli/command/secret/inspect_test.go deleted file mode 100644 index 558e23d7c..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/secret/inspect_test.go +++ /dev/null @@ -1,149 +0,0 @@ -package secret - -import ( - "bytes" - "fmt" - "io/ioutil" - "testing" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/internal/test" - "github.com/pkg/errors" - // Import builders to get the builder function as package function - . "github.com/docker/docker/cli/internal/test/builders" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/docker/docker/pkg/testutil/golden" -) - -func TestSecretInspectErrors(t *testing.T) { - testCases := []struct { - args []string - flags map[string]string - secretInspectFunc func(secretID string) (swarm.Secret, []byte, error) - expectedError string - }{ - { - expectedError: "requires at least 1 argument", - }, - { - args: []string{"foo"}, - secretInspectFunc: func(secretID string) (swarm.Secret, []byte, error) { - return swarm.Secret{}, nil, errors.Errorf("error while inspecting the secret") - }, - expectedError: "error while inspecting the secret", - }, - { - args: []string{"foo"}, - flags: map[string]string{ - "format": "{{invalid format}}", - }, - expectedError: "Template parsing error", - }, - { - args: []string{"foo", "bar"}, - secretInspectFunc: func(secretID string) (swarm.Secret, []byte, error) { - if secretID == "foo" { - return *Secret(SecretName("foo")), nil, nil - } - return swarm.Secret{}, nil, errors.Errorf("error while inspecting the secret") - }, - expectedError: "error while inspecting the secret", - }, - } - for _, tc 
:= range testCases { - buf := new(bytes.Buffer) - cmd := newSecretInspectCommand( - test.NewFakeCli(&fakeClient{ - secretInspectFunc: tc.secretInspectFunc, - }, buf), - ) - cmd.SetArgs(tc.args) - for key, value := range tc.flags { - cmd.Flags().Set(key, value) - } - cmd.SetOutput(ioutil.Discard) - assert.Error(t, cmd.Execute(), tc.expectedError) - } -} - -func TestSecretInspectWithoutFormat(t *testing.T) { - testCases := []struct { - name string - args []string - secretInspectFunc func(secretID string) (swarm.Secret, []byte, error) - }{ - { - name: "single-secret", - args: []string{"foo"}, - secretInspectFunc: func(name string) (swarm.Secret, []byte, error) { - if name != "foo" { - return swarm.Secret{}, nil, errors.Errorf("Invalid name, expected %s, got %s", "foo", name) - } - return *Secret(SecretID("ID-foo"), SecretName("foo")), nil, nil - }, - }, - { - name: "multiple-secrets-with-labels", - args: []string{"foo", "bar"}, - secretInspectFunc: func(name string) (swarm.Secret, []byte, error) { - return *Secret(SecretID("ID-"+name), SecretName(name), SecretLabels(map[string]string{ - "label1": "label-foo", - })), nil, nil - }, - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newSecretInspectCommand( - test.NewFakeCli(&fakeClient{ - secretInspectFunc: tc.secretInspectFunc, - }, buf), - ) - cmd.SetArgs(tc.args) - assert.NilError(t, cmd.Execute()) - actual := buf.String() - expected := golden.Get(t, []byte(actual), fmt.Sprintf("secret-inspect-without-format.%s.golden", tc.name)) - assert.EqualNormalizedString(t, assert.RemoveSpace, actual, string(expected)) - } -} - -func TestSecretInspectWithFormat(t *testing.T) { - secretInspectFunc := func(name string) (swarm.Secret, []byte, error) { - return *Secret(SecretName("foo"), SecretLabels(map[string]string{ - "label1": "label-foo", - })), nil, nil - } - testCases := []struct { - name string - format string - args []string - secretInspectFunc func(name string) (swarm.Secret, []byte, error) - }{ 
- { - name: "simple-template", - format: "{{.Spec.Name}}", - args: []string{"foo"}, - secretInspectFunc: secretInspectFunc, - }, - { - name: "json-template", - format: "{{json .Spec.Labels}}", - args: []string{"foo"}, - secretInspectFunc: secretInspectFunc, - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newSecretInspectCommand( - test.NewFakeCli(&fakeClient{ - secretInspectFunc: tc.secretInspectFunc, - }, buf), - ) - cmd.SetArgs(tc.args) - cmd.Flags().Set("format", tc.format) - assert.NilError(t, cmd.Execute()) - actual := buf.String() - expected := golden.Get(t, []byte(actual), fmt.Sprintf("secret-inspect-with-format.%s.golden", tc.name)) - assert.EqualNormalizedString(t, assert.RemoveSpace, actual, string(expected)) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/secret/ls.go b/fn/vendor/github.com/docker/docker/cli/command/secret/ls.go deleted file mode 100644 index 384ee2650..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/secret/ls.go +++ /dev/null @@ -1,61 +0,0 @@ -package secret - -import ( - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - "github.com/docker/docker/opts" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type listOptions struct { - quiet bool - format string - filter opts.FilterOpt -} - -func newSecretListCommand(dockerCli command.Cli) *cobra.Command { - opts := listOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ls [OPTIONS]", - Aliases: []string{"list"}, - Short: "List secrets", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runSecretList(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") - flags.StringVarP(&opts.format, "format", "", "", "Pretty-print secrets using a Go template") - flags.VarP(&opts.filter, "filter", 
"f", "Filter output based on conditions provided") - - return cmd -} - -func runSecretList(dockerCli command.Cli, opts listOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - secrets, err := client.SecretList(ctx, types.SecretListOptions{Filters: opts.filter.Value()}) - if err != nil { - return err - } - format := opts.format - if len(format) == 0 { - if len(dockerCli.ConfigFile().SecretFormat) > 0 && !opts.quiet { - format = dockerCli.ConfigFile().SecretFormat - } else { - format = formatter.TableFormatKey - } - } - secretCtx := formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewSecretFormat(format, opts.quiet), - } - return formatter.SecretWrite(secretCtx, secrets) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/secret/ls_test.go b/fn/vendor/github.com/docker/docker/cli/command/secret/ls_test.go deleted file mode 100644 index d9a4324b7..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/secret/ls_test.go +++ /dev/null @@ -1,172 +0,0 @@ -package secret - -import ( - "bytes" - "io/ioutil" - "testing" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/config/configfile" - "github.com/docker/docker/cli/internal/test" - "github.com/pkg/errors" - // Import builders to get the builder function as package function - . 
"github.com/docker/docker/cli/internal/test/builders" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/docker/docker/pkg/testutil/golden" -) - -func TestSecretListErrors(t *testing.T) { - testCases := []struct { - args []string - secretListFunc func(types.SecretListOptions) ([]swarm.Secret, error) - expectedError string - }{ - { - args: []string{"foo"}, - expectedError: "accepts no argument", - }, - { - secretListFunc: func(options types.SecretListOptions) ([]swarm.Secret, error) { - return []swarm.Secret{}, errors.Errorf("error listing secrets") - }, - expectedError: "error listing secrets", - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newSecretListCommand( - test.NewFakeCli(&fakeClient{ - secretListFunc: tc.secretListFunc, - }, buf), - ) - cmd.SetArgs(tc.args) - cmd.SetOutput(ioutil.Discard) - assert.Error(t, cmd.Execute(), tc.expectedError) - } -} - -func TestSecretList(t *testing.T) { - buf := new(bytes.Buffer) - cli := test.NewFakeCli(&fakeClient{ - secretListFunc: func(options types.SecretListOptions) ([]swarm.Secret, error) { - return []swarm.Secret{ - *Secret(SecretID("ID-foo"), - SecretName("foo"), - SecretVersion(swarm.Version{Index: 10}), - SecretCreatedAt(time.Now().Add(-2*time.Hour)), - SecretUpdatedAt(time.Now().Add(-1*time.Hour)), - ), - *Secret(SecretID("ID-bar"), - SecretName("bar"), - SecretVersion(swarm.Version{Index: 11}), - SecretCreatedAt(time.Now().Add(-2*time.Hour)), - SecretUpdatedAt(time.Now().Add(-1*time.Hour)), - ), - }, nil - }, - }, buf) - cli.SetConfigfile(&configfile.ConfigFile{}) - cmd := newSecretListCommand(cli) - cmd.SetOutput(buf) - assert.NilError(t, cmd.Execute()) - actual := buf.String() - expected := golden.Get(t, []byte(actual), "secret-list.golden") - assert.EqualNormalizedString(t, assert.RemoveSpace, actual, string(expected)) -} - -func TestSecretListWithQuietOption(t *testing.T) { - buf := new(bytes.Buffer) - cli := test.NewFakeCli(&fakeClient{ - secretListFunc: 
func(options types.SecretListOptions) ([]swarm.Secret, error) { - return []swarm.Secret{ - *Secret(SecretID("ID-foo"), SecretName("foo")), - *Secret(SecretID("ID-bar"), SecretName("bar"), SecretLabels(map[string]string{ - "label": "label-bar", - })), - }, nil - }, - }, buf) - cli.SetConfigfile(&configfile.ConfigFile{}) - cmd := newSecretListCommand(cli) - cmd.Flags().Set("quiet", "true") - assert.NilError(t, cmd.Execute()) - actual := buf.String() - expected := golden.Get(t, []byte(actual), "secret-list-with-quiet-option.golden") - assert.EqualNormalizedString(t, assert.RemoveSpace, actual, string(expected)) -} - -func TestSecretListWithConfigFormat(t *testing.T) { - buf := new(bytes.Buffer) - cli := test.NewFakeCli(&fakeClient{ - secretListFunc: func(options types.SecretListOptions) ([]swarm.Secret, error) { - return []swarm.Secret{ - *Secret(SecretID("ID-foo"), SecretName("foo")), - *Secret(SecretID("ID-bar"), SecretName("bar"), SecretLabels(map[string]string{ - "label": "label-bar", - })), - }, nil - }, - }, buf) - cli.SetConfigfile(&configfile.ConfigFile{ - SecretFormat: "{{ .Name }} {{ .Labels }}", - }) - cmd := newSecretListCommand(cli) - assert.NilError(t, cmd.Execute()) - actual := buf.String() - expected := golden.Get(t, []byte(actual), "secret-list-with-config-format.golden") - assert.EqualNormalizedString(t, assert.RemoveSpace, actual, string(expected)) -} - -func TestSecretListWithFormat(t *testing.T) { - buf := new(bytes.Buffer) - cli := test.NewFakeCli(&fakeClient{ - secretListFunc: func(options types.SecretListOptions) ([]swarm.Secret, error) { - return []swarm.Secret{ - *Secret(SecretID("ID-foo"), SecretName("foo")), - *Secret(SecretID("ID-bar"), SecretName("bar"), SecretLabels(map[string]string{ - "label": "label-bar", - })), - }, nil - }, - }, buf) - cmd := newSecretListCommand(cli) - cmd.Flags().Set("format", "{{ .Name }} {{ .Labels }}") - assert.NilError(t, cmd.Execute()) - actual := buf.String() - expected := golden.Get(t, []byte(actual), 
"secret-list-with-format.golden") - assert.EqualNormalizedString(t, assert.RemoveSpace, actual, string(expected)) -} - -func TestSecretListWithFilter(t *testing.T) { - buf := new(bytes.Buffer) - cli := test.NewFakeCli(&fakeClient{ - secretListFunc: func(options types.SecretListOptions) ([]swarm.Secret, error) { - assert.Equal(t, options.Filters.Get("name")[0], "foo") - assert.Equal(t, options.Filters.Get("label")[0], "lbl1=Label-bar") - return []swarm.Secret{ - *Secret(SecretID("ID-foo"), - SecretName("foo"), - SecretVersion(swarm.Version{Index: 10}), - SecretCreatedAt(time.Now().Add(-2*time.Hour)), - SecretUpdatedAt(time.Now().Add(-1*time.Hour)), - ), - *Secret(SecretID("ID-bar"), - SecretName("bar"), - SecretVersion(swarm.Version{Index: 11}), - SecretCreatedAt(time.Now().Add(-2*time.Hour)), - SecretUpdatedAt(time.Now().Add(-1*time.Hour)), - ), - }, nil - }, - }, buf) - cli.SetConfigfile(&configfile.ConfigFile{}) - cmd := newSecretListCommand(cli) - cmd.Flags().Set("filter", "name=foo") - cmd.Flags().Set("filter", "label=lbl1=Label-bar") - assert.NilError(t, cmd.Execute()) - actual := buf.String() - expected := golden.Get(t, []byte(actual), "secret-list-with-filter.golden") - assert.EqualNormalizedString(t, assert.RemoveSpace, actual, string(expected)) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/secret/remove.go b/fn/vendor/github.com/docker/docker/cli/command/secret/remove.go deleted file mode 100644 index a4b501d17..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/secret/remove.go +++ /dev/null @@ -1,53 +0,0 @@ -package secret - -import ( - "fmt" - "strings" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type removeOptions struct { - names []string -} - -func newSecretRemoveCommand(dockerCli command.Cli) *cobra.Command { - return &cobra.Command{ - Use: "rm SECRET [SECRET...]", - Aliases: []string{"remove"}, - 
Short: "Remove one or more secrets", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts := removeOptions{ - names: args, - } - return runSecretRemove(dockerCli, opts) - }, - } -} - -func runSecretRemove(dockerCli command.Cli, opts removeOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - var errs []string - - for _, name := range opts.names { - if err := client.SecretRemove(ctx, name); err != nil { - errs = append(errs, err.Error()) - continue - } - - fmt.Fprintln(dockerCli.Out(), name) - } - - if len(errs) > 0 { - return errors.Errorf("%s", strings.Join(errs, "\n")) - } - - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/secret/remove_test.go b/fn/vendor/github.com/docker/docker/cli/command/secret/remove_test.go deleted file mode 100644 index 92ca9b9b9..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/secret/remove_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package secret - -import ( - "bytes" - "io/ioutil" - "strings" - "testing" - - "github.com/docker/docker/cli/internal/test" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/pkg/errors" -) - -func TestSecretRemoveErrors(t *testing.T) { - testCases := []struct { - args []string - secretRemoveFunc func(string) error - expectedError string - }{ - { - args: []string{}, - expectedError: "requires at least 1 argument(s).", - }, - { - args: []string{"foo"}, - secretRemoveFunc: func(name string) error { - return errors.Errorf("error removing secret") - }, - expectedError: "error removing secret", - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newSecretRemoveCommand( - test.NewFakeCli(&fakeClient{ - secretRemoveFunc: tc.secretRemoveFunc, - }, buf), - ) - cmd.SetArgs(tc.args) - cmd.SetOutput(ioutil.Discard) - assert.Error(t, cmd.Execute(), tc.expectedError) - } -} - -func TestSecretRemoveWithName(t *testing.T) { - names := []string{"foo", "bar"} - buf := 
new(bytes.Buffer) - var removedSecrets []string - cli := test.NewFakeCli(&fakeClient{ - secretRemoveFunc: func(name string) error { - removedSecrets = append(removedSecrets, name) - return nil - }, - }, buf) - cmd := newSecretRemoveCommand(cli) - cmd.SetArgs(names) - assert.NilError(t, cmd.Execute()) - assert.EqualStringSlice(t, strings.Split(strings.TrimSpace(buf.String()), "\n"), names) - assert.EqualStringSlice(t, removedSecrets, names) -} - -func TestSecretRemoveContinueAfterError(t *testing.T) { - names := []string{"foo", "bar"} - buf := new(bytes.Buffer) - var removedSecrets []string - - cli := test.NewFakeCli(&fakeClient{ - secretRemoveFunc: func(name string) error { - removedSecrets = append(removedSecrets, name) - if name == "foo" { - return errors.Errorf("error removing secret: %s", name) - } - return nil - }, - }, buf) - - cmd := newSecretRemoveCommand(cli) - cmd.SetArgs(names) - assert.Error(t, cmd.Execute(), "error removing secret: foo") - assert.EqualStringSlice(t, removedSecrets, names) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-create-with-name.golden b/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-create-with-name.golden deleted file mode 100644 index 788642a93..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-create-with-name.golden +++ /dev/null @@ -1 +0,0 @@ -secret_foo_bar diff --git a/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-inspect-with-format.json-template.golden b/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-inspect-with-format.json-template.golden deleted file mode 100644 index aab678f85..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-inspect-with-format.json-template.golden +++ /dev/null @@ -1 +0,0 @@ -{"label1":"label-foo"} diff --git 
a/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-inspect-with-format.simple-template.golden b/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-inspect-with-format.simple-template.golden deleted file mode 100644 index 257cc5642..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-inspect-with-format.simple-template.golden +++ /dev/null @@ -1 +0,0 @@ -foo diff --git a/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-inspect-without-format.multiple-secrets-with-labels.golden b/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-inspect-without-format.multiple-secrets-with-labels.golden deleted file mode 100644 index 6887c185f..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-inspect-without-format.multiple-secrets-with-labels.golden +++ /dev/null @@ -1,26 +0,0 @@ -[ - { - "ID": "ID-foo", - "Version": {}, - "CreatedAt": "0001-01-01T00:00:00Z", - "UpdatedAt": "0001-01-01T00:00:00Z", - "Spec": { - "Name": "foo", - "Labels": { - "label1": "label-foo" - } - } - }, - { - "ID": "ID-bar", - "Version": {}, - "CreatedAt": "0001-01-01T00:00:00Z", - "UpdatedAt": "0001-01-01T00:00:00Z", - "Spec": { - "Name": "bar", - "Labels": { - "label1": "label-foo" - } - } - } -] diff --git a/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-inspect-without-format.single-secret.golden b/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-inspect-without-format.single-secret.golden deleted file mode 100644 index ea42ec6f4..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-inspect-without-format.single-secret.golden +++ /dev/null @@ -1,12 +0,0 @@ -[ - { - "ID": "ID-foo", - "Version": {}, - "CreatedAt": "0001-01-01T00:00:00Z", - "UpdatedAt": "0001-01-01T00:00:00Z", - "Spec": { - "Name": "foo", - "Labels": null - } - } -] diff --git 
a/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-list-with-config-format.golden b/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-list-with-config-format.golden deleted file mode 100644 index 9a4753880..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-list-with-config-format.golden +++ /dev/null @@ -1,2 +0,0 @@ -foo -bar label=label-bar diff --git a/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-list-with-filter.golden b/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-list-with-filter.golden deleted file mode 100644 index 29983de8e..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-list-with-filter.golden +++ /dev/null @@ -1,3 +0,0 @@ -ID NAME CREATED UPDATED -ID-foo foo 2 hours ago About an hour ago -ID-bar bar 2 hours ago About an hour ago diff --git a/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-list-with-format.golden b/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-list-with-format.golden deleted file mode 100644 index 9a4753880..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-list-with-format.golden +++ /dev/null @@ -1,2 +0,0 @@ -foo -bar label=label-bar diff --git a/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-list-with-quiet-option.golden b/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-list-with-quiet-option.golden deleted file mode 100644 index 83fb6e897..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-list-with-quiet-option.golden +++ /dev/null @@ -1,2 +0,0 @@ -ID-foo -ID-bar diff --git a/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-list.golden b/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-list.golden deleted file mode 100644 index 29983de8e..000000000 --- 
a/fn/vendor/github.com/docker/docker/cli/command/secret/testdata/secret-list.golden +++ /dev/null @@ -1,3 +0,0 @@ -ID NAME CREATED UPDATED -ID-foo foo 2 hours ago About an hour ago -ID-bar bar 2 hours ago About an hour ago diff --git a/fn/vendor/github.com/docker/docker/cli/command/service/cmd.go b/fn/vendor/github.com/docker/docker/cli/command/service/cmd.go deleted file mode 100644 index 51208b80c..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/service/cmd.go +++ /dev/null @@ -1,30 +0,0 @@ -package service - -import ( - "github.com/spf13/cobra" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" -) - -// NewServiceCommand returns a cobra command for `service` subcommands -func NewServiceCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "service", - Short: "Manage services", - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - Tags: map[string]string{"version": "1.24"}, - } - cmd.AddCommand( - newCreateCommand(dockerCli), - newInspectCommand(dockerCli), - newPsCommand(dockerCli), - newListCommand(dockerCli), - newRemoveCommand(dockerCli), - newScaleCommand(dockerCli), - newUpdateCommand(dockerCli), - newLogsCommand(dockerCli), - ) - return cmd -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/service/create.go b/fn/vendor/github.com/docker/docker/cli/command/service/create.go deleted file mode 100644 index bb2a1fe3b..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/service/create.go +++ /dev/null @@ -1,118 +0,0 @@ -package service - -import ( - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "golang.org/x/net/context" -) - -func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := newServiceOptions() - - cmd := &cobra.Command{ - Use: "create [OPTIONS] IMAGE [COMMAND] [ARG...]", - Short: "Create a new service", - Args: 
cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.image = args[0] - if len(args) > 1 { - opts.args = args[1:] - } - return runCreate(dockerCli, cmd.Flags(), opts) - }, - } - flags := cmd.Flags() - flags.StringVar(&opts.mode, flagMode, "replicated", "Service mode (replicated or global)") - flags.StringVar(&opts.name, flagName, "", "Service name") - - addServiceFlags(flags, opts, buildServiceDefaultFlagMapping()) - - flags.VarP(&opts.labels, flagLabel, "l", "Service labels") - flags.Var(&opts.containerLabels, flagContainerLabel, "Container labels") - flags.VarP(&opts.env, flagEnv, "e", "Set environment variables") - flags.Var(&opts.envFile, flagEnvFile, "Read in a file of environment variables") - flags.Var(&opts.mounts, flagMount, "Attach a filesystem mount to the service") - flags.Var(&opts.constraints, flagConstraint, "Placement constraints") - flags.Var(&opts.placementPrefs, flagPlacementPref, "Add a placement preference") - flags.SetAnnotation(flagPlacementPref, "version", []string{"1.28"}) - flags.Var(&opts.networks, flagNetwork, "Network attachments") - flags.Var(&opts.secrets, flagSecret, "Specify secrets to expose to the service") - flags.SetAnnotation(flagSecret, "version", []string{"1.25"}) - flags.VarP(&opts.endpoint.publishPorts, flagPublish, "p", "Publish a port as a node port") - flags.Var(&opts.groups, flagGroup, "Set one or more supplementary user groups for the container") - flags.SetAnnotation(flagGroup, "version", []string{"1.25"}) - flags.Var(&opts.dns, flagDNS, "Set custom DNS servers") - flags.SetAnnotation(flagDNS, "version", []string{"1.25"}) - flags.Var(&opts.dnsOption, flagDNSOption, "Set DNS options") - flags.SetAnnotation(flagDNSOption, "version", []string{"1.25"}) - flags.Var(&opts.dnsSearch, flagDNSSearch, "Set custom DNS search domains") - flags.SetAnnotation(flagDNSSearch, "version", []string{"1.25"}) - flags.Var(&opts.hosts, flagHost, "Set one or more custom host-to-IP mappings (host:ip)") - 
flags.SetAnnotation(flagHost, "version", []string{"1.25"}) - - flags.SetInterspersed(false) - return cmd -} - -func runCreate(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts *serviceOptions) error { - apiClient := dockerCli.Client() - createOpts := types.ServiceCreateOptions{} - - ctx := context.Background() - - service, err := opts.ToService(ctx, apiClient, flags) - if err != nil { - return err - } - - specifiedSecrets := opts.secrets.Value() - if len(specifiedSecrets) > 0 { - // parse and validate secrets - secrets, err := ParseSecrets(apiClient, specifiedSecrets) - if err != nil { - return err - } - service.TaskTemplate.ContainerSpec.Secrets = secrets - - } - - if err := resolveServiceImageDigest(dockerCli, &service); err != nil { - return err - } - - // only send auth if flag was set - if opts.registryAuth { - // Retrieve encoded auth token from the image reference - encodedAuth, err := command.RetrieveAuthTokenFromImage(ctx, dockerCli, opts.image) - if err != nil { - return err - } - createOpts.EncodedRegistryAuth = encodedAuth - } - - response, err := apiClient.ServiceCreate(ctx, service, createOpts) - if err != nil { - return err - } - - for _, warning := range response.Warnings { - fmt.Fprintln(dockerCli.Err(), warning) - } - - fmt.Fprintf(dockerCli.Out(), "%s\n", response.ID) - - if opts.detach { - if !flags.Changed("detach") { - fmt.Fprintln(dockerCli.Err(), "Since --detach=false was not specified, tasks will be created in the background.\n"+ - "In a future release, --detach=false will become the default.") - } - return nil - } - - return waitOnService(ctx, dockerCli, response.ID, opts) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/service/helpers.go b/fn/vendor/github.com/docker/docker/cli/command/service/helpers.go deleted file mode 100644 index 228936990..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/service/helpers.go +++ /dev/null @@ -1,39 +0,0 @@ -package service - -import ( - "io" - - 
"github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/service/progress" - "github.com/docker/docker/pkg/jsonmessage" - "golang.org/x/net/context" -) - -// waitOnService waits for the service to converge. It outputs a progress bar, -// if appopriate based on the CLI flags. -func waitOnService(ctx context.Context, dockerCli *command.DockerCli, serviceID string, opts *serviceOptions) error { - errChan := make(chan error, 1) - pipeReader, pipeWriter := io.Pipe() - - go func() { - errChan <- progress.ServiceProgress(ctx, dockerCli.Client(), serviceID, pipeWriter) - }() - - if opts.quiet { - go func() { - for { - var buf [1024]byte - if _, err := pipeReader.Read(buf[:]); err != nil { - return - } - } - }() - return <-errChan - } - - err := jsonmessage.DisplayJSONMessagesToStream(pipeReader, dockerCli.Out(), nil) - if err == nil { - err = <-errChan - } - return err -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/service/inspect.go b/fn/vendor/github.com/docker/docker/cli/command/service/inspect.go deleted file mode 100644 index fae24eeaf..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/service/inspect.go +++ /dev/null @@ -1,94 +0,0 @@ -package service - -import ( - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - apiclient "github.com/docker/docker/client" - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -type inspectOptions struct { - refs []string - format string - pretty bool -} - -func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] SERVICE [SERVICE...]", - Short: "Display detailed information on one or more services", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.refs = args - - if opts.pretty && 
len(opts.format) > 0 { - return errors.Errorf("--format is incompatible with human friendly format") - } - return runInspect(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - flags.BoolVar(&opts.pretty, "pretty", false, "Print the information in a human friendly format") - return cmd -} - -func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - if opts.pretty { - opts.format = "pretty" - } - - getRef := func(ref string) (interface{}, []byte, error) { - // Service inspect shows defaults values in empty fields. - service, _, err := client.ServiceInspectWithRaw(ctx, ref, types.ServiceInspectOptions{InsertDefaults: true}) - if err == nil || !apiclient.IsErrServiceNotFound(err) { - return service, nil, err - } - return nil, nil, errors.Errorf("Error: no such service: %s", ref) - } - - getNetwork := func(ref string) (interface{}, []byte, error) { - network, _, err := client.NetworkInspectWithRaw(ctx, ref, false) - if err == nil || !apiclient.IsErrNetworkNotFound(err) { - return network, nil, err - } - return nil, nil, errors.Errorf("Error: no such network: %s", ref) - } - - f := opts.format - if len(f) == 0 { - f = "raw" - if len(dockerCli.ConfigFile().ServiceInspectFormat) > 0 { - f = dockerCli.ConfigFile().ServiceInspectFormat - } - } - - // check if the user is trying to apply a template to the pretty format, which - // is not supported - if strings.HasPrefix(f, "pretty") && f != "pretty" { - return errors.Errorf("Cannot supply extra formatting options to the pretty template") - } - - serviceCtx := formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewServiceFormat(f), - } - - if err := formatter.ServiceInspectWrite(serviceCtx, opts.refs, getRef, getNetwork); err != nil { - return cli.StatusError{StatusCode: 1, Status: err.Error()} - } - return nil -} diff --git 
a/fn/vendor/github.com/docker/docker/cli/command/service/inspect_test.go b/fn/vendor/github.com/docker/docker/cli/command/service/inspect_test.go deleted file mode 100644 index 44d9df917..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/service/inspect_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package service - -import ( - "bytes" - "encoding/json" - "strings" - "testing" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/command/formatter" - "github.com/docker/docker/pkg/testutil/assert" -) - -func formatServiceInspect(t *testing.T, format formatter.Format, now time.Time) string { - b := new(bytes.Buffer) - - endpointSpec := &swarm.EndpointSpec{ - Mode: "vip", - Ports: []swarm.PortConfig{ - { - Protocol: swarm.PortConfigProtocolTCP, - TargetPort: 5000, - }, - }, - } - - two := uint64(2) - - s := swarm.Service{ - ID: "de179gar9d0o7ltdybungplod", - Meta: swarm.Meta{ - Version: swarm.Version{Index: 315}, - CreatedAt: now, - UpdatedAt: now, - }, - Spec: swarm.ServiceSpec{ - Annotations: swarm.Annotations{ - Name: "my_service", - Labels: map[string]string{"com.label": "foo"}, - }, - TaskTemplate: swarm.TaskSpec{ - ContainerSpec: swarm.ContainerSpec{ - Image: "foo/bar@sha256:this_is_a_test", - }, - Networks: []swarm.NetworkAttachmentConfig{ - { - Target: "5vpyomhb6ievnk0i0o60gcnei", - Aliases: []string{"web"}, - }, - }, - }, - Mode: swarm.ServiceMode{ - Replicated: &swarm.ReplicatedService{ - Replicas: &two, - }, - }, - EndpointSpec: endpointSpec, - }, - Endpoint: swarm.Endpoint{ - Spec: *endpointSpec, - Ports: []swarm.PortConfig{ - { - Protocol: swarm.PortConfigProtocolTCP, - TargetPort: 5000, - PublishedPort: 30000, - }, - }, - VirtualIPs: []swarm.EndpointVirtualIP{ - { - NetworkID: "6o4107cj2jx9tihgb0jyts6pj", - Addr: "10.255.0.4/16", - }, - }, - }, - UpdateStatus: &swarm.UpdateStatus{ - StartedAt: &now, - CompletedAt: &now, - }, - } - - ctx := formatter.Context{ - Output: b, - 
Format: format, - } - - err := formatter.ServiceInspectWrite(ctx, []string{"de179gar9d0o7ltdybungplod"}, - func(ref string) (interface{}, []byte, error) { - return s, nil, nil - }, - func(ref string) (interface{}, []byte, error) { - return types.NetworkResource{ - ID: "5vpyomhb6ievnk0i0o60gcnei", - Name: "mynetwork", - }, nil, nil - }, - ) - if err != nil { - t.Fatal(err) - } - return b.String() -} - -func TestPrettyPrintWithNoUpdateConfig(t *testing.T) { - s := formatServiceInspect(t, formatter.NewServiceFormat("pretty"), time.Now()) - if strings.Contains(s, "UpdateStatus") { - t.Fatal("Pretty print failed before parsing UpdateStatus") - } - if !strings.Contains(s, "mynetwork") { - t.Fatal("network name not found in inspect output") - } -} - -func TestJSONFormatWithNoUpdateConfig(t *testing.T) { - now := time.Now() - // s1: [{"ID":..}] - // s2: {"ID":..} - s1 := formatServiceInspect(t, formatter.NewServiceFormat(""), now) - t.Log("// s1") - t.Logf("%s", s1) - s2 := formatServiceInspect(t, formatter.NewServiceFormat("{{json .}}"), now) - t.Log("// s2") - t.Logf("%s", s2) - var m1Wrap []map[string]interface{} - if err := json.Unmarshal([]byte(s1), &m1Wrap); err != nil { - t.Fatal(err) - } - if len(m1Wrap) != 1 { - t.Fatalf("strange s1=%s", s1) - } - m1 := m1Wrap[0] - t.Logf("m1=%+v", m1) - var m2 map[string]interface{} - if err := json.Unmarshal([]byte(s2), &m2); err != nil { - t.Fatal(err) - } - t.Logf("m2=%+v", m2) - assert.DeepEqual(t, m2, m1) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/service/list.go b/fn/vendor/github.com/docker/docker/cli/command/service/list.go deleted file mode 100644 index ca3e741fa..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/service/list.go +++ /dev/null @@ -1,128 +0,0 @@ -package service - -import ( - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - 
"github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - "github.com/docker/docker/opts" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type listOptions struct { - quiet bool - format string - filter opts.FilterOpt -} - -func newListCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := listOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ls [OPTIONS]", - Aliases: []string{"list"}, - Short: "List services", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runList(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") - flags.StringVar(&opts.format, "format", "", "Pretty-print services using a Go template") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func runList(dockerCli *command.DockerCli, opts listOptions) error { - ctx := context.Background() - client := dockerCli.Client() - - services, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: opts.filter.Value()}) - if err != nil { - return err - } - - info := map[string]formatter.ServiceListInfo{} - if len(services) > 0 && !opts.quiet { - // only non-empty services and not quiet, should we call TaskList and NodeList api - taskFilter := filters.NewArgs() - for _, service := range services { - taskFilter.Add("service", service.ID) - } - - tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: taskFilter}) - if err != nil { - return err - } - - nodes, err := client.NodeList(ctx, types.NodeListOptions{}) - if err != nil { - return err - } - - info = GetServicesStatus(services, nodes, tasks) - } - - format := opts.format - if len(format) == 0 { - if len(dockerCli.ConfigFile().ServicesFormat) > 0 && !opts.quiet { - format = dockerCli.ConfigFile().ServicesFormat - } else { - format = formatter.TableFormatKey - } - } - - servicesCtx := 
formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewServiceListFormat(format, opts.quiet), - } - return formatter.ServiceListWrite(servicesCtx, services, info) -} - -// GetServicesStatus returns a map of mode and replicas -func GetServicesStatus(services []swarm.Service, nodes []swarm.Node, tasks []swarm.Task) map[string]formatter.ServiceListInfo { - running := map[string]int{} - tasksNoShutdown := map[string]int{} - - activeNodes := make(map[string]struct{}) - for _, n := range nodes { - if n.Status.State != swarm.NodeStateDown { - activeNodes[n.ID] = struct{}{} - } - } - - for _, task := range tasks { - if task.DesiredState != swarm.TaskStateShutdown { - tasksNoShutdown[task.ServiceID]++ - } - - if _, nodeActive := activeNodes[task.NodeID]; nodeActive && task.Status.State == swarm.TaskStateRunning { - running[task.ServiceID]++ - } - } - - info := map[string]formatter.ServiceListInfo{} - for _, service := range services { - info[service.ID] = formatter.ServiceListInfo{} - if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil { - info[service.ID] = formatter.ServiceListInfo{ - Mode: "replicated", - Replicas: fmt.Sprintf("%d/%d", running[service.ID], *service.Spec.Mode.Replicated.Replicas), - } - } else if service.Spec.Mode.Global != nil { - info[service.ID] = formatter.ServiceListInfo{ - Mode: "global", - Replicas: fmt.Sprintf("%d/%d", running[service.ID], tasksNoShutdown[service.ID]), - } - } - } - return info -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/service/logs.go b/fn/vendor/github.com/docker/docker/cli/command/service/logs.go deleted file mode 100644 index 2440c1680..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/service/logs.go +++ /dev/null @@ -1,298 +0,0 @@ -package service - -import ( - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - 
"github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/idresolver" - "github.com/docker/docker/client" - "github.com/docker/docker/pkg/stdcopy" - "github.com/docker/docker/pkg/stringid" - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -type logsOptions struct { - noResolve bool - noTrunc bool - noTaskIDs bool - follow bool - since string - timestamps bool - tail string - - target string -} - -// TODO(dperny) the whole CLI for this is kind of a mess IMHOIRL and it needs -// to be refactored agressively. There may be changes to the implementation of -// details, which will be need to be reflected in this code. The refactoring -// should be put off until we make those changes, tho, because I think the -// decisions made WRT details will impact the design of the CLI. -func newLogsCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts logsOptions - - cmd := &cobra.Command{ - Use: "logs [OPTIONS] SERVICE|TASK", - Short: "Fetch the logs of a service or task", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.target = args[0] - return runLogs(dockerCli, &opts) - }, - Tags: map[string]string{"version": "1.29"}, - } - - flags := cmd.Flags() - // options specific to service logs - flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names in output") - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate output") - flags.BoolVar(&opts.noTaskIDs, "no-task-ids", false, "Do not include task IDs in output") - // options identical to container logs - flags.BoolVarP(&opts.follow, "follow", "f", false, "Follow log output") - flags.StringVar(&opts.since, "since", "", "Show logs since timestamp (e.g. 2013-01-02T13:23:37) or relative (e.g. 
42m for 42 minutes)") - flags.BoolVarP(&opts.timestamps, "timestamps", "t", false, "Show timestamps") - flags.StringVar(&opts.tail, "tail", "all", "Number of lines to show from the end of the logs") - return cmd -} - -func runLogs(dockerCli *command.DockerCli, opts *logsOptions) error { - ctx := context.Background() - - options := types.ContainerLogsOptions{ - ShowStdout: true, - ShowStderr: true, - Since: opts.since, - Timestamps: opts.timestamps, - Follow: opts.follow, - Tail: opts.tail, - Details: true, - } - - cli := dockerCli.Client() - - var ( - maxLength = 1 - responseBody io.ReadCloser - tty bool - ) - - service, _, err := cli.ServiceInspectWithRaw(ctx, opts.target, types.ServiceInspectOptions{}) - if err != nil { - // if it's any error other than service not found, it's Real - if !client.IsErrServiceNotFound(err) { - return err - } - task, _, err := cli.TaskInspectWithRaw(ctx, opts.target) - tty = task.Spec.ContainerSpec.TTY - // TODO(dperny) hot fix until we get a nice details system squared away, - // ignores details (including task context) if we have a TTY log - // if we don't do this, we'll vomit the huge context verbatim into the - // TTY log lines and that's Undesirable. 
- if tty { - options.Details = false - } - - responseBody, err = cli.TaskLogs(ctx, opts.target, options) - if err != nil { - if client.IsErrTaskNotFound(err) { - // if the task ALSO isn't found, rewrite the error to be clear - // that we looked for services AND tasks - err = fmt.Errorf("No such task or service") - } - return err - } - maxLength = getMaxLength(task.Slot) - responseBody, err = cli.TaskLogs(ctx, opts.target, options) - } else { - tty = service.Spec.TaskTemplate.ContainerSpec.TTY - // TODO(dperny) hot fix until we get a nice details system squared away, - // ignores details (including task context) if we have a TTY log - if tty { - options.Details = false - } - - responseBody, err = cli.ServiceLogs(ctx, opts.target, options) - if err != nil { - return err - } - if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil { - // if replicas are initialized, figure out if we need to pad them - replicas := *service.Spec.Mode.Replicated.Replicas - maxLength = getMaxLength(int(replicas)) - } - } - defer responseBody.Close() - - if tty { - _, err = io.Copy(dockerCli.Out(), responseBody) - return err - } - - taskFormatter := newTaskFormatter(cli, opts, maxLength) - - stdout := &logWriter{ctx: ctx, opts: opts, f: taskFormatter, w: dockerCli.Out()} - stderr := &logWriter{ctx: ctx, opts: opts, f: taskFormatter, w: dockerCli.Err()} - - // TODO(aluzzardi): Do an io.Copy for services with TTY enabled. 
- _, err = stdcopy.StdCopy(stdout, stderr, responseBody) - return err -} - -// getMaxLength gets the maximum length of the number in base 10 -func getMaxLength(i int) int { - return len(strconv.FormatInt(int64(i), 10)) -} - -type taskFormatter struct { - client client.APIClient - opts *logsOptions - padding int - - r *idresolver.IDResolver - cache map[logContext]string -} - -func newTaskFormatter(client client.APIClient, opts *logsOptions, padding int) *taskFormatter { - return &taskFormatter{ - client: client, - opts: opts, - padding: padding, - r: idresolver.New(client, opts.noResolve), - cache: make(map[logContext]string), - } -} - -func (f *taskFormatter) format(ctx context.Context, logCtx logContext) (string, error) { - if cached, ok := f.cache[logCtx]; ok { - return cached, nil - } - - nodeName, err := f.r.Resolve(ctx, swarm.Node{}, logCtx.nodeID) - if err != nil { - return "", err - } - - serviceName, err := f.r.Resolve(ctx, swarm.Service{}, logCtx.serviceID) - if err != nil { - return "", err - } - - task, _, err := f.client.TaskInspectWithRaw(ctx, logCtx.taskID) - if err != nil { - return "", err - } - - taskName := fmt.Sprintf("%s.%d", serviceName, task.Slot) - if !f.opts.noTaskIDs { - if f.opts.noTrunc { - taskName += fmt.Sprintf(".%s", task.ID) - } else { - taskName += fmt.Sprintf(".%s", stringid.TruncateID(task.ID)) - } - } - - padding := strings.Repeat(" ", f.padding-getMaxLength(task.Slot)) - formatted := fmt.Sprintf("%s@%s%s", taskName, nodeName, padding) - f.cache[logCtx] = formatted - return formatted, nil -} - -type logWriter struct { - ctx context.Context - opts *logsOptions - f *taskFormatter - w io.Writer -} - -func (lw *logWriter) Write(buf []byte) (int, error) { - contextIndex := 0 - numParts := 2 - if lw.opts.timestamps { - contextIndex++ - numParts++ - } - - parts := bytes.SplitN(buf, []byte(" "), numParts) - if len(parts) != numParts { - return 0, errors.Errorf("invalid context in log message: %v", string(buf)) - } - - logCtx, err := 
lw.parseContext(string(parts[contextIndex])) - if err != nil { - return 0, err - } - - output := []byte{} - for i, part := range parts { - // First part doesn't get space separation. - if i > 0 { - output = append(output, []byte(" ")...) - } - - if i == contextIndex { - formatted, err := lw.f.format(lw.ctx, logCtx) - if err != nil { - return 0, err - } - output = append(output, []byte(fmt.Sprintf("%s |", formatted))...) - } else { - output = append(output, part...) - } - } - _, err = lw.w.Write(output) - if err != nil { - return 0, err - } - - return len(buf), nil -} - -func (lw *logWriter) parseContext(input string) (logContext, error) { - context := make(map[string]string) - - components := strings.Split(input, ",") - for _, component := range components { - parts := strings.SplitN(component, "=", 2) - if len(parts) != 2 { - return logContext{}, errors.Errorf("invalid context: %s", input) - } - context[parts[0]] = parts[1] - } - - nodeID, ok := context["com.docker.swarm.node.id"] - if !ok { - return logContext{}, errors.Errorf("missing node id in context: %s", input) - } - - serviceID, ok := context["com.docker.swarm.service.id"] - if !ok { - return logContext{}, errors.Errorf("missing service id in context: %s", input) - } - - taskID, ok := context["com.docker.swarm.task.id"] - if !ok { - return logContext{}, errors.Errorf("missing task id in context: %s", input) - } - - return logContext{ - nodeID: nodeID, - serviceID: serviceID, - taskID: taskID, - }, nil -} - -type logContext struct { - nodeID string - serviceID string - taskID string -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/service/opts.go b/fn/vendor/github.com/docker/docker/cli/command/service/opts.go deleted file mode 100644 index 4211c5bf8..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/service/opts.go +++ /dev/null @@ -1,912 +0,0 @@ -package service - -import ( - "fmt" - "sort" - "strconv" - "strings" - "time" - - "github.com/docker/docker/api/types/container" - 
"github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/client" - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/api/defaults" - shlex "github.com/flynn-archive/go-shlex" - gogotypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" - "github.com/spf13/pflag" - "golang.org/x/net/context" -) - -type int64Value interface { - Value() int64 -} - -// PositiveDurationOpt is an option type for time.Duration that uses a pointer. -// It bahave similarly to DurationOpt but only allows positive duration values. -type PositiveDurationOpt struct { - DurationOpt -} - -// Set a new value on the option. Setting a negative duration value will cause -// an error to be returned. -func (d *PositiveDurationOpt) Set(s string) error { - err := d.DurationOpt.Set(s) - if err != nil { - return err - } - if *d.DurationOpt.value < 0 { - return errors.Errorf("duration cannot be negative") - } - return nil -} - -// DurationOpt is an option type for time.Duration that uses a pointer. This -// allows us to get nil values outside, instead of defaulting to 0 -type DurationOpt struct { - value *time.Duration -} - -// Set a new value on the option -func (d *DurationOpt) Set(s string) error { - v, err := time.ParseDuration(s) - d.value = &v - return err -} - -// Type returns the type of this option, which will be displayed in `--help` output -func (d *DurationOpt) Type() string { - return "duration" -} - -// String returns a string repr of this option -func (d *DurationOpt) String() string { - if d.value != nil { - return d.value.String() - } - return "" -} - -// Value returns the time.Duration -func (d *DurationOpt) Value() *time.Duration { - return d.value -} - -// Uint64Opt represents a uint64. 
-type Uint64Opt struct { - value *uint64 -} - -// Set a new value on the option -func (i *Uint64Opt) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - i.value = &v - return err -} - -// Type returns the type of this option, which will be displayed in `--help` output -func (i *Uint64Opt) Type() string { - return "uint" -} - -// String returns a string repr of this option -func (i *Uint64Opt) String() string { - if i.value != nil { - return fmt.Sprintf("%v", *i.value) - } - return "" -} - -// Value returns the uint64 -func (i *Uint64Opt) Value() *uint64 { - return i.value -} - -type floatValue float32 - -func (f *floatValue) Set(s string) error { - v, err := strconv.ParseFloat(s, 32) - *f = floatValue(v) - return err -} - -func (f *floatValue) Type() string { - return "float" -} - -func (f *floatValue) String() string { - return strconv.FormatFloat(float64(*f), 'g', -1, 32) -} - -func (f *floatValue) Value() float32 { - return float32(*f) -} - -// placementPrefOpts holds a list of placement preferences. -type placementPrefOpts struct { - prefs []swarm.PlacementPreference - strings []string -} - -func (opts *placementPrefOpts) String() string { - if len(opts.strings) == 0 { - return "" - } - return fmt.Sprintf("%v", opts.strings) -} - -// Set validates the input value and adds it to the internal slices. -// Note: in the future strategies other than "spread", may be supported, -// as well as additional comma-separated options. 
-func (opts *placementPrefOpts) Set(value string) error { - fields := strings.Split(value, "=") - if len(fields) != 2 { - return errors.New(`placement preference must be of the format "="`) - } - if fields[0] != "spread" { - return errors.Errorf("unsupported placement preference %s (only spread is supported)", fields[0]) - } - - opts.prefs = append(opts.prefs, swarm.PlacementPreference{ - Spread: &swarm.SpreadOver{ - SpreadDescriptor: fields[1], - }, - }) - opts.strings = append(opts.strings, value) - return nil -} - -// Type returns a string name for this Option type -func (opts *placementPrefOpts) Type() string { - return "pref" -} - -// ShlexOpt is a flag Value which parses a string as a list of shell words -type ShlexOpt []string - -// Set the value -func (s *ShlexOpt) Set(value string) error { - valueSlice, err := shlex.Split(value) - *s = ShlexOpt(valueSlice) - return err -} - -// Type returns the tyep of the value -func (s *ShlexOpt) Type() string { - return "command" -} - -func (s *ShlexOpt) String() string { - if len(*s) == 0 { - return "" - } - return fmt.Sprint(*s) -} - -// Value returns the value as a string slice -func (s *ShlexOpt) Value() []string { - return []string(*s) -} - -type updateOptions struct { - parallelism uint64 - delay time.Duration - monitor time.Duration - onFailure string - maxFailureRatio floatValue - order string -} - -func updateConfigFromDefaults(defaultUpdateConfig *api.UpdateConfig) *swarm.UpdateConfig { - defaultFailureAction := strings.ToLower(api.UpdateConfig_FailureAction_name[int32(defaultUpdateConfig.FailureAction)]) - defaultMonitor, _ := gogotypes.DurationFromProto(defaultUpdateConfig.Monitor) - return &swarm.UpdateConfig{ - Parallelism: defaultUpdateConfig.Parallelism, - Delay: defaultUpdateConfig.Delay, - Monitor: defaultMonitor, - FailureAction: defaultFailureAction, - MaxFailureRatio: defaultUpdateConfig.MaxFailureRatio, - Order: defaultOrder(defaultUpdateConfig.Order), - } -} - -func (opts updateOptions) 
updateConfig(flags *pflag.FlagSet) *swarm.UpdateConfig { - if !anyChanged(flags, flagUpdateParallelism, flagUpdateDelay, flagUpdateMonitor, flagUpdateFailureAction, flagUpdateMaxFailureRatio) { - return nil - } - - updateConfig := updateConfigFromDefaults(defaults.Service.Update) - - if flags.Changed(flagUpdateParallelism) { - updateConfig.Parallelism = opts.parallelism - } - if flags.Changed(flagUpdateDelay) { - updateConfig.Delay = opts.delay - } - if flags.Changed(flagUpdateMonitor) { - updateConfig.Monitor = opts.monitor - } - if flags.Changed(flagUpdateFailureAction) { - updateConfig.FailureAction = opts.onFailure - } - if flags.Changed(flagUpdateMaxFailureRatio) { - updateConfig.MaxFailureRatio = opts.maxFailureRatio.Value() - } - if flags.Changed(flagUpdateOrder) { - updateConfig.Order = opts.order - } - - return updateConfig -} - -func (opts updateOptions) rollbackConfig(flags *pflag.FlagSet) *swarm.UpdateConfig { - if !anyChanged(flags, flagRollbackParallelism, flagRollbackDelay, flagRollbackMonitor, flagRollbackFailureAction, flagRollbackMaxFailureRatio) { - return nil - } - - updateConfig := updateConfigFromDefaults(defaults.Service.Rollback) - - if flags.Changed(flagRollbackParallelism) { - updateConfig.Parallelism = opts.parallelism - } - if flags.Changed(flagRollbackDelay) { - updateConfig.Delay = opts.delay - } - if flags.Changed(flagRollbackMonitor) { - updateConfig.Monitor = opts.monitor - } - if flags.Changed(flagRollbackFailureAction) { - updateConfig.FailureAction = opts.onFailure - } - if flags.Changed(flagRollbackMaxFailureRatio) { - updateConfig.MaxFailureRatio = opts.maxFailureRatio.Value() - } - if flags.Changed(flagRollbackOrder) { - updateConfig.Order = opts.order - } - - return updateConfig -} - -type resourceOptions struct { - limitCPU opts.NanoCPUs - limitMemBytes opts.MemBytes - resCPU opts.NanoCPUs - resMemBytes opts.MemBytes -} - -func (r *resourceOptions) ToResourceRequirements() *swarm.ResourceRequirements { - return 
&swarm.ResourceRequirements{ - Limits: &swarm.Resources{ - NanoCPUs: r.limitCPU.Value(), - MemoryBytes: r.limitMemBytes.Value(), - }, - Reservations: &swarm.Resources{ - NanoCPUs: r.resCPU.Value(), - MemoryBytes: r.resMemBytes.Value(), - }, - } -} - -type restartPolicyOptions struct { - condition string - delay DurationOpt - maxAttempts Uint64Opt - window DurationOpt -} - -func defaultRestartPolicy() *swarm.RestartPolicy { - defaultMaxAttempts := defaults.Service.Task.Restart.MaxAttempts - rp := &swarm.RestartPolicy{ - MaxAttempts: &defaultMaxAttempts, - } - - if defaults.Service.Task.Restart.Delay != nil { - defaultRestartDelay, _ := gogotypes.DurationFromProto(defaults.Service.Task.Restart.Delay) - rp.Delay = &defaultRestartDelay - } - if defaults.Service.Task.Restart.Window != nil { - defaultRestartWindow, _ := gogotypes.DurationFromProto(defaults.Service.Task.Restart.Window) - rp.Window = &defaultRestartWindow - } - rp.Condition = defaultRestartCondition() - - return rp -} - -func defaultRestartCondition() swarm.RestartPolicyCondition { - switch defaults.Service.Task.Restart.Condition { - case api.RestartOnNone: - return "none" - case api.RestartOnFailure: - return "on-failure" - case api.RestartOnAny: - return "any" - default: - return "" - } -} - -func defaultOrder(order api.UpdateConfig_UpdateOrder) string { - switch order { - case api.UpdateConfig_STOP_FIRST: - return "stop-first" - case api.UpdateConfig_START_FIRST: - return "start-first" - default: - return "" - } -} - -func (r *restartPolicyOptions) ToRestartPolicy(flags *pflag.FlagSet) *swarm.RestartPolicy { - if !anyChanged(flags, flagRestartDelay, flagRestartMaxAttempts, flagRestartWindow, flagRestartCondition) { - return nil - } - - restartPolicy := defaultRestartPolicy() - - if flags.Changed(flagRestartDelay) { - restartPolicy.Delay = r.delay.Value() - } - if flags.Changed(flagRestartCondition) { - restartPolicy.Condition = swarm.RestartPolicyCondition(r.condition) - } - if 
flags.Changed(flagRestartMaxAttempts) { - restartPolicy.MaxAttempts = r.maxAttempts.Value() - } - if flags.Changed(flagRestartWindow) { - restartPolicy.Window = r.window.Value() - } - - return restartPolicy -} - -type credentialSpecOpt struct { - value *swarm.CredentialSpec - source string -} - -func (c *credentialSpecOpt) Set(value string) error { - c.source = value - c.value = &swarm.CredentialSpec{} - switch { - case strings.HasPrefix(value, "file://"): - c.value.File = strings.TrimPrefix(value, "file://") - case strings.HasPrefix(value, "registry://"): - c.value.Registry = strings.TrimPrefix(value, "registry://") - default: - return errors.New("Invalid credential spec - value must be prefixed file:// or registry:// followed by a value") - } - - return nil -} - -func (c *credentialSpecOpt) Type() string { - return "credential-spec" -} - -func (c *credentialSpecOpt) String() string { - return c.source -} - -func (c *credentialSpecOpt) Value() *swarm.CredentialSpec { - return c.value -} - -func convertNetworks(ctx context.Context, apiClient client.NetworkAPIClient, networks []string) ([]swarm.NetworkAttachmentConfig, error) { - nets := []swarm.NetworkAttachmentConfig{} - for _, networkIDOrName := range networks { - network, err := apiClient.NetworkInspect(ctx, networkIDOrName, false) - if err != nil { - return nil, err - } - nets = append(nets, swarm.NetworkAttachmentConfig{Target: network.ID}) - } - sort.Sort(byNetworkTarget(nets)) - return nets, nil -} - -type endpointOptions struct { - mode string - publishPorts opts.PortOpt -} - -func (e *endpointOptions) ToEndpointSpec() *swarm.EndpointSpec { - return &swarm.EndpointSpec{ - Mode: swarm.ResolutionMode(strings.ToLower(e.mode)), - Ports: e.publishPorts.Value(), - } -} - -type logDriverOptions struct { - name string - opts opts.ListOpts -} - -func newLogDriverOptions() logDriverOptions { - return logDriverOptions{opts: opts.NewListOpts(opts.ValidateEnv)} -} - -func (ldo *logDriverOptions) toLogDriver() 
*swarm.Driver { - if ldo.name == "" { - return nil - } - - // set the log driver only if specified. - return &swarm.Driver{ - Name: ldo.name, - Options: runconfigopts.ConvertKVStringsToMap(ldo.opts.GetAll()), - } -} - -type healthCheckOptions struct { - cmd string - interval PositiveDurationOpt - timeout PositiveDurationOpt - retries int - startPeriod PositiveDurationOpt - noHealthcheck bool -} - -func (opts *healthCheckOptions) toHealthConfig() (*container.HealthConfig, error) { - var healthConfig *container.HealthConfig - haveHealthSettings := opts.cmd != "" || - opts.interval.Value() != nil || - opts.timeout.Value() != nil || - opts.retries != 0 - if opts.noHealthcheck { - if haveHealthSettings { - return nil, errors.Errorf("--%s conflicts with --health-* options", flagNoHealthcheck) - } - healthConfig = &container.HealthConfig{Test: []string{"NONE"}} - } else if haveHealthSettings { - var test []string - if opts.cmd != "" { - test = []string{"CMD-SHELL", opts.cmd} - } - var interval, timeout, startPeriod time.Duration - if ptr := opts.interval.Value(); ptr != nil { - interval = *ptr - } - if ptr := opts.timeout.Value(); ptr != nil { - timeout = *ptr - } - if ptr := opts.startPeriod.Value(); ptr != nil { - startPeriod = *ptr - } - healthConfig = &container.HealthConfig{ - Test: test, - Interval: interval, - Timeout: timeout, - Retries: opts.retries, - StartPeriod: startPeriod, - } - } - return healthConfig, nil -} - -// convertExtraHostsToSwarmHosts converts an array of extra hosts in cli -// : -// into a swarmkit host format: -// IP_address canonical_hostname [aliases...] 
-// This assumes input value (:) has already been validated -func convertExtraHostsToSwarmHosts(extraHosts []string) []string { - hosts := []string{} - for _, extraHost := range extraHosts { - parts := strings.SplitN(extraHost, ":", 2) - hosts = append(hosts, fmt.Sprintf("%s %s", parts[1], parts[0])) - } - return hosts -} - -type serviceOptions struct { - detach bool - quiet bool - - name string - labels opts.ListOpts - containerLabels opts.ListOpts - image string - entrypoint ShlexOpt - args []string - hostname string - env opts.ListOpts - envFile opts.ListOpts - workdir string - user string - groups opts.ListOpts - credentialSpec credentialSpecOpt - stopSignal string - tty bool - readOnly bool - mounts opts.MountOpt - dns opts.ListOpts - dnsSearch opts.ListOpts - dnsOption opts.ListOpts - hosts opts.ListOpts - - resources resourceOptions - stopGrace DurationOpt - - replicas Uint64Opt - mode string - - restartPolicy restartPolicyOptions - constraints opts.ListOpts - placementPrefs placementPrefOpts - update updateOptions - rollback updateOptions - networks opts.ListOpts - endpoint endpointOptions - - registryAuth bool - - logDriver logDriverOptions - - healthcheck healthCheckOptions - secrets opts.SecretOpt -} - -func newServiceOptions() *serviceOptions { - return &serviceOptions{ - labels: opts.NewListOpts(opts.ValidateEnv), - constraints: opts.NewListOpts(nil), - containerLabels: opts.NewListOpts(opts.ValidateEnv), - env: opts.NewListOpts(opts.ValidateEnv), - envFile: opts.NewListOpts(nil), - groups: opts.NewListOpts(nil), - logDriver: newLogDriverOptions(), - dns: opts.NewListOpts(opts.ValidateIPAddress), - dnsOption: opts.NewListOpts(nil), - dnsSearch: opts.NewListOpts(opts.ValidateDNSSearch), - hosts: opts.NewListOpts(opts.ValidateExtraHost), - networks: opts.NewListOpts(nil), - } -} - -func (opts *serviceOptions) ToServiceMode() (swarm.ServiceMode, error) { - serviceMode := swarm.ServiceMode{} - switch opts.mode { - case "global": - if opts.replicas.Value() 
!= nil { - return serviceMode, errors.Errorf("replicas can only be used with replicated mode") - } - - serviceMode.Global = &swarm.GlobalService{} - case "replicated": - serviceMode.Replicated = &swarm.ReplicatedService{ - Replicas: opts.replicas.Value(), - } - default: - return serviceMode, errors.Errorf("Unknown mode: %s, only replicated and global supported", opts.mode) - } - return serviceMode, nil -} - -func (opts *serviceOptions) ToStopGracePeriod(flags *pflag.FlagSet) *time.Duration { - if flags.Changed(flagStopGracePeriod) { - return opts.stopGrace.Value() - } - return nil -} - -func (opts *serviceOptions) ToService(ctx context.Context, apiClient client.APIClient, flags *pflag.FlagSet) (swarm.ServiceSpec, error) { - var service swarm.ServiceSpec - - envVariables, err := runconfigopts.ReadKVStrings(opts.envFile.GetAll(), opts.env.GetAll()) - if err != nil { - return service, err - } - - currentEnv := make([]string, 0, len(envVariables)) - for _, env := range envVariables { // need to process each var, in order - k := strings.SplitN(env, "=", 2)[0] - for i, current := range currentEnv { // remove duplicates - if current == env { - continue // no update required, may hide this behind flag to preserve order of envVariables - } - if strings.HasPrefix(current, k+"=") { - currentEnv = append(currentEnv[:i], currentEnv[i+1:]...) 
- } - } - currentEnv = append(currentEnv, env) - } - - healthConfig, err := opts.healthcheck.toHealthConfig() - if err != nil { - return service, err - } - - serviceMode, err := opts.ToServiceMode() - if err != nil { - return service, err - } - - networks, err := convertNetworks(ctx, apiClient, opts.networks.GetAll()) - if err != nil { - return service, err - } - - service = swarm.ServiceSpec{ - Annotations: swarm.Annotations{ - Name: opts.name, - Labels: runconfigopts.ConvertKVStringsToMap(opts.labels.GetAll()), - }, - TaskTemplate: swarm.TaskSpec{ - ContainerSpec: swarm.ContainerSpec{ - Image: opts.image, - Args: opts.args, - Command: opts.entrypoint.Value(), - Env: currentEnv, - Hostname: opts.hostname, - Labels: runconfigopts.ConvertKVStringsToMap(opts.containerLabels.GetAll()), - Dir: opts.workdir, - User: opts.user, - Groups: opts.groups.GetAll(), - StopSignal: opts.stopSignal, - TTY: opts.tty, - ReadOnly: opts.readOnly, - Mounts: opts.mounts.Value(), - DNSConfig: &swarm.DNSConfig{ - Nameservers: opts.dns.GetAll(), - Search: opts.dnsSearch.GetAll(), - Options: opts.dnsOption.GetAll(), - }, - Hosts: convertExtraHostsToSwarmHosts(opts.hosts.GetAll()), - StopGracePeriod: opts.ToStopGracePeriod(flags), - Secrets: nil, - Healthcheck: healthConfig, - }, - Networks: networks, - Resources: opts.resources.ToResourceRequirements(), - RestartPolicy: opts.restartPolicy.ToRestartPolicy(flags), - Placement: &swarm.Placement{ - Constraints: opts.constraints.GetAll(), - Preferences: opts.placementPrefs.prefs, - }, - LogDriver: opts.logDriver.toLogDriver(), - }, - Mode: serviceMode, - UpdateConfig: opts.update.updateConfig(flags), - RollbackConfig: opts.update.rollbackConfig(flags), - EndpointSpec: opts.endpoint.ToEndpointSpec(), - } - - if opts.credentialSpec.Value() != nil { - service.TaskTemplate.ContainerSpec.Privileges = &swarm.Privileges{ - CredentialSpec: opts.credentialSpec.Value(), - } - } - - return service, nil -} - -type flagDefaults map[string]interface{} - -func 
(fd flagDefaults) getUint64(flagName string) uint64 { - if val, ok := fd[flagName].(uint64); ok { - return val - } - return 0 -} - -func (fd flagDefaults) getString(flagName string) string { - if val, ok := fd[flagName].(string); ok { - return val - } - return "" -} - -func buildServiceDefaultFlagMapping() flagDefaults { - defaultFlagValues := make(map[string]interface{}) - - defaultFlagValues[flagStopGracePeriod], _ = gogotypes.DurationFromProto(defaults.Service.Task.GetContainer().StopGracePeriod) - defaultFlagValues[flagRestartCondition] = `"` + defaultRestartCondition() + `"` - defaultFlagValues[flagRestartDelay], _ = gogotypes.DurationFromProto(defaults.Service.Task.Restart.Delay) - - if defaults.Service.Task.Restart.MaxAttempts != 0 { - defaultFlagValues[flagRestartMaxAttempts] = defaults.Service.Task.Restart.MaxAttempts - } - - defaultRestartWindow, _ := gogotypes.DurationFromProto(defaults.Service.Task.Restart.Window) - if defaultRestartWindow != 0 { - defaultFlagValues[flagRestartWindow] = defaultRestartWindow - } - - defaultFlagValues[flagUpdateParallelism] = defaults.Service.Update.Parallelism - defaultFlagValues[flagUpdateDelay] = defaults.Service.Update.Delay - defaultFlagValues[flagUpdateMonitor], _ = gogotypes.DurationFromProto(defaults.Service.Update.Monitor) - defaultFlagValues[flagUpdateFailureAction] = `"` + strings.ToLower(api.UpdateConfig_FailureAction_name[int32(defaults.Service.Update.FailureAction)]) + `"` - defaultFlagValues[flagUpdateMaxFailureRatio] = defaults.Service.Update.MaxFailureRatio - defaultFlagValues[flagUpdateOrder] = `"` + defaultOrder(defaults.Service.Update.Order) + `"` - - defaultFlagValues[flagRollbackParallelism] = defaults.Service.Rollback.Parallelism - defaultFlagValues[flagRollbackDelay] = defaults.Service.Rollback.Delay - defaultFlagValues[flagRollbackMonitor], _ = gogotypes.DurationFromProto(defaults.Service.Rollback.Monitor) - defaultFlagValues[flagRollbackFailureAction] = `"` + 
strings.ToLower(api.UpdateConfig_FailureAction_name[int32(defaults.Service.Rollback.FailureAction)]) + `"` - defaultFlagValues[flagRollbackMaxFailureRatio] = defaults.Service.Rollback.MaxFailureRatio - defaultFlagValues[flagRollbackOrder] = `"` + defaultOrder(defaults.Service.Rollback.Order) + `"` - - defaultFlagValues[flagEndpointMode] = "vip" - - return defaultFlagValues -} - -// addServiceFlags adds all flags that are common to both `create` and `update`. -// Any flags that are not common are added separately in the individual command -func addServiceFlags(flags *pflag.FlagSet, opts *serviceOptions, defaultFlagValues flagDefaults) { - flagDesc := func(flagName string, desc string) string { - if defaultValue, ok := defaultFlagValues[flagName]; ok { - return fmt.Sprintf("%s (default %v)", desc, defaultValue) - } - return desc - } - - flags.BoolVarP(&opts.detach, "detach", "d", true, "Exit immediately instead of waiting for the service to converge") - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress progress output") - - flags.StringVarP(&opts.workdir, flagWorkdir, "w", "", "Working directory inside the container") - flags.StringVarP(&opts.user, flagUser, "u", "", "Username or UID (format: [:])") - flags.Var(&opts.credentialSpec, flagCredentialSpec, "Credential spec for managed service account (Windows only)") - flags.SetAnnotation(flagCredentialSpec, "version", []string{"1.29"}) - flags.StringVar(&opts.hostname, flagHostname, "", "Container hostname") - flags.SetAnnotation(flagHostname, "version", []string{"1.25"}) - flags.Var(&opts.entrypoint, flagEntrypoint, "Overwrite the default ENTRYPOINT of the image") - - flags.Var(&opts.resources.limitCPU, flagLimitCPU, "Limit CPUs") - flags.Var(&opts.resources.limitMemBytes, flagLimitMemory, "Limit Memory") - flags.Var(&opts.resources.resCPU, flagReserveCPU, "Reserve CPUs") - flags.Var(&opts.resources.resMemBytes, flagReserveMemory, "Reserve Memory") - - flags.Var(&opts.stopGrace, flagStopGracePeriod, 
flagDesc(flagStopGracePeriod, "Time to wait before force killing a container (ns|us|ms|s|m|h)")) - flags.Var(&opts.replicas, flagReplicas, "Number of tasks") - - flags.StringVar(&opts.restartPolicy.condition, flagRestartCondition, "", flagDesc(flagRestartCondition, `Restart when condition is met ("none"|"on-failure"|"any")`)) - flags.Var(&opts.restartPolicy.delay, flagRestartDelay, flagDesc(flagRestartDelay, "Delay between restart attempts (ns|us|ms|s|m|h)")) - flags.Var(&opts.restartPolicy.maxAttempts, flagRestartMaxAttempts, flagDesc(flagRestartMaxAttempts, "Maximum number of restarts before giving up")) - - flags.Var(&opts.restartPolicy.window, flagRestartWindow, flagDesc(flagRestartWindow, "Window used to evaluate the restart policy (ns|us|ms|s|m|h)")) - - flags.Uint64Var(&opts.update.parallelism, flagUpdateParallelism, defaultFlagValues.getUint64(flagUpdateParallelism), "Maximum number of tasks updated simultaneously (0 to update all at once)") - flags.DurationVar(&opts.update.delay, flagUpdateDelay, 0, flagDesc(flagUpdateDelay, "Delay between updates (ns|us|ms|s|m|h)")) - flags.DurationVar(&opts.update.monitor, flagUpdateMonitor, 0, flagDesc(flagUpdateMonitor, "Duration after each task update to monitor for failure (ns|us|ms|s|m|h)")) - flags.SetAnnotation(flagUpdateMonitor, "version", []string{"1.25"}) - flags.StringVar(&opts.update.onFailure, flagUpdateFailureAction, "", flagDesc(flagUpdateFailureAction, `Action on update failure ("pause"|"continue"|"rollback")`)) - flags.Var(&opts.update.maxFailureRatio, flagUpdateMaxFailureRatio, flagDesc(flagUpdateMaxFailureRatio, "Failure rate to tolerate during an update")) - flags.SetAnnotation(flagUpdateMaxFailureRatio, "version", []string{"1.25"}) - flags.StringVar(&opts.update.order, flagUpdateOrder, "", flagDesc(flagUpdateOrder, `Update order ("start-first"|"stop-first")`)) - flags.SetAnnotation(flagUpdateOrder, "version", []string{"1.29"}) - - flags.Uint64Var(&opts.rollback.parallelism, flagRollbackParallelism, 
defaultFlagValues.getUint64(flagRollbackParallelism), "Maximum number of tasks rolled back simultaneously (0 to roll back all at once)") - flags.SetAnnotation(flagRollbackParallelism, "version", []string{"1.28"}) - flags.DurationVar(&opts.rollback.delay, flagRollbackDelay, 0, flagDesc(flagRollbackDelay, "Delay between task rollbacks (ns|us|ms|s|m|h)")) - flags.SetAnnotation(flagRollbackDelay, "version", []string{"1.28"}) - flags.DurationVar(&opts.rollback.monitor, flagRollbackMonitor, 0, flagDesc(flagRollbackMonitor, "Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h)")) - flags.SetAnnotation(flagRollbackMonitor, "version", []string{"1.28"}) - flags.StringVar(&opts.rollback.onFailure, flagRollbackFailureAction, "", flagDesc(flagRollbackFailureAction, `Action on rollback failure ("pause"|"continue")`)) - flags.SetAnnotation(flagRollbackFailureAction, "version", []string{"1.28"}) - flags.Var(&opts.rollback.maxFailureRatio, flagRollbackMaxFailureRatio, flagDesc(flagRollbackMaxFailureRatio, "Failure rate to tolerate during a rollback")) - flags.SetAnnotation(flagRollbackMaxFailureRatio, "version", []string{"1.28"}) - flags.StringVar(&opts.rollback.order, flagRollbackOrder, "", flagDesc(flagRollbackOrder, `Rollback order ("start-first"|"stop-first")`)) - flags.SetAnnotation(flagRollbackOrder, "version", []string{"1.29"}) - - flags.StringVar(&opts.endpoint.mode, flagEndpointMode, defaultFlagValues.getString(flagEndpointMode), "Endpoint mode (vip or dnsrr)") - - flags.BoolVar(&opts.registryAuth, flagRegistryAuth, false, "Send registry authentication details to swarm agents") - - flags.StringVar(&opts.logDriver.name, flagLogDriver, "", "Logging driver for service") - flags.Var(&opts.logDriver.opts, flagLogOpt, "Logging driver options") - - flags.StringVar(&opts.healthcheck.cmd, flagHealthCmd, "", "Command to run to check health") - flags.SetAnnotation(flagHealthCmd, "version", []string{"1.25"}) - flags.Var(&opts.healthcheck.interval, 
flagHealthInterval, "Time between running the check (ns|us|ms|s|m|h)") - flags.SetAnnotation(flagHealthInterval, "version", []string{"1.25"}) - flags.Var(&opts.healthcheck.timeout, flagHealthTimeout, "Maximum time to allow one check to run (ns|us|ms|s|m|h)") - flags.SetAnnotation(flagHealthTimeout, "version", []string{"1.25"}) - flags.IntVar(&opts.healthcheck.retries, flagHealthRetries, 0, "Consecutive failures needed to report unhealthy") - flags.SetAnnotation(flagHealthRetries, "version", []string{"1.25"}) - flags.Var(&opts.healthcheck.startPeriod, flagHealthStartPeriod, "Start period for the container to initialize before counting retries towards unstable (ns|us|ms|s|m|h)") - flags.SetAnnotation(flagHealthStartPeriod, "version", []string{"1.29"}) - flags.BoolVar(&opts.healthcheck.noHealthcheck, flagNoHealthcheck, false, "Disable any container-specified HEALTHCHECK") - flags.SetAnnotation(flagNoHealthcheck, "version", []string{"1.25"}) - - flags.BoolVarP(&opts.tty, flagTTY, "t", false, "Allocate a pseudo-TTY") - flags.SetAnnotation(flagTTY, "version", []string{"1.25"}) - - flags.BoolVar(&opts.readOnly, flagReadOnly, false, "Mount the container's root filesystem as read only") - flags.SetAnnotation(flagReadOnly, "version", []string{"1.28"}) - - flags.StringVar(&opts.stopSignal, flagStopSignal, "", "Signal to stop the container") - flags.SetAnnotation(flagStopSignal, "version", []string{"1.28"}) -} - -const ( - flagCredentialSpec = "credential-spec" - flagPlacementPref = "placement-pref" - flagPlacementPrefAdd = "placement-pref-add" - flagPlacementPrefRemove = "placement-pref-rm" - flagConstraint = "constraint" - flagConstraintRemove = "constraint-rm" - flagConstraintAdd = "constraint-add" - flagContainerLabel = "container-label" - flagContainerLabelRemove = "container-label-rm" - flagContainerLabelAdd = "container-label-add" - flagDNS = "dns" - flagDNSRemove = "dns-rm" - flagDNSAdd = "dns-add" - flagDNSOption = "dns-option" - flagDNSOptionRemove = "dns-option-rm" 
- flagDNSOptionAdd = "dns-option-add" - flagDNSSearch = "dns-search" - flagDNSSearchRemove = "dns-search-rm" - flagDNSSearchAdd = "dns-search-add" - flagEndpointMode = "endpoint-mode" - flagEntrypoint = "entrypoint" - flagHost = "host" - flagHostAdd = "host-add" - flagHostRemove = "host-rm" - flagHostname = "hostname" - flagEnv = "env" - flagEnvFile = "env-file" - flagEnvRemove = "env-rm" - flagEnvAdd = "env-add" - flagGroup = "group" - flagGroupAdd = "group-add" - flagGroupRemove = "group-rm" - flagLabel = "label" - flagLabelRemove = "label-rm" - flagLabelAdd = "label-add" - flagLimitCPU = "limit-cpu" - flagLimitMemory = "limit-memory" - flagMode = "mode" - flagMount = "mount" - flagMountRemove = "mount-rm" - flagMountAdd = "mount-add" - flagName = "name" - flagNetwork = "network" - flagNetworkAdd = "network-add" - flagNetworkRemove = "network-rm" - flagPublish = "publish" - flagPublishRemove = "publish-rm" - flagPublishAdd = "publish-add" - flagReadOnly = "read-only" - flagReplicas = "replicas" - flagReserveCPU = "reserve-cpu" - flagReserveMemory = "reserve-memory" - flagRestartCondition = "restart-condition" - flagRestartDelay = "restart-delay" - flagRestartMaxAttempts = "restart-max-attempts" - flagRestartWindow = "restart-window" - flagRollbackDelay = "rollback-delay" - flagRollbackFailureAction = "rollback-failure-action" - flagRollbackMaxFailureRatio = "rollback-max-failure-ratio" - flagRollbackMonitor = "rollback-monitor" - flagRollbackOrder = "rollback-order" - flagRollbackParallelism = "rollback-parallelism" - flagStopGracePeriod = "stop-grace-period" - flagStopSignal = "stop-signal" - flagTTY = "tty" - flagUpdateDelay = "update-delay" - flagUpdateFailureAction = "update-failure-action" - flagUpdateMaxFailureRatio = "update-max-failure-ratio" - flagUpdateMonitor = "update-monitor" - flagUpdateOrder = "update-order" - flagUpdateParallelism = "update-parallelism" - flagUser = "user" - flagWorkdir = "workdir" - flagRegistryAuth = "with-registry-auth" - 
flagLogDriver = "log-driver" - flagLogOpt = "log-opt" - flagHealthCmd = "health-cmd" - flagHealthInterval = "health-interval" - flagHealthRetries = "health-retries" - flagHealthTimeout = "health-timeout" - flagHealthStartPeriod = "health-start-period" - flagNoHealthcheck = "no-healthcheck" - flagSecret = "secret" - flagSecretAdd = "secret-add" - flagSecretRemove = "secret-rm" -) diff --git a/fn/vendor/github.com/docker/docker/cli/command/service/opts_test.go b/fn/vendor/github.com/docker/docker/cli/command/service/opts_test.go deleted file mode 100644 index 46db5fc83..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/service/opts_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package service - -import ( - "reflect" - "testing" - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestMemBytesString(t *testing.T) { - var mem opts.MemBytes = 1048576 - assert.Equal(t, mem.String(), "1MiB") -} - -func TestMemBytesSetAndValue(t *testing.T) { - var mem opts.MemBytes - assert.NilError(t, mem.Set("5kb")) - assert.Equal(t, mem.Value(), int64(5120)) -} - -func TestNanoCPUsString(t *testing.T) { - var cpus opts.NanoCPUs = 6100000000 - assert.Equal(t, cpus.String(), "6.100") -} - -func TestNanoCPUsSetAndValue(t *testing.T) { - var cpus opts.NanoCPUs - assert.NilError(t, cpus.Set("0.35")) - assert.Equal(t, cpus.Value(), int64(350000000)) -} - -func TestDurationOptString(t *testing.T) { - dur := time.Duration(300 * 10e8) - duration := DurationOpt{value: &dur} - assert.Equal(t, duration.String(), "5m0s") -} - -func TestDurationOptSetAndValue(t *testing.T) { - var duration DurationOpt - assert.NilError(t, duration.Set("300s")) - assert.Equal(t, *duration.Value(), time.Duration(300*10e8)) - assert.NilError(t, duration.Set("-300s")) - assert.Equal(t, *duration.Value(), time.Duration(-300*10e8)) -} - -func TestPositiveDurationOptSetAndValue(t *testing.T) { - var duration 
PositiveDurationOpt - assert.NilError(t, duration.Set("300s")) - assert.Equal(t, *duration.Value(), time.Duration(300*10e8)) - assert.Error(t, duration.Set("-300s"), "cannot be negative") -} - -func TestUint64OptString(t *testing.T) { - value := uint64(2345678) - opt := Uint64Opt{value: &value} - assert.Equal(t, opt.String(), "2345678") - - opt = Uint64Opt{} - assert.Equal(t, opt.String(), "") -} - -func TestUint64OptSetAndValue(t *testing.T) { - var opt Uint64Opt - assert.NilError(t, opt.Set("14445")) - assert.Equal(t, *opt.Value(), uint64(14445)) -} - -func TestHealthCheckOptionsToHealthConfig(t *testing.T) { - dur := time.Second - opt := healthCheckOptions{ - cmd: "curl", - interval: PositiveDurationOpt{DurationOpt{value: &dur}}, - timeout: PositiveDurationOpt{DurationOpt{value: &dur}}, - startPeriod: PositiveDurationOpt{DurationOpt{value: &dur}}, - retries: 10, - } - config, err := opt.toHealthConfig() - assert.NilError(t, err) - assert.Equal(t, reflect.DeepEqual(config, &container.HealthConfig{ - Test: []string{"CMD-SHELL", "curl"}, - Interval: time.Second, - Timeout: time.Second, - StartPeriod: time.Second, - Retries: 10, - }), true) -} - -func TestHealthCheckOptionsToHealthConfigNoHealthcheck(t *testing.T) { - opt := healthCheckOptions{ - noHealthcheck: true, - } - config, err := opt.toHealthConfig() - assert.NilError(t, err) - assert.Equal(t, reflect.DeepEqual(config, &container.HealthConfig{ - Test: []string{"NONE"}, - }), true) -} - -func TestHealthCheckOptionsToHealthConfigConflict(t *testing.T) { - opt := healthCheckOptions{ - cmd: "curl", - noHealthcheck: true, - } - _, err := opt.toHealthConfig() - assert.Error(t, err, "--no-healthcheck conflicts with --health-* options") -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/service/parse.go b/fn/vendor/github.com/docker/docker/cli/command/service/parse.go deleted file mode 100644 index acee08761..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/service/parse.go +++ /dev/null @@ 
-1,59 +0,0 @@ -package service - -import ( - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - swarmtypes "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/client" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -// ParseSecrets retrieves the secrets with the requested names and fills -// secret IDs into the secret references. -func ParseSecrets(client client.SecretAPIClient, requestedSecrets []*swarmtypes.SecretReference) ([]*swarmtypes.SecretReference, error) { - secretRefs := make(map[string]*swarmtypes.SecretReference) - ctx := context.Background() - - for _, secret := range requestedSecrets { - if _, exists := secretRefs[secret.File.Name]; exists { - return nil, errors.Errorf("duplicate secret target for %s not allowed", secret.SecretName) - } - secretRef := new(swarmtypes.SecretReference) - *secretRef = *secret - secretRefs[secret.File.Name] = secretRef - } - - args := filters.NewArgs() - for _, s := range secretRefs { - args.Add("name", s.SecretName) - } - - secrets, err := client.SecretList(ctx, types.SecretListOptions{ - Filters: args, - }) - if err != nil { - return nil, err - } - - foundSecrets := make(map[string]string) - for _, secret := range secrets { - foundSecrets[secret.Spec.Annotations.Name] = secret.ID - } - - addedSecrets := []*swarmtypes.SecretReference{} - - for _, ref := range secretRefs { - id, ok := foundSecrets[ref.SecretName] - if !ok { - return nil, errors.Errorf("secret not found: %s", ref.SecretName) - } - - // set the id for the ref to properly assign in swarm - // since swarm needs the ID instead of the name - ref.SecretID = id - addedSecrets = append(addedSecrets, ref) - } - - return addedSecrets, nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/service/progress/progress.go b/fn/vendor/github.com/docker/docker/cli/command/service/progress/progress.go deleted file mode 100644 index bfeaa314a..000000000 --- 
a/fn/vendor/github.com/docker/docker/cli/command/service/progress/progress.go +++ /dev/null @@ -1,409 +0,0 @@ -package progress - -import ( - "errors" - "fmt" - "io" - "os" - "os/signal" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/client" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/pkg/stringid" - "golang.org/x/net/context" -) - -var ( - numberedStates = map[swarm.TaskState]int64{ - swarm.TaskStateNew: 1, - swarm.TaskStateAllocated: 2, - swarm.TaskStatePending: 3, - swarm.TaskStateAssigned: 4, - swarm.TaskStateAccepted: 5, - swarm.TaskStatePreparing: 6, - swarm.TaskStateReady: 7, - swarm.TaskStateStarting: 8, - swarm.TaskStateRunning: 9, - } - - longestState int -) - -const ( - maxProgress = 9 - maxProgressBars = 20 -) - -type progressUpdater interface { - update(service swarm.Service, tasks []swarm.Task, activeNodes map[string]swarm.Node, rollback bool) (bool, error) -} - -func init() { - for state := range numberedStates { - if len(state) > longestState { - longestState = len(state) - } - } -} - -func stateToProgress(state swarm.TaskState, rollback bool) int64 { - if !rollback { - return numberedStates[state] - } - return int64(len(numberedStates)) - numberedStates[state] -} - -// ServiceProgress outputs progress information for convergence of a service. 
-func ServiceProgress(ctx context.Context, client client.APIClient, serviceID string, progressWriter io.WriteCloser) error { - defer progressWriter.Close() - - progressOut := streamformatter.NewJSONStreamFormatter().NewProgressOutput(progressWriter, false) - - sigint := make(chan os.Signal, 1) - signal.Notify(sigint, os.Interrupt) - defer signal.Stop(sigint) - - taskFilter := filters.NewArgs() - taskFilter.Add("service", serviceID) - taskFilter.Add("_up-to-date", "true") - - getUpToDateTasks := func() ([]swarm.Task, error) { - return client.TaskList(ctx, types.TaskListOptions{Filters: taskFilter}) - } - - var ( - updater progressUpdater - converged bool - convergedAt time.Time - monitor = 5 * time.Second - rollback bool - ) - - for { - service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) - if err != nil { - return err - } - - if service.Spec.UpdateConfig != nil && service.Spec.UpdateConfig.Monitor != 0 { - monitor = service.Spec.UpdateConfig.Monitor - } - - if updater == nil { - updater, err = initializeUpdater(service, progressOut) - if err != nil { - return err - } - } - - if service.UpdateStatus != nil { - switch service.UpdateStatus.State { - case swarm.UpdateStateUpdating: - rollback = false - case swarm.UpdateStateCompleted: - if !converged { - return nil - } - case swarm.UpdateStatePaused: - return fmt.Errorf("service update paused: %s", service.UpdateStatus.Message) - case swarm.UpdateStateRollbackStarted: - if !rollback && service.UpdateStatus.Message != "" { - progressOut.WriteProgress(progress.Progress{ - ID: "rollback", - Action: service.UpdateStatus.Message, - }) - } - rollback = true - case swarm.UpdateStateRollbackPaused: - return fmt.Errorf("service rollback paused: %s", service.UpdateStatus.Message) - case swarm.UpdateStateRollbackCompleted: - if !converged { - return fmt.Errorf("service rolled back: %s", service.UpdateStatus.Message) - } - } - } - if converged && time.Since(convergedAt) >= monitor { - 
return nil - } - - tasks, err := getUpToDateTasks() - if err != nil { - return err - } - - activeNodes, err := getActiveNodes(ctx, client) - if err != nil { - return err - } - - converged, err = updater.update(service, tasks, activeNodes, rollback) - if err != nil { - return err - } - if converged { - if convergedAt.IsZero() { - convergedAt = time.Now() - } - wait := monitor - time.Since(convergedAt) - if wait >= 0 { - progressOut.WriteProgress(progress.Progress{ - // Ideally this would have no ID, but - // the progress rendering code behaves - // poorly on an "action" with no ID. It - // returns the cursor to the beginning - // of the line, so the first character - // may be difficult to read. Then the - // output is overwritten by the shell - // prompt when the command finishes. - ID: "verify", - Action: fmt.Sprintf("Waiting %d seconds to verify that tasks are stable...", wait/time.Second+1), - }) - } - } else { - if !convergedAt.IsZero() { - progressOut.WriteProgress(progress.Progress{ - ID: "verify", - Action: "Detected task failure", - }) - } - convergedAt = time.Time{} - } - - select { - case <-time.After(200 * time.Millisecond): - case <-sigint: - if !converged { - progress.Message(progressOut, "", "Operation continuing in background.") - progress.Messagef(progressOut, "", "Use `docker service ps %s` to check progress.", serviceID) - } - return nil - } - } -} - -func getActiveNodes(ctx context.Context, client client.APIClient) (map[string]swarm.Node, error) { - nodes, err := client.NodeList(ctx, types.NodeListOptions{}) - if err != nil { - return nil, err - } - - activeNodes := make(map[string]swarm.Node) - for _, n := range nodes { - if n.Status.State != swarm.NodeStateDown { - activeNodes[n.ID] = n - } - } - return activeNodes, nil -} - -func initializeUpdater(service swarm.Service, progressOut progress.Output) (progressUpdater, error) { - if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil { - return 
&replicatedProgressUpdater{ - progressOut: progressOut, - }, nil - } - if service.Spec.Mode.Global != nil { - return &globalProgressUpdater{ - progressOut: progressOut, - }, nil - } - return nil, errors.New("unrecognized service mode") -} - -func writeOverallProgress(progressOut progress.Output, numerator, denominator int, rollback bool) { - if rollback { - progressOut.WriteProgress(progress.Progress{ - ID: "overall progress", - Action: fmt.Sprintf("rolling back update: %d out of %d tasks", numerator, denominator), - }) - return - } - progressOut.WriteProgress(progress.Progress{ - ID: "overall progress", - Action: fmt.Sprintf("%d out of %d tasks", numerator, denominator), - }) -} - -type replicatedProgressUpdater struct { - progressOut progress.Output - - // used for maping slots to a contiguous space - // this also causes progress bars to appear in order - slotMap map[int]int - - initialized bool - done bool -} - -func (u *replicatedProgressUpdater) update(service swarm.Service, tasks []swarm.Task, activeNodes map[string]swarm.Node, rollback bool) (bool, error) { - if service.Spec.Mode.Replicated == nil || service.Spec.Mode.Replicated.Replicas == nil { - return false, errors.New("no replica count") - } - replicas := *service.Spec.Mode.Replicated.Replicas - - if !u.initialized { - u.slotMap = make(map[int]int) - - // Draw progress bars in order - writeOverallProgress(u.progressOut, 0, int(replicas), rollback) - - if replicas <= maxProgressBars { - for i := uint64(1); i <= replicas; i++ { - progress.Update(u.progressOut, fmt.Sprintf("%d/%d", i, replicas), " ") - } - } - u.initialized = true - } - - // If there are multiple tasks with the same slot number, favor the one - // with the *lowest* desired state. This can happen in restart - // scenarios. 
- tasksBySlot := make(map[int]swarm.Task) - for _, task := range tasks { - if numberedStates[task.DesiredState] == 0 { - continue - } - if existingTask, ok := tasksBySlot[task.Slot]; ok { - if numberedStates[existingTask.DesiredState] <= numberedStates[task.DesiredState] { - continue - } - } - if _, nodeActive := activeNodes[task.NodeID]; nodeActive { - tasksBySlot[task.Slot] = task - } - } - - // If we had reached a converged state, check if we are still converged. - if u.done { - for _, task := range tasksBySlot { - if task.Status.State != swarm.TaskStateRunning { - u.done = false - break - } - } - } - - running := uint64(0) - - for _, task := range tasksBySlot { - mappedSlot := u.slotMap[task.Slot] - if mappedSlot == 0 { - mappedSlot = len(u.slotMap) + 1 - u.slotMap[task.Slot] = mappedSlot - } - - if !u.done && replicas <= maxProgressBars && uint64(mappedSlot) <= replicas { - u.progressOut.WriteProgress(progress.Progress{ - ID: fmt.Sprintf("%d/%d", mappedSlot, replicas), - Action: fmt.Sprintf("%-[1]*s", longestState, task.Status.State), - Current: stateToProgress(task.Status.State, rollback), - Total: maxProgress, - HideCounts: true, - }) - } - if task.Status.State == swarm.TaskStateRunning { - running++ - } - } - - if !u.done { - writeOverallProgress(u.progressOut, int(running), int(replicas), rollback) - - if running == replicas { - u.done = true - } - } - - return running == replicas, nil -} - -type globalProgressUpdater struct { - progressOut progress.Output - - initialized bool - done bool -} - -func (u *globalProgressUpdater) update(service swarm.Service, tasks []swarm.Task, activeNodes map[string]swarm.Node, rollback bool) (bool, error) { - // If there are multiple tasks with the same node ID, favor the one - // with the *lowest* desired state. This can happen in restart - // scenarios. 
- tasksByNode := make(map[string]swarm.Task) - for _, task := range tasks { - if numberedStates[task.DesiredState] == 0 { - continue - } - if existingTask, ok := tasksByNode[task.NodeID]; ok { - if numberedStates[existingTask.DesiredState] <= numberedStates[task.DesiredState] { - continue - } - } - tasksByNode[task.NodeID] = task - } - - // We don't have perfect knowledge of how many nodes meet the - // constraints for this service. But the orchestrator creates tasks - // for all eligible nodes at the same time, so we should see all those - // nodes represented among the up-to-date tasks. - nodeCount := len(tasksByNode) - - if !u.initialized { - if nodeCount == 0 { - // Two possibilities: either the orchestrator hasn't created - // the tasks yet, or the service doesn't meet constraints for - // any node. Either way, we wait. - u.progressOut.WriteProgress(progress.Progress{ - ID: "overall progress", - Action: "waiting for new tasks", - }) - return false, nil - } - - writeOverallProgress(u.progressOut, 0, nodeCount, rollback) - u.initialized = true - } - - // If we had reached a converged state, check if we are still converged. 
- if u.done { - for _, task := range tasksByNode { - if task.Status.State != swarm.TaskStateRunning { - u.done = false - break - } - } - } - - running := 0 - - for _, task := range tasksByNode { - if node, nodeActive := activeNodes[task.NodeID]; nodeActive { - if !u.done && nodeCount <= maxProgressBars { - u.progressOut.WriteProgress(progress.Progress{ - ID: stringid.TruncateID(node.ID), - Action: fmt.Sprintf("%-[1]*s", longestState, task.Status.State), - Current: stateToProgress(task.Status.State, rollback), - Total: maxProgress, - HideCounts: true, - }) - } - if task.Status.State == swarm.TaskStateRunning { - running++ - } - } - } - - if !u.done { - writeOverallProgress(u.progressOut, running, nodeCount, rollback) - - if running == nodeCount { - u.done = true - } - } - - return running == nodeCount, nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/service/ps.go b/fn/vendor/github.com/docker/docker/cli/command/service/ps.go deleted file mode 100644 index 3a53a545d..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/service/ps.go +++ /dev/null @@ -1,123 +0,0 @@ -package service - -import ( - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - "github.com/docker/docker/cli/command/idresolver" - "github.com/docker/docker/cli/command/node" - "github.com/docker/docker/cli/command/task" - "github.com/docker/docker/opts" - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -type psOptions struct { - services []string - quiet bool - noResolve bool - noTrunc bool - format string - filter opts.FilterOpt -} - -func newPsCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := psOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ps [OPTIONS] SERVICE [SERVICE...]", - Short: "List the tasks of one or more 
services", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.services = args - return runPS(dockerCli, opts) - }, - } - flags := cmd.Flags() - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display task IDs") - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate output") - flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") - flags.StringVar(&opts.format, "format", "", "Pretty-print tasks using a Go template") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func runPS(dockerCli *command.DockerCli, opts psOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - filter := opts.filter.Value() - - serviceIDFilter := filters.NewArgs() - serviceNameFilter := filters.NewArgs() - for _, service := range opts.services { - serviceIDFilter.Add("id", service) - serviceNameFilter.Add("name", service) - } - serviceByIDList, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: serviceIDFilter}) - if err != nil { - return err - } - serviceByNameList, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: serviceNameFilter}) - if err != nil { - return err - } - - for _, service := range opts.services { - serviceCount := 0 - // Lookup by ID/Prefix - for _, serviceEntry := range serviceByIDList { - if strings.HasPrefix(serviceEntry.ID, service) { - filter.Add("service", serviceEntry.ID) - serviceCount++ - } - } - - // Lookup by Name/Prefix - for _, serviceEntry := range serviceByNameList { - if strings.HasPrefix(serviceEntry.Spec.Annotations.Name, service) { - filter.Add("service", serviceEntry.ID) - serviceCount++ - } - } - // If nothing has been found, return immediately. 
- if serviceCount == 0 { - return errors.Errorf("no such services: %s", service) - } - } - - if filter.Include("node") { - nodeFilters := filter.Get("node") - for _, nodeFilter := range nodeFilters { - nodeReference, err := node.Reference(ctx, client, nodeFilter) - if err != nil { - return err - } - filter.Del("node", nodeFilter) - filter.Add("node", nodeReference) - } - } - - tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter}) - if err != nil { - return err - } - - format := opts.format - if len(format) == 0 { - if len(dockerCli.ConfigFile().TasksFormat) > 0 && !opts.quiet { - format = dockerCli.ConfigFile().TasksFormat - } else { - format = formatter.TableFormatKey - } - } - - return task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve), !opts.noTrunc, opts.quiet, format) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/service/remove.go b/fn/vendor/github.com/docker/docker/cli/command/service/remove.go deleted file mode 100644 index a7b010708..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/service/remove.go +++ /dev/null @@ -1,48 +0,0 @@ -package service - -import ( - "fmt" - "strings" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { - - cmd := &cobra.Command{ - Use: "rm SERVICE [SERVICE...]", - Aliases: []string{"remove"}, - Short: "Remove one or more services", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runRemove(dockerCli, args) - }, - } - cmd.Flags() - - return cmd -} - -func runRemove(dockerCli *command.DockerCli, sids []string) error { - client := dockerCli.Client() - - ctx := context.Background() - - var errs []string - for _, sid := range sids { - err := client.ServiceRemove(ctx, sid) - if err != nil { - errs = append(errs, err.Error()) - continue - 
} - fmt.Fprintf(dockerCli.Out(), "%s\n", sid) - } - if len(errs) > 0 { - return errors.Errorf(strings.Join(errs, "\n")) - } - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/service/scale.go b/fn/vendor/github.com/docker/docker/cli/command/service/scale.go deleted file mode 100644 index 98163c87c..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/service/scale.go +++ /dev/null @@ -1,97 +0,0 @@ -package service - -import ( - "fmt" - "strconv" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -func newScaleCommand(dockerCli *command.DockerCli) *cobra.Command { - return &cobra.Command{ - Use: "scale SERVICE=REPLICAS [SERVICE=REPLICAS...]", - Short: "Scale one or multiple replicated services", - Args: scaleArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runScale(dockerCli, args) - }, - } -} - -func scaleArgs(cmd *cobra.Command, args []string) error { - if err := cli.RequiresMinArgs(1)(cmd, args); err != nil { - return err - } - for _, arg := range args { - if parts := strings.SplitN(arg, "=", 2); len(parts) != 2 { - return errors.Errorf( - "Invalid scale specifier '%s'.\nSee '%s --help'.\n\nUsage: %s\n\n%s", - arg, - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) - } - } - return nil -} - -func runScale(dockerCli *command.DockerCli, args []string) error { - var errs []string - for _, arg := range args { - parts := strings.SplitN(arg, "=", 2) - serviceID, scaleStr := parts[0], parts[1] - - // validate input arg scale number - scale, err := strconv.ParseUint(scaleStr, 10, 64) - if err != nil { - errs = append(errs, fmt.Sprintf("%s: invalid replicas value %s: %v", serviceID, scaleStr, err)) - continue - } - - if err := runServiceScale(dockerCli, serviceID, scale); err != nil { - errs = append(errs, fmt.Sprintf("%s: %v", serviceID, err)) - } - 
} - - if len(errs) == 0 { - return nil - } - return errors.Errorf(strings.Join(errs, "\n")) -} - -func runServiceScale(dockerCli *command.DockerCli, serviceID string, scale uint64) error { - client := dockerCli.Client() - ctx := context.Background() - - service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) - if err != nil { - return err - } - - serviceMode := &service.Spec.Mode - if serviceMode.Replicated == nil { - return errors.Errorf("scale can only be used with replicated mode") - } - - serviceMode.Replicated.Replicas = &scale - - response, err := client.ServiceUpdate(ctx, service.ID, service.Version, service.Spec, types.ServiceUpdateOptions{}) - if err != nil { - return err - } - - for _, warning := range response.Warnings { - fmt.Fprintln(dockerCli.Err(), warning) - } - - fmt.Fprintf(dockerCli.Out(), "%s scaled to %d\n", serviceID, scale) - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/service/trust.go b/fn/vendor/github.com/docker/docker/cli/command/service/trust.go deleted file mode 100644 index eba52a9dd..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/service/trust.go +++ /dev/null @@ -1,87 +0,0 @@ -package service - -import ( - "encoding/hex" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/trust" - "github.com/docker/docker/registry" - "github.com/docker/notary/tuf/data" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -func resolveServiceImageDigest(dockerCli *command.DockerCli, service *swarm.ServiceSpec) error { - if !command.IsTrusted() { - // Digests are resolved by the daemon when not using content - // trust. 
- return nil - } - - ref, err := reference.ParseAnyReference(service.TaskTemplate.ContainerSpec.Image) - if err != nil { - return errors.Wrapf(err, "invalid reference %s", service.TaskTemplate.ContainerSpec.Image) - } - - // If reference does not have digest (is not canonical nor image id) - if _, ok := ref.(reference.Digested); !ok { - namedRef, ok := ref.(reference.Named) - if !ok { - return errors.New("failed to resolve image digest using content trust: reference is not named") - } - namedRef = reference.TagNameOnly(namedRef) - taggedRef, ok := namedRef.(reference.NamedTagged) - if !ok { - return errors.New("failed to resolve image digest using content trust: reference is not tagged") - } - - resolvedImage, err := trustedResolveDigest(context.Background(), dockerCli, taggedRef) - if err != nil { - return errors.Wrap(err, "failed to resolve image digest using content trust") - } - resolvedFamiliar := reference.FamiliarString(resolvedImage) - logrus.Debugf("resolved image tag to %s using content trust", resolvedFamiliar) - service.TaskTemplate.ContainerSpec.Image = resolvedFamiliar - } - - return nil -} - -func trustedResolveDigest(ctx context.Context, cli *command.DockerCli, ref reference.NamedTagged) (reference.Canonical, error) { - repoInfo, err := registry.ParseRepositoryInfo(ref) - if err != nil { - return nil, err - } - - authConfig := command.ResolveAuthConfig(ctx, cli, repoInfo.Index) - - notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull") - if err != nil { - return nil, errors.Wrap(err, "error establishing connection to trust repository") - } - - t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) - if err != nil { - return nil, trust.NotaryError(repoInfo.Name.Name(), err) - } - // Only get the tag if it's in the top level targets role or the releases delegation role - // ignore it if it's in any other delegation roles - if t.Role != trust.ReleasesRole && t.Role != 
data.CanonicalTargetsRole { - return nil, trust.NotaryError(repoInfo.Name.Name(), errors.Errorf("No trust data for %s", reference.FamiliarString(ref))) - } - - logrus.Debugf("retrieving target for %s role\n", t.Role) - h, ok := t.Hashes["sha256"] - if !ok { - return nil, errors.New("no valid hash, expecting sha256") - } - - dgst := digest.NewDigestFromHex("sha256", hex.EncodeToString(h)) - - // Allow returning canonical reference with tag - return reference.WithDigest(ref, dgst) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/service/update.go b/fn/vendor/github.com/docker/docker/cli/command/service/update.go deleted file mode 100644 index 233da68ee..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/service/update.go +++ /dev/null @@ -1,1018 +0,0 @@ -package service - -import ( - "fmt" - "sort" - "strings" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - mounttypes "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/client" - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/go-connections/nat" - "github.com/docker/swarmkit/api/defaults" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "golang.org/x/net/context" -) - -func newUpdateCommand(dockerCli *command.DockerCli) *cobra.Command { - serviceOpts := newServiceOptions() - - cmd := &cobra.Command{ - Use: "update [OPTIONS] SERVICE", - Short: "Update a service", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runUpdate(dockerCli, cmd.Flags(), serviceOpts, args[0]) - }, - } - - flags := cmd.Flags() - flags.String("image", "", "Service image tag") - flags.Var(&ShlexOpt{}, "args", "Service command args") - 
flags.Bool("rollback", false, "Rollback to previous specification") - flags.SetAnnotation("rollback", "version", []string{"1.25"}) - flags.Bool("force", false, "Force update even if no changes require it") - flags.SetAnnotation("force", "version", []string{"1.25"}) - addServiceFlags(flags, serviceOpts, nil) - - flags.Var(newListOptsVar(), flagEnvRemove, "Remove an environment variable") - flags.Var(newListOptsVar(), flagGroupRemove, "Remove a previously added supplementary user group from the container") - flags.SetAnnotation(flagGroupRemove, "version", []string{"1.25"}) - flags.Var(newListOptsVar(), flagLabelRemove, "Remove a label by its key") - flags.Var(newListOptsVar(), flagContainerLabelRemove, "Remove a container label by its key") - flags.Var(newListOptsVar(), flagMountRemove, "Remove a mount by its target path") - // flags.Var(newListOptsVar().WithValidator(validatePublishRemove), flagPublishRemove, "Remove a published port by its target port") - flags.Var(&opts.PortOpt{}, flagPublishRemove, "Remove a published port by its target port") - flags.Var(newListOptsVar(), flagConstraintRemove, "Remove a constraint") - flags.Var(newListOptsVar(), flagDNSRemove, "Remove a custom DNS server") - flags.SetAnnotation(flagDNSRemove, "version", []string{"1.25"}) - flags.Var(newListOptsVar(), flagDNSOptionRemove, "Remove a DNS option") - flags.SetAnnotation(flagDNSOptionRemove, "version", []string{"1.25"}) - flags.Var(newListOptsVar(), flagDNSSearchRemove, "Remove a DNS search domain") - flags.SetAnnotation(flagDNSSearchRemove, "version", []string{"1.25"}) - flags.Var(newListOptsVar(), flagHostRemove, "Remove a custom host-to-IP mapping (host:ip)") - flags.SetAnnotation(flagHostRemove, "version", []string{"1.25"}) - flags.Var(&serviceOpts.labels, flagLabelAdd, "Add or update a service label") - flags.Var(&serviceOpts.containerLabels, flagContainerLabelAdd, "Add or update a container label") - flags.Var(&serviceOpts.env, flagEnvAdd, "Add or update an environment 
variable") - flags.Var(newListOptsVar(), flagSecretRemove, "Remove a secret") - flags.SetAnnotation(flagSecretRemove, "version", []string{"1.25"}) - flags.Var(&serviceOpts.secrets, flagSecretAdd, "Add or update a secret on a service") - flags.SetAnnotation(flagSecretAdd, "version", []string{"1.25"}) - flags.Var(&serviceOpts.mounts, flagMountAdd, "Add or update a mount on a service") - flags.Var(&serviceOpts.constraints, flagConstraintAdd, "Add or update a placement constraint") - flags.Var(&serviceOpts.placementPrefs, flagPlacementPrefAdd, "Add a placement preference") - flags.SetAnnotation(flagPlacementPrefAdd, "version", []string{"1.28"}) - flags.Var(&placementPrefOpts{}, flagPlacementPrefRemove, "Remove a placement preference") - flags.SetAnnotation(flagPlacementPrefRemove, "version", []string{"1.28"}) - flags.Var(&serviceOpts.networks, flagNetworkAdd, "Add a network") - flags.SetAnnotation(flagNetworkAdd, "version", []string{"1.29"}) - flags.Var(newListOptsVar(), flagNetworkRemove, "Remove a network") - flags.SetAnnotation(flagNetworkRemove, "version", []string{"1.29"}) - flags.Var(&serviceOpts.endpoint.publishPorts, flagPublishAdd, "Add or update a published port") - flags.Var(&serviceOpts.groups, flagGroupAdd, "Add an additional supplementary user group to the container") - flags.SetAnnotation(flagGroupAdd, "version", []string{"1.25"}) - flags.Var(&serviceOpts.dns, flagDNSAdd, "Add or update a custom DNS server") - flags.SetAnnotation(flagDNSAdd, "version", []string{"1.25"}) - flags.Var(&serviceOpts.dnsOption, flagDNSOptionAdd, "Add or update a DNS option") - flags.SetAnnotation(flagDNSOptionAdd, "version", []string{"1.25"}) - flags.Var(&serviceOpts.dnsSearch, flagDNSSearchAdd, "Add or update a custom DNS search domain") - flags.SetAnnotation(flagDNSSearchAdd, "version", []string{"1.25"}) - flags.Var(&serviceOpts.hosts, flagHostAdd, "Add or update a custom host-to-IP mapping (host:ip)") - flags.SetAnnotation(flagHostAdd, "version", []string{"1.25"}) - - 
return cmd -} - -func newListOptsVar() *opts.ListOpts { - return opts.NewListOptsRef(&[]string{}, nil) -} - -func runUpdate(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts *serviceOptions, serviceID string) error { - apiClient := dockerCli.Client() - ctx := context.Background() - - service, _, err := apiClient.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) - if err != nil { - return err - } - - rollback, err := flags.GetBool("rollback") - if err != nil { - return err - } - - // There are two ways to do user-requested rollback. The old way is - // client-side, but with a sufficiently recent daemon we prefer - // server-side, because it will honor the rollback parameters. - var ( - clientSideRollback bool - serverSideRollback bool - ) - - spec := &service.Spec - if rollback { - // Rollback can't be combined with other flags. - otherFlagsPassed := false - flags.VisitAll(func(f *pflag.Flag) { - if f.Name == "rollback" { - return - } - if flags.Changed(f.Name) { - otherFlagsPassed = true - } - }) - if otherFlagsPassed { - return errors.New("other flags may not be combined with --rollback") - } - - if versions.LessThan(dockerCli.Client().ClientVersion(), "1.28") { - clientSideRollback = true - spec = service.PreviousSpec - if spec == nil { - return errors.Errorf("service does not have a previous specification to roll back to") - } - } else { - serverSideRollback = true - } - } - - updateOpts := types.ServiceUpdateOptions{} - if serverSideRollback { - updateOpts.Rollback = "previous" - } - - err = updateService(ctx, apiClient, flags, spec) - if err != nil { - return err - } - - if flags.Changed("image") { - if err := resolveServiceImageDigest(dockerCli, spec); err != nil { - return err - } - } - - updatedSecrets, err := getUpdatedSecrets(apiClient, flags, spec.TaskTemplate.ContainerSpec.Secrets) - if err != nil { - return err - } - - spec.TaskTemplate.ContainerSpec.Secrets = updatedSecrets - - // only send auth if flag was set - sendAuth, 
err := flags.GetBool(flagRegistryAuth) - if err != nil { - return err - } - if sendAuth { - // Retrieve encoded auth token from the image reference - // This would be the old image if it didn't change in this update - image := spec.TaskTemplate.ContainerSpec.Image - encodedAuth, err := command.RetrieveAuthTokenFromImage(ctx, dockerCli, image) - if err != nil { - return err - } - updateOpts.EncodedRegistryAuth = encodedAuth - } else if clientSideRollback { - updateOpts.RegistryAuthFrom = types.RegistryAuthFromPreviousSpec - } else { - updateOpts.RegistryAuthFrom = types.RegistryAuthFromSpec - } - - response, err := apiClient.ServiceUpdate(ctx, service.ID, service.Version, *spec, updateOpts) - if err != nil { - return err - } - - for _, warning := range response.Warnings { - fmt.Fprintln(dockerCli.Err(), warning) - } - - fmt.Fprintf(dockerCli.Out(), "%s\n", serviceID) - - if opts.detach { - if !flags.Changed("detach") { - fmt.Fprintln(dockerCli.Err(), "Since --detach=false was not specified, tasks will be updated in the background.\n"+ - "In a future release, --detach=false will become the default.") - } - return nil - } - - return waitOnService(ctx, dockerCli, serviceID, opts) -} - -func updateService(ctx context.Context, apiClient client.APIClient, flags *pflag.FlagSet, spec *swarm.ServiceSpec) error { - updateString := func(flag string, field *string) { - if flags.Changed(flag) { - *field, _ = flags.GetString(flag) - } - } - - updateInt64Value := func(flag string, field *int64) { - if flags.Changed(flag) { - *field = flags.Lookup(flag).Value.(int64Value).Value() - } - } - - updateFloatValue := func(flag string, field *float32) { - if flags.Changed(flag) { - *field = flags.Lookup(flag).Value.(*floatValue).Value() - } - } - - updateDuration := func(flag string, field *time.Duration) { - if flags.Changed(flag) { - *field, _ = flags.GetDuration(flag) - } - } - - updateDurationOpt := func(flag string, field **time.Duration) { - if flags.Changed(flag) { - val := 
*flags.Lookup(flag).Value.(*DurationOpt).Value() - *field = &val - } - } - - updateUint64 := func(flag string, field *uint64) { - if flags.Changed(flag) { - *field, _ = flags.GetUint64(flag) - } - } - - updateUint64Opt := func(flag string, field **uint64) { - if flags.Changed(flag) { - val := *flags.Lookup(flag).Value.(*Uint64Opt).Value() - *field = &val - } - } - - cspec := &spec.TaskTemplate.ContainerSpec - task := &spec.TaskTemplate - - taskResources := func() *swarm.ResourceRequirements { - if task.Resources == nil { - task.Resources = &swarm.ResourceRequirements{} - } - return task.Resources - } - - updateLabels(flags, &spec.Labels) - updateContainerLabels(flags, &cspec.Labels) - updateString("image", &cspec.Image) - updateStringToSlice(flags, "args", &cspec.Args) - updateStringToSlice(flags, flagEntrypoint, &cspec.Command) - updateEnvironment(flags, &cspec.Env) - updateString(flagWorkdir, &cspec.Dir) - updateString(flagUser, &cspec.User) - updateString(flagHostname, &cspec.Hostname) - if err := updateMounts(flags, &cspec.Mounts); err != nil { - return err - } - - if flags.Changed(flagLimitCPU) || flags.Changed(flagLimitMemory) { - taskResources().Limits = &swarm.Resources{} - updateInt64Value(flagLimitCPU, &task.Resources.Limits.NanoCPUs) - updateInt64Value(flagLimitMemory, &task.Resources.Limits.MemoryBytes) - } - if flags.Changed(flagReserveCPU) || flags.Changed(flagReserveMemory) { - taskResources().Reservations = &swarm.Resources{} - updateInt64Value(flagReserveCPU, &task.Resources.Reservations.NanoCPUs) - updateInt64Value(flagReserveMemory, &task.Resources.Reservations.MemoryBytes) - } - - updateDurationOpt(flagStopGracePeriod, &cspec.StopGracePeriod) - - if anyChanged(flags, flagRestartCondition, flagRestartDelay, flagRestartMaxAttempts, flagRestartWindow) { - if task.RestartPolicy == nil { - task.RestartPolicy = defaultRestartPolicy() - } - if flags.Changed(flagRestartCondition) { - value, _ := flags.GetString(flagRestartCondition) - 
task.RestartPolicy.Condition = swarm.RestartPolicyCondition(value) - } - updateDurationOpt(flagRestartDelay, &task.RestartPolicy.Delay) - updateUint64Opt(flagRestartMaxAttempts, &task.RestartPolicy.MaxAttempts) - updateDurationOpt(flagRestartWindow, &task.RestartPolicy.Window) - } - - if anyChanged(flags, flagConstraintAdd, flagConstraintRemove) { - if task.Placement == nil { - task.Placement = &swarm.Placement{} - } - updatePlacementConstraints(flags, task.Placement) - } - - if anyChanged(flags, flagPlacementPrefAdd, flagPlacementPrefRemove) { - if task.Placement == nil { - task.Placement = &swarm.Placement{} - } - updatePlacementPreferences(flags, task.Placement) - } - - if anyChanged(flags, flagNetworkAdd, flagNetworkRemove) { - if err := updateNetworks(ctx, apiClient, flags, spec); err != nil { - return err - } - } - - if err := updateReplicas(flags, &spec.Mode); err != nil { - return err - } - - if anyChanged(flags, flagUpdateParallelism, flagUpdateDelay, flagUpdateMonitor, flagUpdateFailureAction, flagUpdateMaxFailureRatio, flagUpdateOrder) { - if spec.UpdateConfig == nil { - spec.UpdateConfig = updateConfigFromDefaults(defaults.Service.Update) - } - updateUint64(flagUpdateParallelism, &spec.UpdateConfig.Parallelism) - updateDuration(flagUpdateDelay, &spec.UpdateConfig.Delay) - updateDuration(flagUpdateMonitor, &spec.UpdateConfig.Monitor) - updateString(flagUpdateFailureAction, &spec.UpdateConfig.FailureAction) - updateFloatValue(flagUpdateMaxFailureRatio, &spec.UpdateConfig.MaxFailureRatio) - updateString(flagUpdateOrder, &spec.UpdateConfig.Order) - } - - if anyChanged(flags, flagRollbackParallelism, flagRollbackDelay, flagRollbackMonitor, flagRollbackFailureAction, flagRollbackMaxFailureRatio, flagRollbackOrder) { - if spec.RollbackConfig == nil { - spec.RollbackConfig = updateConfigFromDefaults(defaults.Service.Rollback) - } - updateUint64(flagRollbackParallelism, &spec.RollbackConfig.Parallelism) - updateDuration(flagRollbackDelay, 
&spec.RollbackConfig.Delay) - updateDuration(flagRollbackMonitor, &spec.RollbackConfig.Monitor) - updateString(flagRollbackFailureAction, &spec.RollbackConfig.FailureAction) - updateFloatValue(flagRollbackMaxFailureRatio, &spec.RollbackConfig.MaxFailureRatio) - updateString(flagRollbackOrder, &spec.RollbackConfig.Order) - } - - if flags.Changed(flagEndpointMode) { - value, _ := flags.GetString(flagEndpointMode) - if spec.EndpointSpec == nil { - spec.EndpointSpec = &swarm.EndpointSpec{} - } - spec.EndpointSpec.Mode = swarm.ResolutionMode(value) - } - - if anyChanged(flags, flagGroupAdd, flagGroupRemove) { - if err := updateGroups(flags, &cspec.Groups); err != nil { - return err - } - } - - if anyChanged(flags, flagPublishAdd, flagPublishRemove) { - if spec.EndpointSpec == nil { - spec.EndpointSpec = &swarm.EndpointSpec{} - } - if err := updatePorts(flags, &spec.EndpointSpec.Ports); err != nil { - return err - } - } - - if anyChanged(flags, flagDNSAdd, flagDNSRemove, flagDNSOptionAdd, flagDNSOptionRemove, flagDNSSearchAdd, flagDNSSearchRemove) { - if cspec.DNSConfig == nil { - cspec.DNSConfig = &swarm.DNSConfig{} - } - if err := updateDNSConfig(flags, &cspec.DNSConfig); err != nil { - return err - } - } - - if anyChanged(flags, flagHostAdd, flagHostRemove) { - if err := updateHosts(flags, &cspec.Hosts); err != nil { - return err - } - } - - if err := updateLogDriver(flags, &spec.TaskTemplate); err != nil { - return err - } - - force, err := flags.GetBool("force") - if err != nil { - return err - } - - if force { - spec.TaskTemplate.ForceUpdate++ - } - - if err := updateHealthcheck(flags, cspec); err != nil { - return err - } - - if flags.Changed(flagTTY) { - tty, err := flags.GetBool(flagTTY) - if err != nil { - return err - } - cspec.TTY = tty - } - - if flags.Changed(flagReadOnly) { - readOnly, err := flags.GetBool(flagReadOnly) - if err != nil { - return err - } - cspec.ReadOnly = readOnly - } - - updateString(flagStopSignal, &cspec.StopSignal) - - return nil -} - 
-func updateStringToSlice(flags *pflag.FlagSet, flag string, field *[]string) { - if !flags.Changed(flag) { - return - } - - *field = flags.Lookup(flag).Value.(*ShlexOpt).Value() -} - -func anyChanged(flags *pflag.FlagSet, fields ...string) bool { - for _, flag := range fields { - if flags.Changed(flag) { - return true - } - } - return false -} - -func updatePlacementConstraints(flags *pflag.FlagSet, placement *swarm.Placement) { - if flags.Changed(flagConstraintAdd) { - values := flags.Lookup(flagConstraintAdd).Value.(*opts.ListOpts).GetAll() - placement.Constraints = append(placement.Constraints, values...) - } - toRemove := buildToRemoveSet(flags, flagConstraintRemove) - - newConstraints := []string{} - for _, constraint := range placement.Constraints { - if _, exists := toRemove[constraint]; !exists { - newConstraints = append(newConstraints, constraint) - } - } - // Sort so that result is predictable. - sort.Strings(newConstraints) - - placement.Constraints = newConstraints -} - -func updatePlacementPreferences(flags *pflag.FlagSet, placement *swarm.Placement) { - var newPrefs []swarm.PlacementPreference - - if flags.Changed(flagPlacementPrefRemove) { - for _, existing := range placement.Preferences { - removed := false - for _, removal := range flags.Lookup(flagPlacementPrefRemove).Value.(*placementPrefOpts).prefs { - if removal.Spread != nil && existing.Spread != nil && removal.Spread.SpreadDescriptor == existing.Spread.SpreadDescriptor { - removed = true - break - } - } - if !removed { - newPrefs = append(newPrefs, existing) - } - } - } else { - newPrefs = placement.Preferences - } - - if flags.Changed(flagPlacementPrefAdd) { - for _, addition := range flags.Lookup(flagPlacementPrefAdd).Value.(*placementPrefOpts).prefs { - newPrefs = append(newPrefs, addition) - } - } - - placement.Preferences = newPrefs -} - -func updateContainerLabels(flags *pflag.FlagSet, field *map[string]string) { - if flags.Changed(flagContainerLabelAdd) { - if *field == nil { - 
*field = map[string]string{} - } - - values := flags.Lookup(flagContainerLabelAdd).Value.(*opts.ListOpts).GetAll() - for key, value := range runconfigopts.ConvertKVStringsToMap(values) { - (*field)[key] = value - } - } - - if *field != nil && flags.Changed(flagContainerLabelRemove) { - toRemove := flags.Lookup(flagContainerLabelRemove).Value.(*opts.ListOpts).GetAll() - for _, label := range toRemove { - delete(*field, label) - } - } -} - -func updateLabels(flags *pflag.FlagSet, field *map[string]string) { - if flags.Changed(flagLabelAdd) { - if *field == nil { - *field = map[string]string{} - } - - values := flags.Lookup(flagLabelAdd).Value.(*opts.ListOpts).GetAll() - for key, value := range runconfigopts.ConvertKVStringsToMap(values) { - (*field)[key] = value - } - } - - if *field != nil && flags.Changed(flagLabelRemove) { - toRemove := flags.Lookup(flagLabelRemove).Value.(*opts.ListOpts).GetAll() - for _, label := range toRemove { - delete(*field, label) - } - } -} - -func updateEnvironment(flags *pflag.FlagSet, field *[]string) { - if flags.Changed(flagEnvAdd) { - envSet := map[string]string{} - for _, v := range *field { - envSet[envKey(v)] = v - } - - value := flags.Lookup(flagEnvAdd).Value.(*opts.ListOpts) - for _, v := range value.GetAll() { - envSet[envKey(v)] = v - } - - *field = []string{} - for _, v := range envSet { - *field = append(*field, v) - } - } - - toRemove := buildToRemoveSet(flags, flagEnvRemove) - *field = removeItems(*field, toRemove, envKey) -} - -func getUpdatedSecrets(apiClient client.SecretAPIClient, flags *pflag.FlagSet, secrets []*swarm.SecretReference) ([]*swarm.SecretReference, error) { - newSecrets := []*swarm.SecretReference{} - - toRemove := buildToRemoveSet(flags, flagSecretRemove) - for _, secret := range secrets { - if _, exists := toRemove[secret.SecretName]; !exists { - newSecrets = append(newSecrets, secret) - } - } - - if flags.Changed(flagSecretAdd) { - values := flags.Lookup(flagSecretAdd).Value.(*opts.SecretOpt).Value() 
- - addSecrets, err := ParseSecrets(apiClient, values) - if err != nil { - return nil, err - } - newSecrets = append(newSecrets, addSecrets...) - } - - return newSecrets, nil -} - -func envKey(value string) string { - kv := strings.SplitN(value, "=", 2) - return kv[0] -} - -func itemKey(value string) string { - return value -} - -func buildToRemoveSet(flags *pflag.FlagSet, flag string) map[string]struct{} { - var empty struct{} - toRemove := make(map[string]struct{}) - - if !flags.Changed(flag) { - return toRemove - } - - toRemoveSlice := flags.Lookup(flag).Value.(*opts.ListOpts).GetAll() - for _, key := range toRemoveSlice { - toRemove[key] = empty - } - return toRemove -} - -func removeItems( - seq []string, - toRemove map[string]struct{}, - keyFunc func(string) string, -) []string { - newSeq := []string{} - for _, item := range seq { - if _, exists := toRemove[keyFunc(item)]; !exists { - newSeq = append(newSeq, item) - } - } - return newSeq -} - -type byMountSource []mounttypes.Mount - -func (m byMountSource) Len() int { return len(m) } -func (m byMountSource) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (m byMountSource) Less(i, j int) bool { - a, b := m[i], m[j] - - if a.Source == b.Source { - return a.Target < b.Target - } - - return a.Source < b.Source -} - -func updateMounts(flags *pflag.FlagSet, mounts *[]mounttypes.Mount) error { - mountsByTarget := map[string]mounttypes.Mount{} - - if flags.Changed(flagMountAdd) { - values := flags.Lookup(flagMountAdd).Value.(*opts.MountOpt).Value() - for _, mount := range values { - if _, ok := mountsByTarget[mount.Target]; ok { - return errors.Errorf("duplicate mount target") - } - mountsByTarget[mount.Target] = mount - } - } - - // Add old list of mount points minus updated one. 
- for _, mount := range *mounts { - if _, ok := mountsByTarget[mount.Target]; !ok { - mountsByTarget[mount.Target] = mount - } - } - - newMounts := []mounttypes.Mount{} - - toRemove := buildToRemoveSet(flags, flagMountRemove) - - for _, mount := range mountsByTarget { - if _, exists := toRemove[mount.Target]; !exists { - newMounts = append(newMounts, mount) - } - } - sort.Sort(byMountSource(newMounts)) - *mounts = newMounts - return nil -} - -func updateGroups(flags *pflag.FlagSet, groups *[]string) error { - if flags.Changed(flagGroupAdd) { - values := flags.Lookup(flagGroupAdd).Value.(*opts.ListOpts).GetAll() - *groups = append(*groups, values...) - } - toRemove := buildToRemoveSet(flags, flagGroupRemove) - - newGroups := []string{} - for _, group := range *groups { - if _, exists := toRemove[group]; !exists { - newGroups = append(newGroups, group) - } - } - // Sort so that result is predictable. - sort.Strings(newGroups) - - *groups = newGroups - return nil -} - -func removeDuplicates(entries []string) []string { - hit := map[string]bool{} - newEntries := []string{} - for _, v := range entries { - if !hit[v] { - newEntries = append(newEntries, v) - hit[v] = true - } - } - return newEntries -} - -func updateDNSConfig(flags *pflag.FlagSet, config **swarm.DNSConfig) error { - newConfig := &swarm.DNSConfig{} - - nameservers := (*config).Nameservers - if flags.Changed(flagDNSAdd) { - values := flags.Lookup(flagDNSAdd).Value.(*opts.ListOpts).GetAll() - nameservers = append(nameservers, values...) - } - nameservers = removeDuplicates(nameservers) - toRemove := buildToRemoveSet(flags, flagDNSRemove) - for _, nameserver := range nameservers { - if _, exists := toRemove[nameserver]; !exists { - newConfig.Nameservers = append(newConfig.Nameservers, nameserver) - - } - } - // Sort so that result is predictable. 
- sort.Strings(newConfig.Nameservers) - - search := (*config).Search - if flags.Changed(flagDNSSearchAdd) { - values := flags.Lookup(flagDNSSearchAdd).Value.(*opts.ListOpts).GetAll() - search = append(search, values...) - } - search = removeDuplicates(search) - toRemove = buildToRemoveSet(flags, flagDNSSearchRemove) - for _, entry := range search { - if _, exists := toRemove[entry]; !exists { - newConfig.Search = append(newConfig.Search, entry) - } - } - // Sort so that result is predictable. - sort.Strings(newConfig.Search) - - options := (*config).Options - if flags.Changed(flagDNSOptionAdd) { - values := flags.Lookup(flagDNSOptionAdd).Value.(*opts.ListOpts).GetAll() - options = append(options, values...) - } - options = removeDuplicates(options) - toRemove = buildToRemoveSet(flags, flagDNSOptionRemove) - for _, option := range options { - if _, exists := toRemove[option]; !exists { - newConfig.Options = append(newConfig.Options, option) - } - } - // Sort so that result is predictable. 
- sort.Strings(newConfig.Options) - - *config = newConfig - return nil -} - -type byPortConfig []swarm.PortConfig - -func (r byPortConfig) Len() int { return len(r) } -func (r byPortConfig) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r byPortConfig) Less(i, j int) bool { - // We convert PortConfig into `port/protocol`, e.g., `80/tcp` - // In updatePorts we already filter out with map so there is duplicate entries - return portConfigToString(&r[i]) < portConfigToString(&r[j]) -} - -func portConfigToString(portConfig *swarm.PortConfig) string { - protocol := portConfig.Protocol - mode := portConfig.PublishMode - return fmt.Sprintf("%v:%v/%s/%s", portConfig.PublishedPort, portConfig.TargetPort, protocol, mode) -} - -func updatePorts(flags *pflag.FlagSet, portConfig *[]swarm.PortConfig) error { - // The key of the map is `port/protocol`, e.g., `80/tcp` - portSet := map[string]swarm.PortConfig{} - - // Build the current list of portConfig - for _, entry := range *portConfig { - if _, ok := portSet[portConfigToString(&entry)]; !ok { - portSet[portConfigToString(&entry)] = entry - } - } - - newPorts := []swarm.PortConfig{} - - // Clean current ports - toRemove := flags.Lookup(flagPublishRemove).Value.(*opts.PortOpt).Value() -portLoop: - for _, port := range portSet { - for _, pConfig := range toRemove { - if equalProtocol(port.Protocol, pConfig.Protocol) && - port.TargetPort == pConfig.TargetPort && - equalPublishMode(port.PublishMode, pConfig.PublishMode) { - continue portLoop - } - } - - newPorts = append(newPorts, port) - } - - // Check to see if there are any conflict in flags. 
- if flags.Changed(flagPublishAdd) { - ports := flags.Lookup(flagPublishAdd).Value.(*opts.PortOpt).Value() - - for _, port := range ports { - if _, ok := portSet[portConfigToString(&port)]; ok { - continue - } - //portSet[portConfigToString(&port)] = port - newPorts = append(newPorts, port) - } - } - - // Sort the PortConfig to avoid unnecessary updates - sort.Sort(byPortConfig(newPorts)) - *portConfig = newPorts - return nil -} - -func equalProtocol(prot1, prot2 swarm.PortConfigProtocol) bool { - return prot1 == prot2 || - (prot1 == swarm.PortConfigProtocol("") && prot2 == swarm.PortConfigProtocolTCP) || - (prot2 == swarm.PortConfigProtocol("") && prot1 == swarm.PortConfigProtocolTCP) -} - -func equalPublishMode(mode1, mode2 swarm.PortConfigPublishMode) bool { - return mode1 == mode2 || - (mode1 == swarm.PortConfigPublishMode("") && mode2 == swarm.PortConfigPublishModeIngress) || - (mode2 == swarm.PortConfigPublishMode("") && mode1 == swarm.PortConfigPublishModeIngress) -} - -func equalPort(targetPort nat.Port, port swarm.PortConfig) bool { - return (string(port.Protocol) == targetPort.Proto() && - port.TargetPort == uint32(targetPort.Int())) -} - -func updateReplicas(flags *pflag.FlagSet, serviceMode *swarm.ServiceMode) error { - if !flags.Changed(flagReplicas) { - return nil - } - - if serviceMode == nil || serviceMode.Replicated == nil { - return errors.Errorf("replicas can only be used with replicated mode") - } - serviceMode.Replicated.Replicas = flags.Lookup(flagReplicas).Value.(*Uint64Opt).Value() - return nil -} - -func updateHosts(flags *pflag.FlagSet, hosts *[]string) error { - // Combine existing Hosts (in swarmkit format) with the host to add (convert to swarmkit format) - if flags.Changed(flagHostAdd) { - values := convertExtraHostsToSwarmHosts(flags.Lookup(flagHostAdd).Value.(*opts.ListOpts).GetAll()) - *hosts = append(*hosts, values...) 
- } - // Remove duplicate - *hosts = removeDuplicates(*hosts) - - keysToRemove := make(map[string]struct{}) - if flags.Changed(flagHostRemove) { - var empty struct{} - extraHostsToRemove := flags.Lookup(flagHostRemove).Value.(*opts.ListOpts).GetAll() - for _, entry := range extraHostsToRemove { - key := strings.SplitN(entry, ":", 2)[0] - keysToRemove[key] = empty - } - } - - newHosts := []string{} - for _, entry := range *hosts { - // Since this is in swarmkit format, we need to find the key, which is canonical_hostname of: - // IP_address canonical_hostname [aliases...] - parts := strings.Fields(entry) - if len(parts) > 1 { - key := parts[1] - if _, exists := keysToRemove[key]; !exists { - newHosts = append(newHosts, entry) - } - } else { - newHosts = append(newHosts, entry) - } - } - - // Sort so that result is predictable. - sort.Strings(newHosts) - - *hosts = newHosts - return nil -} - -// updateLogDriver updates the log driver only if the log driver flag is set. -// All options will be replaced with those provided on the command line. 
-func updateLogDriver(flags *pflag.FlagSet, taskTemplate *swarm.TaskSpec) error { - if !flags.Changed(flagLogDriver) { - return nil - } - - name, err := flags.GetString(flagLogDriver) - if err != nil { - return err - } - - if name == "" { - return nil - } - - taskTemplate.LogDriver = &swarm.Driver{ - Name: name, - Options: runconfigopts.ConvertKVStringsToMap(flags.Lookup(flagLogOpt).Value.(*opts.ListOpts).GetAll()), - } - - return nil -} - -func updateHealthcheck(flags *pflag.FlagSet, containerSpec *swarm.ContainerSpec) error { - if !anyChanged(flags, flagNoHealthcheck, flagHealthCmd, flagHealthInterval, flagHealthRetries, flagHealthTimeout, flagHealthStartPeriod) { - return nil - } - if containerSpec.Healthcheck == nil { - containerSpec.Healthcheck = &container.HealthConfig{} - } - noHealthcheck, err := flags.GetBool(flagNoHealthcheck) - if err != nil { - return err - } - if noHealthcheck { - if !anyChanged(flags, flagHealthCmd, flagHealthInterval, flagHealthRetries, flagHealthTimeout, flagHealthStartPeriod) { - containerSpec.Healthcheck = &container.HealthConfig{ - Test: []string{"NONE"}, - } - return nil - } - return errors.Errorf("--%s conflicts with --health-* options", flagNoHealthcheck) - } - if len(containerSpec.Healthcheck.Test) > 0 && containerSpec.Healthcheck.Test[0] == "NONE" { - containerSpec.Healthcheck.Test = nil - } - if flags.Changed(flagHealthInterval) { - val := *flags.Lookup(flagHealthInterval).Value.(*PositiveDurationOpt).Value() - containerSpec.Healthcheck.Interval = val - } - if flags.Changed(flagHealthTimeout) { - val := *flags.Lookup(flagHealthTimeout).Value.(*PositiveDurationOpt).Value() - containerSpec.Healthcheck.Timeout = val - } - if flags.Changed(flagHealthStartPeriod) { - val := *flags.Lookup(flagHealthStartPeriod).Value.(*PositiveDurationOpt).Value() - containerSpec.Healthcheck.StartPeriod = val - } - if flags.Changed(flagHealthRetries) { - containerSpec.Healthcheck.Retries, _ = flags.GetInt(flagHealthRetries) - } - if 
flags.Changed(flagHealthCmd) { - cmd, _ := flags.GetString(flagHealthCmd) - if cmd != "" { - containerSpec.Healthcheck.Test = []string{"CMD-SHELL", cmd} - } else { - containerSpec.Healthcheck.Test = nil - } - } - return nil -} - -type byNetworkTarget []swarm.NetworkAttachmentConfig - -func (m byNetworkTarget) Len() int { return len(m) } -func (m byNetworkTarget) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (m byNetworkTarget) Less(i, j int) bool { - return m[i].Target < m[j].Target -} - -func updateNetworks(ctx context.Context, apiClient client.NetworkAPIClient, flags *pflag.FlagSet, spec *swarm.ServiceSpec) error { - // spec.TaskTemplate.Networks takes precedence over the deprecated - // spec.Networks field. If spec.Network is in use, we'll migrate those - // values to spec.TaskTemplate.Networks. - specNetworks := spec.TaskTemplate.Networks - if len(specNetworks) == 0 { - specNetworks = spec.Networks - } - spec.Networks = nil - - toRemove := buildToRemoveSet(flags, flagNetworkRemove) - idsToRemove := make(map[string]struct{}) - for networkIDOrName := range toRemove { - network, err := apiClient.NetworkInspect(ctx, networkIDOrName, false) - if err != nil { - return err - } - idsToRemove[network.ID] = struct{}{} - } - - existingNetworks := make(map[string]struct{}) - var newNetworks []swarm.NetworkAttachmentConfig - for _, network := range specNetworks { - if _, exists := idsToRemove[network.Target]; exists { - continue - } - - newNetworks = append(newNetworks, network) - existingNetworks[network.Target] = struct{}{} - } - - if flags.Changed(flagNetworkAdd) { - values := flags.Lookup(flagNetworkAdd).Value.(*opts.ListOpts).GetAll() - networks, err := convertNetworks(ctx, apiClient, values) - if err != nil { - return err - } - for _, network := range networks { - if _, exists := existingNetworks[network.Target]; exists { - return errors.Errorf("service is already attached to network %s", network.Target) - } - newNetworks = append(newNetworks, network) - 
existingNetworks[network.Target] = struct{}{} - } - } - - sort.Sort(byNetworkTarget(newNetworks)) - - spec.TaskTemplate.Networks = newNetworks - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/service/update_test.go b/fn/vendor/github.com/docker/docker/cli/command/service/update_test.go deleted file mode 100644 index 090372fb7..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/service/update_test.go +++ /dev/null @@ -1,496 +0,0 @@ -package service - -import ( - "reflect" - "sort" - "testing" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - mounttypes "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/pkg/testutil/assert" - "golang.org/x/net/context" -) - -func TestUpdateServiceArgs(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("args", "the \"new args\"") - - spec := &swarm.ServiceSpec{} - cspec := &spec.TaskTemplate.ContainerSpec - cspec.Args = []string{"old", "args"} - - updateService(nil, nil, flags, spec) - assert.EqualStringSlice(t, cspec.Args, []string{"the", "new args"}) -} - -func TestUpdateLabels(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("label-add", "toadd=newlabel") - flags.Set("label-rm", "toremove") - - labels := map[string]string{ - "toremove": "thelabeltoremove", - "tokeep": "value", - } - - updateLabels(flags, &labels) - assert.Equal(t, len(labels), 2) - assert.Equal(t, labels["tokeep"], "value") - assert.Equal(t, labels["toadd"], "newlabel") -} - -func TestUpdateLabelsRemoveALabelThatDoesNotExist(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("label-rm", "dne") - - labels := map[string]string{"foo": "theoldlabel"} - updateLabels(flags, &labels) - assert.Equal(t, len(labels), 1) -} - -func TestUpdatePlacementConstraints(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("constraint-add", "node=toadd") - 
flags.Set("constraint-rm", "node!=toremove") - - placement := &swarm.Placement{ - Constraints: []string{"node!=toremove", "container=tokeep"}, - } - - updatePlacementConstraints(flags, placement) - assert.Equal(t, len(placement.Constraints), 2) - assert.Equal(t, placement.Constraints[0], "container=tokeep") - assert.Equal(t, placement.Constraints[1], "node=toadd") -} - -func TestUpdatePlacementPrefs(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("placement-pref-add", "spread=node.labels.dc") - flags.Set("placement-pref-rm", "spread=node.labels.rack") - - placement := &swarm.Placement{ - Preferences: []swarm.PlacementPreference{ - { - Spread: &swarm.SpreadOver{ - SpreadDescriptor: "node.labels.rack", - }, - }, - { - Spread: &swarm.SpreadOver{ - SpreadDescriptor: "node.labels.row", - }, - }, - }, - } - - updatePlacementPreferences(flags, placement) - assert.Equal(t, len(placement.Preferences), 2) - assert.Equal(t, placement.Preferences[0].Spread.SpreadDescriptor, "node.labels.row") - assert.Equal(t, placement.Preferences[1].Spread.SpreadDescriptor, "node.labels.dc") -} - -func TestUpdateEnvironment(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("env-add", "toadd=newenv") - flags.Set("env-rm", "toremove") - - envs := []string{"toremove=theenvtoremove", "tokeep=value"} - - updateEnvironment(flags, &envs) - assert.Equal(t, len(envs), 2) - // Order has been removed in updateEnvironment (map) - sort.Strings(envs) - assert.Equal(t, envs[0], "toadd=newenv") - assert.Equal(t, envs[1], "tokeep=value") -} - -func TestUpdateEnvironmentWithDuplicateValues(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("env-add", "foo=newenv") - flags.Set("env-add", "foo=dupe") - flags.Set("env-rm", "foo") - - envs := []string{"foo=value"} - - updateEnvironment(flags, &envs) - assert.Equal(t, len(envs), 0) -} - -func TestUpdateEnvironmentWithDuplicateKeys(t *testing.T) { - // Test case for #25404 - flags := 
newUpdateCommand(nil).Flags() - flags.Set("env-add", "A=b") - - envs := []string{"A=c"} - - updateEnvironment(flags, &envs) - assert.Equal(t, len(envs), 1) - assert.Equal(t, envs[0], "A=b") -} - -func TestUpdateGroups(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("group-add", "wheel") - flags.Set("group-add", "docker") - flags.Set("group-rm", "root") - flags.Set("group-add", "foo") - flags.Set("group-rm", "docker") - - groups := []string{"bar", "root"} - - updateGroups(flags, &groups) - assert.Equal(t, len(groups), 3) - assert.Equal(t, groups[0], "bar") - assert.Equal(t, groups[1], "foo") - assert.Equal(t, groups[2], "wheel") -} - -func TestUpdateDNSConfig(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - - // IPv4, with duplicates - flags.Set("dns-add", "1.1.1.1") - flags.Set("dns-add", "1.1.1.1") - flags.Set("dns-add", "2.2.2.2") - flags.Set("dns-rm", "3.3.3.3") - flags.Set("dns-rm", "2.2.2.2") - // IPv6 - flags.Set("dns-add", "2001:db8:abc8::1") - // Invalid dns record - assert.Error(t, flags.Set("dns-add", "x.y.z.w"), "x.y.z.w is not an ip address") - - // domains with duplicates - flags.Set("dns-search-add", "example.com") - flags.Set("dns-search-add", "example.com") - flags.Set("dns-search-add", "example.org") - flags.Set("dns-search-rm", "example.org") - // Invalid dns search domain - assert.Error(t, flags.Set("dns-search-add", "example$com"), "example$com is not a valid domain") - - flags.Set("dns-option-add", "ndots:9") - flags.Set("dns-option-rm", "timeout:3") - - config := &swarm.DNSConfig{ - Nameservers: []string{"3.3.3.3", "5.5.5.5"}, - Search: []string{"localdomain"}, - Options: []string{"timeout:3"}, - } - - updateDNSConfig(flags, &config) - - assert.Equal(t, len(config.Nameservers), 3) - assert.Equal(t, config.Nameservers[0], "1.1.1.1") - assert.Equal(t, config.Nameservers[1], "2001:db8:abc8::1") - assert.Equal(t, config.Nameservers[2], "5.5.5.5") - - assert.Equal(t, len(config.Search), 2) - assert.Equal(t, 
config.Search[0], "example.com") - assert.Equal(t, config.Search[1], "localdomain") - - assert.Equal(t, len(config.Options), 1) - assert.Equal(t, config.Options[0], "ndots:9") -} - -func TestUpdateMounts(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("mount-add", "type=volume,source=vol2,target=/toadd") - flags.Set("mount-rm", "/toremove") - - mounts := []mounttypes.Mount{ - {Target: "/toremove", Source: "vol1", Type: mounttypes.TypeBind}, - {Target: "/tokeep", Source: "vol3", Type: mounttypes.TypeBind}, - } - - updateMounts(flags, &mounts) - assert.Equal(t, len(mounts), 2) - assert.Equal(t, mounts[0].Target, "/toadd") - assert.Equal(t, mounts[1].Target, "/tokeep") - -} - -func TestUpdateMountsWithDuplicateMounts(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("mount-add", "type=volume,source=vol4,target=/toadd") - - mounts := []mounttypes.Mount{ - {Target: "/tokeep1", Source: "vol1", Type: mounttypes.TypeBind}, - {Target: "/toadd", Source: "vol2", Type: mounttypes.TypeBind}, - {Target: "/tokeep2", Source: "vol3", Type: mounttypes.TypeBind}, - } - - updateMounts(flags, &mounts) - assert.Equal(t, len(mounts), 3) - assert.Equal(t, mounts[0].Target, "/tokeep1") - assert.Equal(t, mounts[1].Target, "/tokeep2") - assert.Equal(t, mounts[2].Target, "/toadd") -} - -func TestUpdatePorts(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("publish-add", "1000:1000") - flags.Set("publish-rm", "333/udp") - - portConfigs := []swarm.PortConfig{ - {TargetPort: 333, Protocol: swarm.PortConfigProtocolUDP}, - {TargetPort: 555}, - } - - err := updatePorts(flags, &portConfigs) - assert.Equal(t, err, nil) - assert.Equal(t, len(portConfigs), 2) - // Do a sort to have the order (might have changed by map) - targetPorts := []int{int(portConfigs[0].TargetPort), int(portConfigs[1].TargetPort)} - sort.Ints(targetPorts) - assert.Equal(t, targetPorts[0], 555) - assert.Equal(t, targetPorts[1], 1000) -} - -func TestUpdatePortsDuplicate(t 
*testing.T) { - // Test case for #25375 - flags := newUpdateCommand(nil).Flags() - flags.Set("publish-add", "80:80") - - portConfigs := []swarm.PortConfig{ - { - TargetPort: 80, - PublishedPort: 80, - Protocol: swarm.PortConfigProtocolTCP, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - } - - err := updatePorts(flags, &portConfigs) - assert.Equal(t, err, nil) - assert.Equal(t, len(portConfigs), 1) - assert.Equal(t, portConfigs[0].TargetPort, uint32(80)) -} - -func TestUpdateHealthcheckTable(t *testing.T) { - type test struct { - flags [][2]string - initial *container.HealthConfig - expected *container.HealthConfig - err string - } - testCases := []test{ - { - flags: [][2]string{{"no-healthcheck", "true"}}, - initial: &container.HealthConfig{Test: []string{"CMD-SHELL", "cmd1"}, Retries: 10}, - expected: &container.HealthConfig{Test: []string{"NONE"}}, - }, - { - flags: [][2]string{{"health-cmd", "cmd1"}}, - initial: &container.HealthConfig{Test: []string{"NONE"}}, - expected: &container.HealthConfig{Test: []string{"CMD-SHELL", "cmd1"}}, - }, - { - flags: [][2]string{{"health-retries", "10"}}, - initial: &container.HealthConfig{Test: []string{"NONE"}}, - expected: &container.HealthConfig{Retries: 10}, - }, - { - flags: [][2]string{{"health-retries", "10"}}, - initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}}, - expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10}, - }, - { - flags: [][2]string{{"health-interval", "1m"}}, - initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}}, - expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Interval: time.Minute}, - }, - { - flags: [][2]string{{"health-cmd", ""}}, - initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10}, - expected: &container.HealthConfig{Retries: 10}, - }, - { - flags: [][2]string{{"health-retries", "0"}}, - initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10}, - expected: 
&container.HealthConfig{Test: []string{"CMD", "cmd1"}}, - }, - { - flags: [][2]string{{"health-start-period", "1m"}}, - initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}}, - expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, StartPeriod: time.Minute}, - }, - { - flags: [][2]string{{"health-cmd", "cmd1"}, {"no-healthcheck", "true"}}, - err: "--no-healthcheck conflicts with --health-* options", - }, - { - flags: [][2]string{{"health-interval", "10m"}, {"no-healthcheck", "true"}}, - err: "--no-healthcheck conflicts with --health-* options", - }, - { - flags: [][2]string{{"health-timeout", "1m"}, {"no-healthcheck", "true"}}, - err: "--no-healthcheck conflicts with --health-* options", - }, - } - for i, c := range testCases { - flags := newUpdateCommand(nil).Flags() - for _, flag := range c.flags { - flags.Set(flag[0], flag[1]) - } - cspec := &swarm.ContainerSpec{ - Healthcheck: c.initial, - } - err := updateHealthcheck(flags, cspec) - if c.err != "" { - assert.Error(t, err, c.err) - } else { - assert.NilError(t, err) - if !reflect.DeepEqual(cspec.Healthcheck, c.expected) { - t.Errorf("incorrect result for test %d, expected health config:\n\t%#v\ngot:\n\t%#v", i, c.expected, cspec.Healthcheck) - } - } - } -} - -func TestUpdateHosts(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("host-add", "example.net:2.2.2.2") - flags.Set("host-add", "ipv6.net:2001:db8:abc8::1") - // remove with ipv6 should work - flags.Set("host-rm", "example.net:2001:db8:abc8::1") - // just hostname should work as well - flags.Set("host-rm", "example.net") - // bad format error - assert.Error(t, flags.Set("host-add", "$example.com$"), "bad format for add-host:") - - hosts := []string{"1.2.3.4 example.com", "4.3.2.1 example.org", "2001:db8:abc8::1 example.net"} - - updateHosts(flags, &hosts) - assert.Equal(t, len(hosts), 3) - assert.Equal(t, hosts[0], "1.2.3.4 example.com") - assert.Equal(t, hosts[1], "2001:db8:abc8::1 ipv6.net") - assert.Equal(t, 
hosts[2], "4.3.2.1 example.org") -} - -func TestUpdatePortsRmWithProtocol(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("publish-add", "8081:81") - flags.Set("publish-add", "8082:82") - flags.Set("publish-rm", "80") - flags.Set("publish-rm", "81/tcp") - flags.Set("publish-rm", "82/udp") - - portConfigs := []swarm.PortConfig{ - { - TargetPort: 80, - PublishedPort: 8080, - Protocol: swarm.PortConfigProtocolTCP, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - } - - err := updatePorts(flags, &portConfigs) - assert.Equal(t, err, nil) - assert.Equal(t, len(portConfigs), 2) - assert.Equal(t, portConfigs[0].TargetPort, uint32(81)) - assert.Equal(t, portConfigs[1].TargetPort, uint32(82)) -} - -type secretAPIClientMock struct { - listResult []swarm.Secret -} - -func (s secretAPIClientMock) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { - return s.listResult, nil -} -func (s secretAPIClientMock) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { - return types.SecretCreateResponse{}, nil -} -func (s secretAPIClientMock) SecretRemove(ctx context.Context, id string) error { - return nil -} -func (s secretAPIClientMock) SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) { - return swarm.Secret{}, []byte{}, nil -} -func (s secretAPIClientMock) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { - return nil -} - -// TestUpdateSecretUpdateInPlace tests the ability to update the "target" of an secret with "docker service update" -// by combining "--secret-rm" and "--secret-add" for the same secret. 
-func TestUpdateSecretUpdateInPlace(t *testing.T) { - apiClient := secretAPIClientMock{ - listResult: []swarm.Secret{ - { - ID: "tn9qiblgnuuut11eufquw5dev", - Spec: swarm.SecretSpec{Annotations: swarm.Annotations{Name: "foo"}}, - }, - }, - } - - flags := newUpdateCommand(nil).Flags() - flags.Set("secret-add", "source=foo,target=foo2") - flags.Set("secret-rm", "foo") - - secrets := []*swarm.SecretReference{ - { - File: &swarm.SecretReferenceFileTarget{ - Name: "foo", - UID: "0", - GID: "0", - Mode: 292, - }, - SecretID: "tn9qiblgnuuut11eufquw5dev", - SecretName: "foo", - }, - } - - updatedSecrets, err := getUpdatedSecrets(apiClient, flags, secrets) - - assert.Equal(t, err, nil) - assert.Equal(t, len(updatedSecrets), 1) - assert.Equal(t, updatedSecrets[0].SecretID, "tn9qiblgnuuut11eufquw5dev") - assert.Equal(t, updatedSecrets[0].SecretName, "foo") - assert.Equal(t, updatedSecrets[0].File.Name, "foo2") -} - -func TestUpdateReadOnly(t *testing.T) { - spec := &swarm.ServiceSpec{} - cspec := &spec.TaskTemplate.ContainerSpec - - // Update with --read-only=true, changed to true - flags := newUpdateCommand(nil).Flags() - flags.Set("read-only", "true") - updateService(nil, nil, flags, spec) - assert.Equal(t, cspec.ReadOnly, true) - - // Update without --read-only, no change - flags = newUpdateCommand(nil).Flags() - updateService(nil, nil, flags, spec) - assert.Equal(t, cspec.ReadOnly, true) - - // Update with --read-only=false, changed to false - flags = newUpdateCommand(nil).Flags() - flags.Set("read-only", "false") - updateService(nil, nil, flags, spec) - assert.Equal(t, cspec.ReadOnly, false) -} - -func TestUpdateStopSignal(t *testing.T) { - spec := &swarm.ServiceSpec{} - cspec := &spec.TaskTemplate.ContainerSpec - - // Update with --stop-signal=SIGUSR1 - flags := newUpdateCommand(nil).Flags() - flags.Set("stop-signal", "SIGUSR1") - updateService(nil, nil, flags, spec) - assert.Equal(t, cspec.StopSignal, "SIGUSR1") - - // Update without --stop-signal, no change - flags = 
newUpdateCommand(nil).Flags() - updateService(nil, nil, flags, spec) - assert.Equal(t, cspec.StopSignal, "SIGUSR1") - - // Update with --stop-signal=SIGWINCH - flags = newUpdateCommand(nil).Flags() - flags.Set("stop-signal", "SIGWINCH") - updateService(nil, nil, flags, spec) - assert.Equal(t, cspec.StopSignal, "SIGWINCH") -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/stack/client_test.go b/fn/vendor/github.com/docker/docker/cli/command/stack/client_test.go deleted file mode 100644 index 0cd8612b6..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/stack/client_test.go +++ /dev/null @@ -1,153 +0,0 @@ -package stack - -import ( - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/compose/convert" - "github.com/docker/docker/client" - "golang.org/x/net/context" -) - -type fakeClient struct { - client.Client - - services []string - networks []string - secrets []string - - removedServices []string - removedNetworks []string - removedSecrets []string - - serviceListFunc func(options types.ServiceListOptions) ([]swarm.Service, error) - networkListFunc func(options types.NetworkListOptions) ([]types.NetworkResource, error) - secretListFunc func(options types.SecretListOptions) ([]swarm.Secret, error) - serviceRemoveFunc func(serviceID string) error - networkRemoveFunc func(networkID string) error - secretRemoveFunc func(secretID string) error -} - -func (cli *fakeClient) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { - if cli.serviceListFunc != nil { - return cli.serviceListFunc(options) - } - - namespace := namespaceFromFilters(options.Filters) - servicesList := []swarm.Service{} - for _, name := range cli.services { - if belongToNamespace(name, namespace) { - servicesList = append(servicesList, serviceFromName(name)) - } - } - return servicesList, nil -} - -func (cli 
*fakeClient) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { - if cli.networkListFunc != nil { - return cli.networkListFunc(options) - } - - namespace := namespaceFromFilters(options.Filters) - networksList := []types.NetworkResource{} - for _, name := range cli.networks { - if belongToNamespace(name, namespace) { - networksList = append(networksList, networkFromName(name)) - } - } - return networksList, nil -} - -func (cli *fakeClient) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { - if cli.secretListFunc != nil { - return cli.secretListFunc(options) - } - - namespace := namespaceFromFilters(options.Filters) - secretsList := []swarm.Secret{} - for _, name := range cli.secrets { - if belongToNamespace(name, namespace) { - secretsList = append(secretsList, secretFromName(name)) - } - } - return secretsList, nil -} - -func (cli *fakeClient) ServiceRemove(ctx context.Context, serviceID string) error { - if cli.serviceRemoveFunc != nil { - return cli.serviceRemoveFunc(serviceID) - } - - cli.removedServices = append(cli.removedServices, serviceID) - return nil -} - -func (cli *fakeClient) NetworkRemove(ctx context.Context, networkID string) error { - if cli.networkRemoveFunc != nil { - return cli.networkRemoveFunc(networkID) - } - - cli.removedNetworks = append(cli.removedNetworks, networkID) - return nil -} - -func (cli *fakeClient) SecretRemove(ctx context.Context, secretID string) error { - if cli.secretRemoveFunc != nil { - return cli.secretRemoveFunc(secretID) - } - - cli.removedSecrets = append(cli.removedSecrets, secretID) - return nil -} - -func serviceFromName(name string) swarm.Service { - return swarm.Service{ - ID: "ID-" + name, - Spec: swarm.ServiceSpec{ - Annotations: swarm.Annotations{Name: name}, - }, - } -} - -func networkFromName(name string) types.NetworkResource { - return types.NetworkResource{ - ID: "ID-" + name, - Name: name, - } -} - -func 
secretFromName(name string) swarm.Secret { - return swarm.Secret{ - ID: "ID-" + name, - Spec: swarm.SecretSpec{ - Annotations: swarm.Annotations{Name: name}, - }, - } -} - -func namespaceFromFilters(filters filters.Args) string { - label := filters.Get("label")[0] - return strings.TrimPrefix(label, convert.LabelNamespace+"=") -} - -func belongToNamespace(id, namespace string) bool { - return strings.HasPrefix(id, namespace+"_") -} - -func objectName(namespace, name string) string { - return namespace + "_" + name -} - -func objectID(name string) string { - return "ID-" + name -} - -func buildObjectIDs(objectNames []string) []string { - IDs := make([]string, len(objectNames)) - for i, name := range objectNames { - IDs[i] = objectID(name) - } - return IDs -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/stack/cmd.go b/fn/vendor/github.com/docker/docker/cli/command/stack/cmd.go deleted file mode 100644 index 860bfedd1..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/stack/cmd.go +++ /dev/null @@ -1,35 +0,0 @@ -package stack - -import ( - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -// NewStackCommand returns a cobra command for `stack` subcommands -func NewStackCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "stack", - Short: "Manage Docker stacks", - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - Tags: map[string]string{"version": "1.25"}, - } - cmd.AddCommand( - newDeployCommand(dockerCli), - newListCommand(dockerCli), - newRemoveCommand(dockerCli), - newServicesCommand(dockerCli), - newPsCommand(dockerCli), - ) - return cmd -} - -// NewTopLevelDeployCommand returns a command for `docker deploy` -func NewTopLevelDeployCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := newDeployCommand(dockerCli) - // Remove the aliases at the top level - cmd.Aliases = []string{} - cmd.Tags = map[string]string{"experimental": "", "version": 
"1.25"} - return cmd -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/stack/common.go b/fn/vendor/github.com/docker/docker/cli/command/stack/common.go deleted file mode 100644 index 72719f94f..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/stack/common.go +++ /dev/null @@ -1,60 +0,0 @@ -package stack - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/compose/convert" - "github.com/docker/docker/client" - "github.com/docker/docker/opts" -) - -func getStackFilter(namespace string) filters.Args { - filter := filters.NewArgs() - filter.Add("label", convert.LabelNamespace+"="+namespace) - return filter -} - -func getStackFilterFromOpt(namespace string, opt opts.FilterOpt) filters.Args { - filter := opt.Value() - filter.Add("label", convert.LabelNamespace+"="+namespace) - return filter -} - -func getAllStacksFilter() filters.Args { - filter := filters.NewArgs() - filter.Add("label", convert.LabelNamespace) - return filter -} - -func getServices( - ctx context.Context, - apiclient client.APIClient, - namespace string, -) ([]swarm.Service, error) { - return apiclient.ServiceList( - ctx, - types.ServiceListOptions{Filters: getStackFilter(namespace)}) -} - -func getStackNetworks( - ctx context.Context, - apiclient client.APIClient, - namespace string, -) ([]types.NetworkResource, error) { - return apiclient.NetworkList( - ctx, - types.NetworkListOptions{Filters: getStackFilter(namespace)}) -} - -func getStackSecrets( - ctx context.Context, - apiclient client.APIClient, - namespace string, -) ([]swarm.Secret, error) { - return apiclient.SecretList( - ctx, - types.SecretListOptions{Filters: getStackFilter(namespace)}) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/stack/deploy.go b/fn/vendor/github.com/docker/docker/cli/command/stack/deploy.go deleted file mode 100644 index 
678917170..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/stack/deploy.go +++ /dev/null @@ -1,97 +0,0 @@ -package stack - -import ( - "fmt" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/compose/convert" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -const ( - defaultNetworkDriver = "overlay" -) - -type deployOptions struct { - bundlefile string - composefile string - namespace string - sendRegistryAuth bool - prune bool -} - -func newDeployCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts deployOptions - - cmd := &cobra.Command{ - Use: "deploy [OPTIONS] STACK", - Aliases: []string{"up"}, - Short: "Deploy a new stack or update an existing stack", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.namespace = args[0] - return runDeploy(dockerCli, opts) - }, - } - - flags := cmd.Flags() - addBundlefileFlag(&opts.bundlefile, flags) - addComposefileFlag(&opts.composefile, flags) - addRegistryAuthFlag(&opts.sendRegistryAuth, flags) - flags.BoolVar(&opts.prune, "prune", false, "Prune services that are no longer referenced") - flags.SetAnnotation("prune", "version", []string{"1.27"}) - return cmd -} - -func runDeploy(dockerCli *command.DockerCli, opts deployOptions) error { - ctx := context.Background() - - switch { - case opts.bundlefile == "" && opts.composefile == "": - return errors.Errorf("Please specify either a bundle file (with --bundle-file) or a Compose file (with --compose-file).") - case opts.bundlefile != "" && opts.composefile != "": - return errors.Errorf("You cannot specify both a bundle file and a Compose file.") - case opts.bundlefile != "": - return deployBundle(ctx, dockerCli, opts) - default: - return deployCompose(ctx, dockerCli, opts) - } -} - -// checkDaemonIsSwarmManager does an Info API call to verify that the daemon is -// a swarm 
manager. This is necessary because we must create networks before we -// create services, but the API call for creating a network does not return a -// proper status code when it can't create a network in the "global" scope. -func checkDaemonIsSwarmManager(ctx context.Context, dockerCli *command.DockerCli) error { - info, err := dockerCli.Client().Info(ctx) - if err != nil { - return err - } - if !info.Swarm.ControlAvailable { - return errors.New("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.") - } - return nil -} - -// pruneServices removes services that are no longer referenced in the source -func pruneServices(ctx context.Context, dockerCli command.Cli, namespace convert.Namespace, services map[string]struct{}) bool { - client := dockerCli.Client() - - oldServices, err := getServices(ctx, client, namespace.Name()) - if err != nil { - fmt.Fprintf(dockerCli.Err(), "Failed to list services: %s", err) - return true - } - - pruneServices := []swarm.Service{} - for _, service := range oldServices { - if _, exists := services[namespace.Descope(service.Spec.Name)]; !exists { - pruneServices = append(pruneServices, service) - } - } - return removeServices(ctx, dockerCli, pruneServices) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/stack/deploy_bundlefile.go b/fn/vendor/github.com/docker/docker/cli/command/stack/deploy_bundlefile.go deleted file mode 100644 index 0f8f8d040..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/stack/deploy_bundlefile.go +++ /dev/null @@ -1,91 +0,0 @@ -package stack - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/compose/convert" -) - -func deployBundle(ctx context.Context, dockerCli *command.DockerCli, opts deployOptions) error { - bundle, err := loadBundlefile(dockerCli.Err(), 
opts.namespace, opts.bundlefile) - if err != nil { - return err - } - - if err := checkDaemonIsSwarmManager(ctx, dockerCli); err != nil { - return err - } - - namespace := convert.NewNamespace(opts.namespace) - - if opts.prune { - services := map[string]struct{}{} - for service := range bundle.Services { - services[service] = struct{}{} - } - pruneServices(ctx, dockerCli, namespace, services) - } - - networks := make(map[string]types.NetworkCreate) - for _, service := range bundle.Services { - for _, networkName := range service.Networks { - networks[networkName] = types.NetworkCreate{ - Labels: convert.AddStackLabel(namespace, nil), - } - } - } - - services := make(map[string]swarm.ServiceSpec) - for internalName, service := range bundle.Services { - name := namespace.Scope(internalName) - - var ports []swarm.PortConfig - for _, portSpec := range service.Ports { - ports = append(ports, swarm.PortConfig{ - Protocol: swarm.PortConfigProtocol(portSpec.Protocol), - TargetPort: portSpec.Port, - }) - } - - nets := []swarm.NetworkAttachmentConfig{} - for _, networkName := range service.Networks { - nets = append(nets, swarm.NetworkAttachmentConfig{ - Target: namespace.Scope(networkName), - Aliases: []string{internalName}, - }) - } - - serviceSpec := swarm.ServiceSpec{ - Annotations: swarm.Annotations{ - Name: name, - Labels: convert.AddStackLabel(namespace, service.Labels), - }, - TaskTemplate: swarm.TaskSpec{ - ContainerSpec: swarm.ContainerSpec{ - Image: service.Image, - Command: service.Command, - Args: service.Args, - Env: service.Env, - // Service Labels will not be copied to Containers - // automatically during the deployment so we apply - // it here. 
- Labels: convert.AddStackLabel(namespace, nil), - }, - }, - EndpointSpec: &swarm.EndpointSpec{ - Ports: ports, - }, - Networks: nets, - } - - services[internalName] = serviceSpec - } - - if err := createNetworks(ctx, dockerCli, namespace, networks); err != nil { - return err - } - return deployServices(ctx, dockerCli, services, namespace, opts.sendRegistryAuth) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/stack/deploy_composefile.go b/fn/vendor/github.com/docker/docker/cli/command/stack/deploy_composefile.go deleted file mode 100644 index 10963d184..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/stack/deploy_composefile.go +++ /dev/null @@ -1,315 +0,0 @@ -package stack - -import ( - "fmt" - "io/ioutil" - "os" - "sort" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/compose/convert" - "github.com/docker/docker/cli/compose/loader" - composetypes "github.com/docker/docker/cli/compose/types" - apiclient "github.com/docker/docker/client" - dockerclient "github.com/docker/docker/client" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -func deployCompose(ctx context.Context, dockerCli *command.DockerCli, opts deployOptions) error { - configDetails, err := getConfigDetails(opts) - if err != nil { - return err - } - - config, err := loader.Load(configDetails) - if err != nil { - if fpe, ok := err.(*loader.ForbiddenPropertiesError); ok { - return errors.Errorf("Compose file contains unsupported options:\n\n%s\n", - propertyWarnings(fpe.Properties)) - } - - return err - } - - unsupportedProperties := loader.GetUnsupportedProperties(configDetails) - if len(unsupportedProperties) > 0 { - fmt.Fprintf(dockerCli.Err(), "Ignoring unsupported options: %s\n\n", - strings.Join(unsupportedProperties, ", ")) - } - - deprecatedProperties := loader.GetDeprecatedProperties(configDetails) - if len(deprecatedProperties) > 0 
{ - fmt.Fprintf(dockerCli.Err(), "Ignoring deprecated options:\n\n%s\n\n", - propertyWarnings(deprecatedProperties)) - } - - if err := checkDaemonIsSwarmManager(ctx, dockerCli); err != nil { - return err - } - - namespace := convert.NewNamespace(opts.namespace) - - if opts.prune { - services := map[string]struct{}{} - for _, service := range config.Services { - services[service.Name] = struct{}{} - } - pruneServices(ctx, dockerCli, namespace, services) - } - - serviceNetworks := getServicesDeclaredNetworks(config.Services) - networks, externalNetworks := convert.Networks(namespace, config.Networks, serviceNetworks) - if err := validateExternalNetworks(ctx, dockerCli, externalNetworks); err != nil { - return err - } - if err := createNetworks(ctx, dockerCli, namespace, networks); err != nil { - return err - } - - secrets, err := convert.Secrets(namespace, config.Secrets) - if err != nil { - return err - } - if err := createSecrets(ctx, dockerCli, namespace, secrets); err != nil { - return err - } - - services, err := convert.Services(namespace, config, dockerCli.Client()) - if err != nil { - return err - } - return deployServices(ctx, dockerCli, services, namespace, opts.sendRegistryAuth) -} - -func getServicesDeclaredNetworks(serviceConfigs []composetypes.ServiceConfig) map[string]struct{} { - serviceNetworks := map[string]struct{}{} - for _, serviceConfig := range serviceConfigs { - if len(serviceConfig.Networks) == 0 { - serviceNetworks["default"] = struct{}{} - continue - } - for network := range serviceConfig.Networks { - serviceNetworks[network] = struct{}{} - } - } - return serviceNetworks -} - -func propertyWarnings(properties map[string]string) string { - var msgs []string - for name, description := range properties { - msgs = append(msgs, fmt.Sprintf("%s: %s", name, description)) - } - sort.Strings(msgs) - return strings.Join(msgs, "\n\n") -} - -func getConfigDetails(opts deployOptions) (composetypes.ConfigDetails, error) { - var details 
composetypes.ConfigDetails - var err error - - details.WorkingDir, err = os.Getwd() - if err != nil { - return details, err - } - - configFile, err := getConfigFile(opts.composefile) - if err != nil { - return details, err - } - // TODO: support multiple files - details.ConfigFiles = []composetypes.ConfigFile{*configFile} - details.Environment, err = buildEnvironment(os.Environ()) - if err != nil { - return details, err - } - return details, nil -} - -func buildEnvironment(env []string) (map[string]string, error) { - result := make(map[string]string, len(env)) - for _, s := range env { - // if value is empty, s is like "K=", not "K". - if !strings.Contains(s, "=") { - return result, errors.Errorf("unexpected environment %q", s) - } - kv := strings.SplitN(s, "=", 2) - result[kv[0]] = kv[1] - } - return result, nil -} - -func getConfigFile(filename string) (*composetypes.ConfigFile, error) { - bytes, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - config, err := loader.ParseYAML(bytes) - if err != nil { - return nil, err - } - return &composetypes.ConfigFile{ - Filename: filename, - Config: config, - }, nil -} - -func validateExternalNetworks( - ctx context.Context, - dockerCli *command.DockerCli, - externalNetworks []string) error { - client := dockerCli.Client() - - for _, networkName := range externalNetworks { - network, err := client.NetworkInspect(ctx, networkName, false) - if err != nil { - if dockerclient.IsErrNetworkNotFound(err) { - return errors.Errorf("network %q is declared as external, but could not be found. 
You need to create the network before the stack is deployed (with overlay driver)", networkName) - } - return err - } - if network.Scope != "swarm" { - return errors.Errorf("network %q is declared as external, but it is not in the right scope: %q instead of %q", networkName, network.Scope, "swarm") - } - } - - return nil -} - -func createSecrets( - ctx context.Context, - dockerCli *command.DockerCli, - namespace convert.Namespace, - secrets []swarm.SecretSpec, -) error { - client := dockerCli.Client() - - for _, secretSpec := range secrets { - secret, _, err := client.SecretInspectWithRaw(ctx, secretSpec.Name) - if err == nil { - // secret already exists, then we update that - if err := client.SecretUpdate(ctx, secret.ID, secret.Meta.Version, secretSpec); err != nil { - return err - } - } else if apiclient.IsErrSecretNotFound(err) { - // secret does not exist, then we create a new one. - if _, err := client.SecretCreate(ctx, secretSpec); err != nil { - return err - } - } else { - return err - } - } - return nil -} - -func createNetworks( - ctx context.Context, - dockerCli *command.DockerCli, - namespace convert.Namespace, - networks map[string]types.NetworkCreate, -) error { - client := dockerCli.Client() - - existingNetworks, err := getStackNetworks(ctx, client, namespace.Name()) - if err != nil { - return err - } - - existingNetworkMap := make(map[string]types.NetworkResource) - for _, network := range existingNetworks { - existingNetworkMap[network.Name] = network - } - - for internalName, createOpts := range networks { - name := namespace.Scope(internalName) - if _, exists := existingNetworkMap[name]; exists { - continue - } - - if createOpts.Driver == "" { - createOpts.Driver = defaultNetworkDriver - } - - fmt.Fprintf(dockerCli.Out(), "Creating network %s\n", name) - if _, err := client.NetworkCreate(ctx, name, createOpts); err != nil { - return err - } - } - - return nil -} - -func deployServices( - ctx context.Context, - dockerCli *command.DockerCli, - 
services map[string]swarm.ServiceSpec, - namespace convert.Namespace, - sendAuth bool, -) error { - apiClient := dockerCli.Client() - out := dockerCli.Out() - - existingServices, err := getServices(ctx, apiClient, namespace.Name()) - if err != nil { - return err - } - - existingServiceMap := make(map[string]swarm.Service) - for _, service := range existingServices { - existingServiceMap[service.Spec.Name] = service - } - - for internalName, serviceSpec := range services { - name := namespace.Scope(internalName) - - encodedAuth := "" - if sendAuth { - // Retrieve encoded auth token from the image reference - image := serviceSpec.TaskTemplate.ContainerSpec.Image - encodedAuth, err = command.RetrieveAuthTokenFromImage(ctx, dockerCli, image) - if err != nil { - return err - } - } - - if service, exists := existingServiceMap[name]; exists { - fmt.Fprintf(out, "Updating service %s (id: %s)\n", name, service.ID) - - updateOpts := types.ServiceUpdateOptions{} - if sendAuth { - updateOpts.EncodedRegistryAuth = encodedAuth - } - response, err := apiClient.ServiceUpdate( - ctx, - service.ID, - service.Version, - serviceSpec, - updateOpts, - ) - if err != nil { - return err - } - - for _, warning := range response.Warnings { - fmt.Fprintln(dockerCli.Err(), warning) - } - } else { - fmt.Fprintf(out, "Creating service %s\n", name) - - createOpts := types.ServiceCreateOptions{} - if sendAuth { - createOpts.EncodedRegistryAuth = encodedAuth - } - if _, err := apiClient.ServiceCreate(ctx, serviceSpec, createOpts); err != nil { - return err - } - } - } - - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/stack/deploy_test.go b/fn/vendor/github.com/docker/docker/cli/command/stack/deploy_test.go deleted file mode 100644 index 328222af5..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/stack/deploy_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package stack - -import ( - "bytes" - "testing" - - "github.com/docker/docker/cli/compose/convert" - 
"github.com/docker/docker/cli/internal/test" - "github.com/docker/docker/pkg/testutil/assert" - "golang.org/x/net/context" -) - -func TestPruneServices(t *testing.T) { - ctx := context.Background() - namespace := convert.NewNamespace("foo") - services := map[string]struct{}{ - "new": {}, - "keep": {}, - } - client := &fakeClient{services: []string{objectName("foo", "keep"), objectName("foo", "remove")}} - dockerCli := test.NewFakeCli(client, &bytes.Buffer{}) - dockerCli.SetErr(&bytes.Buffer{}) - - pruneServices(ctx, dockerCli, namespace, services) - - assert.DeepEqual(t, client.removedServices, buildObjectIDs([]string{objectName("foo", "remove")})) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/stack/list.go b/fn/vendor/github.com/docker/docker/cli/command/stack/list.go deleted file mode 100644 index f27d5009e..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/stack/list.go +++ /dev/null @@ -1,122 +0,0 @@ -package stack - -import ( - "fmt" - "io" - "sort" - "strconv" - "text/tabwriter" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/compose/convert" - "github.com/docker/docker/client" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -const ( - listItemFmt = "%s\t%s\n" -) - -type listOptions struct { -} - -func newListCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := listOptions{} - - cmd := &cobra.Command{ - Use: "ls", - Aliases: []string{"list"}, - Short: "List stacks", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runList(dockerCli, opts) - }, - } - - return cmd -} - -func runList(dockerCli *command.DockerCli, opts listOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - stacks, err := getStacks(ctx, client) - if err != nil { - return err - } - - out := dockerCli.Out() - printTable(out, stacks) - return nil -} - -type 
byName []*stack - -func (n byName) Len() int { return len(n) } -func (n byName) Swap(i, j int) { n[i], n[j] = n[j], n[i] } -func (n byName) Less(i, j int) bool { return n[i].Name < n[j].Name } - -func printTable(out io.Writer, stacks []*stack) { - writer := tabwriter.NewWriter(out, 0, 4, 2, ' ', 0) - - // Ignore flushing errors - defer writer.Flush() - - sort.Sort(byName(stacks)) - - fmt.Fprintf(writer, listItemFmt, "NAME", "SERVICES") - for _, stack := range stacks { - fmt.Fprintf( - writer, - listItemFmt, - stack.Name, - strconv.Itoa(stack.Services), - ) - } -} - -type stack struct { - // Name is the name of the stack - Name string - // Services is the number of the services - Services int -} - -func getStacks( - ctx context.Context, - apiclient client.APIClient, -) ([]*stack, error) { - services, err := apiclient.ServiceList( - ctx, - types.ServiceListOptions{Filters: getAllStacksFilter()}) - if err != nil { - return nil, err - } - m := make(map[string]*stack, 0) - for _, service := range services { - labels := service.Spec.Labels - name, ok := labels[convert.LabelNamespace] - if !ok { - return nil, errors.Errorf("cannot get label %s for service %s", - convert.LabelNamespace, service.ID) - } - ztack, ok := m[name] - if !ok { - m[name] = &stack{ - Name: name, - Services: 1, - } - } else { - ztack.Services++ - } - } - var stacks []*stack - for _, stack := range m { - stacks = append(stacks, stack) - } - return stacks, nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/stack/opts.go b/fn/vendor/github.com/docker/docker/cli/command/stack/opts.go deleted file mode 100644 index 0d7214e96..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/stack/opts.go +++ /dev/null @@ -1,51 +0,0 @@ -package stack - -import ( - "fmt" - "io" - "os" - - "github.com/docker/docker/cli/command/bundlefile" - "github.com/pkg/errors" - "github.com/spf13/pflag" -) - -func addComposefileFlag(opt *string, flags *pflag.FlagSet) { - flags.StringVarP(opt, "compose-file", 
"c", "", "Path to a Compose file") - flags.SetAnnotation("compose-file", "version", []string{"1.25"}) -} - -func addBundlefileFlag(opt *string, flags *pflag.FlagSet) { - flags.StringVar(opt, "bundle-file", "", "Path to a Distributed Application Bundle file") - flags.SetAnnotation("bundle-file", "experimental", nil) -} - -func addRegistryAuthFlag(opt *bool, flags *pflag.FlagSet) { - flags.BoolVar(opt, "with-registry-auth", false, "Send registry authentication details to Swarm agents") -} - -func loadBundlefile(stderr io.Writer, namespace string, path string) (*bundlefile.Bundlefile, error) { - defaultPath := fmt.Sprintf("%s.dab", namespace) - - if path == "" { - path = defaultPath - } - if _, err := os.Stat(path); err != nil { - return nil, errors.Errorf( - "Bundle %s not found. Specify the path with --file", - path) - } - - fmt.Fprintf(stderr, "Loading bundle from %s\n", path) - reader, err := os.Open(path) - if err != nil { - return nil, err - } - defer reader.Close() - - bundle, err := bundlefile.LoadFile(reader) - if err != nil { - return nil, errors.Errorf("Error reading %s: %v\n", path, err) - } - return bundle, err -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/stack/ps.go b/fn/vendor/github.com/docker/docker/cli/command/stack/ps.go deleted file mode 100644 index bac5307bd..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/stack/ps.go +++ /dev/null @@ -1,76 +0,0 @@ -package stack - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - "github.com/docker/docker/cli/command/idresolver" - "github.com/docker/docker/cli/command/task" - "github.com/docker/docker/opts" - "github.com/spf13/cobra" -) - -type psOptions struct { - filter opts.FilterOpt - noTrunc bool - namespace string - noResolve bool - quiet bool - format string -} - -func newPsCommand(dockerCli 
*command.DockerCli) *cobra.Command { - opts := psOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ps [OPTIONS] STACK", - Short: "List the tasks in the stack", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.namespace = args[0] - return runPS(dockerCli, opts) - }, - } - flags := cmd.Flags() - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate output") - flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display task IDs") - flags.StringVar(&opts.format, "format", "", "Pretty-print tasks using a Go template") - - return cmd -} - -func runPS(dockerCli *command.DockerCli, opts psOptions) error { - namespace := opts.namespace - client := dockerCli.Client() - ctx := context.Background() - - filter := getStackFilterFromOpt(opts.namespace, opts.filter) - - tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter}) - if err != nil { - return err - } - - if len(tasks) == 0 { - fmt.Fprintf(dockerCli.Out(), "Nothing found in stack: %s\n", namespace) - return nil - } - - format := opts.format - if len(format) == 0 { - if len(dockerCli.ConfigFile().TasksFormat) > 0 && !opts.quiet { - format = dockerCli.ConfigFile().TasksFormat - } else { - format = formatter.TableFormatKey - } - } - - return task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve), !opts.noTrunc, opts.quiet, format) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/stack/remove.go b/fn/vendor/github.com/docker/docker/cli/command/stack/remove.go deleted file mode 100644 index 7df4e4c0e..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/stack/remove.go +++ /dev/null @@ -1,121 +0,0 @@ -package stack - -import ( - "fmt" - "strings" - - "github.com/docker/docker/api/types" - 
"github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type removeOptions struct { - namespaces []string -} - -func newRemoveCommand(dockerCli command.Cli) *cobra.Command { - var opts removeOptions - - cmd := &cobra.Command{ - Use: "rm STACK [STACK...]", - Aliases: []string{"remove", "down"}, - Short: "Remove one or more stacks", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.namespaces = args - return runRemove(dockerCli, opts) - }, - } - return cmd -} - -func runRemove(dockerCli command.Cli, opts removeOptions) error { - namespaces := opts.namespaces - client := dockerCli.Client() - ctx := context.Background() - - var errs []string - for _, namespace := range namespaces { - services, err := getServices(ctx, client, namespace) - if err != nil { - return err - } - - networks, err := getStackNetworks(ctx, client, namespace) - if err != nil { - return err - } - - secrets, err := getStackSecrets(ctx, client, namespace) - if err != nil { - return err - } - - if len(services)+len(networks)+len(secrets) == 0 { - fmt.Fprintf(dockerCli.Out(), "Nothing found in stack: %s\n", namespace) - continue - } - - hasError := removeServices(ctx, dockerCli, services) - hasError = removeSecrets(ctx, dockerCli, secrets) || hasError - hasError = removeNetworks(ctx, dockerCli, networks) || hasError - - if hasError { - errs = append(errs, fmt.Sprintf("Failed to remove some resources from stack: %s", namespace)) - } - } - - if len(errs) > 0 { - return errors.Errorf(strings.Join(errs, "\n")) - } - return nil -} - -func removeServices( - ctx context.Context, - dockerCli command.Cli, - services []swarm.Service, -) bool { - var err error - for _, service := range services { - fmt.Fprintf(dockerCli.Err(), "Removing service %s\n", service.Spec.Name) - if err = dockerCli.Client().ServiceRemove(ctx, 
service.ID); err != nil { - fmt.Fprintf(dockerCli.Err(), "Failed to remove service %s: %s", service.ID, err) - } - } - return err != nil -} - -func removeNetworks( - ctx context.Context, - dockerCli command.Cli, - networks []types.NetworkResource, -) bool { - var err error - for _, network := range networks { - fmt.Fprintf(dockerCli.Err(), "Removing network %s\n", network.Name) - if err = dockerCli.Client().NetworkRemove(ctx, network.ID); err != nil { - fmt.Fprintf(dockerCli.Err(), "Failed to remove network %s: %s", network.ID, err) - } - } - return err != nil -} - -func removeSecrets( - ctx context.Context, - dockerCli command.Cli, - secrets []swarm.Secret, -) bool { - var err error - for _, secret := range secrets { - fmt.Fprintf(dockerCli.Err(), "Removing secret %s\n", secret.Spec.Name) - if err = dockerCli.Client().SecretRemove(ctx, secret.ID); err != nil { - fmt.Fprintf(dockerCli.Err(), "Failed to remove secret %s: %s", secret.ID, err) - } - } - return err != nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/stack/remove_test.go b/fn/vendor/github.com/docker/docker/cli/command/stack/remove_test.go deleted file mode 100644 index 7f64fb550..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/stack/remove_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package stack - -import ( - "bytes" - "errors" - "strings" - "testing" - - "github.com/docker/docker/cli/internal/test" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestRemoveStack(t *testing.T) { - allServices := []string{ - objectName("foo", "service1"), - objectName("foo", "service2"), - objectName("bar", "service1"), - objectName("bar", "service2"), - } - allServicesIDs := buildObjectIDs(allServices) - - allNetworks := []string{ - objectName("foo", "network1"), - objectName("bar", "network1"), - } - allNetworksIDs := buildObjectIDs(allNetworks) - - allSecrets := []string{ - objectName("foo", "secret1"), - objectName("foo", "secret2"), - objectName("bar", "secret1"), - } - 
allSecretsIDs := buildObjectIDs(allSecrets) - - cli := &fakeClient{ - services: allServices, - networks: allNetworks, - secrets: allSecrets, - } - cmd := newRemoveCommand(test.NewFakeCli(cli, &bytes.Buffer{})) - cmd.SetArgs([]string{"foo", "bar"}) - - assert.NilError(t, cmd.Execute()) - assert.DeepEqual(t, cli.removedServices, allServicesIDs) - assert.DeepEqual(t, cli.removedNetworks, allNetworksIDs) - assert.DeepEqual(t, cli.removedSecrets, allSecretsIDs) -} - -func TestSkipEmptyStack(t *testing.T) { - buf := new(bytes.Buffer) - allServices := []string{objectName("bar", "service1"), objectName("bar", "service2")} - allServicesIDs := buildObjectIDs(allServices) - - allNetworks := []string{objectName("bar", "network1")} - allNetworksIDs := buildObjectIDs(allNetworks) - - allSecrets := []string{objectName("bar", "secret1")} - allSecretsIDs := buildObjectIDs(allSecrets) - - cli := &fakeClient{ - services: allServices, - networks: allNetworks, - secrets: allSecrets, - } - cmd := newRemoveCommand(test.NewFakeCli(cli, buf)) - cmd.SetArgs([]string{"foo", "bar"}) - - assert.NilError(t, cmd.Execute()) - assert.Contains(t, buf.String(), "Nothing found in stack: foo") - assert.DeepEqual(t, cli.removedServices, allServicesIDs) - assert.DeepEqual(t, cli.removedNetworks, allNetworksIDs) - assert.DeepEqual(t, cli.removedSecrets, allSecretsIDs) -} - -func TestContinueAfterError(t *testing.T) { - allServices := []string{objectName("foo", "service1"), objectName("bar", "service1")} - allServicesIDs := buildObjectIDs(allServices) - - allNetworks := []string{objectName("foo", "network1"), objectName("bar", "network1")} - allNetworksIDs := buildObjectIDs(allNetworks) - - allSecrets := []string{objectName("foo", "secret1"), objectName("bar", "secret1")} - allSecretsIDs := buildObjectIDs(allSecrets) - - removedServices := []string{} - cli := &fakeClient{ - services: allServices, - networks: allNetworks, - secrets: allSecrets, - - serviceRemoveFunc: func(serviceID string) error { - 
removedServices = append(removedServices, serviceID) - - if strings.Contains(serviceID, "foo") { - return errors.New("") - } - return nil - }, - } - cmd := newRemoveCommand(test.NewFakeCli(cli, &bytes.Buffer{})) - cmd.SetArgs([]string{"foo", "bar"}) - - assert.Error(t, cmd.Execute(), "Failed to remove some resources from stack: foo") - assert.DeepEqual(t, removedServices, allServicesIDs) - assert.DeepEqual(t, cli.removedNetworks, allNetworksIDs) - assert.DeepEqual(t, cli.removedSecrets, allSecretsIDs) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/stack/services.go b/fn/vendor/github.com/docker/docker/cli/command/stack/services.go deleted file mode 100644 index 78ddd399c..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/stack/services.go +++ /dev/null @@ -1,97 +0,0 @@ -package stack - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - "github.com/docker/docker/cli/command/service" - "github.com/docker/docker/opts" - "github.com/spf13/cobra" -) - -type servicesOptions struct { - quiet bool - format string - filter opts.FilterOpt - namespace string -} - -func newServicesCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := servicesOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "services [OPTIONS] STACK", - Short: "List the services in the stack", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.namespace = args[0] - return runServices(dockerCli, opts) - }, - } - flags := cmd.Flags() - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") - flags.StringVar(&opts.format, "format", "", "Pretty-print services using a Go template") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func 
runServices(dockerCli *command.DockerCli, opts servicesOptions) error { - ctx := context.Background() - client := dockerCli.Client() - - filter := getStackFilterFromOpt(opts.namespace, opts.filter) - services, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: filter}) - if err != nil { - return err - } - - out := dockerCli.Out() - - // if no services in this stack, print message and exit 0 - if len(services) == 0 { - fmt.Fprintf(out, "Nothing found in stack: %s\n", opts.namespace) - return nil - } - - info := map[string]formatter.ServiceListInfo{} - if !opts.quiet { - taskFilter := filters.NewArgs() - for _, service := range services { - taskFilter.Add("service", service.ID) - } - - tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: taskFilter}) - if err != nil { - return err - } - - nodes, err := client.NodeList(ctx, types.NodeListOptions{}) - if err != nil { - return err - } - - info = service.GetServicesStatus(services, nodes, tasks) - } - - format := opts.format - if len(format) == 0 { - if len(dockerCli.ConfigFile().ServicesFormat) > 0 && !opts.quiet { - format = dockerCli.ConfigFile().ServicesFormat - } else { - format = formatter.TableFormatKey - } - } - - servicesCtx := formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewServiceListFormat(format, opts.quiet), - } - return formatter.ServiceListWrite(servicesCtx, services, info) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/client_test.go b/fn/vendor/github.com/docker/docker/cli/command/swarm/client_test.go deleted file mode 100644 index 1d42b9499..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/client_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package swarm - -import ( - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/client" - "golang.org/x/net/context" -) - -type fakeClient struct { - client.Client - infoFunc func() (types.Info, error) - swarmInitFunc func() 
(string, error) - swarmInspectFunc func() (swarm.Swarm, error) - nodeInspectFunc func() (swarm.Node, []byte, error) - swarmGetUnlockKeyFunc func() (types.SwarmUnlockKeyResponse, error) - swarmJoinFunc func() error - swarmLeaveFunc func() error - swarmUpdateFunc func(swarm swarm.Spec, flags swarm.UpdateFlags) error - swarmUnlockFunc func(req swarm.UnlockRequest) error -} - -func (cli *fakeClient) Info(ctx context.Context) (types.Info, error) { - if cli.infoFunc != nil { - return cli.infoFunc() - } - return types.Info{}, nil -} - -func (cli *fakeClient) NodeInspectWithRaw(ctx context.Context, ref string) (swarm.Node, []byte, error) { - if cli.nodeInspectFunc != nil { - return cli.nodeInspectFunc() - } - return swarm.Node{}, []byte{}, nil -} - -func (cli *fakeClient) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { - if cli.swarmInitFunc != nil { - return cli.swarmInitFunc() - } - return "", nil -} - -func (cli *fakeClient) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { - if cli.swarmInspectFunc != nil { - return cli.swarmInspectFunc() - } - return swarm.Swarm{}, nil -} - -func (cli *fakeClient) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { - if cli.swarmGetUnlockKeyFunc != nil { - return cli.swarmGetUnlockKeyFunc() - } - return types.SwarmUnlockKeyResponse{}, nil -} - -func (cli *fakeClient) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { - if cli.swarmJoinFunc != nil { - return cli.swarmJoinFunc() - } - return nil -} - -func (cli *fakeClient) SwarmLeave(ctx context.Context, force bool) error { - if cli.swarmLeaveFunc != nil { - return cli.swarmLeaveFunc() - } - return nil -} - -func (cli *fakeClient) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { - if cli.swarmUpdateFunc != nil { - return cli.swarmUpdateFunc(swarm, flags) - } - return nil -} - -func (cli *fakeClient) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) 
error { - if cli.swarmUnlockFunc != nil { - return cli.swarmUnlockFunc(req) - } - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/cmd.go b/fn/vendor/github.com/docker/docker/cli/command/swarm/cmd.go deleted file mode 100644 index 659dbcdf7..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/cmd.go +++ /dev/null @@ -1,29 +0,0 @@ -package swarm - -import ( - "github.com/spf13/cobra" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" -) - -// NewSwarmCommand returns a cobra command for `swarm` subcommands -func NewSwarmCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "swarm", - Short: "Manage Swarm", - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - Tags: map[string]string{"version": "1.24"}, - } - cmd.AddCommand( - newInitCommand(dockerCli), - newJoinCommand(dockerCli), - newJoinTokenCommand(dockerCli), - newUnlockKeyCommand(dockerCli), - newUpdateCommand(dockerCli), - newLeaveCommand(dockerCli), - newUnlockCommand(dockerCli), - ) - return cmd -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/init.go b/fn/vendor/github.com/docker/docker/cli/command/swarm/init.go deleted file mode 100644 index 37d96de11..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/init.go +++ /dev/null @@ -1,96 +0,0 @@ -package swarm - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -type initOptions struct { - swarmOptions - listenAddr NodeAddrOption - // Not a NodeAddrOption because it has no default port. 
- advertiseAddr string - forceNewCluster bool - availability string -} - -func newInitCommand(dockerCli command.Cli) *cobra.Command { - opts := initOptions{ - listenAddr: NewListenAddrOption(), - } - - cmd := &cobra.Command{ - Use: "init [OPTIONS]", - Short: "Initialize a swarm", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runInit(dockerCli, cmd.Flags(), opts) - }, - } - - flags := cmd.Flags() - flags.Var(&opts.listenAddr, flagListenAddr, "Listen address (format: [:port])") - flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: [:port])") - flags.BoolVar(&opts.forceNewCluster, "force-new-cluster", false, "Force create a new cluster from current state") - flags.BoolVar(&opts.autolock, flagAutolock, false, "Enable manager autolocking (requiring an unlock key to start a stopped manager)") - flags.StringVar(&opts.availability, flagAvailability, "active", `Availability of the node ("active"|"pause"|"drain")`) - addSwarmFlags(flags, &opts.swarmOptions) - return cmd -} - -func runInit(dockerCli command.Cli, flags *pflag.FlagSet, opts initOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - req := swarm.InitRequest{ - ListenAddr: opts.listenAddr.String(), - AdvertiseAddr: opts.advertiseAddr, - ForceNewCluster: opts.forceNewCluster, - Spec: opts.swarmOptions.ToSpec(flags), - AutoLockManagers: opts.swarmOptions.autolock, - } - if flags.Changed(flagAvailability) { - availability := swarm.NodeAvailability(strings.ToLower(opts.availability)) - switch availability { - case swarm.NodeAvailabilityActive, swarm.NodeAvailabilityPause, swarm.NodeAvailabilityDrain: - req.Availability = availability - default: - return errors.Errorf("invalid availability %q, only active, pause and drain are supported", opts.availability) - } - } - - nodeID, err := client.SwarmInit(ctx, req) - if err != nil { - if strings.Contains(err.Error(), "could not choose an IP address to advertise") || 
strings.Contains(err.Error(), "could not find the system's IP address") { - return errors.New(err.Error() + " - specify one with --advertise-addr") - } - return err - } - - fmt.Fprintf(dockerCli.Out(), "Swarm initialized: current node (%s) is now a manager.\n\n", nodeID) - - if err := printJoinCommand(ctx, dockerCli, nodeID, true, false); err != nil { - return err - } - - fmt.Fprint(dockerCli.Out(), "To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.\n\n") - - if req.AutoLockManagers { - unlockKeyResp, err := client.SwarmGetUnlockKey(ctx) - if err != nil { - return errors.Wrap(err, "could not fetch unlock key") - } - printUnlockCommand(ctx, dockerCli, unlockKeyResp.UnlockKey) - } - - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/init_test.go b/fn/vendor/github.com/docker/docker/cli/command/swarm/init_test.go deleted file mode 100644 index c21433bdb..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/init_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package swarm - -import ( - "bytes" - "fmt" - "io/ioutil" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/internal/test" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/docker/docker/pkg/testutil/golden" - "github.com/pkg/errors" -) - -func TestSwarmInitErrorOnAPIFailure(t *testing.T) { - testCases := []struct { - name string - flags map[string]string - swarmInitFunc func() (string, error) - swarmInspectFunc func() (swarm.Swarm, error) - swarmGetUnlockKeyFunc func() (types.SwarmUnlockKeyResponse, error) - nodeInspectFunc func() (swarm.Node, []byte, error) - expectedError string - }{ - { - name: "init-failed", - swarmInitFunc: func() (string, error) { - return "", errors.Errorf("error initializing the swarm") - }, - expectedError: "error initializing the swarm", - }, - { - name: "init-failed-with-ip-choice", - swarmInitFunc: func() 
(string, error) { - return "", errors.Errorf("could not choose an IP address to advertise") - }, - expectedError: "could not choose an IP address to advertise - specify one with --advertise-addr", - }, - { - name: "swarm-inspect-after-init-failed", - swarmInspectFunc: func() (swarm.Swarm, error) { - return swarm.Swarm{}, errors.Errorf("error inspecting the swarm") - }, - expectedError: "error inspecting the swarm", - }, - { - name: "node-inspect-after-init-failed", - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return swarm.Node{}, []byte{}, errors.Errorf("error inspecting the node") - }, - expectedError: "error inspecting the node", - }, - { - name: "swarm-get-unlock-key-after-init-failed", - flags: map[string]string{ - flagAutolock: "true", - }, - swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { - return types.SwarmUnlockKeyResponse{}, errors.Errorf("error getting swarm unlock key") - }, - expectedError: "could not fetch unlock key: error getting swarm unlock key", - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newInitCommand( - test.NewFakeCli(&fakeClient{ - swarmInitFunc: tc.swarmInitFunc, - swarmInspectFunc: tc.swarmInspectFunc, - swarmGetUnlockKeyFunc: tc.swarmGetUnlockKeyFunc, - nodeInspectFunc: tc.nodeInspectFunc, - }, buf)) - for key, value := range tc.flags { - cmd.Flags().Set(key, value) - } - cmd.SetOutput(ioutil.Discard) - assert.Error(t, cmd.Execute(), tc.expectedError) - } -} - -func TestSwarmInit(t *testing.T) { - testCases := []struct { - name string - flags map[string]string - swarmInitFunc func() (string, error) - swarmInspectFunc func() (swarm.Swarm, error) - swarmGetUnlockKeyFunc func() (types.SwarmUnlockKeyResponse, error) - nodeInspectFunc func() (swarm.Node, []byte, error) - }{ - { - name: "init", - swarmInitFunc: func() (string, error) { - return "nodeID", nil - }, - }, - { - name: "init-autolock", - flags: map[string]string{ - flagAutolock: "true", - }, - swarmInitFunc: func() 
(string, error) { - return "nodeID", nil - }, - swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { - return types.SwarmUnlockKeyResponse{ - UnlockKey: "unlock-key", - }, nil - }, - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newInitCommand( - test.NewFakeCli(&fakeClient{ - swarmInitFunc: tc.swarmInitFunc, - swarmInspectFunc: tc.swarmInspectFunc, - swarmGetUnlockKeyFunc: tc.swarmGetUnlockKeyFunc, - nodeInspectFunc: tc.nodeInspectFunc, - }, buf)) - for key, value := range tc.flags { - cmd.Flags().Set(key, value) - } - assert.NilError(t, cmd.Execute()) - actual := buf.String() - expected := golden.Get(t, []byte(actual), fmt.Sprintf("init-%s.golden", tc.name)) - assert.EqualNormalizedString(t, assert.RemoveSpace, actual, string(expected)) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/join.go b/fn/vendor/github.com/docker/docker/cli/command/swarm/join.go deleted file mode 100644 index 873eaaefa..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/join.go +++ /dev/null @@ -1,85 +0,0 @@ -package swarm - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -type joinOptions struct { - remote string - listenAddr NodeAddrOption - // Not a NodeAddrOption because it has no default port. 
- advertiseAddr string - token string - availability string -} - -func newJoinCommand(dockerCli command.Cli) *cobra.Command { - opts := joinOptions{ - listenAddr: NewListenAddrOption(), - } - - cmd := &cobra.Command{ - Use: "join [OPTIONS] HOST:PORT", - Short: "Join a swarm as a node and/or manager", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.remote = args[0] - return runJoin(dockerCli, cmd.Flags(), opts) - }, - } - - flags := cmd.Flags() - flags.Var(&opts.listenAddr, flagListenAddr, "Listen address (format: [:port])") - flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: [:port])") - flags.StringVar(&opts.token, flagToken, "", "Token for entry into the swarm") - flags.StringVar(&opts.availability, flagAvailability, "active", `Availability of the node ("active"|"pause"|"drain")`) - return cmd -} - -func runJoin(dockerCli command.Cli, flags *pflag.FlagSet, opts joinOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - req := swarm.JoinRequest{ - JoinToken: opts.token, - ListenAddr: opts.listenAddr.String(), - AdvertiseAddr: opts.advertiseAddr, - RemoteAddrs: []string{opts.remote}, - } - if flags.Changed(flagAvailability) { - availability := swarm.NodeAvailability(strings.ToLower(opts.availability)) - switch availability { - case swarm.NodeAvailabilityActive, swarm.NodeAvailabilityPause, swarm.NodeAvailabilityDrain: - req.Availability = availability - default: - return errors.Errorf("invalid availability %q, only active, pause and drain are supported", opts.availability) - } - } - - err := client.SwarmJoin(ctx, req) - if err != nil { - return err - } - - info, err := client.Info(ctx) - if err != nil { - return err - } - - if info.Swarm.ControlAvailable { - fmt.Fprintln(dockerCli.Out(), "This node joined a swarm as a manager.") - } else { - fmt.Fprintln(dockerCli.Out(), "This node joined a swarm as a worker.") - } - return nil -} diff --git 
a/fn/vendor/github.com/docker/docker/cli/command/swarm/join_test.go b/fn/vendor/github.com/docker/docker/cli/command/swarm/join_test.go deleted file mode 100644 index 6d92f0c4f..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/join_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package swarm - -import ( - "bytes" - "io/ioutil" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/internal/test" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/pkg/errors" -) - -func TestSwarmJoinErrors(t *testing.T) { - testCases := []struct { - name string - args []string - swarmJoinFunc func() error - infoFunc func() (types.Info, error) - expectedError string - }{ - { - name: "not-enough-args", - expectedError: "requires exactly 1 argument", - }, - { - name: "too-many-args", - args: []string{"remote1", "remote2"}, - expectedError: "requires exactly 1 argument", - }, - { - name: "join-failed", - args: []string{"remote"}, - swarmJoinFunc: func() error { - return errors.Errorf("error joining the swarm") - }, - expectedError: "error joining the swarm", - }, - { - name: "join-failed-on-init", - args: []string{"remote"}, - infoFunc: func() (types.Info, error) { - return types.Info{}, errors.Errorf("error asking for node info") - }, - expectedError: "error asking for node info", - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newJoinCommand( - test.NewFakeCli(&fakeClient{ - swarmJoinFunc: tc.swarmJoinFunc, - infoFunc: tc.infoFunc, - }, buf)) - cmd.SetArgs(tc.args) - cmd.SetOutput(ioutil.Discard) - assert.Error(t, cmd.Execute(), tc.expectedError) - } -} - -func TestSwarmJoin(t *testing.T) { - testCases := []struct { - name string - infoFunc func() (types.Info, error) - expected string - }{ - { - name: "join-as-manager", - infoFunc: func() (types.Info, error) { - return types.Info{ - Swarm: swarm.Info{ - ControlAvailable: true, - }, - }, nil - }, 
- expected: "This node joined a swarm as a manager.", - }, - { - name: "join-as-worker", - infoFunc: func() (types.Info, error) { - return types.Info{ - Swarm: swarm.Info{ - ControlAvailable: false, - }, - }, nil - }, - expected: "This node joined a swarm as a worker.", - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newJoinCommand( - test.NewFakeCli(&fakeClient{ - infoFunc: tc.infoFunc, - }, buf)) - cmd.SetArgs([]string{"remote"}) - assert.NilError(t, cmd.Execute()) - assert.Equal(t, strings.TrimSpace(buf.String()), tc.expected) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/join_token.go b/fn/vendor/github.com/docker/docker/cli/command/swarm/join_token.go deleted file mode 100644 index dc69e909e..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/join_token.go +++ /dev/null @@ -1,119 +0,0 @@ -package swarm - -import ( - "fmt" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type joinTokenOptions struct { - role string - rotate bool - quiet bool -} - -func newJoinTokenCommand(dockerCli command.Cli) *cobra.Command { - opts := joinTokenOptions{} - - cmd := &cobra.Command{ - Use: "join-token [OPTIONS] (worker|manager)", - Short: "Manage join tokens", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.role = args[0] - return runJoinToken(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.BoolVar(&opts.rotate, flagRotate, false, "Rotate join token") - flags.BoolVarP(&opts.quiet, flagQuiet, "q", false, "Only display token") - - return cmd -} - -func runJoinToken(dockerCli command.Cli, opts joinTokenOptions) error { - worker := opts.role == "worker" - manager := opts.role == "manager" - - if !worker && !manager { - return errors.New("unknown role " + opts.role) - } - - client := 
dockerCli.Client() - ctx := context.Background() - - if opts.rotate { - flags := swarm.UpdateFlags{ - RotateWorkerToken: worker, - RotateManagerToken: manager, - } - - sw, err := client.SwarmInspect(ctx) - if err != nil { - return err - } - - if err := client.SwarmUpdate(ctx, sw.Version, sw.Spec, flags); err != nil { - return err - } - - if !opts.quiet { - fmt.Fprintf(dockerCli.Out(), "Successfully rotated %s join token.\n\n", opts.role) - } - } - - // second SwarmInspect in this function, - // this is necessary since SwarmUpdate after first changes the join tokens - sw, err := client.SwarmInspect(ctx) - if err != nil { - return err - } - - if opts.quiet && worker { - fmt.Fprintln(dockerCli.Out(), sw.JoinTokens.Worker) - return nil - } - - if opts.quiet && manager { - fmt.Fprintln(dockerCli.Out(), sw.JoinTokens.Manager) - return nil - } - - info, err := client.Info(ctx) - if err != nil { - return err - } - - return printJoinCommand(ctx, dockerCli, info.Swarm.NodeID, worker, manager) -} - -func printJoinCommand(ctx context.Context, dockerCli command.Cli, nodeID string, worker bool, manager bool) error { - client := dockerCli.Client() - - node, _, err := client.NodeInspectWithRaw(ctx, nodeID) - if err != nil { - return err - } - - sw, err := client.SwarmInspect(ctx) - if err != nil { - return err - } - - if node.ManagerStatus != nil { - if worker { - fmt.Fprintf(dockerCli.Out(), "To add a worker to this swarm, run the following command:\n\n docker swarm join \\\n --token %s \\\n %s\n\n", sw.JoinTokens.Worker, node.ManagerStatus.Addr) - } - if manager { - fmt.Fprintf(dockerCli.Out(), "To add a manager to this swarm, run the following command:\n\n docker swarm join \\\n --token %s \\\n %s\n\n", sw.JoinTokens.Manager, node.ManagerStatus.Addr) - } - } - - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/join_token_test.go b/fn/vendor/github.com/docker/docker/cli/command/swarm/join_token_test.go deleted file mode 100644 index 
9b10369ad..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/join_token_test.go +++ /dev/null @@ -1,216 +0,0 @@ -package swarm - -import ( - "bytes" - "fmt" - "io/ioutil" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/internal/test" - "github.com/pkg/errors" - // Import builders to get the builder function as package function - . "github.com/docker/docker/cli/internal/test/builders" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/docker/docker/pkg/testutil/golden" -) - -func TestSwarmJoinTokenErrors(t *testing.T) { - testCases := []struct { - name string - args []string - flags map[string]string - infoFunc func() (types.Info, error) - swarmInspectFunc func() (swarm.Swarm, error) - swarmUpdateFunc func(swarm swarm.Spec, flags swarm.UpdateFlags) error - nodeInspectFunc func() (swarm.Node, []byte, error) - expectedError string - }{ - { - name: "not-enough-args", - expectedError: "requires exactly 1 argument", - }, - { - name: "too-many-args", - args: []string{"worker", "manager"}, - expectedError: "requires exactly 1 argument", - }, - { - name: "invalid-args", - args: []string{"foo"}, - expectedError: "unknown role foo", - }, - { - name: "swarm-inspect-failed", - args: []string{"worker"}, - swarmInspectFunc: func() (swarm.Swarm, error) { - return swarm.Swarm{}, errors.Errorf("error inspecting the swarm") - }, - expectedError: "error inspecting the swarm", - }, - { - name: "swarm-inspect-rotate-failed", - args: []string{"worker"}, - flags: map[string]string{ - flagRotate: "true", - }, - swarmInspectFunc: func() (swarm.Swarm, error) { - return swarm.Swarm{}, errors.Errorf("error inspecting the swarm") - }, - expectedError: "error inspecting the swarm", - }, - { - name: "swarm-update-failed", - args: []string{"worker"}, - flags: map[string]string{ - flagRotate: "true", - }, - swarmUpdateFunc: func(swarm swarm.Spec, flags swarm.UpdateFlags) error { - return 
errors.Errorf("error updating the swarm") - }, - expectedError: "error updating the swarm", - }, - { - name: "node-inspect-failed", - args: []string{"worker"}, - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return swarm.Node{}, []byte{}, errors.Errorf("error inspecting node") - }, - expectedError: "error inspecting node", - }, - { - name: "info-failed", - args: []string{"worker"}, - infoFunc: func() (types.Info, error) { - return types.Info{}, errors.Errorf("error asking for node info") - }, - expectedError: "error asking for node info", - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newJoinTokenCommand( - test.NewFakeCli(&fakeClient{ - swarmInspectFunc: tc.swarmInspectFunc, - swarmUpdateFunc: tc.swarmUpdateFunc, - infoFunc: tc.infoFunc, - nodeInspectFunc: tc.nodeInspectFunc, - }, buf)) - cmd.SetArgs(tc.args) - for key, value := range tc.flags { - cmd.Flags().Set(key, value) - } - cmd.SetOutput(ioutil.Discard) - assert.Error(t, cmd.Execute(), tc.expectedError) - } -} - -func TestSwarmJoinToken(t *testing.T) { - testCases := []struct { - name string - args []string - flags map[string]string - infoFunc func() (types.Info, error) - swarmInspectFunc func() (swarm.Swarm, error) - nodeInspectFunc func() (swarm.Node, []byte, error) - }{ - { - name: "worker", - args: []string{"worker"}, - infoFunc: func() (types.Info, error) { - return types.Info{ - Swarm: swarm.Info{ - NodeID: "nodeID", - }, - }, nil - }, - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return *Node(Manager()), []byte{}, nil - }, - swarmInspectFunc: func() (swarm.Swarm, error) { - return *Swarm(), nil - }, - }, - { - name: "manager", - args: []string{"manager"}, - infoFunc: func() (types.Info, error) { - return types.Info{ - Swarm: swarm.Info{ - NodeID: "nodeID", - }, - }, nil - }, - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return *Node(Manager()), []byte{}, nil - }, - swarmInspectFunc: func() (swarm.Swarm, error) { - return *Swarm(), nil - 
}, - }, - { - name: "manager-rotate", - args: []string{"manager"}, - flags: map[string]string{ - flagRotate: "true", - }, - infoFunc: func() (types.Info, error) { - return types.Info{ - Swarm: swarm.Info{ - NodeID: "nodeID", - }, - }, nil - }, - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return *Node(Manager()), []byte{}, nil - }, - swarmInspectFunc: func() (swarm.Swarm, error) { - return *Swarm(), nil - }, - }, - { - name: "worker-quiet", - args: []string{"worker"}, - flags: map[string]string{ - flagQuiet: "true", - }, - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return *Node(Manager()), []byte{}, nil - }, - swarmInspectFunc: func() (swarm.Swarm, error) { - return *Swarm(), nil - }, - }, - { - name: "manager-quiet", - args: []string{"manager"}, - flags: map[string]string{ - flagQuiet: "true", - }, - nodeInspectFunc: func() (swarm.Node, []byte, error) { - return *Node(Manager()), []byte{}, nil - }, - swarmInspectFunc: func() (swarm.Swarm, error) { - return *Swarm(), nil - }, - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newJoinTokenCommand( - test.NewFakeCli(&fakeClient{ - swarmInspectFunc: tc.swarmInspectFunc, - infoFunc: tc.infoFunc, - nodeInspectFunc: tc.nodeInspectFunc, - }, buf)) - cmd.SetArgs(tc.args) - for key, value := range tc.flags { - cmd.Flags().Set(key, value) - } - assert.NilError(t, cmd.Execute()) - actual := buf.String() - expected := golden.Get(t, []byte(actual), fmt.Sprintf("jointoken-%s.golden", tc.name)) - assert.EqualNormalizedString(t, assert.RemoveSpace, actual, string(expected)) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/leave.go b/fn/vendor/github.com/docker/docker/cli/command/swarm/leave.go deleted file mode 100644 index 128ed46d8..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/leave.go +++ /dev/null @@ -1,44 +0,0 @@ -package swarm - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - 
"github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type leaveOptions struct { - force bool -} - -func newLeaveCommand(dockerCli command.Cli) *cobra.Command { - opts := leaveOptions{} - - cmd := &cobra.Command{ - Use: "leave [OPTIONS]", - Short: "Leave the swarm", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runLeave(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.force, "force", "f", false, "Force this node to leave the swarm, ignoring warnings") - return cmd -} - -func runLeave(dockerCli command.Cli, opts leaveOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - if err := client.SwarmLeave(ctx, opts.force); err != nil { - return err - } - - fmt.Fprintln(dockerCli.Out(), "Node left the swarm.") - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/leave_test.go b/fn/vendor/github.com/docker/docker/cli/command/swarm/leave_test.go deleted file mode 100644 index 93a58887a..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/leave_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package swarm - -import ( - "bytes" - "io/ioutil" - "strings" - "testing" - - "github.com/docker/docker/cli/internal/test" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/pkg/errors" -) - -func TestSwarmLeaveErrors(t *testing.T) { - testCases := []struct { - name string - args []string - swarmLeaveFunc func() error - expectedError string - }{ - { - name: "too-many-args", - args: []string{"foo"}, - expectedError: "accepts no argument(s)", - }, - { - name: "leave-failed", - swarmLeaveFunc: func() error { - return errors.Errorf("error leaving the swarm") - }, - expectedError: "error leaving the swarm", - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newLeaveCommand( - test.NewFakeCli(&fakeClient{ - swarmLeaveFunc: tc.swarmLeaveFunc, - }, buf)) - cmd.SetArgs(tc.args) - cmd.SetOutput(ioutil.Discard) - 
assert.Error(t, cmd.Execute(), tc.expectedError) - } -} - -func TestSwarmLeave(t *testing.T) { - buf := new(bytes.Buffer) - cmd := newLeaveCommand( - test.NewFakeCli(&fakeClient{}, buf)) - assert.NilError(t, cmd.Execute()) - assert.Equal(t, strings.TrimSpace(buf.String()), "Node left the swarm.") -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/opts.go b/fn/vendor/github.com/docker/docker/cli/command/swarm/opts.go deleted file mode 100644 index 6eddddcca..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/opts.go +++ /dev/null @@ -1,212 +0,0 @@ -package swarm - -import ( - "encoding/csv" - "fmt" - "strings" - "time" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/opts" - "github.com/pkg/errors" - "github.com/spf13/pflag" -) - -const ( - defaultListenAddr = "0.0.0.0:2377" - - flagCertExpiry = "cert-expiry" - flagDispatcherHeartbeat = "dispatcher-heartbeat" - flagListenAddr = "listen-addr" - flagAdvertiseAddr = "advertise-addr" - flagQuiet = "quiet" - flagRotate = "rotate" - flagToken = "token" - flagTaskHistoryLimit = "task-history-limit" - flagExternalCA = "external-ca" - flagMaxSnapshots = "max-snapshots" - flagSnapshotInterval = "snapshot-interval" - flagLockKey = "lock-key" - flagAutolock = "autolock" - flagAvailability = "availability" -) - -type swarmOptions struct { - taskHistoryLimit int64 - dispatcherHeartbeat time.Duration - nodeCertExpiry time.Duration - externalCA ExternalCAOption - maxSnapshots uint64 - snapshotInterval uint64 - autolock bool -} - -// NodeAddrOption is a pflag.Value for listening addresses -type NodeAddrOption struct { - addr string -} - -// String prints the representation of this flag -func (a *NodeAddrOption) String() string { - return a.Value() -} - -// Set the value for this flag -func (a *NodeAddrOption) Set(value string) error { - addr, err := opts.ParseTCPAddr(value, a.addr) - if err != nil { - return err - } - a.addr = addr - return nil -} - -// Type returns the 
type of this flag -func (a *NodeAddrOption) Type() string { - return "node-addr" -} - -// Value returns the value of this option as addr:port -func (a *NodeAddrOption) Value() string { - return strings.TrimPrefix(a.addr, "tcp://") -} - -// NewNodeAddrOption returns a new node address option -func NewNodeAddrOption(addr string) NodeAddrOption { - return NodeAddrOption{addr} -} - -// NewListenAddrOption returns a NodeAddrOption with default values -func NewListenAddrOption() NodeAddrOption { - return NewNodeAddrOption(defaultListenAddr) -} - -// ExternalCAOption is a Value type for parsing external CA specifications. -type ExternalCAOption struct { - values []*swarm.ExternalCA -} - -// Set parses an external CA option. -func (m *ExternalCAOption) Set(value string) error { - parsed, err := parseExternalCA(value) - if err != nil { - return err - } - - m.values = append(m.values, parsed) - return nil -} - -// Type returns the type of this option. -func (m *ExternalCAOption) Type() string { - return "external-ca" -} - -// String returns a string repr of this option. -func (m *ExternalCAOption) String() string { - externalCAs := []string{} - for _, externalCA := range m.values { - repr := fmt.Sprintf("%s: %s", externalCA.Protocol, externalCA.URL) - externalCAs = append(externalCAs, repr) - } - return strings.Join(externalCAs, ", ") -} - -// Value returns the external CAs -func (m *ExternalCAOption) Value() []*swarm.ExternalCA { - return m.values -} - -// parseExternalCA parses an external CA specification from the command line, -// such as protocol=cfssl,url=https://example.com. 
-func parseExternalCA(caSpec string) (*swarm.ExternalCA, error) { - csvReader := csv.NewReader(strings.NewReader(caSpec)) - fields, err := csvReader.Read() - if err != nil { - return nil, err - } - - externalCA := swarm.ExternalCA{ - Options: make(map[string]string), - } - - var ( - hasProtocol bool - hasURL bool - ) - - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - - if len(parts) != 2 { - return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field) - } - - key, value := parts[0], parts[1] - - switch strings.ToLower(key) { - case "protocol": - hasProtocol = true - if strings.ToLower(value) == string(swarm.ExternalCAProtocolCFSSL) { - externalCA.Protocol = swarm.ExternalCAProtocolCFSSL - } else { - return nil, errors.Errorf("unrecognized external CA protocol %s", value) - } - case "url": - hasURL = true - externalCA.URL = value - default: - externalCA.Options[key] = value - } - } - - if !hasProtocol { - return nil, errors.New("the external-ca option needs a protocol= parameter") - } - if !hasURL { - return nil, errors.New("the external-ca option needs a url= parameter") - } - - return &externalCA, nil -} - -func addSwarmFlags(flags *pflag.FlagSet, opts *swarmOptions) { - flags.Int64Var(&opts.taskHistoryLimit, flagTaskHistoryLimit, 5, "Task history retention limit") - flags.DurationVar(&opts.dispatcherHeartbeat, flagDispatcherHeartbeat, time.Duration(5*time.Second), "Dispatcher heartbeat period (ns|us|ms|s|m|h)") - flags.DurationVar(&opts.nodeCertExpiry, flagCertExpiry, time.Duration(90*24*time.Hour), "Validity period for node certificates (ns|us|ms|s|m|h)") - flags.Var(&opts.externalCA, flagExternalCA, "Specifications of one or more certificate signing endpoints") - flags.Uint64Var(&opts.maxSnapshots, flagMaxSnapshots, 0, "Number of additional Raft snapshots to retain") - flags.SetAnnotation(flagMaxSnapshots, "version", []string{"1.25"}) - flags.Uint64Var(&opts.snapshotInterval, flagSnapshotInterval, 10000, "Number of 
log entries between Raft snapshots") - flags.SetAnnotation(flagSnapshotInterval, "version", []string{"1.25"}) -} - -func (opts *swarmOptions) mergeSwarmSpec(spec *swarm.Spec, flags *pflag.FlagSet) { - if flags.Changed(flagTaskHistoryLimit) { - spec.Orchestration.TaskHistoryRetentionLimit = &opts.taskHistoryLimit - } - if flags.Changed(flagDispatcherHeartbeat) { - spec.Dispatcher.HeartbeatPeriod = opts.dispatcherHeartbeat - } - if flags.Changed(flagCertExpiry) { - spec.CAConfig.NodeCertExpiry = opts.nodeCertExpiry - } - if flags.Changed(flagExternalCA) { - spec.CAConfig.ExternalCAs = opts.externalCA.Value() - } - if flags.Changed(flagMaxSnapshots) { - spec.Raft.KeepOldSnapshots = &opts.maxSnapshots - } - if flags.Changed(flagSnapshotInterval) { - spec.Raft.SnapshotInterval = opts.snapshotInterval - } - if flags.Changed(flagAutolock) { - spec.EncryptionConfig.AutoLockManagers = opts.autolock - } -} - -func (opts *swarmOptions) ToSpec(flags *pflag.FlagSet) swarm.Spec { - var spec swarm.Spec - opts.mergeSwarmSpec(&spec, flags) - return spec -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/opts_test.go b/fn/vendor/github.com/docker/docker/cli/command/swarm/opts_test.go deleted file mode 100644 index 9a97e8bd2..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/opts_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package swarm - -import ( - "testing" - - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestNodeAddrOptionSetHostAndPort(t *testing.T) { - opt := NewNodeAddrOption("old:123") - addr := "newhost:5555" - assert.NilError(t, opt.Set(addr)) - assert.Equal(t, opt.Value(), addr) -} - -func TestNodeAddrOptionSetHostOnly(t *testing.T) { - opt := NewListenAddrOption() - assert.NilError(t, opt.Set("newhost")) - assert.Equal(t, opt.Value(), "newhost:2377") -} - -func TestNodeAddrOptionSetHostOnlyIPv6(t *testing.T) { - opt := NewListenAddrOption() - assert.NilError(t, opt.Set("::1")) - assert.Equal(t, opt.Value(), "[::1]:2377") -} - 
-func TestNodeAddrOptionSetPortOnly(t *testing.T) { - opt := NewListenAddrOption() - assert.NilError(t, opt.Set(":4545")) - assert.Equal(t, opt.Value(), "0.0.0.0:4545") -} - -func TestNodeAddrOptionSetInvalidFormat(t *testing.T) { - opt := NewListenAddrOption() - assert.Error(t, opt.Set("http://localhost:4545"), "Invalid") -} - -func TestExternalCAOptionErrors(t *testing.T) { - testCases := []struct { - externalCA string - expectedError string - }{ - { - externalCA: "", - expectedError: "EOF", - }, - { - externalCA: "anything", - expectedError: "invalid field 'anything' must be a key=value pair", - }, - { - externalCA: "foo=bar", - expectedError: "the external-ca option needs a protocol= parameter", - }, - { - externalCA: "protocol=baz", - expectedError: "unrecognized external CA protocol baz", - }, - { - externalCA: "protocol=cfssl", - expectedError: "the external-ca option needs a url= parameter", - }, - } - for _, tc := range testCases { - opt := &ExternalCAOption{} - assert.Error(t, opt.Set(tc.externalCA), tc.expectedError) - } -} - -func TestExternalCAOption(t *testing.T) { - testCases := []struct { - externalCA string - expected string - }{ - { - externalCA: "protocol=cfssl,url=anything", - expected: "cfssl: anything", - }, - { - externalCA: "protocol=CFSSL,url=anything", - expected: "cfssl: anything", - }, - { - externalCA: "protocol=Cfssl,url=https://example.com", - expected: "cfssl: https://example.com", - }, - { - externalCA: "protocol=Cfssl,url=https://example.com,foo=bar", - expected: "cfssl: https://example.com", - }, - { - externalCA: "protocol=Cfssl,url=https://example.com,foo=bar,foo=baz", - expected: "cfssl: https://example.com", - }, - } - for _, tc := range testCases { - opt := &ExternalCAOption{} - assert.NilError(t, opt.Set(tc.externalCA)) - assert.Equal(t, opt.String(), tc.expected) - } -} - -func TestExternalCAOptionMultiple(t *testing.T) { - opt := &ExternalCAOption{} - assert.NilError(t, opt.Set("protocol=cfssl,url=https://example.com")) - 
assert.NilError(t, opt.Set("protocol=CFSSL,url=anything")) - assert.Equal(t, len(opt.Value()), 2) - assert.Equal(t, opt.String(), "cfssl: https://example.com, cfssl: anything") -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/init-init-autolock.golden b/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/init-init-autolock.golden deleted file mode 100644 index cdd3c666b..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/init-init-autolock.golden +++ /dev/null @@ -1,11 +0,0 @@ -Swarm initialized: current node (nodeID) is now a manager. - -To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions. - -To unlock a swarm manager after it restarts, run the `docker swarm unlock` -command and provide the following key: - - unlock-key - -Please remember to store this key in a password manager, since without it you -will not be able to restart the manager. diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/init-init.golden b/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/init-init.golden deleted file mode 100644 index 6e82be010..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/init-init.golden +++ /dev/null @@ -1,4 +0,0 @@ -Swarm initialized: current node (nodeID) is now a manager. - -To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions. 
- diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/jointoken-manager-quiet.golden b/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/jointoken-manager-quiet.golden deleted file mode 100644 index 0c7cfc608..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/jointoken-manager-quiet.golden +++ /dev/null @@ -1 +0,0 @@ -manager-join-token diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/jointoken-manager-rotate.golden b/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/jointoken-manager-rotate.golden deleted file mode 100644 index 7ee455bec..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/jointoken-manager-rotate.golden +++ /dev/null @@ -1,8 +0,0 @@ -Successfully rotated manager join token. - -To add a manager to this swarm, run the following command: - - docker swarm join \ - --token manager-join-token \ - 127.0.0.1 - diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/jointoken-manager.golden b/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/jointoken-manager.golden deleted file mode 100644 index d56527aa5..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/jointoken-manager.golden +++ /dev/null @@ -1,6 +0,0 @@ -To add a manager to this swarm, run the following command: - - docker swarm join \ - --token manager-join-token \ - 127.0.0.1 - diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/jointoken-worker-quiet.golden b/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/jointoken-worker-quiet.golden deleted file mode 100644 index b445e191e..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/jointoken-worker-quiet.golden +++ /dev/null @@ -1 +0,0 @@ -worker-join-token diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/jointoken-worker.golden 
b/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/jointoken-worker.golden deleted file mode 100644 index 5d44f3dae..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/jointoken-worker.golden +++ /dev/null @@ -1,6 +0,0 @@ -To add a worker to this swarm, run the following command: - - docker swarm join \ - --token worker-join-token \ - 127.0.0.1 - diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/unlockkeys-unlock-key-quiet.golden b/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/unlockkeys-unlock-key-quiet.golden deleted file mode 100644 index ed53505e2..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/unlockkeys-unlock-key-quiet.golden +++ /dev/null @@ -1 +0,0 @@ -unlock-key diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate-quiet.golden b/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate-quiet.golden deleted file mode 100644 index ed53505e2..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate-quiet.golden +++ /dev/null @@ -1 +0,0 @@ -unlock-key diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate.golden b/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate.golden deleted file mode 100644 index 89152b864..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate.golden +++ /dev/null @@ -1,9 +0,0 @@ -Successfully rotated manager unlock key. - -To unlock a swarm manager after it restarts, run the `docker swarm unlock` -command and provide the following key: - - unlock-key - -Please remember to store this key in a password manager, since without it you -will not be able to restart the manager. 
diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/unlockkeys-unlock-key.golden b/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/unlockkeys-unlock-key.golden deleted file mode 100644 index 8316df478..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/unlockkeys-unlock-key.golden +++ /dev/null @@ -1,7 +0,0 @@ -To unlock a swarm manager after it restarts, run the `docker swarm unlock` -command and provide the following key: - - unlock-key - -Please remember to store this key in a password manager, since without it you -will not be able to restart the manager. diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/update-all-flags-quiet.golden b/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/update-all-flags-quiet.golden deleted file mode 100644 index 3d195a258..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/update-all-flags-quiet.golden +++ /dev/null @@ -1 +0,0 @@ -Swarm updated. diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/update-autolock-unlock-key.golden b/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/update-autolock-unlock-key.golden deleted file mode 100644 index a077b9e16..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/update-autolock-unlock-key.golden +++ /dev/null @@ -1,8 +0,0 @@ -Swarm updated. -To unlock a swarm manager after it restarts, run the `docker swarm unlock` -command and provide the following key: - - unlock-key - -Please remember to store this key in a password manager, since without it you -will not be able to restart the manager. 
diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/update-noargs.golden b/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/update-noargs.golden deleted file mode 100644 index 381c0ccf1..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/testdata/update-noargs.golden +++ /dev/null @@ -1,13 +0,0 @@ -Update the swarm - -Usage: - update [OPTIONS] [flags] - -Flags: - --autolock Change manager autolocking setting (true|false) - --cert-expiry duration Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s) - --dispatcher-heartbeat duration Dispatcher heartbeat period (ns|us|ms|s|m|h) (default 5s) - --external-ca external-ca Specifications of one or more certificate signing endpoints - --max-snapshots uint Number of additional Raft snapshots to retain - --snapshot-interval uint Number of log entries between Raft snapshots (default 10000) - --task-history-limit int Task history retention limit (default 5) diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/unlock.go b/fn/vendor/github.com/docker/docker/cli/command/swarm/unlock.go deleted file mode 100644 index c1d9b9918..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/unlock.go +++ /dev/null @@ -1,78 +0,0 @@ -package swarm - -import ( - "bufio" - "fmt" - "io" - "strings" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/crypto/ssh/terminal" - "golang.org/x/net/context" -) - -type unlockOptions struct{} - -func newUnlockCommand(dockerCli command.Cli) *cobra.Command { - opts := unlockOptions{} - - cmd := &cobra.Command{ - Use: "unlock", - Short: "Unlock swarm", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runUnlock(dockerCli, opts) - }, - } - - return cmd -} - -func runUnlock(dockerCli command.Cli, opts unlockOptions) error { - client := 
dockerCli.Client() - ctx := context.Background() - - // First see if the node is actually part of a swarm, and if it is actually locked first. - // If it's in any other state than locked, don't ask for the key. - info, err := client.Info(ctx) - if err != nil { - return err - } - - switch info.Swarm.LocalNodeState { - case swarm.LocalNodeStateInactive: - return errors.New("Error: This node is not part of a swarm") - case swarm.LocalNodeStateLocked: - break - default: - return errors.New("Error: swarm is not locked") - } - - key, err := readKey(dockerCli.In(), "Please enter unlock key: ") - if err != nil { - return err - } - req := swarm.UnlockRequest{ - UnlockKey: key, - } - - return client.SwarmUnlock(ctx, req) -} - -func readKey(in *command.InStream, prompt string) (string, error) { - if in.IsTerminal() { - fmt.Print(prompt) - dt, err := terminal.ReadPassword(int(in.FD())) - fmt.Println() - return string(dt), err - } - key, err := bufio.NewReader(in).ReadString('\n') - if err == io.EOF { - err = nil - } - return strings.TrimSpace(key), err -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/unlock_key.go b/fn/vendor/github.com/docker/docker/cli/command/swarm/unlock_key.go deleted file mode 100644 index 77c97d88e..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/unlock_key.go +++ /dev/null @@ -1,86 +0,0 @@ -package swarm - -import ( - "fmt" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type unlockKeyOptions struct { - rotate bool - quiet bool -} - -func newUnlockKeyCommand(dockerCli command.Cli) *cobra.Command { - opts := unlockKeyOptions{} - - cmd := &cobra.Command{ - Use: "unlock-key [OPTIONS]", - Short: "Manage the unlock key", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runUnlockKey(dockerCli, opts) - }, - } - - flags := 
cmd.Flags() - flags.BoolVar(&opts.rotate, flagRotate, false, "Rotate unlock key") - flags.BoolVarP(&opts.quiet, flagQuiet, "q", false, "Only display token") - - return cmd -} - -func runUnlockKey(dockerCli command.Cli, opts unlockKeyOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - if opts.rotate { - flags := swarm.UpdateFlags{RotateManagerUnlockKey: true} - - sw, err := client.SwarmInspect(ctx) - if err != nil { - return err - } - - if !sw.Spec.EncryptionConfig.AutoLockManagers { - return errors.New("cannot rotate because autolock is not turned on") - } - - if err := client.SwarmUpdate(ctx, sw.Version, sw.Spec, flags); err != nil { - return err - } - - if !opts.quiet { - fmt.Fprintf(dockerCli.Out(), "Successfully rotated manager unlock key.\n\n") - } - } - - unlockKeyResp, err := client.SwarmGetUnlockKey(ctx) - if err != nil { - return errors.Wrap(err, "could not fetch unlock key") - } - - if unlockKeyResp.UnlockKey == "" { - return errors.New("no unlock key is set") - } - - if opts.quiet { - fmt.Fprintln(dockerCli.Out(), unlockKeyResp.UnlockKey) - return nil - } - - printUnlockCommand(ctx, dockerCli, unlockKeyResp.UnlockKey) - return nil -} - -func printUnlockCommand(ctx context.Context, dockerCli command.Cli, unlockKey string) { - if len(unlockKey) > 0 { - fmt.Fprintf(dockerCli.Out(), "To unlock a swarm manager after it restarts, run the `docker swarm unlock`\ncommand and provide the following key:\n\n %s\n\nPlease remember to store this key in a password manager, since without it you\nwill not be able to restart the manager.\n", unlockKey) - } - return -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/unlock_key_test.go b/fn/vendor/github.com/docker/docker/cli/command/swarm/unlock_key_test.go deleted file mode 100644 index 7b644f70e..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/unlock_key_test.go +++ /dev/null @@ -1,176 +0,0 @@ -package swarm - -import ( - "bytes" - "fmt" - "io/ioutil" - 
"testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/internal/test" - "github.com/pkg/errors" - // Import builders to get the builder function as package function - . "github.com/docker/docker/cli/internal/test/builders" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/docker/docker/pkg/testutil/golden" -) - -func TestSwarmUnlockKeyErrors(t *testing.T) { - testCases := []struct { - name string - args []string - flags map[string]string - swarmInspectFunc func() (swarm.Swarm, error) - swarmUpdateFunc func(swarm swarm.Spec, flags swarm.UpdateFlags) error - swarmGetUnlockKeyFunc func() (types.SwarmUnlockKeyResponse, error) - expectedError string - }{ - { - name: "too-many-args", - args: []string{"foo"}, - expectedError: "accepts no argument(s)", - }, - { - name: "swarm-inspect-rotate-failed", - flags: map[string]string{ - flagRotate: "true", - }, - swarmInspectFunc: func() (swarm.Swarm, error) { - return swarm.Swarm{}, errors.Errorf("error inspecting the swarm") - }, - expectedError: "error inspecting the swarm", - }, - { - name: "swarm-rotate-no-autolock-failed", - flags: map[string]string{ - flagRotate: "true", - }, - swarmInspectFunc: func() (swarm.Swarm, error) { - return *Swarm(), nil - }, - expectedError: "cannot rotate because autolock is not turned on", - }, - { - name: "swarm-update-failed", - flags: map[string]string{ - flagRotate: "true", - }, - swarmInspectFunc: func() (swarm.Swarm, error) { - return *Swarm(Autolock()), nil - }, - swarmUpdateFunc: func(swarm swarm.Spec, flags swarm.UpdateFlags) error { - return errors.Errorf("error updating the swarm") - }, - expectedError: "error updating the swarm", - }, - { - name: "swarm-get-unlock-key-failed", - swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { - return types.SwarmUnlockKeyResponse{}, errors.Errorf("error getting unlock key") - }, - expectedError: "error getting unlock key", - }, - { - name: 
"swarm-no-unlock-key-failed", - swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { - return types.SwarmUnlockKeyResponse{ - UnlockKey: "", - }, nil - }, - expectedError: "no unlock key is set", - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newUnlockKeyCommand( - test.NewFakeCli(&fakeClient{ - swarmInspectFunc: tc.swarmInspectFunc, - swarmUpdateFunc: tc.swarmUpdateFunc, - swarmGetUnlockKeyFunc: tc.swarmGetUnlockKeyFunc, - }, buf)) - cmd.SetArgs(tc.args) - for key, value := range tc.flags { - cmd.Flags().Set(key, value) - } - cmd.SetOutput(ioutil.Discard) - assert.Error(t, cmd.Execute(), tc.expectedError) - } -} - -func TestSwarmUnlockKey(t *testing.T) { - testCases := []struct { - name string - args []string - flags map[string]string - swarmInspectFunc func() (swarm.Swarm, error) - swarmUpdateFunc func(swarm swarm.Spec, flags swarm.UpdateFlags) error - swarmGetUnlockKeyFunc func() (types.SwarmUnlockKeyResponse, error) - }{ - { - name: "unlock-key", - swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { - return types.SwarmUnlockKeyResponse{ - UnlockKey: "unlock-key", - }, nil - }, - }, - { - name: "unlock-key-quiet", - flags: map[string]string{ - flagQuiet: "true", - }, - swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { - return types.SwarmUnlockKeyResponse{ - UnlockKey: "unlock-key", - }, nil - }, - }, - { - name: "unlock-key-rotate", - flags: map[string]string{ - flagRotate: "true", - }, - swarmInspectFunc: func() (swarm.Swarm, error) { - return *Swarm(Autolock()), nil - }, - swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { - return types.SwarmUnlockKeyResponse{ - UnlockKey: "unlock-key", - }, nil - }, - }, - { - name: "unlock-key-rotate-quiet", - flags: map[string]string{ - flagQuiet: "true", - flagRotate: "true", - }, - swarmInspectFunc: func() (swarm.Swarm, error) { - return *Swarm(Autolock()), nil - }, - swarmGetUnlockKeyFunc: func() 
(types.SwarmUnlockKeyResponse, error) { - return types.SwarmUnlockKeyResponse{ - UnlockKey: "unlock-key", - }, nil - }, - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newUnlockKeyCommand( - test.NewFakeCli(&fakeClient{ - swarmInspectFunc: tc.swarmInspectFunc, - swarmUpdateFunc: tc.swarmUpdateFunc, - swarmGetUnlockKeyFunc: tc.swarmGetUnlockKeyFunc, - }, buf)) - cmd.SetArgs(tc.args) - for key, value := range tc.flags { - cmd.Flags().Set(key, value) - } - assert.NilError(t, cmd.Execute()) - actual := buf.String() - expected := golden.Get(t, []byte(actual), fmt.Sprintf("unlockkeys-%s.golden", tc.name)) - assert.EqualNormalizedString(t, assert.RemoveSpace, actual, string(expected)) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/unlock_test.go b/fn/vendor/github.com/docker/docker/cli/command/swarm/unlock_test.go deleted file mode 100644 index 620fecafe..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/unlock_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package swarm - -import ( - "bytes" - "io/ioutil" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/internal/test" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/pkg/errors" -) - -func TestSwarmUnlockErrors(t *testing.T) { - testCases := []struct { - name string - args []string - input string - swarmUnlockFunc func(req swarm.UnlockRequest) error - infoFunc func() (types.Info, error) - expectedError string - }{ - { - name: "too-many-args", - args: []string{"foo"}, - expectedError: "accepts no argument(s)", - }, - { - name: "is-not-part-of-a-swarm", - infoFunc: func() (types.Info, error) { - return types.Info{ - Swarm: swarm.Info{ - LocalNodeState: swarm.LocalNodeStateInactive, - }, - }, nil - }, - expectedError: "This node is not part of a swarm", - }, - { - name: "is-not-locked", - infoFunc: func() (types.Info, error) { - return types.Info{ - 
Swarm: swarm.Info{ - LocalNodeState: swarm.LocalNodeStateActive, - }, - }, nil - }, - expectedError: "Error: swarm is not locked", - }, - { - name: "unlockrequest-failed", - infoFunc: func() (types.Info, error) { - return types.Info{ - Swarm: swarm.Info{ - LocalNodeState: swarm.LocalNodeStateLocked, - }, - }, nil - }, - swarmUnlockFunc: func(req swarm.UnlockRequest) error { - return errors.Errorf("error unlocking the swarm") - }, - expectedError: "error unlocking the swarm", - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newUnlockCommand( - test.NewFakeCli(&fakeClient{ - infoFunc: tc.infoFunc, - swarmUnlockFunc: tc.swarmUnlockFunc, - }, buf)) - cmd.SetArgs(tc.args) - cmd.SetOutput(ioutil.Discard) - assert.Error(t, cmd.Execute(), tc.expectedError) - } -} - -func TestSwarmUnlock(t *testing.T) { - input := "unlockKey" - buf := new(bytes.Buffer) - dockerCli := test.NewFakeCli(&fakeClient{ - infoFunc: func() (types.Info, error) { - return types.Info{ - Swarm: swarm.Info{ - LocalNodeState: swarm.LocalNodeStateLocked, - }, - }, nil - }, - swarmUnlockFunc: func(req swarm.UnlockRequest) error { - if req.UnlockKey != input { - return errors.Errorf("Invalid unlock key") - } - return nil - }, - }, buf) - dockerCli.SetIn(ioutil.NopCloser(strings.NewReader(input))) - cmd := newUnlockCommand(dockerCli) - assert.NilError(t, cmd.Execute()) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/update.go b/fn/vendor/github.com/docker/docker/cli/command/swarm/update.go deleted file mode 100644 index 1ccd268e7..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/update.go +++ /dev/null @@ -1,72 +0,0 @@ -package swarm - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -func newUpdateCommand(dockerCli command.Cli) 
*cobra.Command { - opts := swarmOptions{} - - cmd := &cobra.Command{ - Use: "update [OPTIONS]", - Short: "Update the swarm", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runUpdate(dockerCli, cmd.Flags(), opts) - }, - PreRunE: func(cmd *cobra.Command, args []string) error { - if cmd.Flags().NFlag() == 0 { - return pflag.ErrHelp - } - return nil - }, - } - - cmd.Flags().BoolVar(&opts.autolock, flagAutolock, false, "Change manager autolocking setting (true|false)") - addSwarmFlags(cmd.Flags(), &opts) - return cmd -} - -func runUpdate(dockerCli command.Cli, flags *pflag.FlagSet, opts swarmOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - var updateFlags swarm.UpdateFlags - - swarmInspect, err := client.SwarmInspect(ctx) - if err != nil { - return err - } - - prevAutoLock := swarmInspect.Spec.EncryptionConfig.AutoLockManagers - - opts.mergeSwarmSpec(&swarmInspect.Spec, flags) - - curAutoLock := swarmInspect.Spec.EncryptionConfig.AutoLockManagers - - err = client.SwarmUpdate(ctx, swarmInspect.Version, swarmInspect.Spec, updateFlags) - if err != nil { - return err - } - - fmt.Fprintln(dockerCli.Out(), "Swarm updated.") - - if curAutoLock && !prevAutoLock { - unlockKeyResp, err := client.SwarmGetUnlockKey(ctx) - if err != nil { - return errors.Wrap(err, "could not fetch unlock key") - } - printUnlockCommand(ctx, dockerCli, unlockKeyResp.UnlockKey) - } - - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/swarm/update_test.go b/fn/vendor/github.com/docker/docker/cli/command/swarm/update_test.go deleted file mode 100644 index 0450c0297..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/swarm/update_test.go +++ /dev/null @@ -1,183 +0,0 @@ -package swarm - -import ( - "bytes" - "fmt" - "io/ioutil" - "testing" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/internal/test" - 
"github.com/pkg/errors" - // Import builders to get the builder function as package function - . "github.com/docker/docker/cli/internal/test/builders" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/docker/docker/pkg/testutil/golden" -) - -func TestSwarmUpdateErrors(t *testing.T) { - testCases := []struct { - name string - args []string - flags map[string]string - swarmInspectFunc func() (swarm.Swarm, error) - swarmUpdateFunc func(swarm swarm.Spec, flags swarm.UpdateFlags) error - swarmGetUnlockKeyFunc func() (types.SwarmUnlockKeyResponse, error) - expectedError string - }{ - { - name: "too-many-args", - args: []string{"foo"}, - expectedError: "accepts no argument(s)", - }, - { - name: "swarm-inspect-error", - flags: map[string]string{ - flagTaskHistoryLimit: "10", - }, - swarmInspectFunc: func() (swarm.Swarm, error) { - return swarm.Swarm{}, errors.Errorf("error inspecting the swarm") - }, - expectedError: "error inspecting the swarm", - }, - { - name: "swarm-update-error", - flags: map[string]string{ - flagTaskHistoryLimit: "10", - }, - swarmUpdateFunc: func(swarm swarm.Spec, flags swarm.UpdateFlags) error { - return errors.Errorf("error updating the swarm") - }, - expectedError: "error updating the swarm", - }, - { - name: "swarm-unlockkey-error", - flags: map[string]string{ - flagAutolock: "true", - }, - swarmInspectFunc: func() (swarm.Swarm, error) { - return *Swarm(), nil - }, - swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { - return types.SwarmUnlockKeyResponse{}, errors.Errorf("error getting unlock key") - }, - expectedError: "error getting unlock key", - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newUpdateCommand( - test.NewFakeCli(&fakeClient{ - swarmInspectFunc: tc.swarmInspectFunc, - swarmUpdateFunc: tc.swarmUpdateFunc, - swarmGetUnlockKeyFunc: tc.swarmGetUnlockKeyFunc, - }, buf)) - cmd.SetArgs(tc.args) - for key, value := range tc.flags { - cmd.Flags().Set(key, value) - } - 
cmd.SetOutput(ioutil.Discard) - assert.Error(t, cmd.Execute(), tc.expectedError) - } -} - -func TestSwarmUpdate(t *testing.T) { - testCases := []struct { - name string - args []string - flags map[string]string - swarmInspectFunc func() (swarm.Swarm, error) - swarmUpdateFunc func(swarm swarm.Spec, flags swarm.UpdateFlags) error - swarmGetUnlockKeyFunc func() (types.SwarmUnlockKeyResponse, error) - }{ - { - name: "noargs", - }, - { - name: "all-flags-quiet", - flags: map[string]string{ - flagTaskHistoryLimit: "10", - flagDispatcherHeartbeat: "10s", - flagCertExpiry: "20s", - flagExternalCA: "protocol=cfssl,url=https://example.com.", - flagMaxSnapshots: "10", - flagSnapshotInterval: "100", - flagAutolock: "true", - flagQuiet: "true", - }, - swarmUpdateFunc: func(swarm swarm.Spec, flags swarm.UpdateFlags) error { - if *swarm.Orchestration.TaskHistoryRetentionLimit != 10 { - return errors.Errorf("historyLimit not correctly set") - } - heartbeatDuration, err := time.ParseDuration("10s") - if err != nil { - return err - } - if swarm.Dispatcher.HeartbeatPeriod != heartbeatDuration { - return errors.Errorf("heartbeatPeriodLimit not correctly set") - } - certExpiryDuration, err := time.ParseDuration("20s") - if err != nil { - return err - } - if swarm.CAConfig.NodeCertExpiry != certExpiryDuration { - return errors.Errorf("certExpiry not correctly set") - } - if len(swarm.CAConfig.ExternalCAs) != 1 { - return errors.Errorf("externalCA not correctly set") - } - if *swarm.Raft.KeepOldSnapshots != 10 { - return errors.Errorf("keepOldSnapshots not correctly set") - } - if swarm.Raft.SnapshotInterval != 100 { - return errors.Errorf("snapshotInterval not correctly set") - } - if !swarm.EncryptionConfig.AutoLockManagers { - return errors.Errorf("autolock not correctly set") - } - return nil - }, - }, - { - name: "autolock-unlock-key", - flags: map[string]string{ - flagTaskHistoryLimit: "10", - flagAutolock: "true", - }, - swarmUpdateFunc: func(swarm swarm.Spec, flags 
swarm.UpdateFlags) error { - if *swarm.Orchestration.TaskHistoryRetentionLimit != 10 { - return errors.Errorf("historyLimit not correctly set") - } - return nil - }, - swarmInspectFunc: func() (swarm.Swarm, error) { - return *Swarm(), nil - }, - swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { - return types.SwarmUnlockKeyResponse{ - UnlockKey: "unlock-key", - }, nil - }, - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newUpdateCommand( - test.NewFakeCli(&fakeClient{ - swarmInspectFunc: tc.swarmInspectFunc, - swarmUpdateFunc: tc.swarmUpdateFunc, - swarmGetUnlockKeyFunc: tc.swarmGetUnlockKeyFunc, - }, buf)) - cmd.SetArgs(tc.args) - for key, value := range tc.flags { - cmd.Flags().Set(key, value) - } - cmd.SetOutput(buf) - assert.NilError(t, cmd.Execute()) - actual := buf.String() - expected := golden.Get(t, []byte(actual), fmt.Sprintf("update-%s.golden", tc.name)) - assert.EqualNormalizedString(t, assert.RemoveSpace, actual, string(expected)) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/system/cmd.go b/fn/vendor/github.com/docker/docker/cli/command/system/cmd.go deleted file mode 100644 index ab3beb895..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/system/cmd.go +++ /dev/null @@ -1,26 +0,0 @@ -package system - -import ( - "github.com/spf13/cobra" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" -) - -// NewSystemCommand returns a cobra command for `system` subcommands -func NewSystemCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "system", - Short: "Manage Docker", - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - } - cmd.AddCommand( - NewEventsCommand(dockerCli), - NewInfoCommand(dockerCli), - NewDiskUsageCommand(dockerCli), - NewPruneCommand(dockerCli), - ) - - return cmd -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/system/df.go b/fn/vendor/github.com/docker/docker/cli/command/system/df.go 
deleted file mode 100644 index 9f712484a..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/system/df.go +++ /dev/null @@ -1,56 +0,0 @@ -package system - -import ( - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type diskUsageOptions struct { - verbose bool -} - -// NewDiskUsageCommand creates a new cobra.Command for `docker df` -func NewDiskUsageCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts diskUsageOptions - - cmd := &cobra.Command{ - Use: "df [OPTIONS]", - Short: "Show docker disk usage", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runDiskUsage(dockerCli, opts) - }, - Tags: map[string]string{"version": "1.25"}, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.verbose, "verbose", "v", false, "Show detailed information on space usage") - - return cmd -} - -func runDiskUsage(dockerCli *command.DockerCli, opts diskUsageOptions) error { - du, err := dockerCli.Client().DiskUsage(context.Background()) - if err != nil { - return err - } - - duCtx := formatter.DiskUsageContext{ - Context: formatter.Context{ - Output: dockerCli.Out(), - }, - LayersSize: du.LayersSize, - Images: du.Images, - Containers: du.Containers, - Volumes: du.Volumes, - Verbose: opts.verbose, - } - - duCtx.Write() - - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/system/events.go b/fn/vendor/github.com/docker/docker/cli/command/system/events.go deleted file mode 100644 index 441ef91d3..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/system/events.go +++ /dev/null @@ -1,140 +0,0 @@ -package system - -import ( - "fmt" - "io" - "io/ioutil" - "sort" - "strings" - "text/template" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - eventtypes "github.com/docker/docker/api/types/events" - "github.com/docker/docker/cli" - 
"github.com/docker/docker/cli/command" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/jsonlog" - "github.com/docker/docker/pkg/templates" - "github.com/spf13/cobra" -) - -type eventsOptions struct { - since string - until string - filter opts.FilterOpt - format string -} - -// NewEventsCommand creates a new cobra.Command for `docker events` -func NewEventsCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := eventsOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "events [OPTIONS]", - Short: "Get real time events from the server", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runEvents(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.StringVar(&opts.since, "since", "", "Show all events created since timestamp") - flags.StringVar(&opts.until, "until", "", "Stream events until this timestamp") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - flags.StringVar(&opts.format, "format", "", "Format the output using the given Go template") - - return cmd -} - -func runEvents(dockerCli *command.DockerCli, opts *eventsOptions) error { - tmpl, err := makeTemplate(opts.format) - if err != nil { - return cli.StatusError{ - StatusCode: 64, - Status: "Error parsing format: " + err.Error()} - } - options := types.EventsOptions{ - Since: opts.since, - Until: opts.until, - Filters: opts.filter.Value(), - } - - ctx, cancel := context.WithCancel(context.Background()) - events, errs := dockerCli.Client().Events(ctx, options) - defer cancel() - - out := dockerCli.Out() - - for { - select { - case event := <-events: - if err := handleEvent(out, event, tmpl); err != nil { - return err - } - case err := <-errs: - if err == io.EOF { - return nil - } - return err - } - } -} - -func handleEvent(out io.Writer, event eventtypes.Message, tmpl *template.Template) error { - if tmpl == nil { - return prettyPrintEvent(out, event) - } - - return formatEvent(out, 
event, tmpl) -} - -func makeTemplate(format string) (*template.Template, error) { - if format == "" { - return nil, nil - } - tmpl, err := templates.Parse(format) - if err != nil { - return tmpl, err - } - // we execute the template for an empty message, so as to validate - // a bad template like "{{.badFieldString}}" - return tmpl, tmpl.Execute(ioutil.Discard, &eventtypes.Message{}) -} - -// prettyPrintEvent prints all types of event information. -// Each output includes the event type, actor id, name and action. -// Actor attributes are printed at the end if the actor has any. -func prettyPrintEvent(out io.Writer, event eventtypes.Message) error { - if event.TimeNano != 0 { - fmt.Fprintf(out, "%s ", time.Unix(0, event.TimeNano).Format(jsonlog.RFC3339NanoFixed)) - } else if event.Time != 0 { - fmt.Fprintf(out, "%s ", time.Unix(event.Time, 0).Format(jsonlog.RFC3339NanoFixed)) - } - - fmt.Fprintf(out, "%s %s %s", event.Type, event.Action, event.Actor.ID) - - if len(event.Actor.Attributes) > 0 { - var attrs []string - var keys []string - for k := range event.Actor.Attributes { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - v := event.Actor.Attributes[k] - attrs = append(attrs, fmt.Sprintf("%s=%s", k, v)) - } - fmt.Fprintf(out, " (%s)", strings.Join(attrs, ", ")) - } - fmt.Fprint(out, "\n") - return nil -} - -func formatEvent(out io.Writer, event eventtypes.Message, tmpl *template.Template) error { - defer out.Write([]byte{'\n'}) - return tmpl.Execute(out, event) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/system/info.go b/fn/vendor/github.com/docker/docker/cli/command/system/info.go deleted file mode 100644 index 8498dd8c5..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/system/info.go +++ /dev/null @@ -1,365 +0,0 @@ -package system - -import ( - "fmt" - "sort" - "strings" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - 
"github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/debug" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/templates" - "github.com/docker/go-units" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type infoOptions struct { - format string -} - -// NewInfoCommand creates a new cobra.Command for `docker info` -func NewInfoCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts infoOptions - - cmd := &cobra.Command{ - Use: "info [OPTIONS]", - Short: "Display system-wide information", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runInfo(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - - return cmd -} - -func runInfo(dockerCli *command.DockerCli, opts *infoOptions) error { - ctx := context.Background() - info, err := dockerCli.Client().Info(ctx) - if err != nil { - return err - } - if opts.format == "" { - return prettyPrintInfo(dockerCli, info) - } - return formatInfo(dockerCli, info, opts.format) -} - -func prettyPrintInfo(dockerCli *command.DockerCli, info types.Info) error { - fmt.Fprintf(dockerCli.Out(), "Containers: %d\n", info.Containers) - fmt.Fprintf(dockerCli.Out(), " Running: %d\n", info.ContainersRunning) - fmt.Fprintf(dockerCli.Out(), " Paused: %d\n", info.ContainersPaused) - fmt.Fprintf(dockerCli.Out(), " Stopped: %d\n", info.ContainersStopped) - fmt.Fprintf(dockerCli.Out(), "Images: %d\n", info.Images) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Server Version: %s\n", info.ServerVersion) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Storage Driver: %s\n", info.Driver) - if info.DriverStatus != nil { - for _, pair := range info.DriverStatus { - fmt.Fprintf(dockerCli.Out(), " %s: %s\n", pair[0], pair[1]) - } - - } - if info.SystemStatus != nil { - for _, pair := range info.SystemStatus { - fmt.Fprintf(dockerCli.Out(), "%s: %s\n", pair[0], 
pair[1]) - } - } - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Logging Driver: %s\n", info.LoggingDriver) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Cgroup Driver: %s\n", info.CgroupDriver) - - fmt.Fprintf(dockerCli.Out(), "Plugins: \n") - fmt.Fprintf(dockerCli.Out(), " Volume:") - fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Volume, " ")) - fmt.Fprintf(dockerCli.Out(), "\n") - fmt.Fprintf(dockerCli.Out(), " Network:") - fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Network, " ")) - fmt.Fprintf(dockerCli.Out(), "\n") - - if len(info.Plugins.Authorization) != 0 { - fmt.Fprintf(dockerCli.Out(), " Authorization:") - fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Authorization, " ")) - fmt.Fprintf(dockerCli.Out(), "\n") - } - - fmt.Fprintf(dockerCli.Out(), "Swarm: %v\n", info.Swarm.LocalNodeState) - if info.Swarm.LocalNodeState != swarm.LocalNodeStateInactive && info.Swarm.LocalNodeState != swarm.LocalNodeStateLocked { - fmt.Fprintf(dockerCli.Out(), " NodeID: %s\n", info.Swarm.NodeID) - if info.Swarm.Error != "" { - fmt.Fprintf(dockerCli.Out(), " Error: %v\n", info.Swarm.Error) - } - fmt.Fprintf(dockerCli.Out(), " Is Manager: %v\n", info.Swarm.ControlAvailable) - if info.Swarm.Cluster != nil && info.Swarm.ControlAvailable && info.Swarm.Error == "" && info.Swarm.LocalNodeState != swarm.LocalNodeStateError { - fmt.Fprintf(dockerCli.Out(), " ClusterID: %s\n", info.Swarm.Cluster.ID) - fmt.Fprintf(dockerCli.Out(), " Managers: %d\n", info.Swarm.Managers) - fmt.Fprintf(dockerCli.Out(), " Nodes: %d\n", info.Swarm.Nodes) - fmt.Fprintf(dockerCli.Out(), " Orchestration:\n") - taskHistoryRetentionLimit := int64(0) - if info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit != nil { - taskHistoryRetentionLimit = *info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit - } - fmt.Fprintf(dockerCli.Out(), " Task History Retention Limit: %d\n", taskHistoryRetentionLimit) - fmt.Fprintf(dockerCli.Out(), " Raft:\n") - 
fmt.Fprintf(dockerCli.Out(), " Snapshot Interval: %d\n", info.Swarm.Cluster.Spec.Raft.SnapshotInterval) - if info.Swarm.Cluster.Spec.Raft.KeepOldSnapshots != nil { - fmt.Fprintf(dockerCli.Out(), " Number of Old Snapshots to Retain: %d\n", *info.Swarm.Cluster.Spec.Raft.KeepOldSnapshots) - } - fmt.Fprintf(dockerCli.Out(), " Heartbeat Tick: %d\n", info.Swarm.Cluster.Spec.Raft.HeartbeatTick) - fmt.Fprintf(dockerCli.Out(), " Election Tick: %d\n", info.Swarm.Cluster.Spec.Raft.ElectionTick) - fmt.Fprintf(dockerCli.Out(), " Dispatcher:\n") - fmt.Fprintf(dockerCli.Out(), " Heartbeat Period: %s\n", units.HumanDuration(time.Duration(info.Swarm.Cluster.Spec.Dispatcher.HeartbeatPeriod))) - fmt.Fprintf(dockerCli.Out(), " CA Configuration:\n") - fmt.Fprintf(dockerCli.Out(), " Expiry Duration: %s\n", units.HumanDuration(info.Swarm.Cluster.Spec.CAConfig.NodeCertExpiry)) - if len(info.Swarm.Cluster.Spec.CAConfig.ExternalCAs) > 0 { - fmt.Fprintf(dockerCli.Out(), " External CAs:\n") - for _, entry := range info.Swarm.Cluster.Spec.CAConfig.ExternalCAs { - fmt.Fprintf(dockerCli.Out(), " %s: %s\n", entry.Protocol, entry.URL) - } - } - } - fmt.Fprintf(dockerCli.Out(), " Node Address: %s\n", info.Swarm.NodeAddr) - managers := []string{} - for _, entry := range info.Swarm.RemoteManagers { - managers = append(managers, entry.Addr) - } - if len(managers) > 0 { - sort.Strings(managers) - fmt.Fprintf(dockerCli.Out(), " Manager Addresses:\n") - for _, entry := range managers { - fmt.Fprintf(dockerCli.Out(), " %s\n", entry) - } - } - } - - if len(info.Runtimes) > 0 { - fmt.Fprintf(dockerCli.Out(), "Runtimes:") - for name := range info.Runtimes { - fmt.Fprintf(dockerCli.Out(), " %s", name) - } - fmt.Fprint(dockerCli.Out(), "\n") - fmt.Fprintf(dockerCli.Out(), "Default Runtime: %s\n", info.DefaultRuntime) - } - - if info.OSType == "linux" { - fmt.Fprintf(dockerCli.Out(), "Init Binary: %v\n", info.InitBinary) - - for _, ci := range []struct { - Name string - Commit types.Commit - }{ - {"containerd", 
info.ContainerdCommit}, - {"runc", info.RuncCommit}, - {"init", info.InitCommit}, - } { - fmt.Fprintf(dockerCli.Out(), "%s version: %s", ci.Name, ci.Commit.ID) - if ci.Commit.ID != ci.Commit.Expected { - fmt.Fprintf(dockerCli.Out(), " (expected: %s)", ci.Commit.Expected) - } - fmt.Fprintf(dockerCli.Out(), "\n") - } - if len(info.SecurityOptions) != 0 { - kvs, err := types.DecodeSecurityOptions(info.SecurityOptions) - if err != nil { - return err - } - fmt.Fprintf(dockerCli.Out(), "Security Options:\n") - for _, so := range kvs { - fmt.Fprintf(dockerCli.Out(), " %s\n", so.Name) - for _, o := range so.Options { - switch o.Key { - case "profile": - if o.Value != "default" { - fmt.Fprintf(dockerCli.Err(), " WARNING: You're not using the default seccomp profile\n") - } - fmt.Fprintf(dockerCli.Out(), " Profile: %s\n", o.Value) - } - } - } - } - } - - // Isolation only has meaning on a Windows daemon. - if info.OSType == "windows" { - fmt.Fprintf(dockerCli.Out(), "Default Isolation: %v\n", info.Isolation) - } - - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Kernel Version: %s\n", info.KernelVersion) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Operating System: %s\n", info.OperatingSystem) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "OSType: %s\n", info.OSType) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Architecture: %s\n", info.Architecture) - fmt.Fprintf(dockerCli.Out(), "CPUs: %d\n", info.NCPU) - fmt.Fprintf(dockerCli.Out(), "Total Memory: %s\n", units.BytesSize(float64(info.MemTotal))) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Name: %s\n", info.Name) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "ID: %s\n", info.ID) - fmt.Fprintf(dockerCli.Out(), "Docker Root Dir: %s\n", info.DockerRootDir) - fmt.Fprintf(dockerCli.Out(), "Debug Mode (client): %v\n", debug.IsEnabled()) - fmt.Fprintf(dockerCli.Out(), "Debug Mode (server): %v\n", info.Debug) - - if info.Debug { - fmt.Fprintf(dockerCli.Out(), " File Descriptors: %d\n", info.NFd) - fmt.Fprintf(dockerCli.Out(), " 
Goroutines: %d\n", info.NGoroutines) - fmt.Fprintf(dockerCli.Out(), " System Time: %s\n", info.SystemTime) - fmt.Fprintf(dockerCli.Out(), " EventsListeners: %d\n", info.NEventsListener) - } - - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Http Proxy: %s\n", info.HTTPProxy) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Https Proxy: %s\n", info.HTTPSProxy) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "No Proxy: %s\n", info.NoProxy) - - if info.IndexServerAddress != "" { - u := dockerCli.ConfigFile().AuthConfigs[info.IndexServerAddress].Username - if len(u) > 0 { - fmt.Fprintf(dockerCli.Out(), "Username: %v\n", u) - } - fmt.Fprintf(dockerCli.Out(), "Registry: %v\n", info.IndexServerAddress) - } - - if info.Labels != nil { - fmt.Fprintln(dockerCli.Out(), "Labels:") - for _, attribute := range info.Labels { - fmt.Fprintf(dockerCli.Out(), " %s\n", attribute) - } - // TODO: Engine labels with duplicate keys has been deprecated in 1.13 and will be error out - // after 3 release cycles (17.12). For now, a WARNING will be generated. The following will - // be removed eventually. 
- labelMap := map[string]string{} - for _, label := range info.Labels { - stringSlice := strings.SplitN(label, "=", 2) - if len(stringSlice) > 1 { - // If there is a conflict we will throw out a warning - if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] { - fmt.Fprintln(dockerCli.Err(), "WARNING: labels with duplicate keys and conflicting values have been deprecated") - break - } - labelMap[stringSlice[0]] = stringSlice[1] - } - } - } - - fmt.Fprintf(dockerCli.Out(), "Experimental: %v\n", info.ExperimentalBuild) - if info.ClusterStore != "" { - fmt.Fprintf(dockerCli.Out(), "Cluster Store: %s\n", info.ClusterStore) - } - - if info.ClusterAdvertise != "" { - fmt.Fprintf(dockerCli.Out(), "Cluster Advertise: %s\n", info.ClusterAdvertise) - } - - if info.RegistryConfig != nil && (len(info.RegistryConfig.InsecureRegistryCIDRs) > 0 || len(info.RegistryConfig.IndexConfigs) > 0) { - fmt.Fprintln(dockerCli.Out(), "Insecure Registries:") - for _, registry := range info.RegistryConfig.IndexConfigs { - if registry.Secure == false { - fmt.Fprintf(dockerCli.Out(), " %s\n", registry.Name) - } - } - - for _, registry := range info.RegistryConfig.InsecureRegistryCIDRs { - mask, _ := registry.Mask.Size() - fmt.Fprintf(dockerCli.Out(), " %s/%d\n", registry.IP.String(), mask) - } - } - - if info.RegistryConfig != nil && len(info.RegistryConfig.Mirrors) > 0 { - fmt.Fprintln(dockerCli.Out(), "Registry Mirrors:") - for _, mirror := range info.RegistryConfig.Mirrors { - fmt.Fprintf(dockerCli.Out(), " %s\n", mirror) - } - } - - fmt.Fprintf(dockerCli.Out(), "Live Restore Enabled: %v\n\n", info.LiveRestoreEnabled) - - // Only output these warnings if the server does not support these features - if info.OSType != "windows" { - printStorageDriverWarnings(dockerCli, info) - - if !info.MemoryLimit { - fmt.Fprintln(dockerCli.Err(), "WARNING: No memory limit support") - } - if !info.SwapLimit { - fmt.Fprintln(dockerCli.Err(), "WARNING: No swap limit support") - } - if 
!info.KernelMemory { - fmt.Fprintln(dockerCli.Err(), "WARNING: No kernel memory limit support") - } - if !info.OomKillDisable { - fmt.Fprintln(dockerCli.Err(), "WARNING: No oom kill disable support") - } - if !info.CPUCfsQuota { - fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs quota support") - } - if !info.CPUCfsPeriod { - fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs period support") - } - if !info.CPUShares { - fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu shares support") - } - if !info.CPUSet { - fmt.Fprintln(dockerCli.Err(), "WARNING: No cpuset support") - } - if !info.IPv4Forwarding { - fmt.Fprintln(dockerCli.Err(), "WARNING: IPv4 forwarding is disabled") - } - if !info.BridgeNfIptables { - fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-iptables is disabled") - } - if !info.BridgeNfIP6tables { - fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-ip6tables is disabled") - } - } - - return nil -} - -func printStorageDriverWarnings(dockerCli *command.DockerCli, info types.Info) { - if info.DriverStatus == nil { - return - } - - for _, pair := range info.DriverStatus { - if pair[0] == "Data loop file" { - fmt.Fprintf(dockerCli.Err(), "WARNING: %s: usage of loopback devices is strongly discouraged for production use.\n Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.\n", info.Driver) - } - if pair[0] == "Supports d_type" && pair[1] == "false" { - backingFs := getBackingFs(info) - - msg := fmt.Sprintf("WARNING: %s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.\n", info.Driver, backingFs) - if backingFs == "xfs" { - msg += " Reformat the filesystem with ftype=1 to enable d_type support.\n" - } - msg += " Running without d_type support will not be supported in future releases." 
- fmt.Fprintln(dockerCli.Err(), msg) - } - } -} - -func getBackingFs(info types.Info) string { - if info.DriverStatus == nil { - return "" - } - - for _, pair := range info.DriverStatus { - if pair[0] == "Backing Filesystem" { - return pair[1] - } - } - return "" -} - -func formatInfo(dockerCli *command.DockerCli, info types.Info, format string) error { - tmpl, err := templates.Parse(format) - if err != nil { - return cli.StatusError{StatusCode: 64, - Status: "Template parsing error: " + err.Error()} - } - err = tmpl.Execute(dockerCli.Out(), info) - dockerCli.Out().Write([]byte{'\n'}) - return err -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/system/inspect.go b/fn/vendor/github.com/docker/docker/cli/command/system/inspect.go deleted file mode 100644 index ad23d35a0..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/system/inspect.go +++ /dev/null @@ -1,216 +0,0 @@ -package system - -import ( - "fmt" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/inspect" - apiclient "github.com/docker/docker/client" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type inspectOptions struct { - format string - inspectType string - size bool - ids []string -} - -// NewInspectCommand creates a new cobra.Command for `docker inspect` -func NewInspectCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] NAME|ID [NAME|ID...]", - Short: "Return low-level information on Docker objects", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.ids = args - return runInspect(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - flags.StringVar(&opts.inspectType, "type", "", "Return JSON for 
specified type") - flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes if the type is container") - - return cmd -} - -func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { - var elementSearcher inspect.GetRefFunc - switch opts.inspectType { - case "", "container", "image", "node", "network", "service", "volume", "task", "plugin", "secret": - elementSearcher = inspectAll(context.Background(), dockerCli, opts.size, opts.inspectType) - default: - return errors.Errorf("%q is not a valid value for --type", opts.inspectType) - } - return inspect.Inspect(dockerCli.Out(), opts.ids, opts.format, elementSearcher) -} - -func inspectContainers(ctx context.Context, dockerCli *command.DockerCli, getSize bool) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - return dockerCli.Client().ContainerInspectWithRaw(ctx, ref, getSize) - } -} - -func inspectImages(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - return dockerCli.Client().ImageInspectWithRaw(ctx, ref) - } -} - -func inspectNetwork(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - return dockerCli.Client().NetworkInspectWithRaw(ctx, ref, false) - } -} - -func inspectNode(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - return dockerCli.Client().NodeInspectWithRaw(ctx, ref) - } -} - -func inspectService(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - // Service inspect shows defaults values in empty fields. 
- return dockerCli.Client().ServiceInspectWithRaw(ctx, ref, types.ServiceInspectOptions{InsertDefaults: true}) - } -} - -func inspectTasks(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - return dockerCli.Client().TaskInspectWithRaw(ctx, ref) - } -} - -func inspectVolume(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - return dockerCli.Client().VolumeInspectWithRaw(ctx, ref) - } -} - -func inspectPlugin(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - return dockerCli.Client().PluginInspectWithRaw(ctx, ref) - } -} - -func inspectSecret(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - return dockerCli.Client().SecretInspectWithRaw(ctx, ref) - } -} - -func inspectAll(ctx context.Context, dockerCli *command.DockerCli, getSize bool, typeConstraint string) inspect.GetRefFunc { - var inspectAutodetect = []struct { - objectType string - isSizeSupported bool - isSwarmObject bool - objectInspector func(string) (interface{}, []byte, error) - }{ - { - objectType: "container", - isSizeSupported: true, - objectInspector: inspectContainers(ctx, dockerCli, getSize), - }, - { - objectType: "image", - objectInspector: inspectImages(ctx, dockerCli), - }, - { - objectType: "network", - objectInspector: inspectNetwork(ctx, dockerCli), - }, - { - objectType: "volume", - objectInspector: inspectVolume(ctx, dockerCli), - }, - { - objectType: "service", - isSwarmObject: true, - objectInspector: inspectService(ctx, dockerCli), - }, - { - objectType: "task", - isSwarmObject: true, - objectInspector: inspectTasks(ctx, dockerCli), - }, - { - objectType: "node", - isSwarmObject: true, - objectInspector: inspectNode(ctx, dockerCli), - }, - { - objectType: "plugin", - 
objectInspector: inspectPlugin(ctx, dockerCli), - }, - { - objectType: "secret", - isSwarmObject: true, - objectInspector: inspectSecret(ctx, dockerCli), - }, - } - - // isSwarmManager does an Info API call to verify that the daemon is - // a swarm manager. - isSwarmManager := func() bool { - info, err := dockerCli.Client().Info(ctx) - if err != nil { - fmt.Fprintln(dockerCli.Err(), err) - return false - } - return info.Swarm.ControlAvailable - } - - isErrNotSupported := func(err error) bool { - return strings.Contains(err.Error(), "not supported") - } - - return func(ref string) (interface{}, []byte, error) { - const ( - swarmSupportUnknown = iota - swarmSupported - swarmUnsupported - ) - - isSwarmSupported := swarmSupportUnknown - - for _, inspectData := range inspectAutodetect { - if typeConstraint != "" && inspectData.objectType != typeConstraint { - continue - } - if typeConstraint == "" && inspectData.isSwarmObject { - if isSwarmSupported == swarmSupportUnknown { - if isSwarmManager() { - isSwarmSupported = swarmSupported - } else { - isSwarmSupported = swarmUnsupported - } - } - if isSwarmSupported == swarmUnsupported { - continue - } - } - v, raw, err := inspectData.objectInspector(ref) - if err != nil { - if typeConstraint == "" && (apiclient.IsErrNotFound(err) || isErrNotSupported(err)) { - continue - } - return v, raw, err - } - if getSize && !inspectData.isSizeSupported { - fmt.Fprintf(dockerCli.Err(), "WARNING: --size ignored for %s\n", inspectData.objectType) - } - return v, raw, err - } - return nil, nil, errors.Errorf("Error: No such object: %s", ref) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/system/prune.go b/fn/vendor/github.com/docker/docker/cli/command/system/prune.go deleted file mode 100644 index 46e4316f4..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/system/prune.go +++ /dev/null @@ -1,96 +0,0 @@ -package system - -import ( - "fmt" - - "github.com/docker/docker/cli" - 
"github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/prune" - "github.com/docker/docker/opts" - units "github.com/docker/go-units" - "github.com/spf13/cobra" -) - -type pruneOptions struct { - force bool - all bool - filter opts.FilterOpt -} - -// NewPruneCommand creates a new cobra.Command for `docker prune` -func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := pruneOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "prune [OPTIONS]", - Short: "Remove unused data", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runPrune(dockerCli, opts) - }, - Tags: map[string]string{"version": "1.25"}, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation") - flags.BoolVarP(&opts.all, "all", "a", false, "Remove all unused images not just dangling ones") - flags.Var(&opts.filter, "filter", "Provide filter values (e.g. 'until=')") - - return cmd -} - -const ( - warning = `WARNING! 
This will remove: - - all stopped containers - - all volumes not used by at least one container - - all networks not used by at least one container - %s -Are you sure you want to continue?` - - danglingImageDesc = "- all dangling images" - allImageDesc = `- all images without at least one container associated to them` -) - -func runPrune(dockerCli *command.DockerCli, options pruneOptions) error { - var message string - - if options.all { - message = fmt.Sprintf(warning, allImageDesc) - } else { - message = fmt.Sprintf(warning, danglingImageDesc) - } - - if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), message) { - return nil - } - - var spaceReclaimed uint64 - - for _, pruneFn := range []func(dockerCli *command.DockerCli, filter opts.FilterOpt) (uint64, string, error){ - prune.RunContainerPrune, - prune.RunVolumePrune, - prune.RunNetworkPrune, - } { - spc, output, err := pruneFn(dockerCli, options.filter) - if err != nil { - return err - } - spaceReclaimed += spc - if output != "" { - fmt.Fprintln(dockerCli.Out(), output) - } - } - - spc, output, err := prune.RunImagePrune(dockerCli, options.all, options.filter) - if err != nil { - return err - } - if spc > 0 { - spaceReclaimed += spc - fmt.Fprintln(dockerCli.Out(), output) - } - - fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) - - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/system/version.go b/fn/vendor/github.com/docker/docker/cli/command/system/version.go deleted file mode 100644 index 468db7d03..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/system/version.go +++ /dev/null @@ -1,131 +0,0 @@ -package system - -import ( - "runtime" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/pkg/templates" - 
"github.com/spf13/cobra" -) - -var versionTemplate = `Client: - Version: {{.Client.Version}} - API version: {{.Client.APIVersion}}{{if ne .Client.APIVersion .Client.DefaultAPIVersion}} (downgraded from {{.Client.DefaultAPIVersion}}){{end}} - Go version: {{.Client.GoVersion}} - Git commit: {{.Client.GitCommit}} - Built: {{.Client.BuildTime}} - OS/Arch: {{.Client.Os}}/{{.Client.Arch}}{{if .ServerOK}} - -Server: - Version: {{.Server.Version}} - API version: {{.Server.APIVersion}} (minimum version {{.Server.MinAPIVersion}}) - Go version: {{.Server.GoVersion}} - Git commit: {{.Server.GitCommit}} - Built: {{.Server.BuildTime}} - OS/Arch: {{.Server.Os}}/{{.Server.Arch}} - Experimental: {{.Server.Experimental}}{{end}}` - -type versionOptions struct { - format string -} - -// versionInfo contains version information of both the Client, and Server -type versionInfo struct { - Client clientVersion - Server *types.Version -} - -type clientVersion struct { - Version string - APIVersion string `json:"ApiVersion"` - DefaultAPIVersion string `json:"DefaultAPIVersion,omitempty"` - GitCommit string - GoVersion string - Os string - Arch string - BuildTime string `json:",omitempty"` -} - -// ServerOK returns true when the client could connect to the docker server -// and parse the information received. It returns false otherwise. 
-func (v versionInfo) ServerOK() bool { - return v.Server != nil -} - -// NewVersionCommand creates a new cobra.Command for `docker version` -func NewVersionCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts versionOptions - - cmd := &cobra.Command{ - Use: "version [OPTIONS]", - Short: "Show the Docker version information", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runVersion(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - - return cmd -} - -func runVersion(dockerCli *command.DockerCli, opts *versionOptions) error { - ctx := context.Background() - - templateFormat := versionTemplate - if opts.format != "" { - templateFormat = opts.format - } - - tmpl, err := templates.Parse(templateFormat) - if err != nil { - return cli.StatusError{StatusCode: 64, - Status: "Template parsing error: " + err.Error()} - } - - vd := versionInfo{ - Client: clientVersion{ - Version: dockerversion.Version, - APIVersion: dockerCli.Client().ClientVersion(), - DefaultAPIVersion: dockerCli.DefaultVersion(), - GoVersion: runtime.Version(), - GitCommit: dockerversion.GitCommit, - BuildTime: dockerversion.BuildTime, - Os: runtime.GOOS, - Arch: runtime.GOARCH, - }, - } - - serverVersion, err := dockerCli.Client().ServerVersion(ctx) - if err == nil { - vd.Server = &serverVersion - } - - // first we need to make BuildTime more human friendly - t, errTime := time.Parse(time.RFC3339Nano, vd.Client.BuildTime) - if errTime == nil { - vd.Client.BuildTime = t.Format(time.ANSIC) - } - - if vd.ServerOK() { - t, errTime = time.Parse(time.RFC3339Nano, vd.Server.BuildTime) - if errTime == nil { - vd.Server.BuildTime = t.Format(time.ANSIC) - } - } - - if err2 := tmpl.Execute(dockerCli.Out(), vd); err2 != nil && err == nil { - err = err2 - } - dockerCli.Out().Write([]byte{'\n'}) - return err -} diff --git 
a/fn/vendor/github.com/docker/docker/cli/command/task/print.go b/fn/vendor/github.com/docker/docker/cli/command/task/print.go deleted file mode 100644 index 3df3b2985..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/task/print.go +++ /dev/null @@ -1,84 +0,0 @@ -package task - -import ( - "fmt" - "sort" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - "github.com/docker/docker/cli/command/idresolver" -) - -type tasksBySlot []swarm.Task - -func (t tasksBySlot) Len() int { - return len(t) -} - -func (t tasksBySlot) Swap(i, j int) { - t[i], t[j] = t[j], t[i] -} - -func (t tasksBySlot) Less(i, j int) bool { - // Sort by slot. - if t[i].Slot != t[j].Slot { - return t[i].Slot < t[j].Slot - } - - // If same slot, sort by most recent. - return t[j].Meta.CreatedAt.Before(t[i].CreatedAt) -} - -// Print task information in a format. -// Besides this, command `docker node ps ` -// and `docker stack ps` will call this, too. 
-func Print(dockerCli command.Cli, ctx context.Context, tasks []swarm.Task, resolver *idresolver.IDResolver, trunc, quiet bool, format string) error { - sort.Stable(tasksBySlot(tasks)) - - names := map[string]string{} - nodes := map[string]string{} - - tasksCtx := formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewTaskFormat(format, quiet), - Trunc: trunc, - } - - prevName := "" - for _, task := range tasks { - serviceName, err := resolver.Resolve(ctx, swarm.Service{}, task.ServiceID) - if err != nil { - return err - } - - nodeValue, err := resolver.Resolve(ctx, swarm.Node{}, task.NodeID) - if err != nil { - return err - } - - name := "" - if task.Slot != 0 { - name = fmt.Sprintf("%v.%v", serviceName, task.Slot) - } else { - name = fmt.Sprintf("%v.%v", serviceName, task.NodeID) - } - - // Indent the name if necessary - indentedName := name - if name == prevName { - indentedName = fmt.Sprintf(" \\_ %s", indentedName) - } - prevName = name - - names[task.ID] = name - if tasksCtx.Format.IsTable() { - names[task.ID] = indentedName - } - nodes[task.ID] = nodeValue - } - - return formatter.TaskWrite(tasksCtx, tasks, names, nodes) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/trust.go b/fn/vendor/github.com/docker/docker/cli/command/trust.go deleted file mode 100644 index c0742bc5b..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/trust.go +++ /dev/null @@ -1,43 +0,0 @@ -package command - -import ( - "os" - "strconv" - - "github.com/spf13/pflag" -) - -var ( - // TODO: make this not global - untrusted bool -) - -// AddTrustVerificationFlags adds content trust flags to the provided flagset -func AddTrustVerificationFlags(fs *pflag.FlagSet) { - trusted := getDefaultTrustState() - fs.BoolVar(&untrusted, "disable-content-trust", !trusted, "Skip image verification") -} - -// AddTrustSigningFlags adds "signing" flags to the provided flagset -func AddTrustSigningFlags(fs *pflag.FlagSet) { - trusted := getDefaultTrustState() - 
fs.BoolVar(&untrusted, "disable-content-trust", !trusted, "Skip image signing") -} - -// getDefaultTrustState returns true if content trust is enabled through the $DOCKER_CONTENT_TRUST environment variable. -func getDefaultTrustState() bool { - var trusted bool - if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" { - if t, err := strconv.ParseBool(e); t || err != nil { - // treat any other value as true - trusted = true - } - } - return trusted -} - -// IsTrusted returns true if content trust is enabled, either through the $DOCKER_CONTENT_TRUST environment variable, -// or through `--disabled-content-trust=false` on a command. -func IsTrusted() bool { - return !untrusted -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/utils.go b/fn/vendor/github.com/docker/docker/cli/command/utils.go deleted file mode 100644 index 853fe11c7..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/utils.go +++ /dev/null @@ -1,119 +0,0 @@ -package command - -import ( - "bufio" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/pkg/system" -) - -// CopyToFile writes the content of the reader to the specified file -func CopyToFile(outfile string, r io.Reader) error { - // We use sequential file access here to avoid depleting the standby list - // on Windows. 
On Linux, this is a call directly to ioutil.TempFile - tmpFile, err := system.TempFileSequential(filepath.Dir(outfile), ".docker_temp_") - if err != nil { - return err - } - - tmpPath := tmpFile.Name() - - _, err = io.Copy(tmpFile, r) - tmpFile.Close() - - if err != nil { - os.Remove(tmpPath) - return err - } - - if err = os.Rename(tmpPath, outfile); err != nil { - os.Remove(tmpPath) - return err - } - - return nil -} - -// capitalizeFirst capitalizes the first character of string -func capitalizeFirst(s string) string { - switch l := len(s); l { - case 0: - return s - case 1: - return strings.ToLower(s) - default: - return strings.ToUpper(string(s[0])) + strings.ToLower(s[1:]) - } -} - -// PrettyPrint outputs arbitrary data for human formatted output by uppercasing the first letter. -func PrettyPrint(i interface{}) string { - switch t := i.(type) { - case nil: - return "None" - case string: - return capitalizeFirst(t) - default: - return capitalizeFirst(fmt.Sprintf("%s", t)) - } -} - -// PromptForConfirmation requests and checks confirmation from user. -// This will display the provided message followed by ' [y/N] '. If -// the user input 'y' or 'Y' it returns true other false. If no -// message is provided "Are you sure you want to proceed? [y/N] " -// will be used instead. -func PromptForConfirmation(ins *InStream, outs *OutStream, message string) bool { - if message == "" { - message = "Are you sure you want to proceed?" - } - message += " [y/N] " - - fmt.Fprintf(outs, message) - - // On Windows, force the use of the regular OS stdin stream. 
- if runtime.GOOS == "windows" { - ins = NewInStream(os.Stdin) - } - - reader := bufio.NewReader(ins) - answer, _, _ := reader.ReadLine() - return strings.ToLower(string(answer)) == "y" -} - -// PruneFilters returns consolidated prune filters obtained from config.json and cli -func PruneFilters(dockerCli Cli, pruneFilters filters.Args) filters.Args { - if dockerCli.ConfigFile() == nil { - return pruneFilters - } - for _, f := range dockerCli.ConfigFile().PruneFilters { - parts := strings.SplitN(f, "=", 2) - if len(parts) != 2 { - continue - } - if parts[0] == "label" { - // CLI label filter supersede config.json. - // If CLI label filter conflict with config.json, - // skip adding label! filter in config.json. - if pruneFilters.Include("label!") && pruneFilters.ExactMatch("label!", parts[1]) { - continue - } - } else if parts[0] == "label!" { - // CLI label! filter supersede config.json. - // If CLI label! filter conflict with config.json, - // skip adding label filter in config.json. - if pruneFilters.Include("label") && pruneFilters.ExactMatch("label", parts[1]) { - continue - } - } - pruneFilters.Add(parts[0], parts[1]) - } - - return pruneFilters -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/volume/client_test.go b/fn/vendor/github.com/docker/docker/cli/command/volume/client_test.go deleted file mode 100644 index c29655cdb..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/volume/client_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package volume - -import ( - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - volumetypes "github.com/docker/docker/api/types/volume" - "github.com/docker/docker/client" - "golang.org/x/net/context" -) - -type fakeClient struct { - client.Client - volumeCreateFunc func(volumetypes.VolumesCreateBody) (types.Volume, error) - volumeInspectFunc func(volumeID string) (types.Volume, error) - volumeListFunc func(filter filters.Args) (volumetypes.VolumesListOKBody, error) - 
volumeRemoveFunc func(volumeID string, force bool) error - volumePruneFunc func(filter filters.Args) (types.VolumesPruneReport, error) -} - -func (c *fakeClient) VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) { - if c.volumeCreateFunc != nil { - return c.volumeCreateFunc(options) - } - return types.Volume{}, nil -} - -func (c *fakeClient) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { - if c.volumeInspectFunc != nil { - return c.volumeInspectFunc(volumeID) - } - return types.Volume{}, nil -} - -func (c *fakeClient) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) { - if c.volumeListFunc != nil { - return c.volumeListFunc(filter) - } - return volumetypes.VolumesListOKBody{}, nil -} - -func (c *fakeClient) VolumesPrune(ctx context.Context, filter filters.Args) (types.VolumesPruneReport, error) { - if c.volumePruneFunc != nil { - return c.volumePruneFunc(filter) - } - return types.VolumesPruneReport{}, nil -} - -func (c *fakeClient) VolumeRemove(ctx context.Context, volumeID string, force bool) error { - if c.volumeRemoveFunc != nil { - return c.volumeRemoveFunc(volumeID, force) - } - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/volume/cmd.go b/fn/vendor/github.com/docker/docker/cli/command/volume/cmd.go deleted file mode 100644 index 9086c9924..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/volume/cmd.go +++ /dev/null @@ -1,26 +0,0 @@ -package volume - -import ( - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -// NewVolumeCommand returns a cobra command for `volume` subcommands -func NewVolumeCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "volume COMMAND", - Short: "Manage volumes", - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - Tags: map[string]string{"version": "1.21"}, - } - cmd.AddCommand( - 
newCreateCommand(dockerCli), - newInspectCommand(dockerCli), - newListCommand(dockerCli), - newRemoveCommand(dockerCli), - NewPruneCommand(dockerCli), - ) - return cmd -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/volume/create.go b/fn/vendor/github.com/docker/docker/cli/command/volume/create.go deleted file mode 100644 index 8392cf002..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/volume/create.go +++ /dev/null @@ -1,70 +0,0 @@ -package volume - -import ( - "fmt" - - volumetypes "github.com/docker/docker/api/types/volume" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type createOptions struct { - name string - driver string - driverOpts opts.MapOpts - labels opts.ListOpts -} - -func newCreateCommand(dockerCli command.Cli) *cobra.Command { - opts := createOptions{ - driverOpts: *opts.NewMapOpts(nil, nil), - labels: opts.NewListOpts(opts.ValidateEnv), - } - - cmd := &cobra.Command{ - Use: "create [OPTIONS] [VOLUME]", - Short: "Create a volume", - Args: cli.RequiresMaxArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - if len(args) == 1 { - if opts.name != "" { - return errors.Errorf("Conflicting options: either specify --name or provide positional arg, not both\n") - } - opts.name = args[0] - } - return runCreate(dockerCli, opts) - }, - } - flags := cmd.Flags() - flags.StringVarP(&opts.driver, "driver", "d", "local", "Specify volume driver name") - flags.StringVar(&opts.name, "name", "", "Specify volume name") - flags.Lookup("name").Hidden = true - flags.VarP(&opts.driverOpts, "opt", "o", "Set driver specific options") - flags.Var(&opts.labels, "label", "Set metadata for a volume") - - return cmd -} - -func runCreate(dockerCli command.Cli, opts createOptions) error { - client := dockerCli.Client() - - volReq := 
volumetypes.VolumesCreateBody{ - Driver: opts.driver, - DriverOpts: opts.driverOpts.GetAll(), - Name: opts.name, - Labels: runconfigopts.ConvertKVStringsToMap(opts.labels.GetAll()), - } - - vol, err := client.VolumeCreate(context.Background(), volReq) - if err != nil { - return err - } - - fmt.Fprintf(dockerCli.Out(), "%s\n", vol.Name) - return nil -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/volume/create_test.go b/fn/vendor/github.com/docker/docker/cli/command/volume/create_test.go deleted file mode 100644 index ccb7ac75b..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/volume/create_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package volume - -import ( - "bytes" - "io/ioutil" - "strings" - "testing" - - "github.com/docker/docker/api/types" - volumetypes "github.com/docker/docker/api/types/volume" - "github.com/docker/docker/cli/internal/test" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/pkg/errors" -) - -func TestVolumeCreateErrors(t *testing.T) { - testCases := []struct { - args []string - flags map[string]string - volumeCreateFunc func(volumetypes.VolumesCreateBody) (types.Volume, error) - expectedError string - }{ - { - args: []string{"volumeName"}, - flags: map[string]string{ - "name": "volumeName", - }, - expectedError: "Conflicting options: either specify --name or provide positional arg, not both", - }, - { - args: []string{"too", "many"}, - expectedError: "requires at most 1 argument(s)", - }, - { - volumeCreateFunc: func(createBody volumetypes.VolumesCreateBody) (types.Volume, error) { - return types.Volume{}, errors.Errorf("error creating volume") - }, - expectedError: "error creating volume", - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newCreateCommand( - test.NewFakeCli(&fakeClient{ - volumeCreateFunc: tc.volumeCreateFunc, - }, buf), - ) - cmd.SetArgs(tc.args) - for key, value := range tc.flags { - cmd.Flags().Set(key, value) - } - cmd.SetOutput(ioutil.Discard) - 
assert.Error(t, cmd.Execute(), tc.expectedError) - } -} - -func TestVolumeCreateWithName(t *testing.T) { - name := "foo" - buf := new(bytes.Buffer) - cli := test.NewFakeCli(&fakeClient{ - volumeCreateFunc: func(body volumetypes.VolumesCreateBody) (types.Volume, error) { - if body.Name != name { - return types.Volume{}, errors.Errorf("expected name %q, got %q", name, body.Name) - } - return types.Volume{ - Name: body.Name, - }, nil - }, - }, buf) - - // Test by flags - cmd := newCreateCommand(cli) - cmd.Flags().Set("name", name) - assert.NilError(t, cmd.Execute()) - assert.Equal(t, strings.TrimSpace(buf.String()), name) - - // Then by args - buf.Reset() - cmd = newCreateCommand(cli) - cmd.SetArgs([]string{name}) - assert.NilError(t, cmd.Execute()) - assert.Equal(t, strings.TrimSpace(buf.String()), name) -} - -func TestVolumeCreateWithFlags(t *testing.T) { - expectedDriver := "foo" - expectedOpts := map[string]string{ - "bar": "1", - "baz": "baz", - } - expectedLabels := map[string]string{ - "lbl1": "v1", - "lbl2": "v2", - } - name := "banana" - - buf := new(bytes.Buffer) - cli := test.NewFakeCli(&fakeClient{ - volumeCreateFunc: func(body volumetypes.VolumesCreateBody) (types.Volume, error) { - if body.Name != "" { - return types.Volume{}, errors.Errorf("expected empty name, got %q", body.Name) - } - if body.Driver != expectedDriver { - return types.Volume{}, errors.Errorf("expected driver %q, got %q", expectedDriver, body.Driver) - } - if !compareMap(body.DriverOpts, expectedOpts) { - return types.Volume{}, errors.Errorf("expected drivers opts %v, got %v", expectedOpts, body.DriverOpts) - } - if !compareMap(body.Labels, expectedLabels) { - return types.Volume{}, errors.Errorf("expected labels %v, got %v", expectedLabels, body.Labels) - } - return types.Volume{ - Name: name, - }, nil - }, - }, buf) - - cmd := newCreateCommand(cli) - cmd.Flags().Set("driver", "foo") - cmd.Flags().Set("opt", "bar=1") - cmd.Flags().Set("opt", "baz=baz") - cmd.Flags().Set("label", 
"lbl1=v1") - cmd.Flags().Set("label", "lbl2=v2") - assert.NilError(t, cmd.Execute()) - assert.Equal(t, strings.TrimSpace(buf.String()), name) -} - -func compareMap(actual map[string]string, expected map[string]string) bool { - if len(actual) != len(expected) { - return false - } - for key, value := range actual { - if expectedValue, ok := expected[key]; ok { - if expectedValue != value { - return false - } - } else { - return false - } - } - return true -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/volume/inspect.go b/fn/vendor/github.com/docker/docker/cli/command/volume/inspect.go deleted file mode 100644 index 70db26495..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/volume/inspect.go +++ /dev/null @@ -1,45 +0,0 @@ -package volume - -import ( - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/inspect" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type inspectOptions struct { - format string - names []string -} - -func newInspectCommand(dockerCli command.Cli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] VOLUME [VOLUME...]", - Short: "Display detailed information on one or more volumes", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.names = args - return runInspect(dockerCli, opts) - }, - } - - cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - - return cmd -} - -func runInspect(dockerCli command.Cli, opts inspectOptions) error { - client := dockerCli.Client() - - ctx := context.Background() - - getVolFunc := func(name string) (interface{}, []byte, error) { - i, err := client.VolumeInspect(ctx, name) - return i, nil, err - } - - return inspect.Inspect(dockerCli.Out(), opts.names, opts.format, getVolFunc) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/volume/inspect_test.go 
b/fn/vendor/github.com/docker/docker/cli/command/volume/inspect_test.go deleted file mode 100644 index 7c4cce39d..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/volume/inspect_test.go +++ /dev/null @@ -1,151 +0,0 @@ -package volume - -import ( - "bytes" - "fmt" - "io/ioutil" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli/internal/test" - "github.com/pkg/errors" - // Import builders to get the builder function as package function - . "github.com/docker/docker/cli/internal/test/builders" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/docker/docker/pkg/testutil/golden" -) - -func TestVolumeInspectErrors(t *testing.T) { - testCases := []struct { - args []string - flags map[string]string - volumeInspectFunc func(volumeID string) (types.Volume, error) - expectedError string - }{ - { - expectedError: "requires at least 1 argument", - }, - { - args: []string{"foo"}, - volumeInspectFunc: func(volumeID string) (types.Volume, error) { - return types.Volume{}, errors.Errorf("error while inspecting the volume") - }, - expectedError: "error while inspecting the volume", - }, - { - args: []string{"foo"}, - flags: map[string]string{ - "format": "{{invalid format}}", - }, - expectedError: "Template parsing error", - }, - { - args: []string{"foo", "bar"}, - volumeInspectFunc: func(volumeID string) (types.Volume, error) { - if volumeID == "foo" { - return types.Volume{ - Name: "foo", - }, nil - } - return types.Volume{}, errors.Errorf("error while inspecting the volume") - }, - expectedError: "error while inspecting the volume", - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newInspectCommand( - test.NewFakeCli(&fakeClient{ - volumeInspectFunc: tc.volumeInspectFunc, - }, buf), - ) - cmd.SetArgs(tc.args) - for key, value := range tc.flags { - cmd.Flags().Set(key, value) - } - cmd.SetOutput(ioutil.Discard) - assert.Error(t, cmd.Execute(), tc.expectedError) - } -} - -func 
TestVolumeInspectWithoutFormat(t *testing.T) { - testCases := []struct { - name string - args []string - volumeInspectFunc func(volumeID string) (types.Volume, error) - }{ - { - name: "single-volume", - args: []string{"foo"}, - volumeInspectFunc: func(volumeID string) (types.Volume, error) { - if volumeID != "foo" { - return types.Volume{}, errors.Errorf("Invalid volumeID, expected %s, got %s", "foo", volumeID) - } - return *Volume(), nil - }, - }, - { - name: "multiple-volume-with-labels", - args: []string{"foo", "bar"}, - volumeInspectFunc: func(volumeID string) (types.Volume, error) { - return *Volume(VolumeName(volumeID), VolumeLabels(map[string]string{ - "foo": "bar", - })), nil - }, - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newInspectCommand( - test.NewFakeCli(&fakeClient{ - volumeInspectFunc: tc.volumeInspectFunc, - }, buf), - ) - cmd.SetArgs(tc.args) - assert.NilError(t, cmd.Execute()) - actual := buf.String() - expected := golden.Get(t, []byte(actual), fmt.Sprintf("volume-inspect-without-format.%s.golden", tc.name)) - assert.EqualNormalizedString(t, assert.RemoveSpace, actual, string(expected)) - } -} - -func TestVolumeInspectWithFormat(t *testing.T) { - volumeInspectFunc := func(volumeID string) (types.Volume, error) { - return *Volume(VolumeLabels(map[string]string{ - "foo": "bar", - })), nil - } - testCases := []struct { - name string - format string - args []string - volumeInspectFunc func(volumeID string) (types.Volume, error) - }{ - { - name: "simple-template", - format: "{{.Name}}", - args: []string{"foo"}, - volumeInspectFunc: volumeInspectFunc, - }, - { - name: "json-template", - format: "{{json .Labels}}", - args: []string{"foo"}, - volumeInspectFunc: volumeInspectFunc, - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newInspectCommand( - test.NewFakeCli(&fakeClient{ - volumeInspectFunc: tc.volumeInspectFunc, - }, buf), - ) - cmd.SetArgs(tc.args) - cmd.Flags().Set("format", 
tc.format) - assert.NilError(t, cmd.Execute()) - actual := buf.String() - expected := golden.Get(t, []byte(actual), fmt.Sprintf("volume-inspect-with-format.%s.golden", tc.name)) - assert.EqualNormalizedString(t, assert.RemoveSpace, actual, string(expected)) - } -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/volume/list.go b/fn/vendor/github.com/docker/docker/cli/command/volume/list.go deleted file mode 100644 index 3577db955..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/volume/list.go +++ /dev/null @@ -1,73 +0,0 @@ -package volume - -import ( - "sort" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - "github.com/docker/docker/opts" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type byVolumeName []*types.Volume - -func (r byVolumeName) Len() int { return len(r) } -func (r byVolumeName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r byVolumeName) Less(i, j int) bool { - return r[i].Name < r[j].Name -} - -type listOptions struct { - quiet bool - format string - filter opts.FilterOpt -} - -func newListCommand(dockerCli command.Cli) *cobra.Command { - opts := listOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ls [OPTIONS]", - Aliases: []string{"list"}, - Short: "List volumes", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runList(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display volume names") - flags.StringVar(&opts.format, "format", "", "Pretty-print volumes using a Go template") - flags.VarP(&opts.filter, "filter", "f", "Provide filter values (e.g. 
'dangling=true')") - - return cmd -} - -func runList(dockerCli command.Cli, opts listOptions) error { - client := dockerCli.Client() - volumes, err := client.VolumeList(context.Background(), opts.filter.Value()) - if err != nil { - return err - } - - format := opts.format - if len(format) == 0 { - if len(dockerCli.ConfigFile().VolumesFormat) > 0 && !opts.quiet { - format = dockerCli.ConfigFile().VolumesFormat - } else { - format = formatter.TableFormatKey - } - } - - sort.Sort(byVolumeName(volumes.Volumes)) - - volumeCtx := formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewVolumeFormat(format, opts.quiet), - } - return formatter.VolumeWrite(volumeCtx, volumes.Volumes) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/volume/list_test.go b/fn/vendor/github.com/docker/docker/cli/command/volume/list_test.go deleted file mode 100644 index b2306a5d8..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/volume/list_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package volume - -import ( - "bytes" - "io/ioutil" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - volumetypes "github.com/docker/docker/api/types/volume" - "github.com/docker/docker/cli/config/configfile" - "github.com/docker/docker/cli/internal/test" - "github.com/pkg/errors" - // Import builders to get the builder function as package function - . 
"github.com/docker/docker/cli/internal/test/builders" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/docker/docker/pkg/testutil/golden" -) - -func TestVolumeListErrors(t *testing.T) { - testCases := []struct { - args []string - flags map[string]string - volumeListFunc func(filter filters.Args) (volumetypes.VolumesListOKBody, error) - expectedError string - }{ - { - args: []string{"foo"}, - expectedError: "accepts no argument", - }, - { - volumeListFunc: func(filter filters.Args) (volumetypes.VolumesListOKBody, error) { - return volumetypes.VolumesListOKBody{}, errors.Errorf("error listing volumes") - }, - expectedError: "error listing volumes", - }, - } - for _, tc := range testCases { - buf := new(bytes.Buffer) - cmd := newListCommand( - test.NewFakeCli(&fakeClient{ - volumeListFunc: tc.volumeListFunc, - }, buf), - ) - cmd.SetArgs(tc.args) - for key, value := range tc.flags { - cmd.Flags().Set(key, value) - } - cmd.SetOutput(ioutil.Discard) - assert.Error(t, cmd.Execute(), tc.expectedError) - } -} - -func TestVolumeListWithoutFormat(t *testing.T) { - buf := new(bytes.Buffer) - cli := test.NewFakeCli(&fakeClient{ - volumeListFunc: func(filter filters.Args) (volumetypes.VolumesListOKBody, error) { - return volumetypes.VolumesListOKBody{ - Volumes: []*types.Volume{ - Volume(), - Volume(VolumeName("foo"), VolumeDriver("bar")), - Volume(VolumeName("baz"), VolumeLabels(map[string]string{ - "foo": "bar", - })), - }, - }, nil - }, - }, buf) - cli.SetConfigfile(&configfile.ConfigFile{}) - cmd := newListCommand(cli) - assert.NilError(t, cmd.Execute()) - actual := buf.String() - expected := golden.Get(t, []byte(actual), "volume-list-without-format.golden") - assert.EqualNormalizedString(t, assert.RemoveSpace, actual, string(expected)) -} - -func TestVolumeListWithConfigFormat(t *testing.T) { - buf := new(bytes.Buffer) - cli := test.NewFakeCli(&fakeClient{ - volumeListFunc: func(filter filters.Args) (volumetypes.VolumesListOKBody, error) { - return 
volumetypes.VolumesListOKBody{ - Volumes: []*types.Volume{ - Volume(), - Volume(VolumeName("foo"), VolumeDriver("bar")), - Volume(VolumeName("baz"), VolumeLabels(map[string]string{ - "foo": "bar", - })), - }, - }, nil - }, - }, buf) - cli.SetConfigfile(&configfile.ConfigFile{ - VolumesFormat: "{{ .Name }} {{ .Driver }} {{ .Labels }}", - }) - cmd := newListCommand(cli) - assert.NilError(t, cmd.Execute()) - actual := buf.String() - expected := golden.Get(t, []byte(actual), "volume-list-with-config-format.golden") - assert.EqualNormalizedString(t, assert.RemoveSpace, actual, string(expected)) -} - -func TestVolumeListWithFormat(t *testing.T) { - buf := new(bytes.Buffer) - cli := test.NewFakeCli(&fakeClient{ - volumeListFunc: func(filter filters.Args) (volumetypes.VolumesListOKBody, error) { - return volumetypes.VolumesListOKBody{ - Volumes: []*types.Volume{ - Volume(), - Volume(VolumeName("foo"), VolumeDriver("bar")), - Volume(VolumeName("baz"), VolumeLabels(map[string]string{ - "foo": "bar", - })), - }, - }, nil - }, - }, buf) - cli.SetConfigfile(&configfile.ConfigFile{}) - cmd := newListCommand(cli) - cmd.Flags().Set("format", "{{ .Name }} {{ .Driver }} {{ .Labels }}") - assert.NilError(t, cmd.Execute()) - actual := buf.String() - expected := golden.Get(t, []byte(actual), "volume-list-with-format.golden") - assert.EqualNormalizedString(t, assert.RemoveSpace, actual, string(expected)) -} diff --git a/fn/vendor/github.com/docker/docker/cli/command/volume/prune.go b/fn/vendor/github.com/docker/docker/cli/command/volume/prune.go deleted file mode 100644 index f7d823ffa..000000000 --- a/fn/vendor/github.com/docker/docker/cli/command/volume/prune.go +++ /dev/null @@ -1,78 +0,0 @@ -package volume - -import ( - "fmt" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/opts" - units "github.com/docker/go-units" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type pruneOptions struct { - force bool - filter 
opts.FilterOpt -} - -// NewPruneCommand returns a new cobra prune command for volumes -func NewPruneCommand(dockerCli command.Cli) *cobra.Command { - opts := pruneOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "prune [OPTIONS]", - Short: "Remove all unused volumes", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - spaceReclaimed, output, err := runPrune(dockerCli, opts) - if err != nil { - return err - } - if output != "" { - fmt.Fprintln(dockerCli.Out(), output) - } - fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) - return nil - }, - Tags: map[string]string{"version": "1.25"}, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation") - flags.Var(&opts.filter, "filter", "Provide filter values (e.g. 'label= patterns + + captures + + 1 + + name + keyword.other.special-method.dockerfile + + 2 + + name + keyword.other.special-method.dockerfile + + + match + ^\s*\b(FROM)\b.*?\b(AS)\b + captures diff --git a/fn/vendor/github.com/docker/docker/contrib/syscall-test/Dockerfile b/fn/vendor/github.com/docker/docker/contrib/syscall-test/Dockerfile index fcf5892be..f95f1758c 100644 --- a/fn/vendor/github.com/docker/docker/contrib/syscall-test/Dockerfile +++ b/fn/vendor/github.com/docker/docker/contrib/syscall-test/Dockerfile @@ -10,7 +10,6 @@ RUN gcc -g -Wall -static userns.c -o /usr/bin/userns-test \ && gcc -g -Wall -static setuid.c -o /usr/bin/setuid-test \ && gcc -g -Wall -static setgid.c -o /usr/bin/setgid-test \ && gcc -g -Wall -static socket.c -o /usr/bin/socket-test \ - && gcc -g -Wall -static raw.c -o /usr/bin/raw-test \ - && gcc -g -Wall -static appletalk.c -o /usr/bin/appletalk-test + && gcc -g -Wall -static raw.c -o /usr/bin/raw-test RUN [ "$(uname -m)" = "x86_64" ] && gcc -s -m32 -nostdlib exit32.s -o /usr/bin/exit32-test || true diff --git a/fn/vendor/github.com/docker/docker/contrib/syscall-test/appletalk.c 
b/fn/vendor/github.com/docker/docker/contrib/syscall-test/appletalk.c deleted file mode 100644 index 0001dd424..000000000 --- a/fn/vendor/github.com/docker/docker/contrib/syscall-test/appletalk.c +++ /dev/null @@ -1,12 +0,0 @@ -#include -#include - -int main() { - - if (socket(AF_APPLETALK, SOCK_DGRAM, 0) != -1) { - fprintf(stderr, "Opening Appletalk socket worked, should be blocked\n"); - return 1; - } - - return 0; -} diff --git a/fn/vendor/github.com/docker/docker/daemon/apparmor_default.go b/fn/vendor/github.com/docker/docker/daemon/apparmor_default.go index 09dd0541b..2a418b25c 100644 --- a/fn/vendor/github.com/docker/docker/daemon/apparmor_default.go +++ b/fn/vendor/github.com/docker/docker/daemon/apparmor_default.go @@ -28,7 +28,7 @@ func ensureDefaultAppArmorProfile() error { // Load the profile. if err := aaprofile.InstallDefault(defaultApparmorProfile); err != nil { - return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded.", defaultApparmorProfile) + return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded: %s", defaultApparmorProfile, err) } } diff --git a/fn/vendor/github.com/docker/docker/daemon/archive.go b/fn/vendor/github.com/docker/docker/daemon/archive.go index b1401600c..bd00daca5 100644 --- a/fn/vendor/github.com/docker/docker/daemon/archive.go +++ b/fn/vendor/github.com/docker/docker/daemon/archive.go @@ -7,14 +7,10 @@ import ( "strings" "github.com/docker/docker/api/types" - "github.com/docker/docker/builder" "github.com/docker/docker/container" - "github.com/docker/docker/layer" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/system" "github.com/pkg/errors" ) @@ -83,7 +79,7 @@ func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io // be ErrExtractPointNotDirectory. 
If noOverwriteDirNonDir is true then it will // be an error if unpacking the given content would cause an existing directory // to be replaced with a non-directory and vice versa. -func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error { +func (daemon *Daemon) ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error { container, err := daemon.GetContainer(name) if err != nil { return err @@ -94,7 +90,7 @@ func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNon return err } - return daemon.containerExtractToDir(container, path, noOverwriteDirNonDir, content) + return daemon.containerExtractToDir(container, path, copyUIDGID, noOverwriteDirNonDir, content) } // containerStatPath stats the filesystem resource at the specified path in this @@ -196,7 +192,7 @@ func (daemon *Daemon) containerArchivePath(container *container.Container, path // noOverwriteDirNonDir is true then it will be an error if unpacking the // given content would cause an existing directory to be replaced with a non- // directory and vice versa. -func (daemon *Daemon) containerExtractToDir(container *container.Container, path string, noOverwriteDirNonDir bool, content io.Reader) (err error) { +func (daemon *Daemon) containerExtractToDir(container *container.Container, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) (err error) { container.Lock() defer container.Unlock() @@ -279,13 +275,18 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path return ErrRootFSReadOnly } - uid, gid := daemon.GetRemappedUIDGID() - options := &archive.TarOptions{ - NoOverwriteDirNonDir: noOverwriteDirNonDir, - ChownOpts: &archive.TarChownOptions{ - UID: uid, GID: gid, // TODO: should all ownership be set to root (either real or remapped)? 
- }, + options := daemon.defaultTarCopyOptions(noOverwriteDirNonDir) + + if copyUIDGID { + var err error + // tarCopyOptions will appropriately pull in the right uid/gid for the + // user/group and will set the options. + options, err = daemon.tarCopyOptions(container, noOverwriteDirNonDir) + if err != nil { + return err + } } + if err := chrootarchive.Untar(content, resolvedPath, options); err != nil { return err } @@ -359,131 +360,3 @@ func (daemon *Daemon) containerCopy(container *container.Container, resource str daemon.LogContainerEvent(container, "copy") return reader, nil } - -// CopyOnBuild copies/extracts a source FileInfo to a destination path inside a container -// specified by a container object. -// TODO: make sure callers don't unnecessarily convert destPath with filepath.FromSlash (Copy does it already). -// CopyOnBuild should take in abstract paths (with slashes) and the implementation should convert it to OS-specific paths. -func (daemon *Daemon) CopyOnBuild(cID string, destPath string, src builder.FileInfo, decompress bool) error { - srcPath := src.Path() - destExists := true - destDir := false - rootUID, rootGID := daemon.GetRemappedUIDGID() - - // Work in daemon-local OS specific file paths - destPath = filepath.FromSlash(destPath) - - c, err := daemon.GetContainer(cID) - if err != nil { - return err - } - err = daemon.Mount(c) - if err != nil { - return err - } - defer daemon.Unmount(c) - - dest, err := c.GetResourcePath(destPath) - if err != nil { - return err - } - - // Preserve the trailing slash - // TODO: why are we appending another path separator if there was already one? - if strings.HasSuffix(destPath, string(os.PathSeparator)) || destPath == "." { - destDir = true - dest += string(os.PathSeparator) - } - - destPath = dest - - destStat, err := os.Stat(destPath) - if err != nil { - if !os.IsNotExist(err) { - //logrus.Errorf("Error performing os.Stat on %s. 
%s", destPath, err) - return err - } - destExists = false - } - - uidMaps, gidMaps := daemon.GetUIDGIDMaps() - archiver := &archive.Archiver{ - Untar: chrootarchive.Untar, - UIDMaps: uidMaps, - GIDMaps: gidMaps, - } - - if src.IsDir() { - // copy as directory - if err := archiver.CopyWithTar(srcPath, destPath); err != nil { - return err - } - return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists) - } - if decompress && archive.IsArchivePath(srcPath) { - // Only try to untar if it is a file and that we've been told to decompress (when ADD-ing a remote file) - - // First try to unpack the source as an archive - // to support the untar feature we need to clean up the path a little bit - // because tar is very forgiving. First we need to strip off the archive's - // filename from the path but this is only added if it does not end in slash - tarDest := destPath - if strings.HasSuffix(tarDest, string(os.PathSeparator)) { - tarDest = filepath.Dir(destPath) - } - - // try to successfully untar the orig - err := archiver.UntarPath(srcPath, tarDest) - /* - if err != nil { - logrus.Errorf("Couldn't untar to %s: %v", tarDest, err) - } - */ - return err - } - - // only needed for fixPermissions, but might as well put it before CopyFileWithTar - if destDir || (destExists && destStat.IsDir()) { - destPath = filepath.Join(destPath, src.Name()) - } - - if err := idtools.MkdirAllNewAs(filepath.Dir(destPath), 0755, rootUID, rootGID); err != nil { - return err - } - if err := archiver.CopyFileWithTar(srcPath, destPath); err != nil { - return err - } - - return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists) -} - -// MountImage returns mounted path with rootfs of an image. 
-func (daemon *Daemon) MountImage(name string) (string, func() error, error) { - img, err := daemon.GetImage(name) - if err != nil { - return "", nil, errors.Wrapf(err, "no such image: %s", name) - } - - mountID := stringid.GenerateRandomID() - rwLayer, err := daemon.layerStore.CreateRWLayer(mountID, img.RootFS.ChainID(), nil) - if err != nil { - return "", nil, errors.Wrap(err, "failed to create rwlayer") - } - - mountPath, err := rwLayer.Mount("") - if err != nil { - metadata, releaseErr := daemon.layerStore.ReleaseRWLayer(rwLayer) - if releaseErr != nil { - err = errors.Wrapf(err, "failed to release rwlayer: %s", releaseErr.Error()) - } - layer.LogReleaseMetadata(metadata) - return "", nil, errors.Wrap(err, "failed to mount rwlayer") - } - - return mountPath, func() error { - rwLayer.Unmount() - metadata, err := daemon.layerStore.ReleaseRWLayer(rwLayer) - layer.LogReleaseMetadata(metadata) - return err - }, nil -} diff --git a/fn/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions.go b/fn/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions.go new file mode 100644 index 000000000..fe7722fdb --- /dev/null +++ b/fn/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions.go @@ -0,0 +1,15 @@ +package daemon + +import ( + "github.com/docker/docker/pkg/archive" +) + +// defaultTarCopyOptions is the setting that is used when unpacking an archive +// for a copy API event. 
+func (daemon *Daemon) defaultTarCopyOptions(noOverwriteDirNonDir bool) *archive.TarOptions { + return &archive.TarOptions{ + NoOverwriteDirNonDir: noOverwriteDirNonDir, + UIDMaps: daemon.idMappings.UIDs(), + GIDMaps: daemon.idMappings.GIDs(), + } +} diff --git a/fn/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_unix.go b/fn/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_unix.go new file mode 100644 index 000000000..83e6fd9e1 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_unix.go @@ -0,0 +1,25 @@ +// +build !windows + +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" +) + +func (daemon *Daemon) tarCopyOptions(container *container.Container, noOverwriteDirNonDir bool) (*archive.TarOptions, error) { + if container.Config.User == "" { + return daemon.defaultTarCopyOptions(noOverwriteDirNonDir), nil + } + + user, err := idtools.LookupUser(container.Config.User) + if err != nil { + return nil, err + } + + return &archive.TarOptions{ + NoOverwriteDirNonDir: noOverwriteDirNonDir, + ChownOpts: &idtools.IDPair{UID: user.Uid, GID: user.Gid}, + }, nil +} diff --git a/fn/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_windows.go b/fn/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_windows.go new file mode 100644 index 000000000..535efd222 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" +) + +func (daemon *Daemon) tarCopyOptions(container *container.Container, noOverwriteDirNonDir bool) (*archive.TarOptions, error) { + return daemon.defaultTarCopyOptions(noOverwriteDirNonDir), nil +} diff --git a/fn/vendor/github.com/docker/docker/daemon/archive_unix.go 
b/fn/vendor/github.com/docker/docker/daemon/archive_unix.go index 8806e2e19..d5dfad78c 100644 --- a/fn/vendor/github.com/docker/docker/daemon/archive_unix.go +++ b/fn/vendor/github.com/docker/docker/daemon/archive_unix.go @@ -3,9 +3,6 @@ package daemon import ( - "os" - "path/filepath" - "github.com/docker/docker/container" ) @@ -25,38 +22,6 @@ func checkIfPathIsInAVolume(container *container.Container, absPath string) (boo return toVolume, nil } -func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { - // If the destination didn't already exist, or the destination isn't a - // directory, then we should Lchown the destination. Otherwise, we shouldn't - // Lchown the destination. - destStat, err := os.Stat(destination) - if err != nil { - // This should *never* be reached, because the destination must've already - // been created while untar-ing the context. - return err - } - doChownDestination := !destExisted || !destStat.IsDir() - - // We Walk on the source rather than on the destination because we don't - // want to change permissions on things we haven't created or modified. - return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { - // Do not alter the walk root iff. it existed before, as it doesn't fall under - // the domain of "things we should chown". - if !doChownDestination && (source == fullpath) { - return nil - } - - // Path is prefixed by source: substitute with destination instead. - cleaned, err := filepath.Rel(source, fullpath) - if err != nil { - return err - } - - fullpath = filepath.Join(destination, cleaned) - return os.Lchown(fullpath, uid, gid) - }) -} - // isOnlineFSOperationPermitted returns an error if an online filesystem operation // is not permitted. 
func (daemon *Daemon) isOnlineFSOperationPermitted(container *container.Container) error { diff --git a/fn/vendor/github.com/docker/docker/daemon/archive_windows.go b/fn/vendor/github.com/docker/docker/daemon/archive_windows.go index 1c5a894f7..ab105607d 100644 --- a/fn/vendor/github.com/docker/docker/daemon/archive_windows.go +++ b/fn/vendor/github.com/docker/docker/daemon/archive_windows.go @@ -17,11 +17,6 @@ func checkIfPathIsInAVolume(container *container.Container, absPath string) (boo return false, nil } -func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { - // chown is not supported on Windows - return nil -} - // isOnlineFSOperationPermitted returns an error if an online filesystem operation // is not permitted (such as stat or for copying). Running Hyper-V containers // cannot have their file-system interrogated from the host as the filter is diff --git a/fn/vendor/github.com/docker/docker/daemon/attach.go b/fn/vendor/github.com/docker/docker/daemon/attach.go index fb213132f..32410393a 100644 --- a/fn/vendor/github.com/docker/docker/daemon/attach.go +++ b/fn/vendor/github.com/docker/docker/daemon/attach.go @@ -1,9 +1,9 @@ package daemon import ( + "context" "fmt" "io" - "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/api/errors" @@ -31,7 +31,11 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerA return err } if container.IsPaused() { - err := fmt.Errorf("Container %s is paused. 
Unpause the container before attach", prefixOrName) + err := fmt.Errorf("Container %s is paused, unpause the container before attach.", prefixOrName) + return errors.NewRequestConflictError(err) + } + if container.IsRestarting() { + err := fmt.Errorf("Container %s is restarting, wait until the container is running.", prefixOrName) return errors.NewRequestConflictError(err) } @@ -73,7 +77,7 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerA } // ContainerAttachRaw attaches the provided streams to the container's stdio -func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadCloser, stdout, stderr io.Writer, doStream bool) error { +func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadCloser, stdout, stderr io.Writer, doStream bool, attached chan struct{}) error { container, err := daemon.GetContainer(prefixOrName) if err != nil { return err @@ -86,6 +90,7 @@ func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadClose CloseStdin: container.Config.StdinOnce, } container.StreamConfig.AttachStreams(&cfg) + close(attached) if cfg.UseStdin { cfg.Stdin = stdin } @@ -101,15 +106,23 @@ func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadClose func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.AttachConfig, logs, doStream bool) error { if logs { - logDriver, err := daemon.getLogger(c) + logDriver, logCreated, err := daemon.getLogger(c) if err != nil { return err } + if logCreated { + defer func() { + if err = logDriver.Close(); err != nil { + logrus.Errorf("Error closing logger: %v", err) + } + }() + } cLog, ok := logDriver.(logger.LogReader) if !ok { return logger.ErrReadLogsNotSupported } logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1}) + defer logs.Close() LogLoop: for { @@ -151,21 +164,18 @@ func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.Attach cfg.Stdin = nil } - waitChan := make(chan struct{}) if 
c.Config.StdinOnce && !c.Config.Tty { + // Wait for the container to stop before returning. + waitChan := c.Wait(context.Background(), container.WaitConditionNotRunning) defer func() { - <-waitChan - }() - go func() { - c.WaitStop(-1 * time.Second) - close(waitChan) + _ = <-waitChan // Ignore returned exit code. }() } ctx := c.InitAttachContext() err := <-c.StreamConfig.CopyStreams(ctx, cfg) if err != nil { - if _, ok := err.(stream.DetachError); ok { + if _, ok := err.(term.EscapeError); ok { daemon.LogContainerEvent(c, "detach") } else { logrus.Errorf("attach failed with error: %v", err) diff --git a/fn/vendor/github.com/docker/docker/daemon/build.go b/fn/vendor/github.com/docker/docker/daemon/build.go new file mode 100644 index 000000000..9b518d64f --- /dev/null +++ b/fn/vendor/github.com/docker/docker/daemon/build.go @@ -0,0 +1,196 @@ +package daemon + +import ( + "io" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/builder" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +type releaseableLayer struct { + released bool + layerStore layer.Store + roLayer layer.Layer + rwLayer layer.RWLayer +} + +func (rl *releaseableLayer) Mount() (string, error) { + var err error + var chainID layer.ChainID + if rl.roLayer != nil { + chainID = rl.roLayer.ChainID() + } + + mountID := stringid.GenerateRandomID() + rl.rwLayer, err = rl.layerStore.CreateRWLayer(mountID, chainID, nil) + if err != nil { + return "", errors.Wrap(err, "failed to create rwlayer") + } + + return rl.rwLayer.Mount("") +} + +func (rl *releaseableLayer) Commit(platform string) (builder.ReleaseableLayer, error) { + var chainID layer.ChainID + if rl.roLayer 
!= nil { + chainID = rl.roLayer.ChainID() + } + + stream, err := rl.rwLayer.TarStream() + if err != nil { + return nil, err + } + + newLayer, err := rl.layerStore.Register(stream, chainID, layer.Platform(platform)) + if err != nil { + return nil, err + } + + if layer.IsEmpty(newLayer.DiffID()) { + _, err := rl.layerStore.Release(newLayer) + return &releaseableLayer{layerStore: rl.layerStore}, err + } + return &releaseableLayer{layerStore: rl.layerStore, roLayer: newLayer}, nil +} + +func (rl *releaseableLayer) DiffID() layer.DiffID { + if rl.roLayer == nil { + return layer.DigestSHA256EmptyTar + } + return rl.roLayer.DiffID() +} + +func (rl *releaseableLayer) Release() error { + if rl.released { + return nil + } + rl.released = true + rl.releaseRWLayer() + return rl.releaseROLayer() +} + +func (rl *releaseableLayer) releaseRWLayer() error { + if rl.rwLayer == nil { + return nil + } + metadata, err := rl.layerStore.ReleaseRWLayer(rl.rwLayer) + layer.LogReleaseMetadata(metadata) + if err != nil { + logrus.Errorf("Failed to release RWLayer: %s", err) + } + return err +} + +func (rl *releaseableLayer) releaseROLayer() error { + if rl.roLayer == nil { + return nil + } + metadata, err := rl.layerStore.Release(rl.roLayer) + layer.LogReleaseMetadata(metadata) + return err +} + +func newReleasableLayerForImage(img *image.Image, layerStore layer.Store) (builder.ReleaseableLayer, error) { + if img == nil || img.RootFS.ChainID() == "" { + return &releaseableLayer{layerStore: layerStore}, nil + } + // Hold a reference to the image layer so that it can't be removed before + // it is released + roLayer, err := layerStore.Get(img.RootFS.ChainID()) + if err != nil { + return nil, errors.Wrapf(err, "failed to get layer for image %s", img.ImageID()) + } + return &releaseableLayer{layerStore: layerStore, roLayer: roLayer}, nil +} + +// TODO: could this use the regular daemon PullImage ? 
+func (daemon *Daemon) pullForBuilder(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer, platform string) (*image.Image, error) { + ref, err := reference.ParseNormalizedNamed(name) + if err != nil { + return nil, err + } + ref = reference.TagNameOnly(ref) + + pullRegistryAuth := &types.AuthConfig{} + if len(authConfigs) > 0 { + // The request came with a full auth config, use it + repoInfo, err := daemon.RegistryService.ResolveRepository(ref) + if err != nil { + return nil, err + } + + resolvedConfig := registry.ResolveAuthConfig(authConfigs, repoInfo.Index) + pullRegistryAuth = &resolvedConfig + } + + if err := daemon.pullImageWithReference(ctx, ref, platform, nil, pullRegistryAuth, output); err != nil { + return nil, err + } + return daemon.GetImage(name) +} + +// GetImageAndReleasableLayer returns an image and releaseable layer for a reference or ID. +// Every call to GetImageAndReleasableLayer MUST call releasableLayer.Release() to prevent +// leaking of layers. +func (daemon *Daemon) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ReleaseableLayer, error) { + if refOrID == "" { + layer, err := newReleasableLayerForImage(nil, daemon.stores[opts.Platform].layerStore) + return nil, layer, err + } + + if opts.PullOption != backend.PullOptionForcePull { + image, err := daemon.GetImage(refOrID) + if err != nil && opts.PullOption == backend.PullOptionNoPull { + return nil, nil, err + } + // TODO: shouldn't we error out if error is different from "not found" ? 
+ if image != nil { + layer, err := newReleasableLayerForImage(image, daemon.stores[opts.Platform].layerStore) + return image, layer, err + } + } + + image, err := daemon.pullForBuilder(ctx, refOrID, opts.AuthConfig, opts.Output, opts.Platform) + if err != nil { + return nil, nil, err + } + layer, err := newReleasableLayerForImage(image, daemon.stores[opts.Platform].layerStore) + return image, layer, err +} + +// CreateImage creates a new image by adding a config and ID to the image store. +// This is similar to LoadImage() except that it receives JSON encoded bytes of +// an image instead of a tar archive. +func (daemon *Daemon) CreateImage(config []byte, parent string, platform string) (builder.Image, error) { + if platform == "" { + platform = runtime.GOOS + } + id, err := daemon.stores[platform].imageStore.Create(config) + if err != nil { + return nil, errors.Wrapf(err, "failed to create image") + } + + if parent != "" { + if err := daemon.stores[platform].imageStore.SetParent(id, image.ID(parent)); err != nil { + return nil, errors.Wrapf(err, "failed to set parent %s", parent) + } + } + + return daemon.stores[platform].imageStore.Get(id) +} + +// IDMappings returns uid/gid mappings for the builder +func (daemon *Daemon) IDMappings() *idtools.IDMappings { + return daemon.idMappings +} diff --git a/fn/vendor/github.com/docker/docker/daemon/cache.go b/fn/vendor/github.com/docker/docker/daemon/cache.go index 8e3d20775..219b0b38d 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cache.go +++ b/fn/vendor/github.com/docker/docker/daemon/cache.go @@ -7,12 +7,12 @@ import ( ) // MakeImageCache creates a stateful image cache. 
-func (daemon *Daemon) MakeImageCache(sourceRefs []string) builder.ImageCache { +func (daemon *Daemon) MakeImageCache(sourceRefs []string, platform string) builder.ImageCache { if len(sourceRefs) == 0 { - return cache.NewLocal(daemon.imageStore) + return cache.NewLocal(daemon.stores[platform].imageStore) } - cache := cache.New(daemon.imageStore) + cache := cache.New(daemon.stores[platform].imageStore) for _, ref := range sourceRefs { img, err := daemon.GetImage(ref) diff --git a/fn/vendor/github.com/docker/docker/daemon/checkpoint.go b/fn/vendor/github.com/docker/docker/daemon/checkpoint.go index a4136d2c3..d3028f1e2 100644 --- a/fn/vendor/github.com/docker/docker/daemon/checkpoint.go +++ b/fn/vendor/github.com/docker/docker/daemon/checkpoint.go @@ -67,7 +67,6 @@ func (daemon *Daemon) CheckpointCreate(name string, config types.CheckpointCreat } checkpointDir, err := getCheckpointDir(config.CheckpointDir, config.CheckpointID, name, container.ID, container.CheckpointDir(), true) - if err != nil { return fmt.Errorf("cannot checkpoint container %s: %s", name, err) } @@ -104,7 +103,10 @@ func (daemon *Daemon) CheckpointList(name string, config types.CheckpointListOpt return nil, err } - checkpointDir, err := getCheckpointDir(config.CheckpointDir, "", name, container.ID, container.CheckpointDir(), true) + checkpointDir, err := getCheckpointDir(config.CheckpointDir, "", name, container.ID, container.CheckpointDir(), false) + if err != nil { + return nil, err + } if err := os.MkdirAll(checkpointDir, 0755); err != nil { return nil, err diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster.go b/fn/vendor/github.com/docker/docker/daemon/cluster.go index b7970edbb..d22970bcd 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster.go @@ -2,12 +2,14 @@ package daemon import ( apitypes "github.com/docker/docker/api/types" + lncluster "github.com/docker/libnetwork/cluster" ) // Cluster is the interface for 
github.com/docker/docker/daemon/cluster.(*Cluster). type Cluster interface { ClusterStatus NetworkManager + SendClusterEvent(event lncluster.ConfigEventType) } // ClusterStatus interface provides information about the Swarm status of the Cluster diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/cluster.go b/fn/vendor/github.com/docker/docker/daemon/cluster/cluster.go index fec07dc55..57fc4d2d6 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/cluster.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/cluster.go @@ -49,8 +49,10 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/api/types/network" types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/controllers/plugin" executorpkg "github.com/docker/docker/daemon/cluster/executor" "github.com/docker/docker/pkg/signal" + lncluster "github.com/docker/libnetwork/cluster" swarmapi "github.com/docker/swarmkit/api" swarmnode "github.com/docker/swarmkit/node" "github.com/pkg/errors" @@ -96,6 +98,7 @@ type Config struct { Root string Name string Backend executorpkg.Backend + PluginBackend plugin.Backend NetworkSubnetsProvider NetworkSubnetsProvider // DefaultAdvertiseAddr is the default host/IP or network interface to use @@ -104,6 +107,9 @@ type Config struct { // path to store runtime state, such as the swarm control socket RuntimeRoot string + + // WatchStream is a channel to pass watch API notifications to daemon + WatchStream chan *swarmapi.WatchMessage } // Cluster provides capabilities to participate in a cluster as a worker or a @@ -115,8 +121,9 @@ type Cluster struct { root string runtimeRoot string config Config - configEvent chan struct{} // todo: make this array and goroutine safe + configEvent chan lncluster.ConfigEventType // todo: make this array and goroutine safe attachers map[string]*attacher + watchStream chan *swarmapi.WatchMessage } // attacher manages the in-memory attachment state of a container @@ -147,22 +154,31 
@@ func New(config Config) (*Cluster, error) { c := &Cluster{ root: root, config: config, - configEvent: make(chan struct{}, 10), + configEvent: make(chan lncluster.ConfigEventType, 10), runtimeRoot: config.RuntimeRoot, attachers: make(map[string]*attacher), + watchStream: config.WatchStream, } + return c, nil +} + +// Start the Cluster instance +// TODO The split between New and Start can be join again when the SendClusterEvent +// method is no longer required +func (c *Cluster) Start() error { + root := filepath.Join(c.config.Root, swarmDirName) nodeConfig, err := loadPersistentState(root) if err != nil { if os.IsNotExist(err) { - return c, nil + return nil } - return nil, err + return err } nr, err := c.newNodeRunner(*nodeConfig) if err != nil { - return nil, err + return err } c.nr = nr @@ -172,10 +188,10 @@ func New(config Config) (*Cluster, error) { case err := <-nr.Ready(): if err != nil { logrus.WithError(err).Error("swarm component could not be started") - return c, nil + return nil } } - return c, nil + return nil } func (c *Cluster) newNodeRunner(conf nodeStartConfig) (*nodeRunner, error) { @@ -270,33 +286,45 @@ func (c *Cluster) GetAdvertiseAddress() string { return c.currentNodeState().actualLocalAddr } -// GetRemoteAddress returns a known advertise address of a remote manager if -// available. -// todo: change to array/connect with info -func (c *Cluster) GetRemoteAddress() string { +// GetDataPathAddress returns the address to be used for the data path traffic, if specified. 
+func (c *Cluster) GetDataPathAddress() string { c.mu.RLock() defer c.mu.RUnlock() - return c.getRemoteAddress() -} - -func (c *Cluster) getRemoteAddress() string { - state := c.currentNodeState() - if state.swarmNode == nil { - return "" - } - nodeID := state.swarmNode.NodeID() - for _, r := range state.swarmNode.Remotes() { - if r.NodeID != nodeID { - return r.Addr - } + if c.nr != nil { + return c.nr.config.DataPathAddr } return "" } +// GetRemoteAddressList returns the advertise address for each of the remote managers if +// available. +func (c *Cluster) GetRemoteAddressList() []string { + c.mu.RLock() + defer c.mu.RUnlock() + return c.getRemoteAddressList() +} + +func (c *Cluster) getRemoteAddressList() []string { + state := c.currentNodeState() + if state.swarmNode == nil { + return []string{} + } + + nodeID := state.swarmNode.NodeID() + remotes := state.swarmNode.Remotes() + addressList := make([]string, 0, len(remotes)) + for _, r := range remotes { + if r.NodeID != nodeID { + addressList = append(addressList, r.Addr) + } + } + return addressList +} + // ListenClusterEvents returns a channel that receives messages on cluster // participation changes. 
// todo: make cancelable and accessible to multiple callers -func (c *Cluster) ListenClusterEvents() <-chan struct{} { +func (c *Cluster) ListenClusterEvents() <-chan lncluster.ConfigEventType { return c.configEvent } @@ -334,8 +362,9 @@ func (c *Cluster) Cleanup() { c.mu.Unlock() return } - defer c.mu.Unlock() state := c.currentNodeState() + c.mu.Unlock() + if state.IsActiveManager() { active, reachable, unreachable, err := managerStats(state.controlClient, state.NodeID()) if err == nil { @@ -345,11 +374,15 @@ func (c *Cluster) Cleanup() { } } } + if err := node.Stop(); err != nil { logrus.Errorf("failed to shut down cluster node: %v", err) signal.DumpStacks("") } + + c.mu.Lock() c.nr = nil + c.mu.Unlock() } func managerStats(client swarmapi.ControlClient, currentNodeID string) (current bool, reachable int, unreachable int, err error) { @@ -396,3 +429,13 @@ func (c *Cluster) lockedManagerAction(fn func(ctx context.Context, state nodeSta return fn(ctx, state) } + +// SendClusterEvent allows to send cluster events on the configEvent channel +// TODO This method should not be exposed. 
+// Currently it is used to notify the network controller that the keys are +// available +func (c *Cluster) SendClusterEvent(event lncluster.ConfigEventType) { + c.mu.RLock() + defer c.mu.RUnlock() + c.configEvent <- event +} diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/configs.go b/fn/vendor/github.com/docker/docker/daemon/cluster/configs.go new file mode 100644 index 000000000..3d418c140 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/configs.go @@ -0,0 +1,117 @@ +package cluster + +import ( + apitypes "github.com/docker/docker/api/types" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + swarmapi "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +// GetConfig returns a config from a managed swarm cluster +func (c *Cluster) GetConfig(input string) (types.Config, error) { + var config *swarmapi.Config + + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + s, err := getConfig(ctx, state.controlClient, input) + if err != nil { + return err + } + config = s + return nil + }); err != nil { + return types.Config{}, err + } + return convert.ConfigFromGRPC(config), nil +} + +// GetConfigs returns all configs of a managed swarm cluster. 
+func (c *Cluster) GetConfigs(options apitypes.ConfigListOptions) ([]types.Config, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + filters, err := newListConfigsFilters(options.Filters) + if err != nil { + return nil, err + } + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := state.controlClient.ListConfigs(ctx, + &swarmapi.ListConfigsRequest{Filters: filters}) + if err != nil { + return nil, err + } + + configs := []types.Config{} + + for _, config := range r.Configs { + configs = append(configs, convert.ConfigFromGRPC(config)) + } + + return configs, nil +} + +// CreateConfig creates a new config in a managed swarm cluster. +func (c *Cluster) CreateConfig(s types.ConfigSpec) (string, error) { + var resp *swarmapi.CreateConfigResponse + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + configSpec := convert.ConfigSpecToGRPC(s) + + r, err := state.controlClient.CreateConfig(ctx, + &swarmapi.CreateConfigRequest{Spec: &configSpec}) + if err != nil { + return err + } + resp = r + return nil + }); err != nil { + return "", err + } + return resp.Config.ID, nil +} + +// RemoveConfig removes a config from a managed swarm cluster. +func (c *Cluster) RemoveConfig(input string) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + config, err := getConfig(ctx, state.controlClient, input) + if err != nil { + return err + } + + req := &swarmapi.RemoveConfigRequest{ + ConfigID: config.ID, + } + + _, err = state.controlClient.RemoveConfig(ctx, req) + return err + }) +} + +// UpdateConfig updates a config in a managed swarm cluster. 
+// Note: this is not exposed to the CLI but is available from the API only +func (c *Cluster) UpdateConfig(input string, version uint64, spec types.ConfigSpec) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + config, err := getConfig(ctx, state.controlClient, input) + if err != nil { + return err + } + + configSpec := convert.ConfigSpecToGRPC(spec) + + _, err = state.controlClient.UpdateConfig(ctx, + &swarmapi.UpdateConfigRequest{ + ConfigID: config.ID, + ConfigVersion: &swarmapi.Version{ + Index: version, + }, + Spec: &configSpec, + }) + return err + }) +} diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/controllers/plugin/controller.go b/fn/vendor/github.com/docker/docker/daemon/cluster/controllers/plugin/controller.go new file mode 100644 index 000000000..e72edcdd7 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/controllers/plugin/controller.go @@ -0,0 +1,261 @@ +package plugin + +import ( + "io" + "io/ioutil" + "net/http" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + enginetypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm/runtime" + "github.com/docker/docker/plugin" + "github.com/docker/docker/plugin/v2" + "github.com/docker/swarmkit/api" + "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// Controller is the controller for the plugin backend. +// Plugins are managed as a singleton object with a desired state (different from containers). +// With the the plugin controller instead of having a strict create->start->stop->remove +// task lifecycle like containers, we manage the desired state of the plugin and let +// the plugin manager do what it already does and monitor the plugin. +// We'll also end up with many tasks all pointing to the same plugin ID. 
+// +// TODO(@cpuguy83): registry auth is intentionally not supported until we work out +// the right way to pass registry crednetials via secrets. +type Controller struct { + backend Backend + spec runtime.PluginSpec + logger *logrus.Entry + + pluginID string + serviceID string + taskID string + + // hook used to signal tests that `Wait()` is actually ready and waiting + signalWaitReady func() +} + +// Backend is the interface for interacting with the plugin manager +// Controller actions are passed to the configured backend to do the real work. +type Backend interface { + Disable(name string, config *enginetypes.PluginDisableConfig) error + Enable(name string, config *enginetypes.PluginEnableConfig) error + Remove(name string, config *enginetypes.PluginRmConfig) error + Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error + Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error + Get(name string) (*v2.Plugin, error) + SubscribeEvents(buffer int, events ...plugin.Event) (eventCh <-chan interface{}, cancel func()) +} + +// NewController returns a new cluster plugin controller +func NewController(backend Backend, t *api.Task) (*Controller, error) { + spec, err := readSpec(t) + if err != nil { + return nil, err + } + return &Controller{ + backend: backend, + spec: spec, + serviceID: t.ServiceID, + logger: logrus.WithFields(logrus.Fields{ + "controller": "plugin", + "task": t.ID, + "plugin": spec.Name, + })}, nil +} + +func readSpec(t *api.Task) (runtime.PluginSpec, error) { + var cfg runtime.PluginSpec + + generic := t.Spec.GetGeneric() + if err := proto.Unmarshal(generic.Payload.Value, &cfg); err != nil { + return cfg, errors.Wrap(err, "error reading plugin spec") + } + 
return cfg, nil +} + +// Update is the update phase from swarmkit +func (p *Controller) Update(ctx context.Context, t *api.Task) error { + p.logger.Debug("Update") + return nil +} + +// Prepare is the prepare phase from swarmkit +func (p *Controller) Prepare(ctx context.Context) (err error) { + p.logger.Debug("Prepare") + + remote, err := reference.ParseNormalizedNamed(p.spec.Remote) + if err != nil { + return errors.Wrapf(err, "error parsing remote reference %q", p.spec.Remote) + } + + if p.spec.Name == "" { + p.spec.Name = remote.String() + } + + var authConfig enginetypes.AuthConfig + privs := convertPrivileges(p.spec.Privileges) + + pl, err := p.backend.Get(p.spec.Name) + + defer func() { + if pl != nil && err == nil { + pl.Acquire() + } + }() + + if err == nil && pl != nil { + if pl.SwarmServiceID != p.serviceID { + return errors.Errorf("plugin already exists: %s", p.spec.Name) + } + if pl.IsEnabled() { + if err := p.backend.Disable(pl.GetID(), &enginetypes.PluginDisableConfig{ForceDisable: true}); err != nil { + p.logger.WithError(err).Debug("could not disable plugin before running upgrade") + } + } + p.pluginID = pl.GetID() + return p.backend.Upgrade(ctx, remote, p.spec.Name, nil, &authConfig, privs, ioutil.Discard) + } + + if err := p.backend.Pull(ctx, remote, p.spec.Name, nil, &authConfig, privs, ioutil.Discard, plugin.WithSwarmService(p.serviceID)); err != nil { + return err + } + pl, err = p.backend.Get(p.spec.Name) + if err != nil { + return err + } + p.pluginID = pl.GetID() + + return nil +} + +// Start is the start phase from swarmkit +func (p *Controller) Start(ctx context.Context) error { + p.logger.Debug("Start") + + pl, err := p.backend.Get(p.pluginID) + if err != nil { + return err + } + + if p.spec.Disabled { + if pl.IsEnabled() { + return p.backend.Disable(p.pluginID, &enginetypes.PluginDisableConfig{ForceDisable: false}) + } + return nil + } + if !pl.IsEnabled() { + return p.backend.Enable(p.pluginID, &enginetypes.PluginEnableConfig{Timeout: 
30}) + } + return nil +} + +// Wait causes the task to wait until returned +func (p *Controller) Wait(ctx context.Context) error { + p.logger.Debug("Wait") + + pl, err := p.backend.Get(p.pluginID) + if err != nil { + return err + } + + events, cancel := p.backend.SubscribeEvents(1, plugin.EventDisable{Plugin: pl.PluginObj}, plugin.EventRemove{Plugin: pl.PluginObj}, plugin.EventEnable{Plugin: pl.PluginObj}) + defer cancel() + + if p.signalWaitReady != nil { + p.signalWaitReady() + } + + if !p.spec.Disabled != pl.IsEnabled() { + return errors.New("mismatched plugin state") + } + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case e := <-events: + p.logger.Debugf("got event %#T", e) + + switch e.(type) { + case plugin.EventEnable: + if p.spec.Disabled { + return errors.New("plugin enabled") + } + case plugin.EventRemove: + return errors.New("plugin removed") + case plugin.EventDisable: + if !p.spec.Disabled { + return errors.New("plugin disabled") + } + } + } + } +} + +func isNotFound(err error) bool { + _, ok := errors.Cause(err).(plugin.ErrNotFound) + return ok +} + +// Shutdown is the shutdown phase from swarmkit +func (p *Controller) Shutdown(ctx context.Context) error { + p.logger.Debug("Shutdown") + return nil +} + +// Terminate is the terminate phase from swarmkit +func (p *Controller) Terminate(ctx context.Context) error { + p.logger.Debug("Terminate") + return nil +} + +// Remove is the remove phase from swarmkit +func (p *Controller) Remove(ctx context.Context) error { + p.logger.Debug("Remove") + + pl, err := p.backend.Get(p.pluginID) + if err != nil { + if isNotFound(err) { + return nil + } + return err + } + + pl.Release() + if pl.GetRefCount() > 0 { + p.logger.Debug("skipping remove due to ref count") + return nil + } + + // This may error because we have exactly 1 plugin, but potentially multiple + // tasks which are calling remove. 
+ err = p.backend.Remove(p.pluginID, &enginetypes.PluginRmConfig{ForceRemove: true}) + if isNotFound(err) { + return nil + } + return err +} + +// Close is the close phase from swarmkit +func (p *Controller) Close() error { + p.logger.Debug("Close") + return nil +} + +func convertPrivileges(ls []*runtime.PluginPrivilege) enginetypes.PluginPrivileges { + var out enginetypes.PluginPrivileges + for _, p := range ls { + pp := enginetypes.PluginPrivilege{ + Name: p.Name, + Description: p.Description, + Value: p.Value, + } + out = append(out, pp) + } + return out +} diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/controllers/plugin/controller_test.go b/fn/vendor/github.com/docker/docker/daemon/cluster/controllers/plugin/controller_test.go new file mode 100644 index 000000000..17b77cc89 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/controllers/plugin/controller_test.go @@ -0,0 +1,390 @@ +package plugin + +import ( + "errors" + "io" + "io/ioutil" + "net/http" + "strings" + "testing" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + enginetypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm/runtime" + "github.com/docker/docker/pkg/pubsub" + "github.com/docker/docker/plugin" + "github.com/docker/docker/plugin/v2" + "golang.org/x/net/context" +) + +const ( + pluginTestName = "test" + pluginTestRemote = "testremote" + pluginTestRemoteUpgrade = "testremote2" +) + +func TestPrepare(t *testing.T) { + b := newMockBackend() + c := newTestController(b, false) + ctx := context.Background() + + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + + if b.p == nil { + t.Fatal("pull not performed") + } + + c = newTestController(b, false) + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + if b.p == nil { + t.Fatal("unexpected nil") + } + if b.p.PluginObj.PluginReference != pluginTestRemoteUpgrade { + t.Fatal("upgrade not performed") + } + + c = 
newTestController(b, false) + c.serviceID = "1" + if err := c.Prepare(ctx); err == nil { + t.Fatal("expected error on prepare") + } +} + +func TestStart(t *testing.T) { + b := newMockBackend() + c := newTestController(b, false) + ctx := context.Background() + + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + + if err := c.Start(ctx); err != nil { + t.Fatal(err) + } + + if !b.p.IsEnabled() { + t.Fatal("expected plugin to be enabled") + } + + c = newTestController(b, true) + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + if err := c.Start(ctx); err != nil { + t.Fatal(err) + } + if b.p.IsEnabled() { + t.Fatal("expected plugin to be disabled") + } + + c = newTestController(b, false) + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + if err := c.Start(ctx); err != nil { + t.Fatal(err) + } + if !b.p.IsEnabled() { + t.Fatal("expected plugin to be enabled") + } +} + +func TestWaitCancel(t *testing.T) { + b := newMockBackend() + c := newTestController(b, true) + ctx := context.Background() + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + if err := c.Start(ctx); err != nil { + t.Fatal(err) + } + + ctxCancel, cancel := context.WithCancel(ctx) + chErr := make(chan error) + go func() { + chErr <- c.Wait(ctxCancel) + }() + cancel() + select { + case err := <-chErr: + if err != context.Canceled { + t.Fatal(err) + } + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for cancelation") + } +} + +func TestWaitDisabled(t *testing.T) { + b := newMockBackend() + c := newTestController(b, true) + ctx := context.Background() + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + if err := c.Start(ctx); err != nil { + t.Fatal(err) + } + + chErr := make(chan error) + go func() { + chErr <- c.Wait(ctx) + }() + + if err := b.Enable("test", nil); err != nil { + t.Fatal(err) + } + select { + case err := <-chErr: + if err == nil { + t.Fatal("expected error") + } + case <-time.After(10 * time.Second): + t.Fatal("timeout 
waiting for event") + } + + if err := c.Start(ctx); err != nil { + t.Fatal(err) + } + + ctxWaitReady, cancelCtxWaitReady := context.WithTimeout(ctx, 30*time.Second) + c.signalWaitReady = cancelCtxWaitReady + defer cancelCtxWaitReady() + + go func() { + chErr <- c.Wait(ctx) + }() + + chEvent, cancel := b.SubscribeEvents(1) + defer cancel() + + if err := b.Disable("test", nil); err != nil { + t.Fatal(err) + } + + select { + case <-chEvent: + <-ctxWaitReady.Done() + if err := ctxWaitReady.Err(); err == context.DeadlineExceeded { + t.Fatal(err) + } + select { + case <-chErr: + t.Fatal("wait returned unexpectedly") + default: + // all good + } + case <-chErr: + t.Fatal("wait returned unexpectedly") + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for event") + } + + if err := b.Remove("test", nil); err != nil { + t.Fatal(err) + } + select { + case err := <-chErr: + if err == nil { + t.Fatal("expected error") + } + if !strings.Contains(err.Error(), "removed") { + t.Fatal(err) + } + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for event") + } +} + +func TestWaitEnabled(t *testing.T) { + b := newMockBackend() + c := newTestController(b, false) + ctx := context.Background() + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + if err := c.Start(ctx); err != nil { + t.Fatal(err) + } + + chErr := make(chan error) + go func() { + chErr <- c.Wait(ctx) + }() + + if err := b.Disable("test", nil); err != nil { + t.Fatal(err) + } + select { + case err := <-chErr: + if err == nil { + t.Fatal("expected error") + } + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for event") + } + + if err := c.Start(ctx); err != nil { + t.Fatal(err) + } + + ctxWaitReady, ctxWaitCancel := context.WithCancel(ctx) + c.signalWaitReady = ctxWaitCancel + defer ctxWaitCancel() + + go func() { + chErr <- c.Wait(ctx) + }() + + chEvent, cancel := b.SubscribeEvents(1) + defer cancel() + + if err := b.Enable("test", nil); err != nil { + t.Fatal(err) + 
} + + select { + case <-chEvent: + <-ctxWaitReady.Done() + if err := ctxWaitReady.Err(); err == context.DeadlineExceeded { + t.Fatal(err) + } + select { + case <-chErr: + t.Fatal("wait returned unexpectedly") + default: + // all good + } + case <-chErr: + t.Fatal("wait returned unexpectedly") + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for event") + } + + if err := b.Remove("test", nil); err != nil { + t.Fatal(err) + } + select { + case err := <-chErr: + if err == nil { + t.Fatal("expected error") + } + if !strings.Contains(err.Error(), "removed") { + t.Fatal(err) + } + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for event") + } +} + +func TestRemove(t *testing.T) { + b := newMockBackend() + c := newTestController(b, false) + ctx := context.Background() + + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + if err := c.Shutdown(ctx); err != nil { + t.Fatal(err) + } + + c2 := newTestController(b, false) + if err := c2.Prepare(ctx); err != nil { + t.Fatal(err) + } + + if err := c.Remove(ctx); err != nil { + t.Fatal(err) + } + if b.p == nil { + t.Fatal("plugin removed unexpectedly") + } + if err := c2.Shutdown(ctx); err != nil { + t.Fatal(err) + } + if err := c2.Remove(ctx); err != nil { + t.Fatal(err) + } + if b.p != nil { + t.Fatal("expected plugin to be removed") + } +} + +func newTestController(b Backend, disabled bool) *Controller { + return &Controller{ + logger: &logrus.Entry{Logger: &logrus.Logger{Out: ioutil.Discard}}, + backend: b, + spec: runtime.PluginSpec{ + Name: pluginTestName, + Remote: pluginTestRemote, + Disabled: disabled, + }, + } +} + +func newMockBackend() *mockBackend { + return &mockBackend{ + pub: pubsub.NewPublisher(0, 0), + } +} + +type mockBackend struct { + p *v2.Plugin + pub *pubsub.Publisher +} + +func (m *mockBackend) Disable(name string, config *enginetypes.PluginDisableConfig) error { + m.p.PluginObj.Enabled = false + m.pub.Publish(plugin.EventDisable{}) + return nil +} + +func (m 
*mockBackend) Enable(name string, config *enginetypes.PluginEnableConfig) error { + m.p.PluginObj.Enabled = true + m.pub.Publish(plugin.EventEnable{}) + return nil +} + +func (m *mockBackend) Remove(name string, config *enginetypes.PluginRmConfig) error { + m.p = nil + m.pub.Publish(plugin.EventRemove{}) + return nil +} + +func (m *mockBackend) Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error { + m.p = &v2.Plugin{ + PluginObj: enginetypes.Plugin{ + ID: "1234", + Name: name, + PluginReference: ref.String(), + }, + } + return nil +} + +func (m *mockBackend) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error { + m.p.PluginObj.PluginReference = pluginTestRemoteUpgrade + return nil +} + +func (m *mockBackend) Get(name string) (*v2.Plugin, error) { + if m.p == nil { + return nil, errors.New("not found") + } + return m.p, nil +} + +func (m *mockBackend) SubscribeEvents(buffer int, events ...plugin.Event) (eventCh <-chan interface{}, cancel func()) { + ch := m.pub.SubscribeTopicWithBuffer(nil, buffer) + cancel = func() { m.pub.Evict(ch) } + return ch, cancel +} diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/convert/config.go b/fn/vendor/github.com/docker/docker/daemon/cluster/convert/config.go new file mode 100644 index 000000000..6b28712ff --- /dev/null +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/convert/config.go @@ -0,0 +1,61 @@ +package convert + +import ( + swarmtypes "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +// ConfigFromGRPC converts a grpc Config to a Config. 
+func ConfigFromGRPC(s *swarmapi.Config) swarmtypes.Config { + config := swarmtypes.Config{ + ID: s.ID, + Spec: swarmtypes.ConfigSpec{ + Annotations: annotationsFromGRPC(s.Spec.Annotations), + Data: s.Spec.Data, + }, + } + + config.Version.Index = s.Meta.Version.Index + // Meta + config.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt) + config.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt) + + return config +} + +// ConfigSpecToGRPC converts Config to a grpc Config. +func ConfigSpecToGRPC(s swarmtypes.ConfigSpec) swarmapi.ConfigSpec { + return swarmapi.ConfigSpec{ + Annotations: swarmapi.Annotations{ + Name: s.Name, + Labels: s.Labels, + }, + Data: s.Data, + } +} + +// ConfigReferencesFromGRPC converts a slice of grpc ConfigReference to ConfigReference +func ConfigReferencesFromGRPC(s []*swarmapi.ConfigReference) []*swarmtypes.ConfigReference { + refs := []*swarmtypes.ConfigReference{} + + for _, r := range s { + ref := &swarmtypes.ConfigReference{ + ConfigID: r.ConfigID, + ConfigName: r.ConfigName, + } + + if t, ok := r.Target.(*swarmapi.ConfigReference_File); ok { + ref.File = &swarmtypes.ConfigReferenceFileTarget{ + Name: t.File.Name, + UID: t.File.UID, + GID: t.File.GID, + Mode: t.File.Mode, + } + } + + refs = append(refs, ref) + } + + return refs +} diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/convert/container.go b/fn/vendor/github.com/docker/docker/daemon/cluster/convert/container.go index 99753c8d7..6ac6f331f 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/convert/container.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/convert/container.go @@ -13,8 +13,11 @@ import ( gogotypes "github.com/gogo/protobuf/types" ) -func containerSpecFromGRPC(c *swarmapi.ContainerSpec) types.ContainerSpec { - containerSpec := types.ContainerSpec{ +func containerSpecFromGRPC(c *swarmapi.ContainerSpec) *types.ContainerSpec { + if c == nil { + return nil + } + containerSpec := &types.ContainerSpec{ Image: 
c.Image, Labels: c.Labels, Command: c.Command, @@ -30,6 +33,7 @@ func containerSpecFromGRPC(c *swarmapi.ContainerSpec) types.ContainerSpec { ReadOnly: c.ReadOnly, Hosts: c.Hosts, Secrets: secretReferencesFromGRPC(c.Secrets), + Configs: configReferencesFromGRPC(c.Configs), } if c.DNSConfig != nil { @@ -137,6 +141,7 @@ func secretReferencesToGRPC(sr []*types.SecretReference) []*swarmapi.SecretRefer return refs } + func secretReferencesFromGRPC(sr []*swarmapi.SecretReference) []*types.SecretReference { refs := make([]*types.SecretReference, 0, len(sr)) for _, s := range sr { @@ -161,7 +166,55 @@ func secretReferencesFromGRPC(sr []*swarmapi.SecretReference) []*types.SecretRef return refs } -func containerToGRPC(c types.ContainerSpec) (*swarmapi.ContainerSpec, error) { +func configReferencesToGRPC(sr []*types.ConfigReference) []*swarmapi.ConfigReference { + refs := make([]*swarmapi.ConfigReference, 0, len(sr)) + for _, s := range sr { + ref := &swarmapi.ConfigReference{ + ConfigID: s.ConfigID, + ConfigName: s.ConfigName, + } + if s.File != nil { + ref.Target = &swarmapi.ConfigReference_File{ + File: &swarmapi.FileTarget{ + Name: s.File.Name, + UID: s.File.UID, + GID: s.File.GID, + Mode: s.File.Mode, + }, + } + } + + refs = append(refs, ref) + } + + return refs +} + +func configReferencesFromGRPC(sr []*swarmapi.ConfigReference) []*types.ConfigReference { + refs := make([]*types.ConfigReference, 0, len(sr)) + for _, s := range sr { + target := s.GetFile() + if target == nil { + // not a file target + logrus.Warnf("config target not a file: config=%s", s.ConfigID) + continue + } + refs = append(refs, &types.ConfigReference{ + File: &types.ConfigReferenceFileTarget{ + Name: target.Name, + UID: target.UID, + GID: target.GID, + Mode: target.Mode, + }, + ConfigID: s.ConfigID, + ConfigName: s.ConfigName, + }) + } + + return refs +} + +func containerToGRPC(c *types.ContainerSpec) (*swarmapi.ContainerSpec, error) { containerSpec := &swarmapi.ContainerSpec{ Image: c.Image, Labels: 
c.Labels, @@ -178,6 +231,7 @@ func containerToGRPC(c types.ContainerSpec) (*swarmapi.ContainerSpec, error) { ReadOnly: c.ReadOnly, Hosts: c.Hosts, Secrets: secretReferencesToGRPC(c.Secrets), + Configs: configReferencesToGRPC(c.Configs), } if c.DNSConfig != nil { diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/convert/network.go b/fn/vendor/github.com/docker/docker/daemon/cluster/convert/network.go index 6e28b172f..6f8b7938c 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/convert/network.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/convert/network.go @@ -6,6 +6,7 @@ import ( basictypes "github.com/docker/docker/api/types" networktypes "github.com/docker/docker/api/types/network" types "github.com/docker/docker/api/types/swarm" + netconst "github.com/docker/libnetwork/datastore" swarmapi "github.com/docker/swarmkit/api" gogotypes "github.com/gogo/protobuf/types" ) @@ -28,12 +29,19 @@ func networkFromGRPC(n *swarmapi.Network) types.Network { IPv6Enabled: n.Spec.Ipv6Enabled, Internal: n.Spec.Internal, Attachable: n.Spec.Attachable, - Ingress: n.Spec.Ingress, + Ingress: IsIngressNetwork(n), IPAMOptions: ipamFromGRPC(n.Spec.IPAM), + Scope: netconst.SwarmScope, }, IPAMOptions: ipamFromGRPC(n.IPAM), } + if n.Spec.GetNetwork() != "" { + network.Spec.ConfigFrom = &networktypes.ConfigReference{ + Network: n.Spec.GetNetwork(), + } + } + // Meta network.Version.Index = n.Meta.Version.Index network.CreatedAt, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt) @@ -152,15 +160,21 @@ func BasicNetworkFromGRPC(n swarmapi.Network) basictypes.NetworkResource { nr := basictypes.NetworkResource{ ID: n.ID, Name: n.Spec.Annotations.Name, - Scope: "swarm", + Scope: netconst.SwarmScope, EnableIPv6: spec.Ipv6Enabled, IPAM: ipam, Internal: spec.Internal, Attachable: spec.Attachable, - Ingress: spec.Ingress, + Ingress: IsIngressNetwork(&n), Labels: n.Spec.Annotations.Labels, } + if n.Spec.GetNetwork() != "" { + nr.ConfigFrom = 
networktypes.ConfigReference{ + Network: n.Spec.GetNetwork(), + } + } + if n.DriverState != nil { nr.Driver = n.DriverState.Name nr.Options = n.DriverState.Options @@ -206,5 +220,20 @@ func BasicNetworkCreateToGRPC(create basictypes.NetworkCreateRequest) swarmapi.N } ns.IPAM.Configs = ipamSpec } + if create.ConfigFrom != nil { + ns.ConfigFrom = &swarmapi.NetworkSpec_Network{ + Network: create.ConfigFrom.Network, + } + } return ns } + +// IsIngressNetwork check if the swarm network is an ingress network +func IsIngressNetwork(n *swarmapi.Network) bool { + if n.Spec.Ingress { + return true + } + // Check if legacy defined ingress network + _, ok := n.Spec.Annotations.Labels["com.docker.swarm.internal"] + return ok && n.Spec.Annotations.Name == "ingress" +} diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/convert/node.go b/fn/vendor/github.com/docker/docker/daemon/cluster/convert/node.go index fe6cdfee9..f075783e8 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/convert/node.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/convert/node.go @@ -50,6 +50,11 @@ func NodeFromGRPC(n swarmapi.Node) types.Node { node.Description.Engine.Plugins = append(node.Description.Engine.Plugins, types.PluginDescription{Type: plugin.Type, Name: plugin.Name}) } } + if n.Description.TLSInfo != nil { + node.Description.TLSInfo.TrustRoot = string(n.Description.TLSInfo.TrustRoot) + node.Description.TLSInfo.CertIssuerPublicKey = n.Description.TLSInfo.CertIssuerPublicKey + node.Description.TLSInfo.CertIssuerSubject = n.Description.TLSInfo.CertIssuerSubject + } } //Manager diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/convert/service.go b/fn/vendor/github.com/docker/docker/daemon/cluster/convert/service.go index 98ea22635..947debdf5 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/convert/service.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/convert/service.go @@ -5,17 +5,33 @@ import ( "strings" types 
"github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/swarm/runtime" "github.com/docker/docker/pkg/namesgenerator" swarmapi "github.com/docker/swarmkit/api" + "github.com/gogo/protobuf/proto" gogotypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" +) + +var ( + // ErrUnsupportedRuntime returns an error if the runtime is not supported by the daemon + ErrUnsupportedRuntime = errors.New("unsupported runtime") ) // ServiceFromGRPC converts a grpc Service to a Service. -func ServiceFromGRPC(s swarmapi.Service) types.Service { +func ServiceFromGRPC(s swarmapi.Service) (types.Service, error) { + curSpec, err := serviceSpecFromGRPC(&s.Spec) + if err != nil { + return types.Service{}, err + } + prevSpec, err := serviceSpecFromGRPC(s.PreviousSpec) + if err != nil { + return types.Service{}, err + } service := types.Service{ ID: s.ID, - Spec: *serviceSpecFromGRPC(&s.Spec), - PreviousSpec: serviceSpecFromGRPC(s.PreviousSpec), + Spec: *curSpec, + PreviousSpec: prevSpec, Endpoint: endpointFromGRPC(s.Endpoint), } @@ -44,34 +60,58 @@ func ServiceFromGRPC(s swarmapi.Service) types.Service { } startedAt, _ := gogotypes.TimestampFromProto(s.UpdateStatus.StartedAt) - if !startedAt.IsZero() { + if !startedAt.IsZero() && startedAt.Unix() != 0 { service.UpdateStatus.StartedAt = &startedAt } completedAt, _ := gogotypes.TimestampFromProto(s.UpdateStatus.CompletedAt) - if !completedAt.IsZero() { + if !completedAt.IsZero() && completedAt.Unix() != 0 { service.UpdateStatus.CompletedAt = &completedAt } service.UpdateStatus.Message = s.UpdateStatus.Message } - return service + return service, nil } -func serviceSpecFromGRPC(spec *swarmapi.ServiceSpec) *types.ServiceSpec { +func serviceSpecFromGRPC(spec *swarmapi.ServiceSpec) (*types.ServiceSpec, error) { if spec == nil { - return nil + return nil, nil } serviceNetworks := make([]types.NetworkAttachmentConfig, 0, len(spec.Networks)) for _, n := range spec.Networks { - serviceNetworks = 
append(serviceNetworks, types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + netConfig := types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverOpts: n.DriverAttachmentOpts} + serviceNetworks = append(serviceNetworks, netConfig) + + } + + taskTemplate, err := taskSpecFromGRPC(spec.Task) + if err != nil { + return nil, err + } + + switch t := spec.Task.GetRuntime().(type) { + case *swarmapi.TaskSpec_Container: + containerConfig := t.Container + taskTemplate.ContainerSpec = containerSpecFromGRPC(containerConfig) + taskTemplate.Runtime = types.RuntimeContainer + case *swarmapi.TaskSpec_Generic: + switch t.Generic.Kind { + case string(types.RuntimePlugin): + taskTemplate.Runtime = types.RuntimePlugin + default: + return nil, fmt.Errorf("unknown task runtime type: %s", t.Generic.Payload.TypeUrl) + } + + default: + return nil, fmt.Errorf("error creating service; unsupported runtime %T", t) } convertedSpec := &types.ServiceSpec{ Annotations: annotationsFromGRPC(spec.Annotations), - TaskTemplate: taskSpecFromGRPC(spec.Task), + TaskTemplate: taskTemplate, Networks: serviceNetworks, EndpointSpec: endpointSpecFromGRPC(spec.Endpoint), } @@ -90,7 +130,7 @@ func serviceSpecFromGRPC(spec *swarmapi.ServiceSpec) *types.ServiceSpec { } } - return convertedSpec + return convertedSpec, nil } // ServiceSpecToGRPC converts a ServiceSpec to a grpc ServiceSpec. 
@@ -102,12 +142,15 @@ func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) { serviceNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.Networks)) for _, n := range s.Networks { - serviceNetworks = append(serviceNetworks, &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + netConfig := &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverAttachmentOpts: n.DriverOpts} + serviceNetworks = append(serviceNetworks, netConfig) } taskNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.TaskTemplate.Networks)) for _, n := range s.TaskTemplate.Networks { - taskNetworks = append(taskNetworks, &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + netConfig := &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverAttachmentOpts: n.DriverOpts} + taskNetworks = append(taskNetworks, netConfig) + } spec := swarmapi.ServiceSpec{ @@ -124,11 +167,40 @@ func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) { Networks: serviceNetworks, } - containerSpec, err := containerToGRPC(s.TaskTemplate.ContainerSpec) - if err != nil { - return swarmapi.ServiceSpec{}, err + switch s.TaskTemplate.Runtime { + case types.RuntimeContainer, "": // if empty runtime default to container + if s.TaskTemplate.ContainerSpec != nil { + containerSpec, err := containerToGRPC(s.TaskTemplate.ContainerSpec) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Task.Runtime = &swarmapi.TaskSpec_Container{Container: containerSpec} + } + case types.RuntimePlugin: + if s.Mode.Replicated != nil { + return swarmapi.ServiceSpec{}, errors.New("plugins must not use replicated mode") + } + + s.Mode.Global = &types.GlobalService{} // must always be global + + if s.TaskTemplate.PluginSpec != nil { + pluginSpec, err := proto.Marshal(s.TaskTemplate.PluginSpec) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Task.Runtime = 
&swarmapi.TaskSpec_Generic{ + Generic: &swarmapi.GenericRuntimeSpec{ + Kind: string(types.RuntimePlugin), + Payload: &gogotypes.Any{ + TypeUrl: string(types.RuntimeURLPlugin), + Value: pluginSpec, + }, + }, + } + } + default: + return swarmapi.ServiceSpec{}, ErrUnsupportedRuntime } - spec.Task.Runtime = &swarmapi.TaskSpec_Container{Container: containerSpec} restartPolicy, err := restartPolicyToGRPC(s.TaskTemplate.RestartPolicy) if err != nil { @@ -149,9 +221,17 @@ func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) { }) } } + var platforms []*swarmapi.Platform + for _, plat := range s.TaskTemplate.Placement.Platforms { + platforms = append(platforms, &swarmapi.Platform{ + Architecture: plat.Architecture, + OS: plat.OS, + }) + } spec.Task.Placement = &swarmapi.Placement{ Constraints: s.TaskTemplate.Placement.Constraints, Preferences: preferences, + Platforms: platforms, } } @@ -344,6 +424,13 @@ func placementFromGRPC(p *swarmapi.Placement) *types.Placement { } } + for _, plat := range p.Platforms { + r.Platforms = append(r.Platforms, types.Platform{ + Architecture: plat.Architecture, + OS: plat.OS, + }) + } + return r } @@ -440,14 +527,14 @@ func updateConfigToGRPC(updateConfig *types.UpdateConfig) (*swarmapi.UpdateConfi return converted, nil } -func taskSpecFromGRPC(taskSpec swarmapi.TaskSpec) types.TaskSpec { +func taskSpecFromGRPC(taskSpec swarmapi.TaskSpec) (types.TaskSpec, error) { taskNetworks := make([]types.NetworkAttachmentConfig, 0, len(taskSpec.Networks)) for _, n := range taskSpec.Networks { - taskNetworks = append(taskNetworks, types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + netConfig := types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverOpts: n.DriverAttachmentOpts} + taskNetworks = append(taskNetworks, netConfig) } - return types.TaskSpec{ - ContainerSpec: containerSpecFromGRPC(taskSpec.GetContainer()), + t := types.TaskSpec{ Resources: resourcesFromGRPC(taskSpec.Resources), 
RestartPolicy: restartPolicyFromGRPC(taskSpec.Restart), Placement: placementFromGRPC(taskSpec.Placement), @@ -455,4 +542,26 @@ func taskSpecFromGRPC(taskSpec swarmapi.TaskSpec) types.TaskSpec { Networks: taskNetworks, ForceUpdate: taskSpec.ForceUpdate, } + + switch taskSpec.GetRuntime().(type) { + case *swarmapi.TaskSpec_Container, nil: + c := taskSpec.GetContainer() + if c != nil { + t.ContainerSpec = containerSpecFromGRPC(c) + } + case *swarmapi.TaskSpec_Generic: + g := taskSpec.GetGeneric() + if g != nil { + switch g.Kind { + case string(types.RuntimePlugin): + var p runtime.PluginSpec + if err := proto.Unmarshal(g.Payload.Value, &p); err != nil { + return t, errors.Wrap(err, "error unmarshalling plugin spec") + } + t.PluginSpec = &p + } + } + } + + return t, nil } diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/convert/service_test.go b/fn/vendor/github.com/docker/docker/daemon/cluster/convert/service_test.go new file mode 100644 index 000000000..1b6598974 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/convert/service_test.go @@ -0,0 +1,150 @@ +package convert + +import ( + "testing" + + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/swarm/runtime" + swarmapi "github.com/docker/swarmkit/api" + google_protobuf3 "github.com/gogo/protobuf/types" +) + +func TestServiceConvertFromGRPCRuntimeContainer(t *testing.T) { + gs := swarmapi.Service{ + Meta: swarmapi.Meta{ + Version: swarmapi.Version{ + Index: 1, + }, + CreatedAt: nil, + UpdatedAt: nil, + }, + SpecVersion: &swarmapi.Version{ + Index: 1, + }, + Spec: swarmapi.ServiceSpec{ + Task: swarmapi.TaskSpec{ + Runtime: &swarmapi.TaskSpec_Container{ + Container: &swarmapi.ContainerSpec{ + Image: "alpine:latest", + }, + }, + }, + }, + } + + svc, err := ServiceFromGRPC(gs) + if err != nil { + t.Fatal(err) + } + + if svc.Spec.TaskTemplate.Runtime != swarmtypes.RuntimeContainer { + t.Fatalf("expected type %s; received %T", 
swarmtypes.RuntimeContainer, svc.Spec.TaskTemplate.Runtime) + } +} + +func TestServiceConvertFromGRPCGenericRuntimePlugin(t *testing.T) { + kind := string(swarmtypes.RuntimePlugin) + url := swarmtypes.RuntimeURLPlugin + gs := swarmapi.Service{ + Meta: swarmapi.Meta{ + Version: swarmapi.Version{ + Index: 1, + }, + CreatedAt: nil, + UpdatedAt: nil, + }, + SpecVersion: &swarmapi.Version{ + Index: 1, + }, + Spec: swarmapi.ServiceSpec{ + Task: swarmapi.TaskSpec{ + Runtime: &swarmapi.TaskSpec_Generic{ + Generic: &swarmapi.GenericRuntimeSpec{ + Kind: kind, + Payload: &google_protobuf3.Any{ + TypeUrl: string(url), + }, + }, + }, + }, + }, + } + + svc, err := ServiceFromGRPC(gs) + if err != nil { + t.Fatal(err) + } + + if svc.Spec.TaskTemplate.Runtime != swarmtypes.RuntimePlugin { + t.Fatalf("expected type %s; received %T", swarmtypes.RuntimePlugin, svc.Spec.TaskTemplate.Runtime) + } +} + +func TestServiceConvertToGRPCGenericRuntimePlugin(t *testing.T) { + s := swarmtypes.ServiceSpec{ + TaskTemplate: swarmtypes.TaskSpec{ + Runtime: swarmtypes.RuntimePlugin, + PluginSpec: &runtime.PluginSpec{}, + }, + Mode: swarmtypes.ServiceMode{ + Global: &swarmtypes.GlobalService{}, + }, + } + + svc, err := ServiceSpecToGRPC(s) + if err != nil { + t.Fatal(err) + } + + v, ok := svc.Task.Runtime.(*swarmapi.TaskSpec_Generic) + if !ok { + t.Fatal("expected type swarmapi.TaskSpec_Generic") + } + + if v.Generic.Payload.TypeUrl != string(swarmtypes.RuntimeURLPlugin) { + t.Fatalf("expected url %s; received %s", swarmtypes.RuntimeURLPlugin, v.Generic.Payload.TypeUrl) + } +} + +func TestServiceConvertToGRPCContainerRuntime(t *testing.T) { + image := "alpine:latest" + s := swarmtypes.ServiceSpec{ + TaskTemplate: swarmtypes.TaskSpec{ + ContainerSpec: &swarmtypes.ContainerSpec{ + Image: image, + }, + }, + Mode: swarmtypes.ServiceMode{ + Global: &swarmtypes.GlobalService{}, + }, + } + + svc, err := ServiceSpecToGRPC(s) + if err != nil { + t.Fatal(err) + } + + v, ok := 
svc.Task.Runtime.(*swarmapi.TaskSpec_Container) + if !ok { + t.Fatal("expected type swarmapi.TaskSpec_Container") + } + + if v.Container.Image != image { + t.Fatalf("expected image %s; received %s", image, v.Container.Image) + } +} + +func TestServiceConvertToGRPCGenericRuntimeCustom(t *testing.T) { + s := swarmtypes.ServiceSpec{ + TaskTemplate: swarmtypes.TaskSpec{ + Runtime: "customruntime", + }, + Mode: swarmtypes.ServiceMode{ + Global: &swarmtypes.GlobalService{}, + }, + } + + if _, err := ServiceSpecToGRPC(s); err != ErrUnsupportedRuntime { + t.Fatal(err) + } +} diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/convert/swarm.go b/fn/vendor/github.com/docker/docker/daemon/cluster/convert/swarm.go index 98e0ce25e..2ea89b968 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/convert/swarm.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/convert/swarm.go @@ -7,6 +7,7 @@ import ( types "github.com/docker/docker/api/types/swarm" swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" gogotypes "github.com/gogo/protobuf/types" ) @@ -29,7 +30,17 @@ func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm { EncryptionConfig: types.EncryptionConfig{ AutoLockManagers: c.Spec.EncryptionConfig.AutoLockManagers, }, + CAConfig: types.CAConfig{ + // do not include the signing CA cert or key (it should already be redacted via the swarm APIs) - + // the key because it's secret, and the cert because otherwise doing a get + update on the spec + // can cause issues because the key would be missing and the cert wouldn't + ForceRotate: c.Spec.CAConfig.ForceRotate, + }, }, + TLSInfo: types.TLSInfo{ + TrustRoot: string(c.RootCA.CACert), + }, + RootRotationInProgress: c.RootCA.RootRotation != nil, }, JoinTokens: types.JoinTokens{ Worker: c.RootCA.JoinTokens.Worker, @@ -37,6 +48,12 @@ func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm { }, } + issuerInfo, err := ca.IssuerFromAPIRootCA(&c.RootCA) + if err == nil && issuerInfo != nil { + 
swarm.TLSInfo.CertIssuerSubject = issuerInfo.Subject + swarm.TLSInfo.CertIssuerPublicKey = issuerInfo.PublicKey + } + heartbeatPeriod, _ := gogotypes.DurationFromProto(c.Spec.Dispatcher.HeartbeatPeriod) swarm.Spec.Dispatcher.HeartbeatPeriod = heartbeatPeriod @@ -47,6 +64,7 @@ func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm { Protocol: types.ExternalCAProtocol(strings.ToLower(ca.Protocol.String())), URL: ca.URL, Options: ca.Options, + CACert: string(ca.CACert), }) } @@ -102,6 +120,14 @@ func MergeSwarmSpecToGRPC(s types.Spec, spec swarmapi.ClusterSpec) (swarmapi.Clu if s.CAConfig.NodeCertExpiry != 0 { spec.CAConfig.NodeCertExpiry = gogotypes.DurationProto(s.CAConfig.NodeCertExpiry) } + if s.CAConfig.SigningCACert != "" { + spec.CAConfig.SigningCACert = []byte(s.CAConfig.SigningCACert) + } + if s.CAConfig.SigningCAKey != "" { + // do propagate the signing CA key here because we want to provide it TO the swarm APIs + spec.CAConfig.SigningCAKey = []byte(s.CAConfig.SigningCAKey) + } + spec.CAConfig.ForceRotate = s.CAConfig.ForceRotate for _, ca := range s.CAConfig.ExternalCAs { protocol, ok := swarmapi.ExternalCA_CAProtocol_value[strings.ToUpper(string(ca.Protocol))] @@ -112,6 +138,7 @@ func MergeSwarmSpecToGRPC(s types.Spec, spec swarmapi.ClusterSpec) (swarmapi.Clu Protocol: swarmapi.ExternalCA_CAProtocol(protocol), URL: ca.URL, Options: ca.Options, + CACert: []byte(ca.CACert), }) } diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/convert/task.go b/fn/vendor/github.com/docker/docker/daemon/cluster/convert/task.go index b90d24e35..e301415c6 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/convert/task.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/convert/task.go @@ -9,19 +9,22 @@ import ( ) // TaskFromGRPC converts a grpc Task to a Task. 
-func TaskFromGRPC(t swarmapi.Task) types.Task { +func TaskFromGRPC(t swarmapi.Task) (types.Task, error) { if t.Spec.GetAttachment() != nil { - return types.Task{} + return types.Task{}, nil } containerStatus := t.Status.GetContainer() - + taskSpec, err := taskSpecFromGRPC(t.Spec) + if err != nil { + return types.Task{}, err + } task := types.Task{ ID: t.ID, Annotations: annotationsFromGRPC(t.Annotations), ServiceID: t.ServiceID, Slot: int(t.Slot), NodeID: t.NodeID, - Spec: taskSpecFromGRPC(t.Spec), + Spec: taskSpec, Status: types.TaskStatus{ State: types.TaskState(strings.ToLower(t.Status.State.String())), Message: t.Status.Message, @@ -49,7 +52,7 @@ func TaskFromGRPC(t swarmapi.Task) types.Task { } if t.Status.PortStatus == nil { - return task + return task, nil } for _, p := range t.Status.PortStatus.Ports { @@ -62,5 +65,5 @@ func TaskFromGRPC(t swarmapi.Task) types.Task { }) } - return task + return task, nil } diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/executor/backend.go b/fn/vendor/github.com/docker/docker/daemon/cluster/executor/backend.go index 13b643c4b..fbe900656 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/executor/backend.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/executor/backend.go @@ -13,6 +13,7 @@ import ( "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/network" swarmtypes "github.com/docker/docker/api/types/swarm" + containerpkg "github.com/docker/docker/container" clustertypes "github.com/docker/docker/daemon/cluster/provider" "github.com/docker/docker/plugin" "github.com/docker/libnetwork" @@ -29,7 +30,7 @@ type Backend interface { FindNetwork(idName string) (libnetwork.Network, error) SetupIngress(clustertypes.NetworkCreateRequest, string) (<-chan struct{}, error) ReleaseIngress() (<-chan struct{}, error) - PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + 
PullImage(ctx context.Context, image, tag, platform string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error CreateManagedContainer(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error ContainerStop(name string, seconds *int) error @@ -39,11 +40,12 @@ type Backend interface { DeactivateContainerServiceBinding(containerName string) error UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) - ContainerWaitWithContext(ctx context.Context, name string) error + ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) ContainerRm(name string, config *types.ContainerRmConfig) error ContainerKill(name string, sig uint64) error - SetContainerSecretStore(name string, store exec.SecretGetter) error + SetContainerDependencyStore(name string, store exec.DependencyGetter) error SetContainerSecretReferences(name string, refs []*swarmtypes.SecretReference) error + SetContainerConfigReferences(name string, refs []*swarmtypes.ConfigReference) error SystemInfo() (*types.Info, error) VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) Containers(config *types.ContainerListOptions) ([]*types.Container, error) @@ -55,7 +57,7 @@ type Backend interface { UnsubscribeFromEvents(listener chan interface{}) UpdateAttachment(string, string, string, *network.NetworkingConfig) error WaitForDetachment(context.Context, string, string, string, string) error - GetRepository(context.Context, reference.NamedTagged, *types.AuthConfig) (distribution.Repository, bool, error) + GetRepository(context.Context, reference.Named, *types.AuthConfig) (distribution.Repository, bool, error) LookupImage(name 
string) (*types.ImageInspect, error) PluginManager() *plugin.Manager PluginGetter() *plugin.Store diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go b/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go index 1c669e68e..92e4947e6 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go @@ -7,6 +7,7 @@ import ( "fmt" "io" "os" + "runtime" "strings" "syscall" "time" @@ -17,8 +18,10 @@ import ( "github.com/docker/docker/api/types/backend" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/events" + containerpkg "github.com/docker/docker/container" "github.com/docker/docker/daemon/cluster/convert" executorpkg "github.com/docker/docker/daemon/cluster/executor" + "github.com/docker/docker/pkg/system" "github.com/docker/libnetwork" "github.com/docker/swarmkit/agent/exec" "github.com/docker/swarmkit/api" @@ -33,21 +36,21 @@ import ( // are mostly naked calls to the client API, seeded with information from // containerConfig. 
type containerAdapter struct { - backend executorpkg.Backend - container *containerConfig - secrets exec.SecretGetter + backend executorpkg.Backend + container *containerConfig + dependencies exec.DependencyGetter } -func newContainerAdapter(b executorpkg.Backend, task *api.Task, secrets exec.SecretGetter) (*containerAdapter, error) { +func newContainerAdapter(b executorpkg.Backend, task *api.Task, dependencies exec.DependencyGetter) (*containerAdapter, error) { ctnr, err := newContainerConfig(task) if err != nil { return nil, err } return &containerAdapter{ - container: ctnr, - backend: b, - secrets: secrets, + container: ctnr, + backend: b, + dependencies: dependencies, }, nil } @@ -87,7 +90,13 @@ func (c *containerAdapter) pullImage(ctx context.Context) error { pr, pw := io.Pipe() metaHeaders := map[string][]string{} go func() { - err := c.backend.PullImage(ctx, c.container.image(), "", metaHeaders, authConfig, pw) + // TODO @jhowardmsft LCOW Support: This will need revisiting as + // the stack is built up to include LCOW support for swarm. 
+ platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + err := c.backend.PullImage(ctx, c.container.image(), "", platform, metaHeaders, authConfig, pw) pw.CloseWithError(err) }() @@ -175,7 +184,7 @@ func (c *containerAdapter) removeNetworks(ctx context.Context) error { } func (c *containerAdapter) networkAttach(ctx context.Context) error { - config := c.container.createNetworkingConfig() + config := c.container.createNetworkingConfig(c.backend) var ( networkName string @@ -194,7 +203,7 @@ func (c *containerAdapter) networkAttach(ctx context.Context) error { } func (c *containerAdapter) waitForDetach(ctx context.Context) error { - config := c.container.createNetworkingConfig() + config := c.container.createNetworkingConfig(c.backend) var ( networkName string @@ -215,20 +224,19 @@ func (c *containerAdapter) waitForDetach(ctx context.Context) error { func (c *containerAdapter) create(ctx context.Context) error { var cr containertypes.ContainerCreateCreatedBody var err error - if cr, err = c.backend.CreateManagedContainer(types.ContainerCreateConfig{ Name: c.container.name(), Config: c.container.config(), HostConfig: c.container.hostConfig(), // Use the first network in container create - NetworkingConfig: c.container.createNetworkingConfig(), + NetworkingConfig: c.container.createNetworkingConfig(c.backend), }); err != nil { return err } // Docker daemon currently doesn't support multiple networks in container create // Connect to all other networks - nc := c.container.connectNetworkingConfig() + nc := c.container.connectNetworkingConfig(c.backend) if nc != nil { for n, ep := range nc.EndpointsConfig { @@ -243,13 +251,18 @@ func (c *containerAdapter) create(ctx context.Context) error { return errors.New("unable to get container from task spec") } - // configure secrets - if err := c.backend.SetContainerSecretStore(cr.ID, c.secrets); err != nil { + if err := c.backend.SetContainerDependencyStore(cr.ID, c.dependencies); err != nil { return 
err } - refs := convert.SecretReferencesFromGRPC(container.Secrets) - if err := c.backend.SetContainerSecretReferences(cr.ID, refs); err != nil { + // configure secrets + secretRefs := convert.SecretReferencesFromGRPC(container.Secrets) + if err := c.backend.SetContainerSecretReferences(cr.ID, secretRefs); err != nil { + return err + } + + configRefs := convert.ConfigReferencesFromGRPC(container.Configs) + if err := c.backend.SetContainerConfigReferences(cr.ID, configRefs); err != nil { return err } @@ -332,8 +345,8 @@ func (c *containerAdapter) events(ctx context.Context) <-chan events.Message { return eventsq } -func (c *containerAdapter) wait(ctx context.Context) error { - return c.backend.ContainerWaitWithContext(ctx, c.container.nameOrID()) +func (c *containerAdapter) wait(ctx context.Context) (<-chan containerpkg.StateStatus, error) { + return c.backend.ContainerWait(ctx, c.container.nameOrID(), containerpkg.WaitConditionNotRunning) } func (c *containerAdapter) shutdown(ctx context.Context) error { @@ -400,11 +413,11 @@ func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscription apiOptions := &types.ContainerLogsOptions{ Follow: options.Follow, - // TODO(stevvooe): Parse timestamp out of message. This - // absolutely needs to be done before going to production with - // this, at it is completely redundant. + // Always say yes to Timestamps and Details. we make the decision + // of whether to return these to the user or not way higher up the + // stack. Timestamps: true, - Details: false, // no clue what to do with this, let's just deprecate it. 
+ Details: true, } if options.Since != nil { diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/attachment.go b/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/attachment.go index e0ee81a8b..54f95a1fb 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/attachment.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/attachment.go @@ -20,8 +20,8 @@ type networkAttacherController struct { closed chan struct{} } -func newNetworkAttacherController(b executorpkg.Backend, task *api.Task, secrets exec.SecretGetter) (*networkAttacherController, error) { - adapter, err := newContainerAdapter(b, task, secrets) +func newNetworkAttacherController(b executorpkg.Backend, task *api.Task, dependencies exec.DependencyGetter) (*networkAttacherController, error) { + adapter, err := newContainerAdapter(b, task, dependencies) if err != nil { return nil, err } diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/container.go b/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/container.go index dcac7281b..501da3286 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/container.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/container.go @@ -18,8 +18,11 @@ import ( enginemount "github.com/docker/docker/api/types/mount" "github.com/docker/docker/api/types/network" volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/daemon/cluster/convert" + executorpkg "github.com/docker/docker/daemon/cluster/executor" clustertypes "github.com/docker/docker/daemon/cluster/provider" "github.com/docker/go-connections/nat" + netconst "github.com/docker/libnetwork/datastore" "github.com/docker/swarmkit/agent/exec" "github.com/docker/swarmkit/api" "github.com/docker/swarmkit/template" @@ -374,6 +377,14 @@ func (c *containerConfig) hostConfig() 
*enginecontainer.HostConfig { } } + if len(c.task.Networks) > 0 { + labels := c.task.Networks[0].Network.Spec.Annotations.Labels + name := c.task.Networks[0].Network.Spec.Annotations.Name + if v, ok := labels["com.docker.swarm.predefined"]; ok && v == "true" { + hc.NetworkMode = enginecontainer.NetworkMode(name) + } + } + return hc } @@ -428,7 +439,7 @@ func (c *containerConfig) resources() enginecontainer.Resources { } // Docker daemon supports just 1 network during container create. -func (c *containerConfig) createNetworkingConfig() *network.NetworkingConfig { +func (c *containerConfig) createNetworkingConfig(b executorpkg.Backend) *network.NetworkingConfig { var networks []*api.NetworkAttachment if c.task.Spec.GetContainer() != nil || c.task.Spec.GetAttachment() != nil { networks = c.task.Networks @@ -436,19 +447,18 @@ func (c *containerConfig) createNetworkingConfig() *network.NetworkingConfig { epConfig := make(map[string]*network.EndpointSettings) if len(networks) > 0 { - epConfig[networks[0].Network.Spec.Annotations.Name] = getEndpointConfig(networks[0]) + epConfig[networks[0].Network.Spec.Annotations.Name] = getEndpointConfig(networks[0], b) } return &network.NetworkingConfig{EndpointsConfig: epConfig} } // TODO: Merge this function with createNetworkingConfig after daemon supports multiple networks in container create -func (c *containerConfig) connectNetworkingConfig() *network.NetworkingConfig { +func (c *containerConfig) connectNetworkingConfig(b executorpkg.Backend) *network.NetworkingConfig { var networks []*api.NetworkAttachment if c.task.Spec.GetContainer() != nil { networks = c.task.Networks } - // First network is used during container create. 
Other networks are used in "docker network connect" if len(networks) < 2 { return nil @@ -456,12 +466,12 @@ func (c *containerConfig) connectNetworkingConfig() *network.NetworkingConfig { epConfig := make(map[string]*network.EndpointSettings) for _, na := range networks[1:] { - epConfig[na.Network.Spec.Annotations.Name] = getEndpointConfig(na) + epConfig[na.Network.Spec.Annotations.Name] = getEndpointConfig(na, b) } return &network.NetworkingConfig{EndpointsConfig: epConfig} } -func getEndpointConfig(na *api.NetworkAttachment) *network.EndpointSettings { +func getEndpointConfig(na *api.NetworkAttachment, b executorpkg.Backend) *network.EndpointSettings { var ipv4, ipv6 string for _, addr := range na.Addresses { ip, _, err := net.ParseCIDR(addr) @@ -479,13 +489,20 @@ func getEndpointConfig(na *api.NetworkAttachment) *network.EndpointSettings { } } - return &network.EndpointSettings{ + n := &network.EndpointSettings{ NetworkID: na.Network.ID, IPAMConfig: &network.EndpointIPAMConfig{ IPv4Address: ipv4, IPv6Address: ipv6, }, + DriverOpts: na.DriverAttachmentOpts, } + if v, ok := na.Network.Spec.Annotations.Labels["com.docker.swarm.predefined"]; ok && v == "true" { + if ln, err := b.FindNetwork(na.Network.Spec.Annotations.Name); err == nil { + n.NetworkID = ln.ID() + } + } + return n } func (c *containerConfig) virtualIP(networkID string) string { @@ -570,27 +587,38 @@ func (c *containerConfig) networkCreateRequest(name string) (clustertypes.Networ options := types.NetworkCreate{ // ID: na.Network.ID, - Driver: na.Network.DriverState.Name, - IPAM: &network.IPAM{ - Driver: na.Network.IPAM.Driver.Name, - Options: na.Network.IPAM.Driver.Options, - }, - Options: na.Network.DriverState.Options, Labels: na.Network.Spec.Annotations.Labels, Internal: na.Network.Spec.Internal, Attachable: na.Network.Spec.Attachable, - Ingress: na.Network.Spec.Ingress, + Ingress: convert.IsIngressNetwork(na.Network), EnableIPv6: na.Network.Spec.Ipv6Enabled, CheckDuplicate: true, + Scope: 
netconst.SwarmScope, } - for _, ic := range na.Network.IPAM.Configs { - c := network.IPAMConfig{ - Subnet: ic.Subnet, - IPRange: ic.Range, - Gateway: ic.Gateway, + if na.Network.Spec.GetNetwork() != "" { + options.ConfigFrom = &network.ConfigReference{ + Network: na.Network.Spec.GetNetwork(), + } + } + + if na.Network.DriverState != nil { + options.Driver = na.Network.DriverState.Name + options.Options = na.Network.DriverState.Options + } + if na.Network.IPAM != nil { + options.IPAM = &network.IPAM{ + Driver: na.Network.IPAM.Driver.Name, + Options: na.Network.IPAM.Driver.Options, + } + for _, ic := range na.Network.IPAM.Configs { + c := network.IPAMConfig{ + Subnet: ic.Subnet, + IPRange: ic.Range, + Gateway: ic.Gateway, + } + options.IPAM.Config = append(options.IPAM.Config, c) } - options.IPAM.Config = append(options.IPAM.Config, c) } return clustertypes.NetworkCreateRequest{ diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/controller.go b/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/controller.go index 5c6f80350..163643e39 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/controller.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/controller.go @@ -21,6 +21,8 @@ import ( "golang.org/x/time/rate" ) +const defaultGossipConvergeDelay = 2 * time.Second + // controller implements agent.Controller against docker's API. // // Most operations against docker's API are done through the container name, @@ -39,8 +41,8 @@ type controller struct { var _ exec.Controller = &controller{} // NewController returns a docker exec runner for the provided task. 
-func newController(b executorpkg.Backend, task *api.Task, secrets exec.SecretGetter) (*controller, error) { - adapter, err := newContainerAdapter(b, task, secrets) +func newController(b executorpkg.Backend, task *api.Task, dependencies exec.DependencyGetter) (*controller, error) { + adapter, err := newContainerAdapter(b, task, dependencies) if err != nil { return nil, err } @@ -277,30 +279,50 @@ func (r *controller) Wait(pctx context.Context) error { } }() - err := r.adapter.wait(ctx) - if ctx.Err() != nil { - return ctx.Err() + waitC, err := r.adapter.wait(ctx) + if err != nil { + return err } - if err != nil { - ee := &exitError{} - if ec, ok := err.(exec.ExitCoder); ok { - ee.code = ec.ExitCode() + if status := <-waitC; status.ExitCode() != 0 { + exitErr := &exitError{ + code: status.ExitCode(), } + + // Set the cause if it is knowable. select { case e := <-healthErr: - ee.cause = e + exitErr.cause = e default: - if err.Error() != "" { - ee.cause = err + if status.Err() != nil { + exitErr.cause = status.Err() } } - return ee + + return exitErr } return nil } +func (r *controller) hasServiceBinding() bool { + if r.task == nil { + return false + } + + // service is attached to a network besides the default bridge + for _, na := range r.task.Networks { + if na.Network == nil || + na.Network.DriverState == nil || + na.Network.DriverState.Name == "bridge" && na.Network.Spec.Annotations.Name == "bridge" { + continue + } + return true + } + + return false +} + // Shutdown the container cleanly. 
func (r *controller) Shutdown(ctx context.Context) error { if err := r.checkClosed(); err != nil { @@ -311,12 +333,18 @@ func (r *controller) Shutdown(ctx context.Context) error { r.cancelPull() } - // remove container from service binding - if err := r.adapter.deactivateServiceBinding(); err != nil { - log.G(ctx).WithError(err).Warningf("failed to deactivate service binding for container %s", r.adapter.container.name()) - // Don't return an error here, because failure to deactivate - // the service binding is expected if the container was never - // started. + if r.hasServiceBinding() { + // remove container from service binding + if err := r.adapter.deactivateServiceBinding(); err != nil { + log.G(ctx).WithError(err).Warningf("failed to deactivate service binding for container %s", r.adapter.container.name()) + // Don't return an error here, because failure to deactivate + // the service binding is expected if the container was never + // started. + } + + // add a delay for gossip converge + // TODO(dongluochen): this delay should be configurable to fit different cluster size and network delay. + time.Sleep(defaultGossipConvergeDelay) } if err := r.adapter.shutdown(ctx); err != nil { @@ -437,9 +465,20 @@ func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, opti return err } - if err := r.waitReady(ctx); err != nil { - return errors.Wrap(err, "container not ready for logs") + // if we're following, wait for this container to be ready. there is a + // problem here: if the container will never be ready (for example, it has + // been totally deleted) then this will wait forever. however, this doesn't + // actually cause any UI issues, and shouldn't be a problem. the stuck wait + // will go away when the follow (context) is canceled. + if options.Follow { + if err := r.waitReady(ctx); err != nil { + return errors.Wrap(err, "container not ready for logs") + } } + // if we're not following, we're not gonna wait for the container to be + // ready. 
just call logs. if the container isn't ready, the call will fail + // and return an error. no big deal, we don't care, we only want the logs + // we can get RIGHT NOW with no follow logsContext, cancel := context.WithCancel(ctx) msgs, err := r.adapter.logs(logsContext, options) @@ -486,10 +525,18 @@ func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, opti stream = api.LogStreamStderr } + // parse the details out of the Attrs map + attrs := []api.LogAttr{} + for k, v := range msg.Attrs { + attr := api.LogAttr{Key: k, Value: v} + attrs = append(attrs, attr) + } + if err := publisher.Publish(ctx, api.LogMessage{ Context: msgctx, Timestamp: tsp, Stream: stream, + Attrs: attrs, Data: msg.Line, }); err != nil { return errors.Wrap(err, "failed to publish log message") diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/executor.go b/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/executor.go index 6be0f3156..a71a9412e 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/executor.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/executor.go @@ -1,31 +1,38 @@ package container import ( + "fmt" "sort" "strings" + "github.com/Sirupsen/logrus" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/network" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/controllers/plugin" executorpkg "github.com/docker/docker/daemon/cluster/executor" clustertypes "github.com/docker/docker/daemon/cluster/provider" networktypes "github.com/docker/libnetwork/types" + "github.com/docker/swarmkit/agent" "github.com/docker/swarmkit/agent/exec" - "github.com/docker/swarmkit/agent/secrets" "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/naming" "golang.org/x/net/context" ) type executor struct { - backend executorpkg.Backend - secrets 
exec.SecretsManager + backend executorpkg.Backend + pluginBackend plugin.Backend + dependencies exec.DependencyManager } // NewExecutor returns an executor from the docker client. -func NewExecutor(b executorpkg.Backend) exec.Executor { +func NewExecutor(b executorpkg.Backend, p plugin.Backend) exec.Executor { return &executor{ - backend: b, - secrets: secrets.NewManager(), + backend: b, + pluginBackend: p, + dependencies: agent.NewDependencyManager(), } } @@ -52,6 +59,7 @@ func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) { // the plugin list by default. addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...)) addPlugins("Authorization", info.Plugins.Authorization) + addPlugins("Log", info.Plugins.Log) // add v2 plugins v2Plugins, err := e.backend.PluginManager().List(filters.NewArgs()) @@ -62,11 +70,15 @@ func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) { continue } plgnTyp := typ.Capability - if typ.Capability == "volumedriver" { + switch typ.Capability { + case "volumedriver": plgnTyp = "Volume" - } else if typ.Capability == "networkdriver" { + case "networkdriver": plgnTyp = "Network" + case "logdriver": + plgnTyp = "Log" } + plugins[api.PluginDescription{ Type: plgnTyp, Name: plgn.Name, @@ -152,13 +164,41 @@ func (e *executor) Configure(ctx context.Context, node *api.Node) error { // Controller returns a docker container runner. 
func (e *executor) Controller(t *api.Task) (exec.Controller, error) { + dependencyGetter := agent.Restrict(e.dependencies, t) + if t.Spec.GetAttachment() != nil { - return newNetworkAttacherController(e.backend, t, e.secrets) + return newNetworkAttacherController(e.backend, t, dependencyGetter) } - ctlr, err := newController(e.backend, t, secrets.Restrict(e.secrets, t)) - if err != nil { - return nil, err + var ctlr exec.Controller + switch r := t.Spec.GetRuntime().(type) { + case *api.TaskSpec_Generic: + logrus.WithFields(logrus.Fields{ + "kind": r.Generic.Kind, + "type_url": r.Generic.Payload.TypeUrl, + }).Debug("custom runtime requested") + runtimeKind, err := naming.Runtime(t.Spec) + if err != nil { + return ctlr, err + } + switch runtimeKind { + case string(swarmtypes.RuntimePlugin): + c, err := plugin.NewController(e.pluginBackend, t) + if err != nil { + return ctlr, err + } + ctlr = c + default: + return ctlr, fmt.Errorf("unsupported runtime type: %q", r.Generic.Kind) + } + case *api.TaskSpec_Container: + c, err := newController(e.backend, t, dependencyGetter) + if err != nil { + return ctlr, err + } + ctlr = c + default: + return ctlr, fmt.Errorf("unsupported runtime: %q", r) } return ctlr, nil @@ -182,7 +222,11 @@ func (e *executor) SetNetworkBootstrapKeys(keys []*api.EncryptionKey) error { } func (e *executor) Secrets() exec.SecretsManager { - return e.secrets + return e.dependencies.Secrets() +} + +func (e *executor) Configs() exec.ConfigsManager { + return e.dependencies.Configs() } type sortedPlugins []api.PluginDescription diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/health_test.go b/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/health_test.go index 4abf0999b..b6f188557 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/health_test.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/executor/container/health_test.go @@ -38,14 +38,12 @@ func 
TestHealthStates(t *testing.T) { } c := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "id", - Name: "name", - Config: &containertypes.Config{ - Image: "image_name", - Labels: map[string]string{ - "com.docker.swarm.task.id": "id", - }, + ID: "id", + Name: "name", + Config: &containertypes.Config{ + Image: "image_name", + Labels: map[string]string{ + "com.docker.swarm.task.id": "id", }, }, } diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/filters.go b/fn/vendor/github.com/docker/docker/daemon/cluster/filters.go index d356a449a..efda7dc8d 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/filters.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/filters.go @@ -57,6 +57,7 @@ func newListTasksFilters(filter filters.Args, transformFunc func(filters.Args) e // internal use in checking create/update progress. Therefore, // we prefix it with a '_'. "_up-to-date": true, + "runtime": true, } if err := filter.Validate(accepted); err != nil { return nil, err @@ -73,6 +74,7 @@ func newListTasksFilters(filter filters.Args, transformFunc func(filters.Args) e ServiceIDs: filter.Get("service"), NodeIDs: filter.Get("node"), UpToDate: len(filter.Get("_up-to-date")) != 0, + Runtimes: filter.Get("runtime"), } for _, s := range filter.Get("desired-state") { @@ -103,3 +105,19 @@ func newListSecretsFilters(filter filters.Args) (*swarmapi.ListSecretsRequest_Fi Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), }, nil } + +func newListConfigsFilters(filter filters.Args) (*swarmapi.ListConfigsRequest_Filters, error) { + accepted := map[string]bool{ + "name": true, + "id": true, + "label": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + return &swarmapi.ListConfigsRequest_Filters{ + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + }, nil +} diff --git 
a/fn/vendor/github.com/docker/docker/daemon/cluster/filters_test.go b/fn/vendor/github.com/docker/docker/daemon/cluster/filters_test.go index 8f5fa8316..fd0c8c369 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/filters_test.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/filters_test.go @@ -51,7 +51,52 @@ func TestNewListSecretsFilters(t *testing.T) { for _, filter := range invalidFilters { if _, err := newListSecretsFilters(filter); err == nil { - t.Fatalf("Should get an error for filter %s, while got nil", filter) + t.Fatalf("Should get an error for filter %v, while got nil", filter) + } + } +} + +func TestNewListConfigsFilters(t *testing.T) { + validNameFilter := filters.NewArgs() + validNameFilter.Add("name", "test_name") + + validIDFilter := filters.NewArgs() + validIDFilter.Add("id", "7c9009d6720f6de3b492f5") + + validLabelFilter := filters.NewArgs() + validLabelFilter.Add("label", "type=test") + validLabelFilter.Add("label", "storage=ssd") + validLabelFilter.Add("label", "memory") + + validAllFilter := filters.NewArgs() + validAllFilter.Add("name", "nodeName") + validAllFilter.Add("id", "7c9009d6720f6de3b492f5") + validAllFilter.Add("label", "type=test") + validAllFilter.Add("label", "memory") + + validFilters := []filters.Args{ + validNameFilter, + validIDFilter, + validLabelFilter, + validAllFilter, + } + + invalidTypeFilter := filters.NewArgs() + invalidTypeFilter.Add("nonexist", "aaaa") + + invalidFilters := []filters.Args{ + invalidTypeFilter, + } + + for _, filter := range validFilters { + if _, err := newListConfigsFilters(filter); err != nil { + t.Fatalf("Should get no error, got %v", err) + } + } + + for _, filter := range invalidFilters { + if _, err := newListConfigsFilters(filter); err == nil { + t.Fatalf("Should get an error for filter %v, while got nil", filter) } } } diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/helpers.go b/fn/vendor/github.com/docker/docker/daemon/cluster/helpers.go index 
98c7cc547..a74118c42 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/helpers.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/helpers.go @@ -174,6 +174,42 @@ func getSecret(ctx context.Context, c swarmapi.ControlClient, input string) (*sw return rl.Secrets[0], nil } +func getConfig(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Config, error) { + // attempt to lookup config by full ID + if rg, err := c.GetConfig(ctx, &swarmapi.GetConfigRequest{ConfigID: input}); err == nil { + return rg.Config, nil + } + + // If any error (including NotFound), ListConfigs to match via full name. + rl, err := c.ListConfigs(ctx, &swarmapi.ListConfigsRequest{ + Filters: &swarmapi.ListConfigsRequest_Filters{ + Names: []string{input}, + }, + }) + if err != nil || len(rl.Configs) == 0 { + // If any error or 0 result, ListConfigs to match via ID prefix. + rl, err = c.ListConfigs(ctx, &swarmapi.ListConfigsRequest{ + Filters: &swarmapi.ListConfigsRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } + if err != nil { + return nil, err + } + + if len(rl.Configs) == 0 { + err := fmt.Errorf("config %s not found", input) + return nil, errors.NewRequestNotFoundError(err) + } + + if l := len(rl.Configs); l > 1 { + return nil, fmt.Errorf("config %s is ambiguous (%d matches found)", input, l) + } + + return rl.Configs[0], nil +} + func getNetwork(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Network, error) { // GetNetwork to match via full ID. 
if rg, err := c.GetNetwork(ctx, &swarmapi.GetNetworkRequest{NetworkID: input}); err == nil { diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/listen_addr.go b/fn/vendor/github.com/docker/docker/daemon/cluster/listen_addr.go index 83e74ad46..993ccb62a 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/listen_addr.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/listen_addr.go @@ -10,8 +10,10 @@ var ( errNoSuchInterface = errors.New("no such interface") errNoIP = errors.New("could not find the system's IP address") errMustSpecifyListenAddr = errors.New("must specify a listening address because the address to advertise is not recognized as a system address, and a system's IP address to use could not be uniquely identified") + errBadNetworkIdentifier = errors.New("must specify a valid IP address or interface name") errBadListenAddr = errors.New("listen address must be an IP address or network interface (with optional port number)") errBadAdvertiseAddr = errors.New("advertise address must be a non-zero IP address or network interface (with optional port number)") + errBadDataPathAddr = errors.New("data path address must be a non-zero IP address or network interface (without a port number)") errBadDefaultAdvertiseAddr = errors.New("default advertise address must be a non-zero IP address or network interface (without a port number)") ) @@ -20,23 +22,17 @@ func resolveListenAddr(specifiedAddr string) (string, string, error) { if err != nil { return "", "", fmt.Errorf("could not parse listen address %s", specifiedAddr) } - // Does the host component match any of the interface names on the // system? If so, use the address from that interface. 
- interfaceAddr, err := resolveInterfaceAddr(specifiedHost) - if err == nil { - return interfaceAddr.String(), specifiedPort, nil - } - if err != errNoSuchInterface { + specifiedIP, err := resolveInputIPAddr(specifiedHost, true) + if err != nil { + if err == errBadNetworkIdentifier { + err = errBadListenAddr + } return "", "", err } - // If it's not an interface, it must be an IP (for now) - if net.ParseIP(specifiedHost) == nil { - return "", "", errBadListenAddr - } - - return specifiedHost, specifiedPort, nil + return specifiedIP.String(), specifiedPort, nil } func (c *Cluster) resolveAdvertiseAddr(advertiseAddr, listenAddrPort string) (string, string, error) { @@ -57,43 +53,32 @@ func (c *Cluster) resolveAdvertiseAddr(advertiseAddr, listenAddrPort string) (st advertiseHost = advertiseAddr advertisePort = listenAddrPort } - // Does the host component match any of the interface names on the // system? If so, use the address from that interface. - interfaceAddr, err := resolveInterfaceAddr(advertiseHost) - if err == nil { - return interfaceAddr.String(), advertisePort, nil - } - if err != errNoSuchInterface { + advertiseIP, err := resolveInputIPAddr(advertiseHost, false) + if err != nil { + if err == errBadNetworkIdentifier { + err = errBadAdvertiseAddr + } return "", "", err } - // If it's not an interface, it must be an IP (for now) - if ip := net.ParseIP(advertiseHost); ip == nil || ip.IsUnspecified() { - return "", "", errBadAdvertiseAddr - } - - return advertiseHost, advertisePort, nil + return advertiseIP.String(), advertisePort, nil } if c.config.DefaultAdvertiseAddr != "" { // Does the default advertise address component match any of the // interface names on the system? If so, use the address from // that interface. 
- interfaceAddr, err := resolveInterfaceAddr(c.config.DefaultAdvertiseAddr) - if err == nil { - return interfaceAddr.String(), listenAddrPort, nil - } - if err != errNoSuchInterface { + defaultAdvertiseIP, err := resolveInputIPAddr(c.config.DefaultAdvertiseAddr, false) + if err != nil { + if err == errBadNetworkIdentifier { + err = errBadDefaultAdvertiseAddr + } return "", "", err } - // If it's not an interface, it must be an IP (for now) - if ip := net.ParseIP(c.config.DefaultAdvertiseAddr); ip == nil || ip.IsUnspecified() { - return "", "", errBadDefaultAdvertiseAddr - } - - return c.config.DefaultAdvertiseAddr, listenAddrPort, nil + return defaultAdvertiseIP.String(), listenAddrPort, nil } systemAddr, err := c.resolveSystemAddr() @@ -103,6 +88,22 @@ func (c *Cluster) resolveAdvertiseAddr(advertiseAddr, listenAddrPort string) (st return systemAddr.String(), listenAddrPort, nil } +func resolveDataPathAddr(dataPathAddr string) (string, error) { + if dataPathAddr == "" { + // dataPathAddr is not defined + return "", nil + } + // If a data path flag is specified try to resolve the IP address. + dataPathIP, err := resolveInputIPAddr(dataPathAddr, false) + if err != nil { + if err == errBadNetworkIdentifier { + err = errBadDataPathAddr + } + return "", err + } + return dataPathIP.String(), nil +} + func resolveInterfaceAddr(specifiedInterface string) (net.IP, error) { // Use a specific interface's IP address. 
intf, err := net.InterfaceByName(specifiedInterface) @@ -149,6 +150,30 @@ func resolveInterfaceAddr(specifiedInterface string) (net.IP, error) { return interfaceAddr6, nil } +// resolveInputIPAddr tries to resolve the IP address from the string passed as input +// - tries to match the string as an interface name, if so returns the IP address associated with it +// - on failure of previous step tries to parse the string as an IP address itself +// if succeeds returns the IP address +func resolveInputIPAddr(input string, isUnspecifiedValid bool) (net.IP, error) { + // Try to see if it is an interface name + interfaceAddr, err := resolveInterfaceAddr(input) + if err == nil { + return interfaceAddr, nil + } + // String matched interface but there is a potential ambiguity to be resolved + if err != errNoSuchInterface { + return nil, err + } + + // String is not an interface check if it is a valid IP + if ip := net.ParseIP(input); ip != nil && (isUnspecifiedValid || !ip.IsUnspecified()) { + return ip, nil + } + + // Not valid IP found + return nil, errBadNetworkIdentifier +} + func (c *Cluster) resolveSystemAddrViaSubnetCheck() (net.IP, error) { // Use the system's only IP address, or fail if there are // multiple addresses to choose from. Skip interfaces which diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/networks.go b/fn/vendor/github.com/docker/docker/daemon/cluster/networks.go index 4f91c4c13..1906c37bd 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/networks.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/networks.go @@ -17,7 +17,28 @@ import ( // GetNetworks returns all current cluster managed networks. 
func (c *Cluster) GetNetworks() ([]apitypes.NetworkResource, error) { - return c.getNetworks(nil) + list, err := c.getNetworks(nil) + if err != nil { + return nil, err + } + removePredefinedNetworks(&list) + return list, nil +} + +func removePredefinedNetworks(networks *[]apitypes.NetworkResource) { + if networks == nil { + return + } + var idxs []int + for i, n := range *networks { + if v, ok := n.Labels["com.docker.swarm.predefined"]; ok && v == "true" { + idxs = append(idxs, i) + } + } + for i, idx := range idxs { + idx -= i + *networks = append((*networks)[:idx], (*networks)[idx+1:]...) + } } func (c *Cluster) getNetworks(filters *swarmapi.ListNetworksRequest_Filters) ([]apitypes.NetworkResource, error) { @@ -37,7 +58,7 @@ func (c *Cluster) getNetworks(filters *swarmapi.ListNetworksRequest_Filters) ([] return nil, err } - var networks []apitypes.NetworkResource + networks := make([]apitypes.NetworkResource, 0, len(r.Networks)) for _, network := range r.Networks { networks = append(networks, convert.BasicNetworkFromGRPC(*network)) @@ -269,16 +290,27 @@ func (c *Cluster) populateNetworkID(ctx context.Context, client swarmapi.Control if len(networks) == 0 { networks = s.Networks } - for i, n := range networks { apiNetwork, err := getNetwork(ctx, client, n.Target) if err != nil { - if ln, _ := c.config.Backend.FindNetwork(n.Target); ln != nil && !ln.Info().Dynamic() { + ln, _ := c.config.Backend.FindNetwork(n.Target) + if ln != nil && runconfig.IsPreDefinedNetwork(ln.Name()) { + // Need to retrieve the corresponding predefined swarm network + // and use its id for the request. + apiNetwork, err = getNetwork(ctx, client, ln.Name()) + if err != nil { + err = fmt.Errorf("could not find the corresponding predefined swarm network: %v", err) + return apierrors.NewRequestNotFoundError(err) + } + goto setid + } + if ln != nil && !ln.Info().Dynamic() { err = fmt.Errorf("The network %s cannot be used with services. 
Only networks scoped to the swarm can be used, such as those created with the overlay driver.", ln.Name()) return apierrors.NewRequestForbiddenError(err) } return err } + setid: networks[i].Target = apiNetwork.ID } return nil diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/noderunner.go b/fn/vendor/github.com/docker/docker/daemon/cluster/noderunner.go index 5057e7f60..a1eda066b 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/noderunner.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/noderunner.go @@ -11,6 +11,7 @@ import ( "github.com/Sirupsen/logrus" types "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/daemon/cluster/executor/container" + lncluster "github.com/docker/libnetwork/cluster" swarmapi "github.com/docker/swarmkit/api" swarmnode "github.com/docker/swarmkit/node" "github.com/pkg/errors" @@ -46,7 +47,13 @@ type nodeStartConfig struct { ListenAddr string // AdvertiseAddr is the address other nodes should connect to, // including a port. - AdvertiseAddr string + AdvertiseAddr string + // DataPathAddr is the address that has to be used for the data path + DataPathAddr string + // JoinInProgress is set to true if a join operation has started, but + // not completed yet. + JoinInProgress bool + joinAddr string forceNewCluster bool joinToken string @@ -94,6 +101,13 @@ func (n *nodeRunner) start(conf nodeStartConfig) error { control = filepath.Join(n.cluster.runtimeRoot, controlSocket) } + joinAddr := conf.joinAddr + if joinAddr == "" && conf.JoinInProgress { + // We must have been restarted while trying to join a cluster. + // Continue trying to join instead of forming our own cluster. + joinAddr = conf.RemoteAddr + } + // Hostname is not set here. 
Instead, it is obtained from // the node description that is reported periodically swarmnodeConfig := swarmnode.Config{ @@ -101,10 +115,10 @@ func (n *nodeRunner) start(conf nodeStartConfig) error { ListenControlAPI: control, ListenRemoteAPI: conf.ListenAddr, AdvertiseRemoteAPI: conf.AdvertiseAddr, - JoinAddr: conf.joinAddr, + JoinAddr: joinAddr, StateDir: n.cluster.root, JoinToken: conf.joinToken, - Executor: container.NewExecutor(n.cluster.config.Backend), + Executor: container.NewExecutor(n.cluster.config.Backend, n.cluster.config.PluginBackend), HeartbeatTick: 1, ElectionTick: 3, UnlockKey: conf.lockKey, @@ -129,6 +143,9 @@ func (n *nodeRunner) start(conf nodeStartConfig) error { n.done = make(chan struct{}) n.ready = make(chan struct{}) n.swarmNode = node + if conf.joinAddr != "" { + conf.JoinInProgress = true + } n.config = conf savePersistentState(n.cluster.root, conf) @@ -155,11 +172,55 @@ func (n *nodeRunner) handleControlSocketChange(ctx context.Context, node *swarmn } else { n.controlClient = swarmapi.NewControlClient(conn) n.logsClient = swarmapi.NewLogsClient(conn) + // push store changes to daemon + go n.watchClusterEvents(ctx, conn) } } n.grpcConn = conn n.mu.Unlock() - n.cluster.configEvent <- struct{}{} + n.cluster.SendClusterEvent(lncluster.EventSocketChange) + } +} + +func (n *nodeRunner) watchClusterEvents(ctx context.Context, conn *grpc.ClientConn) { + client := swarmapi.NewWatchClient(conn) + watch, err := client.Watch(ctx, &swarmapi.WatchRequest{ + Entries: []*swarmapi.WatchRequest_WatchEntry{ + { + Kind: "node", + Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, + }, + { + Kind: "service", + Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, + }, + { + Kind: "network", + Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, + }, + { + Kind: "secret", + Action: 
swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, + }, + }, + IncludeOldObject: true, + }) + if err != nil { + logrus.WithError(err).Error("failed to watch cluster store") + return + } + for { + msg, err := watch.Recv() + if err != nil { + // store watch is broken + logrus.WithError(err).Error("failed to receive changes from store watch API") + return + } + select { + case <-ctx.Done(): + return + case n.cluster.watchStream <- msg: + } } } @@ -168,11 +229,15 @@ func (n *nodeRunner) handleReadyEvent(ctx context.Context, node *swarmnode.Node, case <-node.Ready(): n.mu.Lock() n.err = nil + if n.config.JoinInProgress { + n.config.JoinInProgress = false + savePersistentState(n.cluster.root, n.config) + } n.mu.Unlock() close(ready) case <-ctx.Done(): } - n.cluster.configEvent <- struct{}{} + n.cluster.SendClusterEvent(lncluster.EventNodeReady) } func (n *nodeRunner) handleNodeExit(node *swarmnode.Node) { @@ -210,11 +275,11 @@ func (n *nodeRunner) Stop() error { n.stopping = true ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) defer cancel() + n.mu.Unlock() if err := n.swarmNode.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") { - n.mu.Unlock() return err } - n.mu.Unlock() + n.cluster.SendClusterEvent(lncluster.EventNodeLeave) <-n.done return nil } @@ -258,7 +323,6 @@ func (n *nodeRunner) enableReconnectWatcher() { delayCtx, cancel := context.WithTimeout(context.Background(), n.reconnectDelay) n.cancelReconnect = cancel - config := n.config go func() { <-delayCtx.Done() if delayCtx.Err() != context.DeadlineExceeded { @@ -269,9 +333,8 @@ func (n *nodeRunner) enableReconnectWatcher() { if n.stopping { return } - config.RemoteAddr = n.cluster.getRemoteAddress() - config.joinAddr = config.RemoteAddr - if err := n.start(config); err != nil { + + if err := n.start(n.config); err != nil { n.err = err } }() diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/nodes.go 
b/fn/vendor/github.com/docker/docker/daemon/cluster/nodes.go index ebd47e9b6..839c8f78e 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/nodes.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/nodes.go @@ -34,7 +34,7 @@ func (c *Cluster) GetNodes(options apitypes.NodeListOptions) ([]types.Node, erro return nil, err } - nodes := []types.Node{} + nodes := make([]types.Node, 0, len(r.Nodes)) for _, node := range r.Nodes { nodes = append(nodes, convert.NodeFromGRPC(*node)) diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/secrets.go b/fn/vendor/github.com/docker/docker/daemon/cluster/secrets.go index af034a6e8..3947286cb 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/secrets.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/secrets.go @@ -48,7 +48,7 @@ func (c *Cluster) GetSecrets(options apitypes.SecretListOptions) ([]types.Secret return nil, err } - secrets := []types.Secret{} + secrets := make([]types.Secret, 0, len(r.Secrets)) for _, secret := range r.Secrets { secrets = append(secrets, convert.SecretFromGRPC(secret)) diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/services.go b/fn/vendor/github.com/docker/docker/daemon/cluster/services.go index 8d5d4a5ed..42397fa00 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/services.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/services.go @@ -40,18 +40,26 @@ func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Serv // be good to have accepted file check in the same file as // the filter processing (in the for loop below). 
accepted := map[string]bool{ - "name": true, - "id": true, - "label": true, - "mode": true, + "name": true, + "id": true, + "label": true, + "mode": true, + "runtime": true, } if err := options.Filters.Validate(accepted); err != nil { return nil, err } + + if len(options.Filters.Get("runtime")) == 0 { + // Default to using the container runtime filter + options.Filters.Add("runtime", string(types.RuntimeContainer)) + } + filters := &swarmapi.ListServicesRequest_Filters{ NamePrefixes: options.Filters.Get("name"), IDPrefixes: options.Filters.Get("id"), Labels: runconfigopts.ConvertKVStringsToMap(options.Filters.Get("label")), + Runtimes: options.Filters.Get("runtime"), } ctx, cancel := c.getRequestContext() @@ -64,7 +72,7 @@ func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Serv return nil, err } - services := []types.Service{} + services := make([]types.Service, 0, len(r.Services)) for _, service := range r.Services { if options.Filters.Include("mode") { @@ -80,7 +88,11 @@ func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Serv continue } } - services = append(services, convert.ServiceFromGRPC(*service)) + svcs, err := convert.ServiceFromGRPC(*service) + if err != nil { + return nil, err + } + services = append(services, svcs) } return services, nil @@ -99,11 +111,15 @@ func (c *Cluster) GetService(input string, insertDefaults bool) (types.Service, }); err != nil { return types.Service{}, err } - return convert.ServiceFromGRPC(*service), nil + svc, err := convert.ServiceFromGRPC(*service) + if err != nil { + return types.Service{}, err + } + return svc, nil } // CreateService creates a new service in a managed swarm cluster. 
-func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string) (*apitypes.ServiceCreateResponse, error) { +func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string, queryRegistry bool) (*apitypes.ServiceCreateResponse, error) { var resp *apitypes.ServiceCreateResponse err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { err := c.populateNetworkID(ctx, state.controlClient, &s) @@ -116,63 +132,89 @@ func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string) (*apity return apierrors.NewBadRequestError(err) } - ctnr := serviceSpec.Task.GetContainer() - if ctnr == nil { - return errors.New("service does not use container tasks") - } - - if encodedAuth != "" { - ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} - } - - // retrieve auth config from encoded auth - authConfig := &apitypes.AuthConfig{} - if encodedAuth != "" { - if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil { - logrus.Warnf("invalid authconfig: %v", err) - } - } - resp = &apitypes.ServiceCreateResponse{} - // pin image by digest - if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" { - digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig) - if err != nil { - logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error()) - // warning in the client response should be concise - resp.Warnings = append(resp.Warnings, digestWarning(ctnr.Image)) - } else if ctnr.Image != digestImage { - logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage) - ctnr.Image = digestImage - } else { - logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image) + switch serviceSpec.Task.Runtime.(type) { + // handle other runtimes here + case *swarmapi.TaskSpec_Generic: + switch serviceSpec.Task.GetGeneric().Kind { + case string(types.RuntimePlugin): + if s.TaskTemplate.PluginSpec 
== nil { + return errors.New("plugin spec must be set") + } } - // Replace the context with a fresh one. - // If we timed out while communicating with the - // registry, then "ctx" will already be expired, which - // would cause UpdateService below to fail. Reusing - // "ctx" could make it impossible to create a service - // if the registry is slow or unresponsive. - var cancel func() - ctx, cancel = c.getRequestContext() - defer cancel() - } + r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec}) + if err != nil { + return err + } - r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec}) - if err != nil { - return err - } + resp.ID = r.Service.ID + case *swarmapi.TaskSpec_Container: + ctnr := serviceSpec.Task.GetContainer() + if ctnr == nil { + return errors.New("service does not use container tasks") + } + if encodedAuth != "" { + ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} + } - resp.ID = r.Service.ID + // retrieve auth config from encoded auth + authConfig := &apitypes.AuthConfig{} + if encodedAuth != "" { + authReader := strings.NewReader(encodedAuth) + dec := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, authReader)) + if err := dec.Decode(authConfig); err != nil { + logrus.Warnf("invalid authconfig: %v", err) + } + } + + // pin image by digest for API versions < 1.30 + // TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE" + // should be removed in the future. Since integration tests only use the + // latest API version, so this is no longer required. 
+ if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry { + digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig) + if err != nil { + logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error()) + // warning in the client response should be concise + resp.Warnings = append(resp.Warnings, digestWarning(ctnr.Image)) + + } else if ctnr.Image != digestImage { + logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage) + ctnr.Image = digestImage + + } else { + logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image) + + } + + // Replace the context with a fresh one. + // If we timed out while communicating with the + // registry, then "ctx" will already be expired, which + // would cause UpdateService below to fail. Reusing + // "ctx" could make it impossible to create a service + // if the registry is slow or unresponsive. + var cancel func() + ctx, cancel = c.getRequestContext() + defer cancel() + } + + r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec}) + if err != nil { + return err + } + + resp.ID = r.Service.ID + } return nil }) + return resp, err } // UpdateService updates existing service to match new properties. 
-func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec types.ServiceSpec, flags apitypes.ServiceUpdateOptions) (*apitypes.ServiceUpdateResponse, error) { +func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec types.ServiceSpec, flags apitypes.ServiceUpdateOptions, queryRegistry bool) (*apitypes.ServiceUpdateResponse, error) { var resp *apitypes.ServiceUpdateResponse err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { @@ -192,72 +234,85 @@ func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec typ return err } - newCtnr := serviceSpec.Task.GetContainer() - if newCtnr == nil { - return errors.New("service does not use container tasks") - } - - encodedAuth := flags.EncodedRegistryAuth - if encodedAuth != "" { - newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} - } else { - // this is needed because if the encodedAuth isn't being updated then we - // shouldn't lose it, and continue to use the one that was already present - var ctnr *swarmapi.ContainerSpec - switch flags.RegistryAuthFrom { - case apitypes.RegistryAuthFromSpec, "": - ctnr = currentService.Spec.Task.GetContainer() - case apitypes.RegistryAuthFromPreviousSpec: - if currentService.PreviousSpec == nil { - return errors.New("service does not have a previous spec") - } - ctnr = currentService.PreviousSpec.Task.GetContainer() - default: - return errors.New("unsupported registryAuthFrom value") - } - if ctnr == nil { - return errors.New("service does not use container tasks") - } - newCtnr.PullOptions = ctnr.PullOptions - // update encodedAuth so it can be used to pin image by digest - if ctnr.PullOptions != nil { - encodedAuth = ctnr.PullOptions.RegistryAuth - } - } - - // retrieve auth config from encoded auth - authConfig := &apitypes.AuthConfig{} - if encodedAuth != "" { - if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, 
strings.NewReader(encodedAuth))).Decode(authConfig); err != nil { - logrus.Warnf("invalid authconfig: %v", err) - } - } - resp = &apitypes.ServiceUpdateResponse{} - // pin image by digest - if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" { - digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig) - if err != nil { - logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error()) - // warning in the client response should be concise - resp.Warnings = append(resp.Warnings, digestWarning(newCtnr.Image)) - } else if newCtnr.Image != digestImage { - logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage) - newCtnr.Image = digestImage - } else { - logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image) + switch serviceSpec.Task.Runtime.(type) { + case *swarmapi.TaskSpec_Generic: + switch serviceSpec.Task.GetGeneric().Kind { + case string(types.RuntimePlugin): + if spec.TaskTemplate.PluginSpec == nil { + return errors.New("plugin spec must be set") + } + } + case *swarmapi.TaskSpec_Container: + newCtnr := serviceSpec.Task.GetContainer() + if newCtnr == nil { + return errors.New("service does not use container tasks") } - // Replace the context with a fresh one. - // If we timed out while communicating with the - // registry, then "ctx" will already be expired, which - // would cause UpdateService below to fail. Reusing - // "ctx" could make it impossible to update a service - // if the registry is slow or unresponsive. 
- var cancel func() - ctx, cancel = c.getRequestContext() - defer cancel() + encodedAuth := flags.EncodedRegistryAuth + if encodedAuth != "" { + newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} + } else { + // this is needed because if the encodedAuth isn't being updated then we + // shouldn't lose it, and continue to use the one that was already present + var ctnr *swarmapi.ContainerSpec + switch flags.RegistryAuthFrom { + case apitypes.RegistryAuthFromSpec, "": + ctnr = currentService.Spec.Task.GetContainer() + case apitypes.RegistryAuthFromPreviousSpec: + if currentService.PreviousSpec == nil { + return errors.New("service does not have a previous spec") + } + ctnr = currentService.PreviousSpec.Task.GetContainer() + default: + return errors.New("unsupported registryAuthFrom value") + } + if ctnr == nil { + return errors.New("service does not use container tasks") + } + newCtnr.PullOptions = ctnr.PullOptions + // update encodedAuth so it can be used to pin image by digest + if ctnr.PullOptions != nil { + encodedAuth = ctnr.PullOptions.RegistryAuth + } + } + + // retrieve auth config from encoded auth + authConfig := &apitypes.AuthConfig{} + if encodedAuth != "" { + if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil { + logrus.Warnf("invalid authconfig: %v", err) + } + } + + // pin image by digest for API versions < 1.30 + // TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE" + // should be removed in the future. Since integration tests only use the + // latest API version, so this is no longer required. 
+ if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry { + digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig) + if err != nil { + logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error()) + // warning in the client response should be concise + resp.Warnings = append(resp.Warnings, digestWarning(newCtnr.Image)) + } else if newCtnr.Image != digestImage { + logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage) + newCtnr.Image = digestImage + } else { + logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image) + } + + // Replace the context with a fresh one. + // If we timed out while communicating with the + // registry, then "ctx" will already be expired, which + // would cause UpdateService below to fail. Reusing + // "ctx" could make it impossible to update a service + // if the registry is slow or unresponsive. + var cancel func() + ctx, cancel = c.getRequestContext() + defer cancel() + } } var rollback swarmapi.UpdateServiceRequest_Rollback @@ -409,9 +464,17 @@ func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector if err != nil { m.Err = err } + // copy over all of the details + for _, d := range msg.Attrs { + m.Attrs[d.Key] = d.Value + } + // we have the final say over context details (in case there + // is a conflict (if the user added a detail with a context's + // key for some reason)) m.Attrs[contextPrefix+".node.id"] = msg.Context.NodeID m.Attrs[contextPrefix+".service.id"] = msg.Context.ServiceID m.Attrs[contextPrefix+".task.id"] = msg.Context.TaskID + switch msg.Stream { case swarmapi.LogStreamStdout: m.Source = "stdout" diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/swarm.go b/fn/vendor/github.com/docker/docker/daemon/cluster/swarm.go index 3e01a99cf..ef0596b6c 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/swarm.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/swarm.go 
@@ -25,19 +25,20 @@ import ( func (c *Cluster) Init(req types.InitRequest) (string, error) { c.controlMutex.Lock() defer c.controlMutex.Unlock() - c.mu.Lock() if c.nr != nil { if req.ForceNewCluster { + // Take c.mu temporarily to wait for presently running + // API handlers to finish before shutting down the node. + c.mu.Lock() + c.mu.Unlock() + if err := c.nr.Stop(); err != nil { - c.mu.Unlock() return "", err } } else { - c.mu.Unlock() return "", errSwarmExists } } - c.mu.Unlock() if err := validateAndSanitizeInitRequest(&req); err != nil { return "", apierrors.NewBadRequestError(err) @@ -53,6 +54,11 @@ func (c *Cluster) Init(req types.InitRequest) (string, error) { return "", err } + dataPathAddr, err := resolveDataPathAddr(req.DataPathAddr) + if err != nil { + return "", err + } + localAddr := listenHost // If the local address is undetermined, the advertise address @@ -82,16 +88,13 @@ func (c *Cluster) Init(req types.InitRequest) (string, error) { } } - if !req.ForceNewCluster { - clearPersistentState(c.root) - } - nr, err := c.newNodeRunner(nodeStartConfig{ forceNewCluster: req.ForceNewCluster, autolock: req.AutoLockManagers, LocalAddr: localAddr, ListenAddr: net.JoinHostPort(listenHost, listenPort), AdvertiseAddr: net.JoinHostPort(advertiseHost, advertisePort), + DataPathAddr: dataPathAddr, availability: req.Availability, }) if err != nil { @@ -102,16 +105,14 @@ func (c *Cluster) Init(req types.InitRequest) (string, error) { c.mu.Unlock() if err := <-nr.Ready(); err != nil { + c.mu.Lock() + c.nr = nil + c.mu.Unlock() if !req.ForceNewCluster { // if failure on first attempt don't keep state if err := clearPersistentState(c.root); err != nil { return "", err } } - if err != nil { - c.mu.Lock() - c.nr = nil - c.mu.Unlock() - } return "", err } state := nr.State() @@ -154,12 +155,16 @@ func (c *Cluster) Join(req types.JoinRequest) error { } } - clearPersistentState(c.root) + dataPathAddr, err := resolveDataPathAddr(req.DataPathAddr) + if err != nil { + return 
err + } nr, err := c.newNodeRunner(nodeStartConfig{ RemoteAddr: req.RemoteAddrs[0], ListenAddr: net.JoinHostPort(listenHost, listenPort), AdvertiseAddr: advertiseAddr, + DataPathAddr: dataPathAddr, joinAddr: req.RemoteAddrs[0], joinToken: req.JoinToken, availability: req.Availability, @@ -180,6 +185,9 @@ func (c *Cluster) Join(req types.JoinRequest) error { c.mu.Lock() c.nr = nil c.mu.Unlock() + if err := clearPersistentState(c.root); err != nil { + return err + } } return err } @@ -325,9 +333,10 @@ func (c *Cluster) Leave(force bool) error { state := c.currentNodeState() + c.mu.Unlock() + if errors.Cause(state.err) == errSwarmLocked && !force { // leave a locked swarm without --force is not allowed - c.mu.Unlock() return errors.New("Swarm is encrypted and locked. Please unlock it first or use `--force` to ignore this message.") } @@ -339,7 +348,6 @@ func (c *Cluster) Leave(force bool) error { if active && removingManagerCausesLossOfQuorum(reachable, unreachable) { if isLastManager(reachable, unreachable) { msg += "Removing the last manager erases all current state of the swarm. Use `--force` to ignore this message. " - c.mu.Unlock() return errors.New(msg) } msg += fmt.Sprintf("Removing this node leaves %v managers out of %v. Without a Raft quorum your swarm will be inaccessible. ", reachable-1, reachable+unreachable) @@ -350,18 +358,19 @@ func (c *Cluster) Leave(force bool) error { } msg += "The only way to restore a swarm that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to suppress this message." 
- c.mu.Unlock() return errors.New(msg) } // release readers in here if err := nr.Stop(); err != nil { logrus.Errorf("failed to shut down cluster node: %v", err) signal.DumpStacks("") - c.mu.Unlock() return err } + + c.mu.Lock() c.nr = nil c.mu.Unlock() + if nodeID := state.NodeID(); nodeID != "" { nodeContainers, err := c.listContainerForNode(nodeID) if err != nil { @@ -374,7 +383,6 @@ func (c *Cluster) Leave(force bool) error { } } - c.configEvent <- struct{}{} // todo: cleanup optional? if err := clearPersistentState(c.root); err != nil { return err diff --git a/fn/vendor/github.com/docker/docker/daemon/cluster/tasks.go b/fn/vendor/github.com/docker/docker/daemon/cluster/tasks.go index 6a6c59ffe..f0d6621dc 100644 --- a/fn/vendor/github.com/docker/docker/daemon/cluster/tasks.go +++ b/fn/vendor/github.com/docker/docker/daemon/cluster/tasks.go @@ -19,7 +19,7 @@ func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, erro return nil, c.errNoManager(state) } - byName := func(filter filters.Args) error { + filterTransform := func(filter filters.Args) error { if filter.Include("service") { serviceFilters := filter.Get("service") for _, serviceFilter := range serviceFilters { @@ -42,10 +42,15 @@ func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, erro filter.Add("node", node.ID) } } + if !filter.Include("runtime") { + // default to only showing container tasks + filter.Add("runtime", "container") + filter.Add("runtime", "") + } return nil } - filters, err := newListTasksFilters(options.Filters, byName) + filters, err := newListTasksFilters(options.Filters, filterTransform) if err != nil { return nil, err } @@ -60,12 +65,13 @@ func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, erro return nil, err } - tasks := []types.Task{} - + tasks := make([]types.Task, 0, len(r.Tasks)) for _, task := range r.Tasks { - if task.Spec.GetContainer() != nil { - tasks = append(tasks, convert.TaskFromGRPC(*task)) + t, err 
:= convert.TaskFromGRPC(*task) + if err != nil { + return nil, err } + tasks = append(tasks, t) } return tasks, nil } @@ -83,5 +89,5 @@ func (c *Cluster) GetTask(input string) (types.Task, error) { }); err != nil { return types.Task{}, err } - return convert.TaskFromGRPC(*task), nil + return convert.TaskFromGRPC(*task) } diff --git a/fn/vendor/github.com/docker/docker/daemon/commit.go b/fn/vendor/github.com/docker/docker/daemon/commit.go index f3e840c7c..084f48858 100644 --- a/fn/vendor/github.com/docker/docker/daemon/commit.go +++ b/fn/vendor/github.com/docker/docker/daemon/commit.go @@ -12,7 +12,6 @@ import ( containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/container" - "github.com/docker/docker/dockerversion" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/ioutils" @@ -160,71 +159,47 @@ func (daemon *Daemon) Commit(name string, c *backend.ContainerCommitConfig) (str } }() - var history []image.History - rootFS := image.NewRootFS() - osVersion := "" - var osFeatures []string - - if container.ImageID != "" { - img, err := daemon.imageStore.Get(container.ImageID) + var parent *image.Image + if container.ImageID == "" { + parent = new(image.Image) + parent.RootFS = image.NewRootFS() + } else { + parent, err = daemon.stores[container.Platform].imageStore.Get(container.ImageID) if err != nil { return "", err } - history = img.History - rootFS = img.RootFS - osVersion = img.OSVersion - osFeatures = img.OSFeatures } - l, err := daemon.layerStore.Register(rwTar, rootFS.ChainID()) + l, err := daemon.stores[container.Platform].layerStore.Register(rwTar, parent.RootFS.ChainID(), layer.Platform(container.Platform)) if err != nil { return "", err } - defer layer.ReleaseAndLog(daemon.layerStore, l) + defer layer.ReleaseAndLog(daemon.stores[container.Platform].layerStore, l) - h := image.History{ - Author: c.Author, - Created: time.Now().UTC(), - 
CreatedBy: strings.Join(container.Config.Cmd, " "), - Comment: c.Comment, - EmptyLayer: true, + containerConfig := c.ContainerConfig + if containerConfig == nil { + containerConfig = container.Config } - - if diffID := l.DiffID(); layer.DigestSHA256EmptyTar != diffID { - h.EmptyLayer = false - rootFS.Append(diffID) + cc := image.ChildConfig{ + ContainerID: container.ID, + Author: c.Author, + Comment: c.Comment, + ContainerConfig: containerConfig, + Config: newConfig, + DiffID: l.DiffID(), } - - history = append(history, h) - - config, err := json.Marshal(&image.Image{ - V1Image: image.V1Image{ - DockerVersion: dockerversion.Version, - Config: newConfig, - Architecture: runtime.GOARCH, - OS: runtime.GOOS, - Container: container.ID, - ContainerConfig: *container.Config, - Author: c.Author, - Created: h.Created, - }, - RootFS: rootFS, - History: history, - OSFeatures: osFeatures, - OSVersion: osVersion, - }) - + config, err := json.Marshal(image.NewChildImage(parent, cc, container.Platform)) if err != nil { return "", err } - id, err := daemon.imageStore.Create(config) + id, err := daemon.stores[container.Platform].imageStore.Create(config) if err != nil { return "", err } if container.ImageID != "" { - if err := daemon.imageStore.SetParent(id, container.ImageID); err != nil { + if err := daemon.stores[container.Platform].imageStore.SetParent(id, container.ImageID); err != nil { return "", err } } @@ -243,7 +218,7 @@ func (daemon *Daemon) Commit(name string, c *backend.ContainerCommitConfig) (str return "", err } } - if err := daemon.TagImageWithReference(id, newTag); err != nil { + if err := daemon.TagImageWithReference(id, container.Platform, newTag); err != nil { return "", err } imageRef = reference.FamiliarString(newTag) diff --git a/fn/vendor/github.com/docker/docker/daemon/config/config.go b/fn/vendor/github.com/docker/docker/daemon/config/config.go index 134fc671d..beb147224 100644 --- a/fn/vendor/github.com/docker/docker/daemon/config/config.go +++ 
b/fn/vendor/github.com/docker/docker/daemon/config/config.go @@ -9,7 +9,6 @@ import ( "io/ioutil" "reflect" "runtime" - "sort" "strings" "sync" @@ -103,10 +102,15 @@ type CommonConfig struct { RootDeprecated string `json:"graph,omitempty"` Root string `json:"data-root,omitempty"` SocketGroup string `json:"group,omitempty"` - TrustKeyPath string `json:"-"` CorsHeaders string `json:"api-cors-header,omitempty"` EnableCors bool `json:"api-enable-cors,omitempty"` + // TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests + // when pushing to a registry which does not support schema 2. This field is marked as + // deprecated because schema 1 manifests are deprecated in favor of schema 2 and the + // daemon ID will use a dedicated identifier not shared with exported signatures. + TrustKeyPath string `json:"deprecated-key-path,omitempty"` + // LiveRestoreEnabled determines whether we should keep containers // alive upon daemon shutdown/start LiveRestoreEnabled bool `json:"live-restore,omitempty"` @@ -503,19 +507,6 @@ func Validate(config *Config) error { return nil } -// GetAuthorizationPlugins returns daemon's sorted authorization plugins -func (conf *Config) GetAuthorizationPlugins() []string { - conf.Lock() - defer conf.Unlock() - - authPlugins := make([]string, 0, len(conf.AuthorizationPlugins)) - for _, p := range conf.AuthorizationPlugins { - authPlugins = append(authPlugins, p) - } - sort.Strings(authPlugins) - return authPlugins -} - // ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not. 
func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool { if config.ClusterStore != backendType || config.ClusterAdvertise != advertise { diff --git a/fn/vendor/github.com/docker/docker/daemon/config/config_common_unix.go b/fn/vendor/github.com/docker/docker/daemon/config/config_common_unix.go index 2e818cb34..d11cceba2 100644 --- a/fn/vendor/github.com/docker/docker/daemon/config/config_common_unix.go +++ b/fn/vendor/github.com/docker/docker/daemon/config/config_common_unix.go @@ -59,7 +59,7 @@ func (conf *Config) GetExecRoot() string { return conf.ExecRoot } -// GetInitPath returns the configure docker-init path +// GetInitPath returns the configured docker-init path func (conf *Config) GetInitPath() string { conf.Lock() defer conf.Unlock() diff --git a/fn/vendor/github.com/docker/docker/daemon/config/config_test.go b/fn/vendor/github.com/docker/docker/daemon/config/config_test.go index 7508c213c..cc5f01063 100644 --- a/fn/vendor/github.com/docker/docker/daemon/config/config_test.go +++ b/fn/vendor/github.com/docker/docker/daemon/config/config_test.go @@ -9,8 +9,9 @@ import ( "github.com/docker/docker/daemon/discovery" "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/testutil/assert" + "github.com/docker/docker/pkg/testutil" "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" ) func TestDaemonConfigurationNotFound(t *testing.T) { @@ -61,9 +62,9 @@ func TestFindConfigurationConflicts(t *testing.T) { flags := pflag.NewFlagSet("test", pflag.ContinueOnError) flags.String("authorization-plugins", "", "") - assert.NilError(t, flags.Set("authorization-plugins", "asdf")) + assert.NoError(t, flags.Set("authorization-plugins", "asdf")) - assert.Error(t, + testutil.ErrorContains(t, findConfigurationConflicts(config, flags), "authorization-plugins: (from flag: asdf, from file: foobar)") } @@ -74,10 +75,10 @@ func TestFindConfigurationConflictsWithNamedOptions(t *testing.T) { var hosts 
[]string flags.VarP(opts.NewNamedListOptsRef("hosts", &hosts, opts.ValidateHost), "host", "H", "Daemon socket(s) to connect to") - assert.NilError(t, flags.Set("host", "tcp://127.0.0.1:4444")) - assert.NilError(t, flags.Set("host", "unix:///var/run/docker.sock")) + assert.NoError(t, flags.Set("host", "tcp://127.0.0.1:4444")) + assert.NoError(t, flags.Set("host", "unix:///var/run/docker.sock")) - assert.Error(t, findConfigurationConflicts(config, flags), "hosts") + testutil.ErrorContains(t, findConfigurationConflicts(config, flags), "hosts") } func TestDaemonConfigurationMergeConflicts(t *testing.T) { diff --git a/fn/vendor/github.com/docker/docker/daemon/config/config_unix_test.go b/fn/vendor/github.com/docker/docker/daemon/config/config_unix_test.go index 3e7d1be94..9e52cb70f 100644 --- a/fn/vendor/github.com/docker/docker/daemon/config/config_unix_test.go +++ b/fn/vendor/github.com/docker/docker/daemon/config/config_unix_test.go @@ -3,21 +3,18 @@ package config import ( - "io/ioutil" - "runtime" - "testing" + + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/testutil/tempfile" + "github.com/docker/go-units" + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestDaemonConfigurationMerge(t *testing.T) { - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - configFile := f.Name() - - f.Write([]byte(` +func TestGetConflictFreeConfiguration(t *testing.T) { + configFileData := string([]byte(` { "debug": true, "default-ulimits": { @@ -32,7 +29,49 @@ func TestDaemonConfigurationMerge(t *testing.T) { } }`)) - f.Close() + file := tempfile.NewTempFile(t, "docker-config", configFileData) + defer file.Remove() + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + var debug bool + flags.BoolVarP(&debug, "debug", "D", false, "") + flags.Var(opts.NewNamedUlimitOpt("default-ulimits", nil), "default-ulimit", "") + flags.Var(opts.NewNamedMapOpts("log-opts", 
nil, nil), "log-opt", "") + + cc, err := getConflictFreeConfiguration(file.Name(), flags) + require.NoError(t, err) + + assert.True(t, cc.Debug) + + expectedUlimits := map[string]*units.Ulimit{ + "nofile": { + Name: "nofile", + Hard: 2048, + Soft: 1024, + }, + } + + assert.Equal(t, expectedUlimits, cc.Ulimits) +} + +func TestDaemonConfigurationMerge(t *testing.T) { + configFileData := string([]byte(` + { + "debug": true, + "default-ulimits": { + "nofile": { + "Name": "nofile", + "Hard": 2048, + "Soft": 1024 + } + }, + "log-opts": { + "tag": "test_tag" + } + }`)) + + file := tempfile.NewTempFile(t, "docker-config", configFileData) + defer file.Remove() c := &Config{ CommonConfig: CommonConfig{ @@ -44,66 +83,55 @@ func TestDaemonConfigurationMerge(t *testing.T) { }, } - cc, err := MergeDaemonConfigurations(c, nil, configFile) - if err != nil { - t.Fatal(err) - } - if !cc.Debug { - t.Fatalf("expected %v, got %v\n", true, cc.Debug) - } - if !cc.AutoRestart { - t.Fatalf("expected %v, got %v\n", true, cc.AutoRestart) - } - if cc.LogConfig.Type != "syslog" { - t.Fatalf("expected syslog config, got %q\n", cc.LogConfig) + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + var debug bool + flags.BoolVarP(&debug, "debug", "D", false, "") + flags.Var(opts.NewNamedUlimitOpt("default-ulimits", nil), "default-ulimit", "") + flags.Var(opts.NewNamedMapOpts("log-opts", nil, nil), "log-opt", "") + + cc, err := MergeDaemonConfigurations(c, flags, file.Name()) + require.NoError(t, err) + + assert.True(t, cc.Debug) + assert.True(t, cc.AutoRestart) + + expectedLogConfig := LogConfig{ + Type: "syslog", + Config: map[string]string{"tag": "test_tag"}, } - if configValue, OK := cc.LogConfig.Config["tag"]; !OK { - t.Fatal("expected syslog config attributes, got nil\n") - } else { - if configValue != "test_tag" { - t.Fatalf("expected syslog config attributes 'tag=test_tag', got 'tag=%s'\n", configValue) - } + assert.Equal(t, expectedLogConfig, cc.LogConfig) + + expectedUlimits := 
map[string]*units.Ulimit{ + "nofile": { + Name: "nofile", + Hard: 2048, + Soft: 1024, + }, } - if cc.Ulimits == nil { - t.Fatal("expected default ulimit config, got nil\n") - } else { - if _, OK := cc.Ulimits["nofile"]; OK { - if cc.Ulimits["nofile"].Name != "nofile" || - cc.Ulimits["nofile"].Hard != 2048 || - cc.Ulimits["nofile"].Soft != 1024 { - t.Fatalf("expected default ulimit name, hard and soft are nofile, 2048, 1024, got %s, %d, %d\n", cc.Ulimits["nofile"].Name, cc.Ulimits["nofile"].Hard, cc.Ulimits["nofile"].Soft) - } - } else { - t.Fatal("expected default ulimit name nofile, got nil\n") - } - } + assert.Equal(t, expectedUlimits, cc.Ulimits) } func TestDaemonConfigurationMergeShmSize(t *testing.T) { - if runtime.GOOS == "solaris" { - t.Skip("ShmSize not supported on Solaris\n") - } - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - configFile := f.Name() - - f.Write([]byte(` + data := string([]byte(` { "default-shm-size": "1g" }`)) - f.Close() + file := tempfile.NewTempFile(t, "docker-config", data) + defer file.Remove() c := &Config{} - cc, err := MergeDaemonConfigurations(c, nil, configFile) - if err != nil { - t.Fatal(err) - } + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + shmSize := opts.MemBytes(DefaultShmSize) + flags.Var(&shmSize, "default-shm-size", "") + + cc, err := MergeDaemonConfigurations(c, flags, file.Name()) + require.NoError(t, err) + expectedValue := 1 * 1024 * 1024 * 1024 if cc.ShmSize.Value() != int64(expectedValue) { t.Fatalf("expected default shm size %d, got %d", expectedValue, cc.ShmSize.Value()) diff --git a/fn/vendor/github.com/docker/docker/daemon/config/config_windows_test.go b/fn/vendor/github.com/docker/docker/daemon/config/config_windows_test.go index 1c435c630..92ee8e4ac 100644 --- a/fn/vendor/github.com/docker/docker/daemon/config/config_windows_test.go +++ b/fn/vendor/github.com/docker/docker/daemon/config/config_windows_test.go @@ -5,6 +5,11 @@ package config import ( 
"io/ioutil" "testing" + + "github.com/docker/docker/opts" + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestDaemonConfigurationMerge(t *testing.T) { @@ -35,25 +40,21 @@ func TestDaemonConfigurationMerge(t *testing.T) { }, } - cc, err := MergeDaemonConfigurations(c, nil, configFile) - if err != nil { - t.Fatal(err) - } - if !cc.Debug { - t.Fatalf("expected %v, got %v\n", true, cc.Debug) - } - if !cc.AutoRestart { - t.Fatalf("expected %v, got %v\n", true, cc.AutoRestart) - } - if cc.LogConfig.Type != "syslog" { - t.Fatalf("expected syslog config, got %q\n", cc.LogConfig) + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + var debug bool + flags.BoolVarP(&debug, "debug", "D", false, "") + flags.Var(opts.NewNamedMapOpts("log-opts", nil, nil), "log-opt", "") + + cc, err := MergeDaemonConfigurations(c, flags, configFile) + require.NoError(t, err) + + assert.True(t, cc.Debug) + assert.True(t, cc.AutoRestart) + + expectedLogConfig := LogConfig{ + Type: "syslog", + Config: map[string]string{"tag": "test_tag"}, } - if configValue, OK := cc.LogConfig.Config["tag"]; !OK { - t.Fatal("expected syslog config attributes, got nil\n") - } else { - if configValue != "test_tag" { - t.Fatalf("expected syslog config attributes 'tag=test_tag', got 'tag=%s'\n", configValue) - } - } + assert.Equal(t, expectedLogConfig, cc.LogConfig) } diff --git a/fn/vendor/github.com/docker/docker/daemon/configs.go b/fn/vendor/github.com/docker/docker/daemon/configs.go new file mode 100644 index 000000000..31da56b2d --- /dev/null +++ b/fn/vendor/github.com/docker/docker/daemon/configs.go @@ -0,0 +1,23 @@ +package daemon + +import ( + "github.com/Sirupsen/logrus" + swarmtypes "github.com/docker/docker/api/types/swarm" +) + +// SetContainerConfigReferences sets the container config references needed +func (daemon *Daemon) SetContainerConfigReferences(name string, refs []*swarmtypes.ConfigReference) error { + if !configsSupported() 
&& len(refs) > 0 { + logrus.Warn("configs are not supported on this platform") + return nil + } + + c, err := daemon.GetContainer(name) + if err != nil { + return err + } + + c.ConfigReferences = refs + + return nil +} diff --git a/fn/vendor/github.com/docker/docker/daemon/configs_linux.go b/fn/vendor/github.com/docker/docker/daemon/configs_linux.go new file mode 100644 index 000000000..af20ad78b --- /dev/null +++ b/fn/vendor/github.com/docker/docker/daemon/configs_linux.go @@ -0,0 +1,7 @@ +// +build linux + +package daemon + +func configsSupported() bool { + return true +} diff --git a/fn/vendor/github.com/docker/docker/daemon/configs_unsupported.go b/fn/vendor/github.com/docker/docker/daemon/configs_unsupported.go new file mode 100644 index 000000000..1a7cbc9dc --- /dev/null +++ b/fn/vendor/github.com/docker/docker/daemon/configs_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!windows + +package daemon + +func configsSupported() bool { + return false +} diff --git a/fn/vendor/github.com/docker/docker/daemon/configs_windows.go b/fn/vendor/github.com/docker/docker/daemon/configs_windows.go new file mode 100644 index 000000000..7cb2e9c43 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/daemon/configs_windows.go @@ -0,0 +1,7 @@ +// +build windows + +package daemon + +func configsSupported() bool { + return true +} diff --git a/fn/vendor/github.com/docker/docker/daemon/container.go b/fn/vendor/github.com/docker/docker/daemon/container.go index 50878f01e..149df0dec 100644 --- a/fn/vendor/github.com/docker/docker/daemon/container.go +++ b/fn/vendor/github.com/docker/docker/daemon/container.go @@ -18,6 +18,7 @@ import ( "github.com/docker/docker/pkg/truncindex" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/nat" + "github.com/opencontainers/selinux/go-selinux/label" ) // GetContainer looks for a container using the provided information, which could be @@ -90,6 +91,9 @@ func (daemon *Daemon) load(id string) (*container.Container, error) { 
if err := container.FromDisk(); err != nil { return nil, err } + if err := label.ReserveLabel(container.ProcessLabel); err != nil { + return nil, err + } if container.ID != id { return container, fmt.Errorf("Container %s is stored at %s", container.ID, id) @@ -99,7 +103,7 @@ func (daemon *Daemon) load(id string) (*container.Container, error) { } // Register makes a container object usable by the daemon as -func (daemon *Daemon) Register(c *container.Container) { +func (daemon *Daemon) Register(c *container.Container) error { // Attach to stdout and stderr if c.Config.OpenStdin { c.StreamConfig.NewInputPipes() @@ -107,11 +111,17 @@ func (daemon *Daemon) Register(c *container.Container) { c.StreamConfig.NewNopInputPipe() } + // once in the memory store it is visible to other goroutines + // grab a Lock until it has been checkpointed to avoid races + c.Lock() + defer c.Unlock() + daemon.containers.Add(c.ID, c) daemon.idIndex.Add(c.ID) + return c.CheckpointTo(daemon.containersReplica) } -func (daemon *Daemon) newContainer(name string, config *containertypes.Config, hostConfig *containertypes.HostConfig, imgID image.ID, managed bool) (*container.Container, error) { +func (daemon *Daemon) newContainer(name string, platform string, config *containertypes.Config, hostConfig *containertypes.HostConfig, imgID image.ID, managed bool) (*container.Container, error) { var ( id string err error @@ -144,8 +154,8 @@ func (daemon *Daemon) newContainer(name string, config *containertypes.Config, h base.ImageID = imgID base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName} base.Name = name - base.Driver = daemon.GraphDriverName() - + base.Driver = daemon.GraphDriverName(platform) + base.Platform = platform return base, err } @@ -212,7 +222,7 @@ func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig * runconfig.SetDefaultNetModeIfBlank(hostConfig) container.HostConfig = hostConfig - return container.ToDisk() + return 
container.CheckpointTo(daemon.containersReplica) } // verifyContainerSettings performs validation of the hostconfig and config @@ -244,20 +254,20 @@ func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostCon // Validate the healthcheck params of Config if config.Healthcheck != nil { - if config.Healthcheck.Interval != 0 && config.Healthcheck.Interval < time.Second { - return nil, fmt.Errorf("Interval in Healthcheck cannot be less than one second") + if config.Healthcheck.Interval != 0 && config.Healthcheck.Interval < containertypes.MinimumDuration { + return nil, fmt.Errorf("Interval in Healthcheck cannot be less than %s", containertypes.MinimumDuration) } - if config.Healthcheck.Timeout != 0 && config.Healthcheck.Timeout < time.Second { - return nil, fmt.Errorf("Timeout in Healthcheck cannot be less than one second") + if config.Healthcheck.Timeout != 0 && config.Healthcheck.Timeout < containertypes.MinimumDuration { + return nil, fmt.Errorf("Timeout in Healthcheck cannot be less than %s", containertypes.MinimumDuration) } if config.Healthcheck.Retries < 0 { return nil, fmt.Errorf("Retries in Healthcheck cannot be negative") } - if config.Healthcheck.StartPeriod < 0 { - return nil, fmt.Errorf("StartPeriod in Healthcheck cannot be negative") + if config.Healthcheck.StartPeriod != 0 && config.Healthcheck.StartPeriod < containertypes.MinimumDuration { + return nil, fmt.Errorf("StartPeriod in Healthcheck cannot be less than %s", containertypes.MinimumDuration) } } } @@ -301,7 +311,7 @@ func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostCon return nil, fmt.Errorf("maximum retry count cannot be negative") } case "": - // do nothing + // do nothing default: return nil, fmt.Errorf("invalid restart policy '%s'", p.Name) } diff --git a/fn/vendor/github.com/docker/docker/daemon/container_operations.go b/fn/vendor/github.com/docker/docker/daemon/container_operations.go index 82ca4701a..7c7dcc7ce 100644 --- 
a/fn/vendor/github.com/docker/docker/daemon/container_operations.go +++ b/fn/vendor/github.com/docker/docker/daemon/container_operations.go @@ -44,6 +44,7 @@ func (daemon *Daemon) getDNSSearchSettings(container *container.Container) []str return nil } + func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]libnetwork.SandboxOption, error) { var ( sboxOptions []libnetwork.SandboxOption @@ -568,7 +569,7 @@ func (daemon *Daemon) allocateNetwork(container *container.Container) error { } - if err := container.WriteHostConfig(); err != nil { + if _, err := container.WriteHostConfig(); err != nil { return err } networkActions.WithValues("allocate").UpdateSince(start) @@ -885,7 +886,12 @@ func (daemon *Daemon) initializeNetworking(container *container.Container) error if err != nil { return err } - initializeNetworkingPaths(container, nc) + + err = daemon.initializeNetworkingPaths(container, nc) + if err != nil { + return err + } + container.Config.Hostname = nc.Config.Hostname container.Config.Domainname = nc.Config.Domainname return nil @@ -980,6 +986,9 @@ func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName if endpointConfig == nil { endpointConfig = &networktypes.EndpointSettings{} } + container.Lock() + defer container.Unlock() + if !container.Running { if container.RemovalInProgress || container.Dead { return errRemovalContainer(container.ID) @@ -1002,15 +1011,16 @@ func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName return err } } - if err := container.ToDiskLocking(); err != nil { - return fmt.Errorf("Error saving container to disk: %v", err) - } - return nil + + return container.CheckpointTo(daemon.containersReplica) } // DisconnectFromNetwork disconnects container from network n. 
func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, networkName string, force bool) error { n, err := daemon.FindNetwork(networkName) + container.Lock() + defer container.Unlock() + if !container.Running || (err != nil && force) { if container.RemovalInProgress || container.Dead { return errRemovalContainer(container.ID) @@ -1038,16 +1048,16 @@ func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, netw return err } - if err := container.ToDiskLocking(); err != nil { - return fmt.Errorf("Error saving container to disk: %v", err) + if err := container.CheckpointTo(daemon.containersReplica); err != nil { + return err } if n != nil { - attributes := map[string]string{ + daemon.LogNetworkEventWithAttributes(n, "disconnect", map[string]string{ "container": container.ID, - } - daemon.LogNetworkEventWithAttributes(n, "disconnect", attributes) + }) } + return nil } diff --git a/fn/vendor/github.com/docker/docker/daemon/container_operations_solaris.go b/fn/vendor/github.com/docker/docker/daemon/container_operations_solaris.go index 1653948de..c5728d0ee 100644 --- a/fn/vendor/github.com/docker/docker/daemon/container_operations_solaris.go +++ b/fn/vendor/github.com/docker/docker/daemon/container_operations_solaris.go @@ -42,5 +42,6 @@ func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[] return nil } -func initializeNetworkingPaths(container *container.Container, nc *container.Container) { +func (daemon *Daemon) initializeNetworkingPaths(container *container.Container, nc *container.Container) error { + return nil } diff --git a/fn/vendor/github.com/docker/docker/daemon/container_operations_unix.go b/fn/vendor/github.com/docker/docker/daemon/container_operations_unix.go index 17d5a061d..09c2b7df1 100644 --- a/fn/vendor/github.com/docker/docker/daemon/container_operations_unix.go +++ b/fn/vendor/github.com/docker/docker/daemon/container_operations_unix.go @@ -3,6 +3,7 @@ package daemon import ( + 
"context" "fmt" "io/ioutil" "os" @@ -19,7 +20,7 @@ import ( "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/runconfig" "github.com/docker/libnetwork" - "github.com/opencontainers/runc/libcontainer/label" + "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" ) @@ -108,14 +109,14 @@ func (daemon *Daemon) setupIpcDirs(c *container.Container) error { } c.ShmPath = "/dev/shm" } else { - rootUID, rootGID := daemon.GetRemappedUIDGID() + rootIDs := daemon.idMappings.RootPair() if !c.HasMountFor("/dev/shm") { shmPath, err := c.ShmResourcePath() if err != nil { return err } - if err := idtools.MkdirAllAs(shmPath, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChown(shmPath, 0700, rootIDs); err != nil { return err } @@ -127,7 +128,7 @@ func (daemon *Daemon) setupIpcDirs(c *container.Container) error { if err := syscall.Mount("shm", shmPath, "tmpfs", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil { return fmt.Errorf("mounting shm tmpfs: %s", err) } - if err := os.Chown(shmPath, rootUID, rootGID); err != nil { + if err := os.Chown(shmPath, rootIDs.UID, rootIDs.GID); err != nil { return err } } @@ -145,6 +146,13 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { localMountPath := c.SecretMountPath() logrus.Debugf("secrets: setting up secret dir: %s", localMountPath) + // retrieve possible remapped range start for root UID, GID + rootIDs := daemon.idMappings.RootPair() + // create tmpfs + if err := idtools.MkdirAllAndChown(localMountPath, 0700, rootIDs); err != nil { + return errors.Wrap(err, "error creating secret local mount path") + } + defer func() { if setupErr != nil { // cleanup @@ -156,35 +164,26 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { } }() - // retrieve possible remapped range start for root UID, GID - rootUID, rootGID := daemon.GetRemappedUIDGID() - 
// create tmpfs - if err := idtools.MkdirAllAs(localMountPath, 0700, rootUID, rootGID); err != nil { - return errors.Wrap(err, "error creating secret local mount path") - } - tmpfsOwnership := fmt.Sprintf("uid=%d,gid=%d", rootUID, rootGID) + tmpfsOwnership := fmt.Sprintf("uid=%d,gid=%d", rootIDs.UID, rootIDs.GID) if err := mount.Mount("tmpfs", localMountPath, "tmpfs", "nodev,nosuid,noexec,"+tmpfsOwnership); err != nil { return errors.Wrap(err, "unable to setup secret mount") } - for _, s := range c.SecretReferences { - if c.SecretStore == nil { - return fmt.Errorf("secret store is not initialized") - } + if c.DependencyStore == nil { + return fmt.Errorf("secret store is not initialized") + } + for _, s := range c.SecretReferences { // TODO (ehazlett): use type switch when more are supported if s.File == nil { - return fmt.Errorf("secret target type is not a file target") + logrus.Error("secret target type is not a file target") + continue } - targetPath := filepath.Clean(s.File.Name) - // ensure that the target is a filename only; no paths allowed - if targetPath != filepath.Base(targetPath) { - return fmt.Errorf("error creating secret: secret must not be a path") - } - - fPath := filepath.Join(localMountPath, targetPath) - if err := idtools.MkdirAllAs(filepath.Dir(fPath), 0700, rootUID, rootGID); err != nil { + // secrets are created in the SecretMountPath on the host, at a + // single level + fPath := c.SecretFilePath(*s) + if err := idtools.MkdirAllAndChown(filepath.Dir(fPath), 0700, rootIDs); err != nil { return errors.Wrap(err, "error creating secret mount path") } @@ -192,9 +191,9 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { "name": s.File.Name, "path": fPath, }).Debug("injecting secret") - secret := c.SecretStore.Get(s.SecretID) - if secret == nil { - return fmt.Errorf("unable to get secret from secret store") + secret, err := c.DependencyStore.Secrets().Get(s.SecretID) + if err != nil { + return errors.Wrap(err, "unable 
to get secret from secret store") } if err := ioutil.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil { return errors.Wrap(err, "error injecting secret") @@ -209,7 +208,7 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { return err } - if err := os.Chown(fPath, rootUID+uid, rootGID+gid); err != nil { + if err := os.Chown(fPath, rootIDs.UID+uid, rootIDs.GID+gid); err != nil { return errors.Wrap(err, "error setting ownership for secret") } } @@ -224,11 +223,84 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { return nil } -func killProcessDirectly(container *container.Container) error { - if _, err := container.WaitStop(10 * time.Second); err != nil { +func (daemon *Daemon) setupConfigDir(c *container.Container) (setupErr error) { + if len(c.ConfigReferences) == 0 { + return nil + } + + localPath := c.ConfigsDirPath() + logrus.Debugf("configs: setting up config dir: %s", localPath) + + // retrieve possible remapped range start for root UID, GID + rootIDs := daemon.idMappings.RootPair() + // create tmpfs + if err := idtools.MkdirAllAndChown(localPath, 0700, rootIDs); err != nil { + return errors.Wrap(err, "error creating config dir") + } + + defer func() { + if setupErr != nil { + if err := os.RemoveAll(localPath); err != nil { + logrus.Errorf("error cleaning up config dir: %s", err) + } + } + }() + + if c.DependencyStore == nil { + return fmt.Errorf("config store is not initialized") + } + + for _, configRef := range c.ConfigReferences { + // TODO (ehazlett): use type switch when more are supported + if configRef.File == nil { + logrus.Error("config target type is not a file target") + continue + } + + fPath := c.ConfigFilePath(*configRef) + + log := logrus.WithFields(logrus.Fields{"name": configRef.File.Name, "path": fPath}) + + if err := idtools.MkdirAllAndChown(filepath.Dir(fPath), 0700, rootIDs); err != nil { + return errors.Wrap(err, "error creating config path") + } + + 
log.Debug("injecting config") + config, err := c.DependencyStore.Configs().Get(configRef.ConfigID) + if err != nil { + return errors.Wrap(err, "unable to get config from config store") + } + if err := ioutil.WriteFile(fPath, config.Spec.Data, configRef.File.Mode); err != nil { + return errors.Wrap(err, "error injecting config") + } + + uid, err := strconv.Atoi(configRef.File.UID) + if err != nil { + return err + } + gid, err := strconv.Atoi(configRef.File.GID) + if err != nil { + return err + } + + if err := os.Chown(fPath, rootIDs.UID+uid, rootIDs.GID+gid); err != nil { + return errors.Wrap(err, "error setting ownership for config") + } + } + + return nil +} + +func killProcessDirectly(cntr *container.Container) error { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Block until the container to stops or timeout. + status := <-cntr.Wait(ctx, container.WaitConditionNotRunning) + if status.Err() != nil { // Ensure that we don't kill ourselves - if pid := container.GetPID(); pid != 0 { - logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID)) + if pid := cntr.GetPID(); pid != 0 { + logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(cntr.ID)) if err := syscall.Kill(pid, 9); err != nil { if err != syscall.ESRCH { return err @@ -277,8 +349,9 @@ func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[] return nil } -func initializeNetworkingPaths(container *container.Container, nc *container.Container) { +func (daemon *Daemon) initializeNetworkingPaths(container *container.Container, nc *container.Container) error { container.HostnamePath = nc.HostnamePath container.HostsPath = nc.HostsPath container.ResolvConfPath = nc.ResolvConfPath + return nil } diff --git a/fn/vendor/github.com/docker/docker/daemon/container_operations_windows.go 
b/fn/vendor/github.com/docker/docker/daemon/container_operations_windows.go index df29ee7f4..2788f1a7c 100644 --- a/fn/vendor/github.com/docker/docker/daemon/container_operations_windows.go +++ b/fn/vendor/github.com/docker/docker/daemon/container_operations_windows.go @@ -1,16 +1,70 @@ -// +build windows - package daemon import ( + "fmt" + "io/ioutil" + "os" + + "github.com/Sirupsen/logrus" "github.com/docker/docker/container" + "github.com/docker/docker/pkg/system" "github.com/docker/libnetwork" + "github.com/pkg/errors" ) func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { return nil, nil } +func (daemon *Daemon) setupConfigDir(c *container.Container) (setupErr error) { + if len(c.ConfigReferences) == 0 { + return nil + } + + localPath := c.ConfigsDirPath() + logrus.Debugf("configs: setting up config dir: %s", localPath) + + // create local config root + if err := system.MkdirAllWithACL(localPath, 0, system.SddlAdministratorsLocalSystem); err != nil { + return errors.Wrap(err, "error creating config dir") + } + + defer func() { + if setupErr != nil { + if err := os.RemoveAll(localPath); err != nil { + logrus.Errorf("error cleaning up config dir: %s", err) + } + } + }() + + if c.DependencyStore == nil { + return fmt.Errorf("config store is not initialized") + } + + for _, configRef := range c.ConfigReferences { + // TODO (ehazlett): use type switch when more are supported + if configRef.File == nil { + logrus.Error("config target type is not a file target") + continue + } + + fPath := c.ConfigFilePath(*configRef) + + log := logrus.WithFields(logrus.Fields{"name": configRef.File.Name, "path": fPath}) + + log.Debug("injecting config") + config, err := c.DependencyStore.Configs().Get(configRef.ConfigID) + if err != nil { + return errors.Wrap(err, "unable to get config from config store") + } + if err := ioutil.WriteFile(fPath, config.Spec.Data, configRef.File.Mode); err != nil { + return errors.Wrap(err, "error injecting 
config") + } + } + + return nil +} + // getSize returns real size & virtual size func (daemon *Daemon) getSize(containerID string) (int64, int64) { // TODO Windows @@ -35,6 +89,57 @@ func detachMounted(path string) error { return nil } +func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { + if len(c.SecretReferences) == 0 { + return nil + } + + localMountPath := c.SecretMountPath() + logrus.Debugf("secrets: setting up secret dir: %s", localMountPath) + + // create local secret root + if err := system.MkdirAllWithACL(localMountPath, 0, system.SddlAdministratorsLocalSystem); err != nil { + return errors.Wrap(err, "error creating secret local directory") + } + + defer func() { + if setupErr != nil { + if err := os.RemoveAll(localMountPath); err != nil { + logrus.Errorf("error cleaning up secret mount: %s", err) + } + } + }() + + if c.DependencyStore == nil { + return fmt.Errorf("secret store is not initialized") + } + + for _, s := range c.SecretReferences { + // TODO (ehazlett): use type switch when more are supported + if s.File == nil { + logrus.Error("secret target type is not a file target") + continue + } + + // secrets are created in the SecretMountPath on the host, at a + // single level + fPath := c.SecretFilePath(*s) + logrus.WithFields(logrus.Fields{ + "name": s.File.Name, + "path": fPath, + }).Debug("injecting secret") + secret, err := c.DependencyStore.Secrets().Get(s.SecretID) + if err != nil { + return errors.Wrap(err, "unable to get secret from secret store") + } + if err := ioutil.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil { + return errors.Wrap(err, "error injecting secret") + } + } + + return nil +} + func killProcessDirectly(container *container.Container) error { return nil } @@ -55,6 +160,43 @@ func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[] return nil } -func initializeNetworkingPaths(container *container.Container, nc *container.Container) { +func (daemon *Daemon) 
initializeNetworkingPaths(container *container.Container, nc *container.Container) error { + + if nc.HostConfig.Isolation.IsHyperV() { + return fmt.Errorf("sharing of hyperv containers network is not supported") + } + container.NetworkSharedContainerID = nc.ID + + if nc.NetworkSettings != nil { + for n := range nc.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(n) + if err != nil { + continue + } + + ep, err := nc.GetEndpointInNetwork(sn) + if err != nil { + continue + } + + data, err := ep.DriverInfo() + if err != nil { + continue + } + + if data["GW_INFO"] != nil { + gwInfo := data["GW_INFO"].(map[string]interface{}) + if gwInfo["hnsid"] != nil { + container.SharedEndpointList = append(container.SharedEndpointList, gwInfo["hnsid"].(string)) + } + } + + if data["hnsid"] != nil { + container.SharedEndpointList = append(container.SharedEndpointList, data["hnsid"].(string)) + } + } + } + + return nil } diff --git a/fn/vendor/github.com/docker/docker/daemon/create.go b/fn/vendor/github.com/docker/docker/daemon/create.go index 55a106c64..78070fd29 100644 --- a/fn/vendor/github.com/docker/docker/daemon/create.go +++ b/fn/vendor/github.com/docker/docker/daemon/create.go @@ -19,8 +19,9 @@ import ( "github.com/docker/docker/layer" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" "github.com/docker/docker/runconfig" - "github.com/opencontainers/runc/libcontainer/label" + "github.com/opencontainers/selinux/go-selinux/label" ) // CreateManagedContainer creates a container that is managed by a Service @@ -75,6 +76,16 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) ( err error ) + // TODO: @jhowardmsft LCOW support - at a later point, can remove the hard-coding + // to force the platform to be linux. 
+ // Default the platform if not supplied + if params.Platform == "" { + params.Platform = runtime.GOOS + } + if system.LCOWSupported() { + params.Platform = "linux" + } + if params.Config.Image != "" { img, err = daemon.GetImage(params.Config.Image) if err != nil { @@ -82,9 +93,23 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) ( } if runtime.GOOS == "solaris" && img.OS != "solaris " { - return nil, errors.New("Platform on which parent image was created is not Solaris") + return nil, errors.New("platform on which parent image was created is not Solaris") } imgID = img.ID() + + if runtime.GOOS == "windows" && img.OS == "linux" && !system.LCOWSupported() { + return nil, errors.New("platform on which parent image was created is not Windows") + } + } + + // Make sure the platform requested matches the image + if img != nil { + if params.Platform != img.Platform() { + // Ignore this in LCOW mode. @jhowardmsft TODO - This will need revisiting later. + if !system.LCOWSupported() { + return nil, fmt.Errorf("cannot create a %s container from a %s image", params.Platform, img.Platform()) + } + } } if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil { @@ -95,7 +120,7 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) ( return nil, err } - if container, err = daemon.newContainer(params.Name, params.Config, params.HostConfig, imgID, managed); err != nil { + if container, err = daemon.newContainer(params.Name, params.Platform, params.Config, params.HostConfig, imgID, managed); err != nil { return nil, err } defer func() { @@ -117,14 +142,11 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) ( return nil, err } - rootUID, rootGID, err := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) - if err != nil { + rootIDs := daemon.idMappings.RootPair() + if err := idtools.MkdirAndChown(container.Root, 0700, rootIDs); err != nil { return nil, err } - if err := 
idtools.MkdirAs(container.Root, 0700, rootUID, rootGID); err != nil { - return nil, err - } - if err := idtools.MkdirAs(container.CheckpointDir(), 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAndChown(container.CheckpointDir(), 0700, rootIDs); err != nil { return nil, err } @@ -145,16 +167,21 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) ( runconfig.SetDefaultNetModeIfBlank(container.HostConfig) daemon.updateContainerNetworkSettings(container, endpointsConfigs) - - if err := container.ToDisk(); err != nil { - logrus.Errorf("Error saving new container to disk: %v", err) + if err := daemon.Register(container); err != nil { return nil, err } - daemon.Register(container) + stateCtr.set(container.ID, "stopped") daemon.LogContainerEvent(container, "create") return container, nil } +func toHostConfigSelinuxLabels(labels []string) []string { + for i, l := range labels { + labels[i] = "label=" + l + } + return labels +} + func (daemon *Daemon) generateSecurityOpt(hostConfig *containertypes.HostConfig) ([]string, error) { for _, opt := range hostConfig.SecurityOpt { con := strings.Split(opt, "=") @@ -167,7 +194,7 @@ func (daemon *Daemon) generateSecurityOpt(hostConfig *containertypes.HostConfig) pidMode := hostConfig.PidMode privileged := hostConfig.Privileged if ipcMode.IsHost() || pidMode.IsHost() || privileged { - return label.DisableSecOpt(), nil + return toHostConfigSelinuxLabels(label.DisableSecOpt()), nil } var ipcLabel []string @@ -181,7 +208,7 @@ func (daemon *Daemon) generateSecurityOpt(hostConfig *containertypes.HostConfig) } ipcLabel = label.DupSecOpt(c.ProcessLabel) if pidContainer == "" { - return ipcLabel, err + return toHostConfigSelinuxLabels(ipcLabel), err } } if pidContainer != "" { @@ -192,7 +219,7 @@ func (daemon *Daemon) generateSecurityOpt(hostConfig *containertypes.HostConfig) pidLabel = label.DupSecOpt(c.ProcessLabel) if ipcContainer == "" { - return pidLabel, err + return 
toHostConfigSelinuxLabels(pidLabel), err } } @@ -202,7 +229,7 @@ func (daemon *Daemon) generateSecurityOpt(hostConfig *containertypes.HostConfig) return nil, fmt.Errorf("--ipc and --pid containers SELinux labels aren't the same") } } - return pidLabel, nil + return toHostConfigSelinuxLabels(pidLabel), nil } return nil, nil } @@ -210,7 +237,7 @@ func (daemon *Daemon) generateSecurityOpt(hostConfig *containertypes.HostConfig) func (daemon *Daemon) setRWLayer(container *container.Container) error { var layerID layer.ChainID if container.ImageID != "" { - img, err := daemon.imageStore.Get(container.ImageID) + img, err := daemon.stores[container.Platform].imageStore.Get(container.ImageID) if err != nil { return err } @@ -223,7 +250,7 @@ func (daemon *Daemon) setRWLayer(container *container.Container) error { StorageOpt: container.HostConfig.StorageOpt, } - rwLayer, err := daemon.layerStore.CreateRWLayer(container.ID, layerID, rwLayerOpts) + rwLayer, err := daemon.stores[container.Platform].layerStore.CreateRWLayer(container.ID, layerID, rwLayerOpts) if err != nil { return err } diff --git a/fn/vendor/github.com/docker/docker/daemon/create_unix.go b/fn/vendor/github.com/docker/docker/daemon/create_unix.go index 2fe5c98a7..2501a3374 100644 --- a/fn/vendor/github.com/docker/docker/daemon/create_unix.go +++ b/fn/vendor/github.com/docker/docker/daemon/create_unix.go @@ -12,7 +12,7 @@ import ( mounttypes "github.com/docker/docker/api/types/mount" "github.com/docker/docker/container" "github.com/docker/docker/pkg/stringid" - "github.com/opencontainers/runc/libcontainer/label" + "github.com/opencontainers/selinux/go-selinux/label" ) // createContainerPlatformSpecificSettings performs platform specific container create functionality @@ -22,8 +22,8 @@ func (daemon *Daemon) createContainerPlatformSpecificSettings(container *contain } defer daemon.Unmount(container) - rootUID, rootGID := daemon.GetRemappedUIDGID() - if err := container.SetupWorkingDirectory(rootUID, rootGID); err 
!= nil { + rootIDs := daemon.idMappings.RootPair() + if err := container.SetupWorkingDirectory(rootIDs); err != nil { return err } diff --git a/fn/vendor/github.com/docker/docker/daemon/daemon.go b/fn/vendor/github.com/docker/docker/daemon/daemon.go index 080ff1202..cdaa5c9a4 100644 --- a/fn/vendor/github.com/docker/docker/daemon/daemon.go +++ b/fn/vendor/github.com/docker/docker/daemon/daemon.go @@ -6,6 +6,7 @@ package daemon import ( + "context" "fmt" "io/ioutil" "net" @@ -18,7 +19,7 @@ import ( "time" "github.com/Sirupsen/logrus" - containerd "github.com/docker/containerd/api/grpc/types" + containerd "github.com/containerd/containerd/api/grpc/types" "github.com/docker/docker/api" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" @@ -39,7 +40,6 @@ import ( "github.com/docker/docker/layer" "github.com/docker/docker/libcontainerd" "github.com/docker/docker/migrate/v1" - "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/registrar" @@ -69,48 +69,70 @@ var ( errSystemNotSupported = errors.New("The Docker daemon is not supported on this platform.") ) +type daemonStore struct { + graphDriver string + imageRoot string + imageStore image.Store + layerStore layer.Store + distributionMetadataStore dmetadata.Store + referenceStore refstore.Store +} + // Daemon holds information about the Docker daemon. 
type Daemon struct { - ID string - repository string - containers container.Store - execCommands *exec.Store - referenceStore refstore.Store - downloadManager *xfer.LayerDownloadManager - uploadManager *xfer.LayerUploadManager - distributionMetadataStore dmetadata.Store - trustKey libtrust.PrivateKey - idIndex *truncindex.TruncIndex - configStore *config.Config - statsCollector *stats.Collector - defaultLogConfig containertypes.LogConfig - RegistryService registry.Service - EventsService *events.Events - netController libnetwork.NetworkController - volumes *store.VolumeStore - discoveryWatcher discovery.Reloader - root string - seccompEnabled bool - apparmorEnabled bool - shutdown bool - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - layerStore layer.Store - imageStore image.Store - PluginStore *plugin.Store // todo: remove - pluginManager *plugin.Manager - nameIndex *registrar.Registrar - linkIndex *linkIndex - containerd libcontainerd.Client - containerdRemote libcontainerd.Remote - defaultIsolation containertypes.Isolation // Default isolation mode on Windows - clusterProvider cluster.Provider - cluster Cluster + ID string + repository string + containers container.Store + containersReplica container.ViewDB + execCommands *exec.Store + downloadManager *xfer.LayerDownloadManager + uploadManager *xfer.LayerUploadManager + trustKey libtrust.PrivateKey + idIndex *truncindex.TruncIndex + configStore *config.Config + statsCollector *stats.Collector + defaultLogConfig containertypes.LogConfig + RegistryService registry.Service + EventsService *events.Events + netController libnetwork.NetworkController + volumes *store.VolumeStore + discoveryWatcher discovery.Reloader + root string + seccompEnabled bool + apparmorEnabled bool + shutdown bool + idMappings *idtools.IDMappings + stores map[string]daemonStore // By container target platform + PluginStore *plugin.Store // todo: remove + pluginManager *plugin.Manager + nameIndex *registrar.Registrar + linkIndex 
*linkIndex + containerd libcontainerd.Client + containerdRemote libcontainerd.Remote + defaultIsolation containertypes.Isolation // Default isolation mode on Windows + clusterProvider cluster.Provider + cluster Cluster + metricsPluginListener net.Listener machineMemory uint64 seccompProfile []byte seccompProfilePath string + + diskUsageRunning int32 + pruneRunning int32 + hosts map[string]bool // hosts stores the addresses the daemon is listening on + startupDone chan struct{} +} + +// StoreHosts stores the addresses the daemon is listening on +func (daemon *Daemon) StoreHosts(hosts []string) { + if daemon.hosts == nil { + daemon.hosts = make(map[string]bool) + } + for _, h := range hosts { + daemon.hosts[h] = true + } } // HasExperimental returns whether the experimental features of the daemon are enabled or not @@ -122,10 +144,7 @@ func (daemon *Daemon) HasExperimental() bool { } func (daemon *Daemon) restore() error { - var ( - currentDriver = daemon.GraphDriverName() - containers = make(map[string]*container.Container) - ) + containers := make(map[string]*container.Container) logrus.Info("Loading containers: start.") @@ -143,8 +162,9 @@ func (daemon *Daemon) restore() error { } // Ignore the container if it does not support the current driver being used by the graph - if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver { - rwlayer, err := daemon.layerStore.GetRWLayer(container.ID) + currentDriverForContainerPlatform := daemon.stores[container.Platform].graphDriver + if (container.Driver == "" && currentDriverForContainerPlatform == "aufs") || container.Driver == currentDriverForContainerPlatform { + rwlayer, err := daemon.stores[container.Platform].layerStore.GetRWLayer(container.ID) if err != nil { logrus.Errorf("Failed to load container mount %v: %v", id, err) continue @@ -163,17 +183,20 @@ func (daemon *Daemon) restore() error { activeSandboxes := make(map[string]interface{}) for id, c := range containers { if err := 
daemon.registerName(c); err != nil { - logrus.Errorf("Failed to register container %s: %s", c.ID, err) + logrus.Errorf("Failed to register container name %s: %s", c.ID, err) delete(containers, id) continue } - daemon.Register(c) - // verify that all volumes valid and have been migrated from the pre-1.7 layout if err := daemon.verifyVolumesInfo(c); err != nil { // don't skip the container due to error logrus.Errorf("Failed to verify volumes for container '%s': %v", c.ID, err) } + if err := daemon.Register(c); err != nil { + logrus.Errorf("Failed to register container %s: %s", c.ID, err) + delete(containers, id) + continue + } // The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver. // We should rewrite it to use the daemon defaults. @@ -192,10 +215,12 @@ func (daemon *Daemon) restore() error { wg.Add(1) go func(c *container.Container) { defer wg.Done() - if err := backportMountSpec(c); err != nil { - logrus.Error("Failed to migrate old mounts to use new spec format") + daemon.backportMountSpec(c) + if err := daemon.checkpointAndSave(c); err != nil { + logrus.WithError(err).WithField("container", c.ID).Error("error saving backported mountspec to disk") } + daemon.setStateCounter(c) if c.IsRunning() || c.IsPaused() { c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking if err := daemon.containerd.Restore(c.ID, c.InitializeStdio); err != nil { @@ -213,7 +238,6 @@ func (daemon *Daemon) restore() error { // The error is only logged here. logrus.Warnf("Failed to mount container on getting BaseFs path %v: %v", c.ID, err) } else { - // if mount success, then unmount it if err := daemon.Unmount(c); err != nil { logrus.Warnf("Failed to umount container on getting BaseFs path %v: %v", c.ID, err) } @@ -251,6 +275,7 @@ func (daemon *Daemon) restore() error { } } + c.Lock() if c.RemovalInProgress { // We probably crashed in the middle of a removal, reset // the flag. 
@@ -261,10 +286,13 @@ func (daemon *Daemon) restore() error { // be removed. So we put the container in the "dead" // state and leave further processing up to them. logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID) - c.ResetRemovalInProgress() - c.SetDead() - c.ToDisk() + c.RemovalInProgress = false + c.Dead = true + if err := c.CheckpointTo(daemon.containersReplica); err != nil { + logrus.Errorf("Failed to update container %s state: %v", c.ID, err) + } } + c.Unlock() }(c) } wg.Wait() @@ -507,31 +535,30 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe return nil, err } - uidMaps, gidMaps, err := setupRemappedRoot(config) + idMappings, err := setupRemappedRoot(config) if err != nil { return nil, err } - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return nil, err - } - + rootIDs := idMappings.RootPair() if err := setupDaemonProcess(config); err != nil { return nil, err } // set up the tmpDir to use a canonical path - tmp, err := prepareTempDir(config.Root, rootUID, rootGID) + tmp, err := prepareTempDir(config.Root, rootIDs) if err != nil { return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err) } - realTmp, err := fileutils.ReadSymlinkedDirectory(tmp) + realTmp, err := getRealPath(tmp) if err != nil { return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err) } os.Setenv("TMPDIR", realTmp) - d := &Daemon{configStore: config} + d := &Daemon{ + configStore: config, + startupDone: make(chan struct{}), + } // Ensure the daemon is properly shutdown if there is a failure during // initialization defer func() { @@ -570,25 +597,46 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe } daemonRepo := filepath.Join(config.Root, "containers") - if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + if err := idtools.MkdirAllAndChown(daemonRepo, 0700, rootIDs); 
err != nil && !os.IsExist(err) { return nil, err } if runtime.GOOS == "windows" { - if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0); err != nil && !os.IsExist(err) { + if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0, ""); err != nil && !os.IsExist(err) { return nil, err } } - driverName := os.Getenv("DOCKER_DRIVER") - if driverName == "" { - driverName = config.GraphDriver + // On Windows we don't support the environment variable, or a user supplied graphdriver + // as Windows has no choice in terms of which graphdrivers to use. It's a case of + // running Windows containers on Windows - windowsfilter, running Linux containers on Windows, + // lcow. Unix platforms however run a single graphdriver for all containers, and it can + // be set through an environment variable, a daemon start parameter, or chosen through + // initialization of the layerstore through driver priority order for example. + d.stores = make(map[string]daemonStore) + if runtime.GOOS == "windows" { + d.stores["windows"] = daemonStore{graphDriver: "windowsfilter"} + if system.LCOWSupported() { + d.stores["linux"] = daemonStore{graphDriver: "lcow"} + } + } else { + driverName := os.Getenv("DOCKER_DRIVER") + if driverName == "" { + driverName = config.GraphDriver + } + d.stores[runtime.GOOS] = daemonStore{graphDriver: driverName} // May still be empty. Layerstore init determines instead. } d.RegistryService = registryService d.PluginStore = pluginStore logger.RegisterPluginGetter(d.PluginStore) + metricsSockPath, err := d.listenMetricsSock() + if err != nil { + return nil, err + } + registerMetricsPluginCallback(d.PluginStore, metricsSockPath) + // Plugin system initialization should happen before restore. Do not change order. 
d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{ Root: filepath.Join(config.Root, "plugins"), @@ -604,45 +652,60 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe return nil, errors.Wrap(err, "couldn't create plugin manager") } - d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{ - StorePath: config.Root, - MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"), - GraphDriver: driverName, - GraphDriverOptions: config.GraphOptions, - UIDMaps: uidMaps, - GIDMaps: gidMaps, - PluginGetter: d.PluginStore, - ExperimentalEnabled: config.Experimental, - }) - if err != nil { - return nil, err + var graphDrivers []string + for platform, ds := range d.stores { + ls, err := layer.NewStoreFromOptions(layer.StoreOptions{ + StorePath: config.Root, + MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"), + GraphDriver: ds.graphDriver, + GraphDriverOptions: config.GraphOptions, + IDMappings: idMappings, + PluginGetter: d.PluginStore, + ExperimentalEnabled: config.Experimental, + Platform: platform, + }) + if err != nil { + return nil, err + } + ds.graphDriver = ls.DriverName() // As layerstore may set the driver + ds.layerStore = ls + d.stores[platform] = ds + graphDrivers = append(graphDrivers, ls.DriverName()) } - graphDriver := d.layerStore.DriverName() - imageRoot := filepath.Join(config.Root, "image", graphDriver) - // Configure and validate the kernels security support - if err := configureKernelSecuritySupport(config, graphDriver); err != nil { + if err := configureKernelSecuritySupport(config, graphDrivers); err != nil { return nil, err } logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads) - d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads) + lsMap := make(map[string]layer.Store) + for platform, ds := range d.stores { + lsMap[platform] = ds.layerStore + } + d.downloadManager = 
xfer.NewLayerDownloadManager(lsMap, *config.MaxConcurrentDownloads) logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads) d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads) - ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) - if err != nil { - return nil, err - } + for platform, ds := range d.stores { + imageRoot := filepath.Join(config.Root, "image", ds.graphDriver) + ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) + if err != nil { + return nil, err + } - d.imageStore, err = image.NewImageStore(ifs, d.layerStore) - if err != nil { - return nil, err + var is image.Store + is, err = image.NewImageStore(ifs, platform, ds.layerStore) + if err != nil { + return nil, err + } + ds.imageRoot = imageRoot + ds.imageStore = is + d.stores[platform] = ds } // Configure the volumes driver - volStore, err := d.configureVolumes(rootUID, rootGID) + volStore, err := d.configureVolumes(rootIDs) if err != nil { return nil, err } @@ -654,27 +717,35 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe trustDir := filepath.Join(config.Root, "trust") - if err := system.MkdirAll(trustDir, 0700); err != nil { - return nil, err - } - - distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution")) - if err != nil { + if err := system.MkdirAll(trustDir, 0700, ""); err != nil { return nil, err } eventsService := events.New() - referenceStore, err := refstore.NewReferenceStore(filepath.Join(imageRoot, "repositories.json")) - if err != nil { - return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err) - } + for platform, ds := range d.stores { + dms, err := dmetadata.NewFSMetadataStore(filepath.Join(ds.imageRoot, "distribution"), platform) + if err != nil { + return nil, err + } - migrationStart := time.Now() - if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, 
distributionMetadataStore); err != nil { - logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err) + rs, err := refstore.NewReferenceStore(filepath.Join(ds.imageRoot, "repositories.json"), platform) + if err != nil { + return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err) + } + ds.distributionMetadataStore = dms + ds.referenceStore = rs + d.stores[platform] = ds + + // No content-addressability migration on Windows as it never supported pre-CA + if runtime.GOOS != "windows" { + migrationStart := time.Now() + if err := v1.Migrate(config.Root, ds.graphDriver, ds.layerStore, ds.imageStore, rs, dms); err != nil { + logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err) + } + logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds()) + } } - logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds()) // Discovery is only enabled when the daemon is launched with an address to advertise. 
When // initialized, the daemon is registered and we can store the discovery backend as it's read-only @@ -692,9 +763,10 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe d.ID = trustKey.PublicKey().KeyID() d.repository = daemonRepo d.containers = container.NewMemoryStore() + if d.containersReplica, err = container.NewViewDB(); err != nil { + return nil, err + } d.execCommands = exec.NewStore() - d.referenceStore = referenceStore - d.distributionMetadataStore = distributionMetadataStore d.trustKey = trustKey d.idIndex = truncindex.NewTruncIndex([]string{}) d.statsCollector = d.newStatsCollector(1 * time.Second) @@ -705,8 +777,7 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe d.EventsService = eventsService d.volumes = volStore d.root = config.Root - d.uidMaps = uidMaps - d.gidMaps = gidMaps + d.idMappings = idMappings d.seccompEnabled = sysInfo.Seccomp d.apparmorEnabled = sysInfo.AppArmor @@ -724,24 +795,47 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe if err := d.restore(); err != nil { return nil, err } + close(d.startupDone) // FIXME: this method never returns an error info, _ := d.SystemInfo() - engineVersion.WithValues( + engineInfo.WithValues( dockerversion.Version, dockerversion.GitCommit, info.Architecture, info.Driver, info.KernelVersion, info.OperatingSystem, + info.OSType, + info.ID, ).Set(1) engineCpus.Set(float64(info.NCPU)) engineMemory.Set(float64(info.MemTotal)) + gd := "" + for platform, ds := range d.stores { + if len(gd) > 0 { + gd += ", " + } + gd += ds.graphDriver + if len(d.stores) > 1 { + gd = fmt.Sprintf("%s (%s)", gd, platform) + } + } + logrus.WithFields(logrus.Fields{ + "version": dockerversion.Version, + "commit": dockerversion.GitCommit, + "graphdriver(s)": gd, + }).Info("Docker daemon") + return d, nil } +func (daemon *Daemon) waitForStartupDone() { + <-daemon.startupDone +} + func (daemon *Daemon) shutdownContainer(c 
*container.Container) error { stopTimeout := c.StopTimeout() // TODO(windows): Handle docker restart with paused containers @@ -760,7 +854,12 @@ func (daemon *Daemon) shutdownContainer(c *container.Container) error { if err := daemon.containerUnpause(c); err != nil { return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err) } - if _, err := c.WaitStop(time.Duration(stopTimeout) * time.Second); err != nil { + + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(stopTimeout)*time.Second) + defer cancel() + + // Wait with timeout for container to exit. + if status := <-c.Wait(ctx, container.WaitConditionNotRunning); status.Err() != nil { logrus.Debugf("container %s failed to exit in %d second of SIGTERM, sending SIGKILL to force", c.ID, stopTimeout) sig, ok := signal.SignalMap["KILL"] if !ok { @@ -769,8 +868,10 @@ func (daemon *Daemon) shutdownContainer(c *container.Container) error { if err := daemon.kill(c, int(sig)); err != nil { logrus.Errorf("Failed to SIGKILL container %s", c.ID) } - c.WaitStop(-1 * time.Second) - return err + // Wait for exit again without a timeout. + // Explicitly ignore the result. + _ = <-c.Wait(context.Background(), container.WaitConditionNotRunning) + return status.Err() } } // If container failed to exit in stopTimeout seconds of SIGTERM, then using the force @@ -778,7 +879,9 @@ func (daemon *Daemon) shutdownContainer(c *container.Container) error { return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err) } - c.WaitStop(-1 * time.Second) + // Wait without timeout for the container to exit. + // Ignore the result. 
+ _ = <-c.Wait(context.Background(), container.WaitConditionNotRunning) return nil } @@ -815,6 +918,8 @@ func (daemon *Daemon) Shutdown() error { if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil { // check if there are any running containers, if none we should do some cleanup if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil { + // metrics plugins still need some cleanup + daemon.cleanupMetricsPlugins() return nil } } @@ -830,7 +935,7 @@ func (daemon *Daemon) Shutdown() error { logrus.Errorf("Stop container error: %v", err) return } - if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil { + if mountid, err := daemon.stores[c.Platform].layerStore.GetMountID(c.ID); err == nil { daemon.cleanupMountsByID(mountid) } logrus.Debugf("container stopped %s", c.ID) @@ -843,9 +948,11 @@ func (daemon *Daemon) Shutdown() error { } } - if daemon.layerStore != nil { - if err := daemon.layerStore.Cleanup(); err != nil { - logrus.Errorf("Error during layer Store.Cleanup(): %v", err) + for platform, ds := range daemon.stores { + if ds.layerStore != nil { + if err := ds.layerStore.Cleanup(); err != nil { + logrus.Errorf("Error during layer Store.Cleanup(): %v %s", err, platform) + } } } @@ -855,6 +962,8 @@ func (daemon *Daemon) Shutdown() error { daemon.DaemonLeavesCluster() } + daemon.cleanupMetricsPlugins() + // Shutdown plugins after containers and layerstore. Don't change the order. 
daemon.pluginShutdown() @@ -886,7 +995,7 @@ func (daemon *Daemon) Mount(container *container.Container) error { if container.BaseFS != "" && runtime.GOOS != "windows" { daemon.Unmount(container) return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", - daemon.GraphDriverName(), container.ID, container.BaseFS, dir) + daemon.GraphDriverName(container.Platform), container.ID, container.BaseFS, dir) } } container.BaseFS = dir // TODO: combine these fields @@ -928,34 +1037,19 @@ func (daemon *Daemon) Subnets() ([]net.IPNet, []net.IPNet) { } // GraphDriverName returns the name of the graph driver used by the layer.Store -func (daemon *Daemon) GraphDriverName() string { - return daemon.layerStore.DriverName() -} - -// GetUIDGIDMaps returns the current daemon's user namespace settings -// for the full uid and gid maps which will be applied to containers -// started in this instance. -func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) { - return daemon.uidMaps, daemon.gidMaps -} - -// GetRemappedUIDGID returns the current daemon's uid and gid values -// if user namespaces are in use for this daemon instance. If not -// this function will return "real" root values of 0, 0. -func (daemon *Daemon) GetRemappedUIDGID() (int, int) { - uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) - return uid, gid +func (daemon *Daemon) GraphDriverName(platform string) string { + return daemon.stores[platform].layerStore.DriverName() } // prepareTempDir prepares and returns the default directory to use // for temporary files. // If it doesn't exist, it is created. If it exists, its content is removed. 
-func prepareTempDir(rootDir string, rootUID, rootGID int) (string, error) { +func prepareTempDir(rootDir string, rootIDs idtools.IDPair) (string, error) { var tmpDir string if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { tmpDir = filepath.Join(rootDir, "tmp") newName := tmpDir + "-old" - if err := os.Rename(tmpDir, newName); err != nil { + if err := os.Rename(tmpDir, newName); err == nil { go func() { if err := os.RemoveAll(newName); err != nil { logrus.Warnf("failed to delete old tmp directory: %s", newName) @@ -970,12 +1064,12 @@ func prepareTempDir(rootDir string, rootUID, rootGID int) (string, error) { } // We don't remove the content of tmpdir if it's not the default, // it may hold things that do not belong to us. - return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID) + return tmpDir, idtools.MkdirAllAndChown(tmpDir, 0700, rootIDs) } func (daemon *Daemon) setupInitLayer(initPath string) error { - rootUID, rootGID := daemon.GetRemappedUIDGID() - return initlayer.Setup(initPath, rootUID, rootGID) + rootIDs := daemon.idMappings.RootPair() + return initlayer.Setup(initPath, rootIDs) } func setDefaultMtu(conf *config.Config) { @@ -986,8 +1080,8 @@ func setDefaultMtu(conf *config.Config) { conf.Mtu = config.DefaultNetworkMtu } -func (daemon *Daemon) configureVolumes(rootUID, rootGID int) (*store.VolumeStore, error) { - volumesDriver, err := local.New(daemon.configStore.Root, rootUID, rootGID) +func (daemon *Daemon) configureVolumes(rootIDs idtools.IDPair) (*store.VolumeStore, error) { + volumesDriver, err := local.New(daemon.configStore.Root, rootIDs) if err != nil { return nil, err } @@ -1127,24 +1221,33 @@ func CreateDaemonRoot(config *config.Config) error { if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) { realRoot = config.Root } else { - realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root) + realRoot, err = getRealPath(config.Root) if err != nil { return fmt.Errorf("Unable to get the full path to root (%s): 
%s", config.Root, err) } } - uidMaps, gidMaps, err := setupRemappedRoot(config) - if err != nil { - return err - } - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + idMappings, err := setupRemappedRoot(config) if err != nil { return err } + return setupDaemonRoot(config, realRoot, idMappings.RootPair()) +} - if err := setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil { - return err +// checkpointAndSave grabs a container lock to safely call container.CheckpointTo +func (daemon *Daemon) checkpointAndSave(container *container.Container) error { + container.Lock() + defer container.Unlock() + if err := container.CheckpointTo(daemon.containersReplica); err != nil { + return fmt.Errorf("Error saving container state: %v", err) } - return nil } + +// because the CLI sends a -1 when it wants to unset the swappiness value +// we need to clear it on the server side +func fixMemorySwappiness(resources *containertypes.Resources) { + if resources.MemorySwappiness != nil && *resources.MemorySwappiness == -1 { + resources.MemorySwappiness = nil + } +} diff --git a/fn/vendor/github.com/docker/docker/daemon/daemon_linux.go b/fn/vendor/github.com/docker/docker/daemon/daemon_linux.go index 5faf533fd..000a04869 100644 --- a/fn/vendor/github.com/docker/docker/daemon/daemon_linux.go +++ b/fn/vendor/github.com/docker/docker/daemon/daemon_linux.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/mount" ) @@ -86,3 +87,7 @@ func getCleanPatterns(id string) (regexps []*regexp.Regexp) { } return } + +func getRealPath(path string) (string, error) { + return fileutils.ReadSymlinkedDirectory(path) +} diff --git a/fn/vendor/github.com/docker/docker/daemon/daemon_solaris.go b/fn/vendor/github.com/docker/docker/daemon/daemon_solaris.go index 4a6fac928..f464ee34b 100644 --- a/fn/vendor/github.com/docker/docker/daemon/daemon_solaris.go +++ 
b/fn/vendor/github.com/docker/docker/daemon/daemon_solaris.go @@ -13,6 +13,7 @@ import ( "github.com/docker/docker/container" "github.com/docker/docker/image" "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/sysinfo" @@ -23,8 +24,8 @@ import ( "github.com/docker/libnetwork/netlabel" "github.com/docker/libnetwork/netutils" lntypes "github.com/docker/libnetwork/types" - "github.com/opencontainers/runc/libcontainer/label" "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" ) @@ -142,6 +143,7 @@ func UsingSystemd(config *Config) bool { // verifyPlatformContainerSettings performs platform-specific validation of the // hostconfig and config structures. func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + fixMemorySwappiness(resources) warnings := []string{} sysInfo := sysinfo.New(true) // NOTE: We do not enforce a minimum value for swap limits for zones on Solaris and @@ -162,7 +164,7 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes. } // Solaris NOTE: We allow and encourage setting the swap without setting the memory limit. 
- if hostConfig.MemorySwappiness != nil && *hostConfig.MemorySwappiness != -1 && !sysInfo.MemorySwappiness { + if hostConfig.MemorySwappiness != nil && !sysInfo.MemorySwappiness { warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") logrus.Warnf("Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") hostConfig.MemorySwappiness = nil @@ -352,7 +354,7 @@ func configureMaxThreads(config *Config) error { } // configureKernelSecuritySupport configures and validate security support for the kernel -func configureKernelSecuritySupport(config *Config, driverName string) error { +func configureKernelSecuritySupport(config *config.Config, driverNames []string) error { return nil } @@ -525,3 +527,7 @@ func setupDaemonProcess(config *Config) error { func (daemon *Daemon) setupSeccompProfile() error { return nil } + +func getRealPath(path string) (string, error) { + return fileutils.ReadSymlinkedDirectory(path) +} diff --git a/fn/vendor/github.com/docker/docker/daemon/daemon_test.go b/fn/vendor/github.com/docker/docker/daemon/daemon_test.go index eaa3be44d..6f07d0d1e 100644 --- a/fn/vendor/github.com/docker/docker/daemon/daemon_test.go +++ b/fn/vendor/github.com/docker/docker/daemon/daemon_test.go @@ -11,6 +11,7 @@ import ( containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" _ "github.com/docker/docker/pkg/discovery/memory" + "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/registrar" "github.com/docker/docker/pkg/truncindex" "github.com/docker/docker/volume" @@ -26,38 +27,28 @@ import ( func TestGetContainer(t *testing.T) { c1 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", - Name: "tender_bardeen", - }, + ID: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", + Name: "tender_bardeen", } c2 := 
&container.Container{ - CommonContainer: container.CommonContainer{ - ID: "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de", - Name: "drunk_hawking", - }, + ID: "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de", + Name: "drunk_hawking", } c3 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57", - Name: "3cdbd1aa", - }, + ID: "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57", + Name: "3cdbd1aa", } c4 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5", - Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", - }, + ID: "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5", + Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", } c5 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b", - Name: "d22d69a2b896", - }, + ID: "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b", + Name: "d22d69a2b896", } store := container.NewMemoryStore() @@ -127,7 +118,7 @@ func initDaemonWithVolumeStore(tmp string) (*Daemon, error) { return nil, err } - volumesDriver, err := local.New(tmp, 0, 0) + volumesDriver, err := local.New(tmp, idtools.IDPair{UID: 0, GID: 0}) if err != nil { return nil, err } @@ -183,7 +174,7 @@ func TestContainerInitDNS(t *testing.T) { "UpdateDns":false,"Volumes":{},"VolumesRW":{},"AppliedVolumesFrom":null}` // Container struct only used to retrieve path to config file - container := &container.Container{CommonContainer: container.CommonContainer{Root: containerPath}} + container := &container.Container{Root: containerPath} configPath, err := container.ConfigPath() if err != nil { t.Fatal(err) diff --git a/fn/vendor/github.com/docker/docker/daemon/daemon_unix.go 
b/fn/vendor/github.com/docker/docker/daemon/daemon_unix.go index 0f342bcfd..a7ba9c37b 100644 --- a/fn/vendor/github.com/docker/docker/daemon/daemon_unix.go +++ b/fn/vendor/github.com/docker/docker/daemon/daemon_unix.go @@ -41,9 +41,9 @@ import ( lntypes "github.com/docker/libnetwork/types" "github.com/golang/protobuf/ptypes" "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/label" rsystem "github.com/opencontainers/runc/libcontainer/system" specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/vishvananda/netlink" ) @@ -64,8 +64,8 @@ const ( cgroupSystemdDriver = "systemd" ) -func getMemoryResources(config containertypes.Resources) *specs.Memory { - memory := specs.Memory{} +func getMemoryResources(config containertypes.Resources) *specs.LinuxMemory { + memory := specs.LinuxMemory{} if config.Memory > 0 { limit := uint64(config.Memory) @@ -77,7 +77,7 @@ func getMemoryResources(config containertypes.Resources) *specs.Memory { memory.Reservation = &reservation } - if config.MemorySwap != 0 { + if config.MemorySwap > 0 { swap := uint64(config.MemorySwap) memory.Swap = &swap } @@ -95,28 +95,29 @@ func getMemoryResources(config containertypes.Resources) *specs.Memory { return &memory } -func getCPUResources(config containertypes.Resources) *specs.CPU { - cpu := specs.CPU{} +func getCPUResources(config containertypes.Resources) (*specs.LinuxCPU, error) { + cpu := specs.LinuxCPU{} - if config.CPUShares != 0 { + if config.CPUShares < 0 { + return nil, fmt.Errorf("shares: invalid argument") + } + if config.CPUShares >= 0 { shares := uint64(config.CPUShares) cpu.Shares = &shares } if config.CpusetCpus != "" { - cpuset := config.CpusetCpus - cpu.Cpus = &cpuset + cpu.Cpus = config.CpusetCpus } if config.CpusetMems != "" { - cpuset := config.CpusetMems - cpu.Mems = &cpuset + cpu.Mems = config.CpusetMems } if config.NanoCPUs > 0 { // 
https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt period := uint64(100 * time.Millisecond / time.Microsecond) - quota := uint64(config.NanoCPUs) * period / 1e9 + quota := config.NanoCPUs * int64(period) / 1e9 cpu.Period = &period cpu.Quota = "a } @@ -127,8 +128,8 @@ func getCPUResources(config containertypes.Resources) *specs.CPU { } if config.CPUQuota != 0 { - quota := uint64(config.CPUQuota) - cpu.Quota = "a + q := config.CPUQuota + cpu.Quota = &q } if config.CPURealtimePeriod != 0 { @@ -137,23 +138,23 @@ func getCPUResources(config containertypes.Resources) *specs.CPU { } if config.CPURealtimeRuntime != 0 { - runtime := uint64(config.CPURealtimeRuntime) - cpu.RealtimeRuntime = &runtime + c := config.CPURealtimeRuntime + cpu.RealtimeRuntime = &c } - return &cpu + return &cpu, nil } -func getBlkioWeightDevices(config containertypes.Resources) ([]specs.WeightDevice, error) { +func getBlkioWeightDevices(config containertypes.Resources) ([]specs.LinuxWeightDevice, error) { var stat syscall.Stat_t - var blkioWeightDevices []specs.WeightDevice + var blkioWeightDevices []specs.LinuxWeightDevice for _, weightDevice := range config.BlkioWeightDevice { if err := syscall.Stat(weightDevice.Path, &stat); err != nil { return nil, err } weight := weightDevice.Weight - d := specs.WeightDevice{Weight: &weight} + d := specs.LinuxWeightDevice{Weight: &weight} d.Major = int64(stat.Rdev / 256) d.Minor = int64(stat.Rdev % 256) blkioWeightDevices = append(blkioWeightDevices, d) @@ -178,6 +179,10 @@ func parseSecurityOpt(container *container.Container, config *containertypes.Hos container.NoNewPrivileges = true continue } + if opt == "disable" { + labelOpts = append(labelOpts, "disable") + continue + } var con []string if strings.Contains(opt, "=") { @@ -186,7 +191,6 @@ func parseSecurityOpt(container *container.Container, config *containertypes.Hos con = strings.SplitN(opt, ":", 2) logrus.Warn("Security options with `:` as a separator are deprecated and will be 
completely unsupported in 17.04, use `=` instead.") } - if len(con) != 2 { return fmt.Errorf("invalid --security-opt 1: %q", opt) } @@ -213,16 +217,15 @@ func parseSecurityOpt(container *container.Container, config *containertypes.Hos return err } -func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.ThrottleDevice, error) { - var throttleDevices []specs.ThrottleDevice +func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.LinuxThrottleDevice, error) { + var throttleDevices []specs.LinuxThrottleDevice var stat syscall.Stat_t for _, d := range devs { if err := syscall.Stat(d.Path, &stat); err != nil { return nil, err } - rate := d.Rate - d := specs.ThrottleDevice{Rate: &rate} + d := specs.LinuxThrottleDevice{Rate: d.Rate} d.Major = int64(stat.Rdev / 256) d.Minor = int64(stat.Rdev % 256) throttleDevices = append(throttleDevices, d) @@ -279,10 +282,6 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConf return err } hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, opts...) 
- if hostConfig.MemorySwappiness == nil { - defaultSwappiness := int64(-1) - hostConfig.MemorySwappiness = &defaultSwappiness - } if hostConfig.OomKillDisable == nil { defaultOomKillDisable := false hostConfig.OomKillDisable = &defaultOomKillDisable @@ -293,6 +292,7 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConf func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) ([]string, error) { warnings := []string{} + fixMemorySwappiness(resources) // memory subsystem checks and adjustments if resources.Memory != 0 && resources.Memory < linuxMinMemory { @@ -315,14 +315,14 @@ func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysi if resources.Memory == 0 && resources.MemorySwap > 0 && !update { return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage") } - if resources.MemorySwappiness != nil && *resources.MemorySwappiness != -1 && !sysInfo.MemorySwappiness { + if resources.MemorySwappiness != nil && !sysInfo.MemorySwappiness { warnings = append(warnings, "Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.") logrus.Warn("Your kernel does not support memory swappiness capabilities, or the cgroup is not mounted. 
Memory swappiness discarded.") resources.MemorySwappiness = nil } if resources.MemorySwappiness != nil { swappiness := *resources.MemorySwappiness - if swappiness < -1 || swappiness > 100 { + if swappiness < 0 || swappiness > 100 { return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness) } } @@ -699,14 +699,22 @@ func overlaySupportsSelinux() (bool, error) { } // configureKernelSecuritySupport configures and validates security support for the kernel -func configureKernelSecuritySupport(config *config.Config, driverName string) error { +func configureKernelSecuritySupport(config *config.Config, driverNames []string) error { if config.EnableSelinuxSupport { if !selinuxEnabled() { logrus.Warn("Docker could not enable SELinux on the host system") return nil } - if driverName == "overlay" || driverName == "overlay2" { + overlayFound := false + for _, d := range driverNames { + if d == "overlay" || d == "overlay2" { + overlayFound = true + break + } + } + + if overlayFound { // If driver is overlay or overlay2, make sure kernel // supports selinux with overlay. 
supported, err := overlaySupportsSelinux() @@ -715,7 +723,7 @@ func configureKernelSecuritySupport(config *config.Config, driverName string) er } if !supported { - logrus.Warnf("SELinux is not supported with the %s graph driver on this kernel", driverName) + logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverNames) } } } else { @@ -1023,40 +1031,38 @@ func parseRemappedRoot(usergrp string) (string, string, error) { return username, groupname, nil } -func setupRemappedRoot(config *config.Config) ([]idtools.IDMap, []idtools.IDMap, error) { +func setupRemappedRoot(config *config.Config) (*idtools.IDMappings, error) { if runtime.GOOS != "linux" && config.RemappedRoot != "" { - return nil, nil, fmt.Errorf("User namespaces are only supported on Linux") + return nil, fmt.Errorf("User namespaces are only supported on Linux") } // if the daemon was started with remapped root option, parse // the config option to the int uid,gid values - var ( - uidMaps, gidMaps []idtools.IDMap - ) if config.RemappedRoot != "" { username, groupname, err := parseRemappedRoot(config.RemappedRoot) if err != nil { - return nil, nil, err + return nil, err } if username == "root" { // Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op // effectively logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF") - return uidMaps, gidMaps, nil + return &idtools.IDMappings{}, nil } logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s:%s", username, groupname) // update remapped root setting now that we have resolved them to actual names config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname) - uidMaps, gidMaps, err = idtools.CreateIDMappings(username, groupname) + mappings, err := idtools.NewIDMappings(username, groupname) if err != nil { - return nil, nil, fmt.Errorf("Can't create ID mappings: %v", err) + return nil, errors.Wrapf(err, "Can't create ID mappings: %v") } 
+ return mappings, nil } - return uidMaps, gidMaps, nil + return &idtools.IDMappings{}, nil } -func setupDaemonRoot(config *config.Config, rootDir string, rootUID, rootGID int) error { +func setupDaemonRoot(config *config.Config, rootDir string, rootIDs idtools.IDPair) error { config.Root = rootDir // the docker root metadata directory needs to have execute permissions for all users (g+x,o+x) // so that syscalls executing as non-root, operating on subdirectories of the graph root @@ -1081,10 +1087,10 @@ func setupDaemonRoot(config *config.Config, rootDir string, rootUID, rootGID int // a new subdirectory with ownership set to the remapped uid/gid (so as to allow // `chdir()` to work for containers namespaced to that uid/gid) if config.RemappedRoot != "" { - config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootUID, rootGID)) + config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootIDs.UID, rootIDs.GID)) logrus.Debugf("Creating user namespaced daemon root: %s", config.Root) // Create the root directory if it doesn't exist - if err := idtools.MkdirAllAs(config.Root, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChown(config.Root, 0700, rootIDs); err != nil { return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err) } // we also need to verify that any pre-existing directories in the path to @@ -1097,7 +1103,7 @@ func setupDaemonRoot(config *config.Config, rootDir string, rootUID, rootGID int if dirPath == "/" { break } - if !idtools.CanAccess(dirPath, rootUID, rootGID) { + if !idtools.CanAccess(dirPath, rootIDs) { return fmt.Errorf("A subdirectory in your graphroot path (%s) restricts access to the remapped root uid/gid; please fix by allowing 'o+x' permissions on existing directories.", config.Root) } } @@ -1137,7 +1143,8 @@ func (daemon *Daemon) registerLinks(container *container.Container, hostConfig * // After we load all the links into the daemon // set them to nil on the hostconfig - return 
container.WriteHostConfig() + _, err := container.WriteHostConfig() + return err } // conditionalMountOnStart is a platform specific helper function during the diff --git a/fn/vendor/github.com/docker/docker/daemon/daemon_unix_test.go b/fn/vendor/github.com/docker/docker/daemon/daemon_unix_test.go index e8afe629e..c3aa443e4 100644 --- a/fn/vendor/github.com/docker/docker/daemon/daemon_unix_test.go +++ b/fn/vendor/github.com/docker/docker/daemon/daemon_unix_test.go @@ -11,6 +11,7 @@ import ( containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" "github.com/docker/docker/daemon/config" + "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/volume" "github.com/docker/docker/volume/drivers" "github.com/docker/docker/volume/local" @@ -277,13 +278,17 @@ func TestMigratePre17Volumes(t *testing.T) { if err != nil { t.Fatal(err) } - drv, err := local.New(volumeRoot, 0, 0) + drv, err := local.New(volumeRoot, idtools.IDPair{UID: 0, GID: 0}) if err != nil { t.Fatal(err) } volumedrivers.Register(drv, volume.DefaultDriverName) - daemon := &Daemon{root: rootDir, repository: containerRoot, volumes: volStore} + daemon := &Daemon{ + root: rootDir, + repository: containerRoot, + volumes: volStore, + } err = ioutil.WriteFile(filepath.Join(containerRoot, cid, "config.v2.json"), config, 600) if err != nil { t.Fatal(err) diff --git a/fn/vendor/github.com/docker/docker/daemon/daemon_windows.go b/fn/vendor/github.com/docker/docker/daemon/daemon_windows.go index d58b51db6..bb9c85b80 100644 --- a/fn/vendor/github.com/docker/docker/daemon/daemon_windows.go +++ b/fn/vendor/github.com/docker/docker/daemon/daemon_windows.go @@ -14,6 +14,7 @@ import ( "github.com/docker/docker/container" "github.com/docker/docker/daemon/config" "github.com/docker/docker/image" + "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/platform" @@ -99,7 +100,7 @@ 
func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConf func verifyContainerResources(resources *containertypes.Resources, isHyperv bool) ([]string, error) { warnings := []string{} - + fixMemorySwappiness(resources) if !isHyperv { // The processor resource controls are mutually exclusive on // Windows Server Containers, the order of precedence is @@ -146,6 +147,17 @@ func verifyContainerResources(resources *containertypes.Resources, isHyperv bool return warnings, fmt.Errorf("range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) } + osv := system.GetOSVersion() + if resources.NanoCPUs > 0 && isHyperv && osv.Build < 16175 { + leftoverNanoCPUs := resources.NanoCPUs % 1e9 + if leftoverNanoCPUs != 0 && resources.NanoCPUs > 1e9 { + resources.NanoCPUs = ((resources.NanoCPUs + 1e9/2) / 1e9) * 1e9 + warningString := fmt.Sprintf("Your current OS version does not support Hyper-V containers with NanoCPUs greater than 1000000000 but not divisible by 1000000000. NanoCPUs rounded to %d", resources.NanoCPUs) + warnings = append(warnings, warningString) + logrus.Warn(warningString) + } + } + if len(resources.BlkioDeviceReadBps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadBps") } @@ -185,7 +197,7 @@ func verifyContainerResources(resources *containertypes.Resources, isHyperv bool if resources.MemorySwap != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwap") } - if resources.MemorySwappiness != nil && *resources.MemorySwappiness != -1 { + if resources.MemorySwappiness != nil { return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwappiness") } if resources.OomKillDisable != nil && *resources.OomKillDisable { @@ -206,7 +218,7 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes. 
warnings := []string{} hyperv := daemon.runAsHyperVContainer(hostConfig) - if !hyperv && system.IsWindowsClient() { + if !hyperv && system.IsWindowsClient() && !system.IsIoTCore() { // @engine maintainers. This block should not be removed. It partially enforces licensing // restrictions on Windows. Ping @jhowardmsft if there are concerns or PRs to change this. return warnings, fmt.Errorf("Windows client operating systems only support Hyper-V containers") @@ -248,7 +260,7 @@ func checkSystem() error { } // configureKernelSecuritySupport configures and validate security support for the kernel -func configureKernelSecuritySupport(config *config.Config, driverName string) error { +func configureKernelSecuritySupport(config *config.Config, driverNames []string) error { return nil } @@ -316,6 +328,9 @@ func (daemon *Daemon) initNetworkController(config *config.Config, activeSandbox // discover and add HNS networks to windows // network that exist are removed and added again for _, v := range hnsresponse { + if strings.ToLower(v.Type) == "private" { + continue // workaround for HNS reporting unsupported networks + } var n libnetwork.Network s := func(current libnetwork.Network) bool { options := current.Info().DriverOptions() @@ -443,14 +458,14 @@ func (daemon *Daemon) cleanupMounts() error { return nil } -func setupRemappedRoot(config *config.Config) ([]idtools.IDMap, []idtools.IDMap, error) { - return nil, nil, nil +func setupRemappedRoot(config *config.Config) (*idtools.IDMappings, error) { + return &idtools.IDMappings{}, nil } -func setupDaemonRoot(config *config.Config, rootDir string, rootUID, rootGID int) error { +func setupDaemonRoot(config *config.Config, rootDir string, rootIDs idtools.IDPair) error { config.Root = rootDir // Create the root directory if it doesn't exists - if err := system.MkdirAllWithACL(config.Root, 0); err != nil && !os.IsExist(err) { + if err := system.MkdirAllWithACL(config.Root, 0, system.SddlAdministratorsLocalSystem); err != nil && 
!os.IsExist(err) { return err } return nil @@ -471,6 +486,11 @@ func (daemon *Daemon) runAsHyperVContainer(hostConfig *containertypes.HostConfig // conditionalMountOnStart is a platform specific helper function during the // container start to call mount. func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { + // Bail out now for Linux containers + if system.LCOWSupported() && container.Platform != "windows" { + return nil + } + // We do not mount if a Hyper-V container if !daemon.runAsHyperVContainer(container.HostConfig) { return daemon.Mount(container) @@ -481,6 +501,11 @@ func (daemon *Daemon) conditionalMountOnStart(container *container.Container) er // conditionalUnmountOnCleanup is a platform specific helper function called // during the cleanup of a container to unmount. func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { + // Bail out now for Linux containers + if system.LCOWSupported() && container.Platform != "windows" { + return nil + } + // We do not unmount if a Hyper-V container if !daemon.runAsHyperVContainer(container.HostConfig) { return daemon.Unmount(container) @@ -555,8 +580,9 @@ func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { // daemon to run in. This is only applicable on Windows func (daemon *Daemon) setDefaultIsolation() error { daemon.defaultIsolation = containertypes.Isolation("process") - // On client SKUs, default to Hyper-V - if system.IsWindowsClient() { + // On client SKUs, default to Hyper-V. Note that IoT reports as a client SKU + // but it should not be treated as such. 
+ if system.IsWindowsClient() && !system.IsIoTCore() { daemon.defaultIsolation = containertypes.Isolation("hyperv") } for _, option := range daemon.configStore.ExecOptions { @@ -575,7 +601,7 @@ func (daemon *Daemon) setDefaultIsolation() error { daemon.defaultIsolation = containertypes.Isolation("hyperv") } if containertypes.Isolation(val).IsProcess() { - if system.IsWindowsClient() { + if system.IsWindowsClient() && !system.IsIoTCore() { // @engine maintainers. This block should not be removed. It partially enforces licensing // restrictions on Windows. Ping @jhowardmsft if there are concerns or PRs to change this. return fmt.Errorf("Windows client operating systems only support Hyper-V containers") @@ -616,3 +642,13 @@ func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error { func (daemon *Daemon) setupSeccompProfile() error { return nil } + +func getRealPath(path string) (string, error) { + if system.IsIoTCore() { + // Due to https://github.com/golang/go/issues/20506, path expansion + // does not work correctly on the default IoT Core configuration. + // TODO @darrenstahlmsft remove this once golang/go/20506 is fixed + return path, nil + } + return fileutils.ReadSymlinkedDirectory(path) +} diff --git a/fn/vendor/github.com/docker/docker/daemon/debugtrap.go b/fn/vendor/github.com/docker/docker/daemon/debugtrap.go deleted file mode 100644 index 209048b58..000000000 --- a/fn/vendor/github.com/docker/docker/daemon/debugtrap.go +++ /dev/null @@ -1,62 +0,0 @@ -package daemon - -import ( - "fmt" - "os" - "path/filepath" - "strings" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkg/errors" -) - -const dataStructuresLogNameTemplate = "daemon-data-%s.log" - -// dumpDaemon appends the daemon datastructures into file in dir and returns full path -// to that file. 
-func (d *Daemon) dumpDaemon(dir string) (string, error) { - // Ensure we recover from a panic as we are doing this without any locking - defer func() { - recover() - }() - - path := filepath.Join(dir, fmt.Sprintf(dataStructuresLogNameTemplate, strings.Replace(time.Now().Format(time.RFC3339), ":", "", -1))) - f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0666) - if err != nil { - return "", errors.Wrap(err, "failed to open file to write the daemon datastructure dump") - } - defer f.Close() - - dump := struct { - containers interface{} - names interface{} - links interface{} - execs interface{} - volumes interface{} - images interface{} - layers interface{} - imageReferences interface{} - downloads interface{} - uploads interface{} - registry interface{} - plugins interface{} - }{ - containers: d.containers, - execs: d.execCommands, - volumes: d.volumes, - images: d.imageStore, - layers: d.layerStore, - imageReferences: d.referenceStore, - downloads: d.downloadManager, - uploads: d.uploadManager, - registry: d.RegistryService, - plugins: d.PluginStore, - names: d.nameIndex, - links: d.linkIndex, - } - - spew.Fdump(f, dump) // Does not return an error - f.Sync() - return path, nil -} diff --git a/fn/vendor/github.com/docker/docker/daemon/debugtrap_unix.go b/fn/vendor/github.com/docker/docker/daemon/debugtrap_unix.go index d650eb7f8..8605d1d2b 100644 --- a/fn/vendor/github.com/docker/docker/daemon/debugtrap_unix.go +++ b/fn/vendor/github.com/docker/docker/daemon/debugtrap_unix.go @@ -22,12 +22,6 @@ func (d *Daemon) setupDumpStackTrap(root string) { } else { logrus.Infof("goroutine stacks written to %s", path) } - path, err = d.dumpDaemon(root) - if err != nil { - logrus.WithError(err).Error("failed to write daemon datastructure dump") - } else { - logrus.Infof("daemon datastructure dump written to %s", path) - } } }() } diff --git a/fn/vendor/github.com/docker/docker/daemon/debugtrap_windows.go b/fn/vendor/github.com/docker/docker/daemon/debugtrap_windows.go 
index fb20c9d2c..d01f7f332 100644 --- a/fn/vendor/github.com/docker/docker/daemon/debugtrap_windows.go +++ b/fn/vendor/github.com/docker/docker/daemon/debugtrap_windows.go @@ -41,12 +41,6 @@ func (d *Daemon) setupDumpStackTrap(root string) { } else { logrus.Infof("goroutine stacks written to %s", path) } - path, err = d.dumpDaemon(root) - if err != nil { - logrus.WithError(err).Error("failed to write daemon datastructure dump") - } else { - logrus.Infof("daemon datastructure dump written to %s", path) - } } }() } diff --git a/fn/vendor/github.com/docker/docker/daemon/delete.go b/fn/vendor/github.com/docker/docker/daemon/delete.go index fd1759ee1..2d3cd0f90 100644 --- a/fn/vendor/github.com/docker/docker/daemon/delete.go +++ b/fn/vendor/github.com/docker/docker/daemon/delete.go @@ -12,6 +12,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/container" "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/system" volumestore "github.com/docker/docker/volume/store" "github.com/pkg/errors" ) @@ -102,45 +103,43 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemo } // Mark container dead. We don't want anybody to be restarting it. - container.SetDead() + container.Lock() + container.Dead = true // Save container state to disk. So that if error happens before // container meta file got removed from disk, then a restart of // docker should not make a dead container alive. - if err := container.ToDiskLocking(); err != nil && !os.IsNotExist(err) { + if err := container.CheckpointTo(daemon.containersReplica); err != nil && !os.IsNotExist(err) { logrus.Errorf("Error saving dying container to disk: %v", err) } - - // If force removal is required, delete container from various - // indexes even if removal failed. 
- defer func() { - if err == nil || forceRemove { - daemon.nameIndex.Delete(container.ID) - daemon.linkIndex.delete(container) - selinuxFreeLxcContexts(container.ProcessLabel) - daemon.idIndex.Delete(container.ID) - daemon.containers.Delete(container.ID) - if e := daemon.removeMountPoints(container, removeVolume); e != nil { - logrus.Error(e) - } - daemon.LogContainerEvent(container, "destroy") - } - }() - - if err = os.RemoveAll(container.Root); err != nil { - return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err) - } + container.Unlock() // When container creation fails and `RWLayer` has not been created yet, we // do not call `ReleaseRWLayer` if container.RWLayer != nil { - metadata, err := daemon.layerStore.ReleaseRWLayer(container.RWLayer) + metadata, err := daemon.stores[container.Platform].layerStore.ReleaseRWLayer(container.RWLayer) layer.LogReleaseMetadata(metadata) if err != nil && err != layer.ErrMountDoesNotExist { - return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.GraphDriverName(), container.ID, err) + return errors.Wrapf(err, "driver %q failed to remove root filesystem for %s", daemon.GraphDriverName(container.Platform), container.ID) } } + if err := system.EnsureRemoveAll(container.Root); err != nil { + return errors.Wrapf(err, "unable to remove filesystem for %s", container.ID) + } + + daemon.nameIndex.Delete(container.ID) + daemon.linkIndex.delete(container) + selinuxFreeLxcContexts(container.ProcessLabel) + daemon.idIndex.Delete(container.ID) + daemon.containers.Delete(container.ID) + daemon.containersReplica.Delete(container) + if e := daemon.removeMountPoints(container, removeVolume); e != nil { + logrus.Error(e) + } + container.SetRemoved() + stateCtr.del(container.ID) + daemon.LogContainerEvent(container, "destroy") return nil } diff --git a/fn/vendor/github.com/docker/docker/daemon/delete_test.go b/fn/vendor/github.com/docker/docker/daemon/delete_test.go index 21b4696a6..f1a979003 100644 
--- a/fn/vendor/github.com/docker/docker/daemon/delete_test.go +++ b/fn/vendor/github.com/docker/docker/daemon/delete_test.go @@ -9,12 +9,13 @@ import ( "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" - "github.com/docker/docker/pkg/testutil/assert" + "github.com/docker/docker/pkg/testutil" + "github.com/stretchr/testify/require" ) func newDaemonWithTmpRoot(t *testing.T) (*Daemon, func()) { tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") - assert.NilError(t, err) + require.NoError(t, err) d := &Daemon{ repository: tmp, root: tmp, @@ -23,78 +24,63 @@ func newDaemonWithTmpRoot(t *testing.T) (*Daemon, func()) { return d, func() { os.RemoveAll(tmp) } } -// TestContainerDeletePaused tests that a useful error message and instructions is given when attempting -// to remove a paused container (#30842) -func TestContainerDeletePaused(t *testing.T) { - c := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "test", - State: &container.State{Paused: true, Running: true}, - Config: &containertypes.Config{}, - }, +func newContainerWithState(state *container.State) *container.Container { + return &container.Container{ + ID: "test", + State: state, + Config: &containertypes.Config{}, } - d, cleanup := newDaemonWithTmpRoot(t) - defer cleanup() - d.containers.Add(c.ID, c) - - err := d.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: false}) - - assert.Error(t, err, "cannot remove a paused container") - assert.Error(t, err, "Unpause and then stop the container before attempting removal or force remove") } -// TestContainerDeleteRestarting tests that a useful error message and instructions is given when attempting -// to remove a container that is restarting (#30842) -func TestContainerDeleteRestarting(t *testing.T) { - c := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "test", - State: container.NewState(), - Config: 
&containertypes.Config{}, - }, +// TestContainerDelete tests that a useful error message and instructions is +// given when attempting to remove a container (#30842) +func TestContainerDelete(t *testing.T) { + tt := []struct { + errMsg string + fixMsg string + initContainer func() *container.Container + }{ + // a paused container + { + errMsg: "cannot remove a paused container", + fixMsg: "Unpause and then stop the container before attempting removal or force remove", + initContainer: func() *container.Container { + return newContainerWithState(&container.State{Paused: true, Running: true}) + }}, + // a restarting container + { + errMsg: "cannot remove a restarting container", + fixMsg: "Stop the container before attempting removal or force remove", + initContainer: func() *container.Container { + c := newContainerWithState(container.NewState()) + c.SetRunning(0, true) + c.SetRestarting(&container.ExitStatus{}) + return c + }}, + // a running container + { + errMsg: "cannot remove a running container", + fixMsg: "Stop the container before attempting removal or force remove", + initContainer: func() *container.Container { + return newContainerWithState(&container.State{Running: true}) + }}, } - c.SetRunning(0, true) - c.SetRestarting(&container.ExitStatus{}) + for _, te := range tt { + c := te.initContainer() + d, cleanup := newDaemonWithTmpRoot(t) + defer cleanup() + d.containers.Add(c.ID, c) - d, cleanup := newDaemonWithTmpRoot(t) - defer cleanup() - d.containers.Add(c.ID, c) - - err := d.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: false}) - assert.Error(t, err, "cannot remove a restarting container") - assert.Error(t, err, "Stop the container before attempting removal or force remove") -} - -// TestContainerDeleteRunning tests that a useful error message and instructions is given when attempting -// to remove a running container (#30842) -func TestContainerDeleteRunning(t *testing.T) { - c := &container.Container{ - CommonContainer: 
container.CommonContainer{ - ID: "test", - State: &container.State{Running: true}, - Config: &containertypes.Config{}, - }, + err := d.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: false}) + testutil.ErrorContains(t, err, te.errMsg) + testutil.ErrorContains(t, err, te.fixMsg) } - - d, cleanup := newDaemonWithTmpRoot(t) - defer cleanup() - d.containers.Add(c.ID, c) - - err := d.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: false}) - assert.Error(t, err, "cannot remove a running container") - assert.Error(t, err, "Stop the container before attempting removal or force remove") } func TestContainerDoubleDelete(t *testing.T) { - c := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "test", - State: container.NewState(), - Config: &containertypes.Config{}, - }, - } + c := newContainerWithState(container.NewState()) // Mark the container as having a delete in progress c.SetRemovalInProgress() @@ -106,5 +92,5 @@ func TestContainerDoubleDelete(t *testing.T) { // Try to remove the container when its state is removalInProgress. // It should return an error indicating it is under removal progress. 
err := d.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: true}) - assert.Error(t, err, fmt.Sprintf("removal of container %s is already in progress", c.ID)) + testutil.ErrorContains(t, err, fmt.Sprintf("removal of container %s is already in progress", c.ID)) } diff --git a/fn/vendor/github.com/docker/docker/daemon/dependency.go b/fn/vendor/github.com/docker/docker/daemon/dependency.go new file mode 100644 index 000000000..83144e686 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/daemon/dependency.go @@ -0,0 +1,17 @@ +package daemon + +import ( + "github.com/docker/swarmkit/agent/exec" +) + +// SetContainerDependencyStore sets the dependency store backend for the container +func (daemon *Daemon) SetContainerDependencyStore(name string, store exec.DependencyGetter) error { + c, err := daemon.GetContainer(name) + if err != nil { + return err + } + + c.DependencyStore = store + + return nil +} diff --git a/fn/vendor/github.com/docker/docker/daemon/discovery/discovery_test.go b/fn/vendor/github.com/docker/docker/daemon/discovery/discovery_test.go index 781442939..f084a649a 100644 --- a/fn/vendor/github.com/docker/docker/daemon/discovery/discovery_test.go +++ b/fn/vendor/github.com/docker/docker/daemon/discovery/discovery_test.go @@ -87,8 +87,8 @@ func TestDiscoveryOpts(t *testing.T) { t.Fatalf("Heartbeat - Expected : %v, Actual : %v", expected, heartbeat) } - discaveryTTL := fmt.Sprintf("%d", defaultDiscoveryTTLFactor-1) - clusterOpts = map[string]string{"discovery.ttl": discaveryTTL} + discoveryTTL := fmt.Sprintf("%d", defaultDiscoveryTTLFactor-1) + clusterOpts = map[string]string{"discovery.ttl": discoveryTTL} heartbeat, ttl, err = discoveryOpts(clusterOpts) if err == nil && heartbeat == 0 { t.Fatal("discovery.heartbeat must be positive") diff --git a/fn/vendor/github.com/docker/docker/daemon/disk_usage.go b/fn/vendor/github.com/docker/docker/daemon/disk_usage.go index fc77a3d8f..c64a24330 100644 --- 
a/fn/vendor/github.com/docker/docker/daemon/disk_usage.go +++ b/fn/vendor/github.com/docker/docker/daemon/disk_usage.go @@ -2,6 +2,9 @@ package daemon import ( "fmt" + "sync/atomic" + + "golang.org/x/net/context" "github.com/Sirupsen/logrus" "github.com/docker/docker/api/types" @@ -12,12 +15,12 @@ import ( "github.com/opencontainers/go-digest" ) -func (daemon *Daemon) getLayerRefs() map[layer.ChainID]int { - tmpImages := daemon.imageStore.Map() +func (daemon *Daemon) getLayerRefs(platform string) map[layer.ChainID]int { + tmpImages := daemon.stores[platform].imageStore.Map() layerRefs := map[layer.ChainID]int{} for id, img := range tmpImages { dgst := digest.Digest(id) - if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 { + if len(daemon.stores[platform].referenceStore.References(dgst)) == 0 && len(daemon.stores[platform].imageStore.Children(id)) != 0 { continue } @@ -34,7 +37,12 @@ func (daemon *Daemon) getLayerRefs() map[layer.ChainID]int { } // SystemDiskUsage returns information about the daemon data disk usage -func (daemon *Daemon) SystemDiskUsage() (*types.DiskUsage, error) { +func (daemon *Daemon) SystemDiskUsage(ctx context.Context) (*types.DiskUsage, error) { + if !atomic.CompareAndSwapInt32(&daemon.diskUsageRunning, 0, 1) { + return nil, fmt.Errorf("a disk usage operation is already running") + } + defer atomic.StoreInt32(&daemon.diskUsageRunning, 0) + // Retrieve container list allContainers, err := daemon.Containers(&types.ContainerListOptions{ Size: true, @@ -45,6 +53,7 @@ func (daemon *Daemon) SystemDiskUsage() (*types.DiskUsage, error) { } // Get all top images with extra attributes + // TODO @jhowardmsft LCOW. 
This may need revisiting allImages, err := daemon.Images(filters.NewArgs(), false, true) if err != nil { return nil, fmt.Errorf("failed to retrieve image list: %v", err) @@ -53,17 +62,29 @@ func (daemon *Daemon) SystemDiskUsage() (*types.DiskUsage, error) { // Get all local volumes allVolumes := []*types.Volume{} getLocalVols := func(v volume.Volume) error { - name := v.Name() - refs := daemon.volumes.Refs(v) + select { + case <-ctx.Done(): + return ctx.Err() + default: + if d, ok := v.(volume.DetailedVolume); ok { + // skip local volumes with mount options since these could have external + // mounted filesystems that will be slow to enumerate. + if len(d.Options()) > 0 { + return nil + } + } + name := v.Name() + refs := daemon.volumes.Refs(v) - tv := volumeToAPIType(v) - sz, err := directory.Size(v.Path()) - if err != nil { - logrus.Warnf("failed to determine size of volume %v", name) - sz = -1 + tv := volumeToAPIType(v) + sz, err := directory.Size(v.Path()) + if err != nil { + logrus.Warnf("failed to determine size of volume %v", name) + sz = -1 + } + tv.UsageData = &types.VolumeUsageData{Size: sz, RefCount: int64(len(refs))} + allVolumes = append(allVolumes, tv) } - tv.UsageData = &types.VolumeUsageData{Size: sz, RefCount: int64(len(refs))} - allVolumes = append(allVolumes, tv) return nil } @@ -74,21 +95,28 @@ func (daemon *Daemon) SystemDiskUsage() (*types.DiskUsage, error) { } // Get total layers size on disk - layerRefs := daemon.getLayerRefs() - allLayers := daemon.layerStore.Map() var allLayersSize int64 - for _, l := range allLayers { - size, err := l.DiffSize() - if err == nil { - if _, ok := layerRefs[l.ChainID()]; ok { - allLayersSize += size - } else { - logrus.Warnf("found leaked image layer %v", l.ChainID()) + for platform := range daemon.stores { + layerRefs := daemon.getLayerRefs(platform) + allLayers := daemon.stores[platform].layerStore.Map() + var allLayersSize int64 + for _, l := range allLayers { + select { + case <-ctx.Done(): + return nil, 
ctx.Err() + default: + size, err := l.DiffSize() + if err == nil { + if _, ok := layerRefs[l.ChainID()]; ok { + allLayersSize += size + } else { + logrus.Warnf("found leaked image layer %v platform %s", l.ChainID(), platform) + } + } else { + logrus.Warnf("failed to get diff size for layer %v %s", l.ChainID(), platform) + } } - } else { - logrus.Warnf("failed to get diff size for layer %v", l.ChainID()) } - } return &types.DiskUsage{ diff --git a/fn/vendor/github.com/docker/docker/daemon/events.go b/fn/vendor/github.com/docker/docker/daemon/events.go index 8fe8e1b64..f5d188cf0 100644 --- a/fn/vendor/github.com/docker/docker/daemon/events.go +++ b/fn/vendor/github.com/docker/docker/daemon/events.go @@ -1,14 +1,27 @@ package daemon import ( + "context" + "strconv" "strings" "time" + "github.com/Sirupsen/logrus" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/container" daemonevents "github.com/docker/docker/daemon/events" "github.com/docker/libnetwork" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +var ( + clusterEventAction = map[swarmapi.WatchActionKind]string{ + swarmapi.WatchActionKindCreate: "create", + swarmapi.WatchActionKindUpdate: "update", + swarmapi.WatchActionKindRemove: "remove", + } ) // LogContainerEvent generates an event related to a container with only the default attributes. 
@@ -130,3 +143,180 @@ func copyAttributes(attributes, labels map[string]string) { attributes[k] = v } } + +// ProcessClusterNotifications gets changes from store and add them to event list +func (daemon *Daemon) ProcessClusterNotifications(ctx context.Context, watchStream chan *swarmapi.WatchMessage) { + for { + select { + case <-ctx.Done(): + return + case message, ok := <-watchStream: + if !ok { + logrus.Debug("cluster event channel has stopped") + return + } + daemon.generateClusterEvent(message) + } + } +} + +func (daemon *Daemon) generateClusterEvent(msg *swarmapi.WatchMessage) { + for _, event := range msg.Events { + if event.Object == nil { + logrus.Errorf("event without object: %v", event) + continue + } + switch v := event.Object.GetObject().(type) { + case *swarmapi.Object_Node: + daemon.logNodeEvent(event.Action, v.Node, event.OldObject.GetNode()) + case *swarmapi.Object_Service: + daemon.logServiceEvent(event.Action, v.Service, event.OldObject.GetService()) + case *swarmapi.Object_Network: + daemon.logNetworkEvent(event.Action, v.Network, event.OldObject.GetNetwork()) + case *swarmapi.Object_Secret: + daemon.logSecretEvent(event.Action, v.Secret, event.OldObject.GetSecret()) + default: + logrus.Warnf("unrecognized event: %v", event) + } + } +} + +func (daemon *Daemon) logNetworkEvent(action swarmapi.WatchActionKind, net *swarmapi.Network, oldNet *swarmapi.Network) { + attributes := map[string]string{ + "name": net.Spec.Annotations.Name, + } + eventTime := eventTimestamp(net.Meta, action) + daemon.logClusterEvent(action, net.ID, "network", attributes, eventTime) +} + +func (daemon *Daemon) logSecretEvent(action swarmapi.WatchActionKind, secret *swarmapi.Secret, oldSecret *swarmapi.Secret) { + attributes := map[string]string{ + "name": secret.Spec.Annotations.Name, + } + eventTime := eventTimestamp(secret.Meta, action) + daemon.logClusterEvent(action, secret.ID, "secret", attributes, eventTime) +} + +func (daemon *Daemon) logNodeEvent(action 
swarmapi.WatchActionKind, node *swarmapi.Node, oldNode *swarmapi.Node) { + name := node.Spec.Annotations.Name + if name == "" && node.Description != nil { + name = node.Description.Hostname + } + attributes := map[string]string{ + "name": name, + } + eventTime := eventTimestamp(node.Meta, action) + // In an update event, display the changes in attributes + if action == swarmapi.WatchActionKindUpdate && oldNode != nil { + if node.Spec.Availability != oldNode.Spec.Availability { + attributes["availability.old"] = strings.ToLower(oldNode.Spec.Availability.String()) + attributes["availability.new"] = strings.ToLower(node.Spec.Availability.String()) + } + if node.Role != oldNode.Role { + attributes["role.old"] = strings.ToLower(oldNode.Role.String()) + attributes["role.new"] = strings.ToLower(node.Role.String()) + } + if node.Status.State != oldNode.Status.State { + attributes["state.old"] = strings.ToLower(oldNode.Status.State.String()) + attributes["state.new"] = strings.ToLower(node.Status.State.String()) + } + // This handles change within manager role + if node.ManagerStatus != nil && oldNode.ManagerStatus != nil { + // leader change + if node.ManagerStatus.Leader != oldNode.ManagerStatus.Leader { + if node.ManagerStatus.Leader { + attributes["leader.old"] = "false" + attributes["leader.new"] = "true" + } else { + attributes["leader.old"] = "true" + attributes["leader.new"] = "false" + } + } + if node.ManagerStatus.Reachability != oldNode.ManagerStatus.Reachability { + attributes["reachability.old"] = strings.ToLower(oldNode.ManagerStatus.Reachability.String()) + attributes["reachability.new"] = strings.ToLower(node.ManagerStatus.Reachability.String()) + } + } + } + + daemon.logClusterEvent(action, node.ID, "node", attributes, eventTime) +} + +func (daemon *Daemon) logServiceEvent(action swarmapi.WatchActionKind, service *swarmapi.Service, oldService *swarmapi.Service) { + attributes := map[string]string{ + "name": service.Spec.Annotations.Name, + } + eventTime := 
eventTimestamp(service.Meta, action) + + if action == swarmapi.WatchActionKindUpdate && oldService != nil { + // check image + if x, ok := service.Spec.Task.GetRuntime().(*swarmapi.TaskSpec_Container); ok { + containerSpec := x.Container + if y, ok := oldService.Spec.Task.GetRuntime().(*swarmapi.TaskSpec_Container); ok { + oldContainerSpec := y.Container + if containerSpec.Image != oldContainerSpec.Image { + attributes["image.old"] = oldContainerSpec.Image + attributes["image.new"] = containerSpec.Image + } + } else { + // This should not happen. + logrus.Errorf("service %s runtime changed from %T to %T", service.Spec.Annotations.Name, oldService.Spec.Task.GetRuntime(), service.Spec.Task.GetRuntime()) + } + } + // check replicated count change + if x, ok := service.Spec.GetMode().(*swarmapi.ServiceSpec_Replicated); ok { + replicas := x.Replicated.Replicas + if y, ok := oldService.Spec.GetMode().(*swarmapi.ServiceSpec_Replicated); ok { + oldReplicas := y.Replicated.Replicas + if replicas != oldReplicas { + attributes["replicas.old"] = strconv.FormatUint(oldReplicas, 10) + attributes["replicas.new"] = strconv.FormatUint(replicas, 10) + } + } else { + // This should not happen. 
+ logrus.Errorf("service %s mode changed from %T to %T", service.Spec.Annotations.Name, oldService.Spec.GetMode(), service.Spec.GetMode()) + } + } + if service.UpdateStatus != nil { + if oldService.UpdateStatus == nil { + attributes["updatestate.new"] = strings.ToLower(service.UpdateStatus.State.String()) + } else if service.UpdateStatus.State != oldService.UpdateStatus.State { + attributes["updatestate.old"] = strings.ToLower(oldService.UpdateStatus.State.String()) + attributes["updatestate.new"] = strings.ToLower(service.UpdateStatus.State.String()) + } + } + } + daemon.logClusterEvent(action, service.ID, "service", attributes, eventTime) +} + +func (daemon *Daemon) logClusterEvent(action swarmapi.WatchActionKind, id, eventType string, attributes map[string]string, eventTime time.Time) { + actor := events.Actor{ + ID: id, + Attributes: attributes, + } + + jm := events.Message{ + Action: clusterEventAction[action], + Type: eventType, + Actor: actor, + Scope: "swarm", + Time: eventTime.UTC().Unix(), + TimeNano: eventTime.UTC().UnixNano(), + } + daemon.EventsService.PublishMessage(jm) +} + +func eventTimestamp(meta swarmapi.Meta, action swarmapi.WatchActionKind) time.Time { + var eventTime time.Time + switch action { + case swarmapi.WatchActionKindCreate: + eventTime, _ = gogotypes.TimestampFromProto(meta.CreatedAt) + case swarmapi.WatchActionKindUpdate: + eventTime, _ = gogotypes.TimestampFromProto(meta.UpdatedAt) + case swarmapi.WatchActionKindRemove: + // There is no timestamp from store message for remove operations. + // Use current time. 
+ eventTime = time.Now() + } + return eventTime +} diff --git a/fn/vendor/github.com/docker/docker/daemon/events/events.go b/fn/vendor/github.com/docker/docker/daemon/events/events.go index b28db07ca..d1529e1ce 100644 --- a/fn/vendor/github.com/docker/docker/daemon/events/events.go +++ b/fn/vendor/github.com/docker/docker/daemon/events/events.go @@ -9,7 +9,7 @@ import ( ) const ( - eventsLimit = 64 + eventsLimit = 256 bufferSize = 1024 ) @@ -78,15 +78,14 @@ func (e *Events) Evict(l chan interface{}) { e.pub.Evict(l) } -// Log broadcasts event to listeners. Each listener has 100 milliseconds to -// receive the event or it will be skipped. +// Log creates a local scope message and publishes it func (e *Events) Log(action, eventType string, actor eventtypes.Actor) { - eventsCounter.Inc() now := time.Now().UTC() jm := eventtypes.Message{ Action: action, Type: eventType, Actor: actor, + Scope: "local", Time: now.Unix(), TimeNano: now.UnixNano(), } @@ -102,6 +101,14 @@ func (e *Events) Log(action, eventType string, actor eventtypes.Actor) { jm.Status = action } + e.PublishMessage(jm) +} + +// PublishMessage broadcasts event to listeners. Each listener has 100 milliseconds to +// receive the event or it will be skipped. 
+func (e *Events) PublishMessage(jm eventtypes.Message) { + eventsCounter.Inc() + e.mu.Lock() if len(e.events) == cap(e.events) { // discard oldest event diff --git a/fn/vendor/github.com/docker/docker/daemon/events/events_test.go b/fn/vendor/github.com/docker/docker/daemon/events/events_test.go index bbd160f90..ebb222cfb 100644 --- a/fn/vendor/github.com/docker/docker/daemon/events/events_test.go +++ b/fn/vendor/github.com/docker/docker/daemon/events/events_test.go @@ -139,17 +139,17 @@ func TestLogEvents(t *testing.T) { t.Fatalf("First action is %s, must be action_16", first.Status) } last := current[len(current)-1] - if last.Status != "action_79" { - t.Fatalf("Last action is %s, must be action_79", last.Status) + if last.Status != "action_271" { + t.Fatalf("Last action is %s, must be action_271", last.Status) } firstC := msgs[0] - if firstC.Status != "action_80" { - t.Fatalf("First action is %s, must be action_80", firstC.Status) + if firstC.Status != "action_272" { + t.Fatalf("First action is %s, must be action_272", firstC.Status) } lastC := msgs[len(msgs)-1] - if lastC.Status != "action_89" { - t.Fatalf("Last action is %s, must be action_89", lastC.Status) + if lastC.Status != "action_281" { + t.Fatalf("Last action is %s, must be action_281", lastC.Status) } } @@ -247,7 +247,7 @@ func TestLoadBufferedEventsOnlyFromPast(t *testing.T) { } // #13753 -func TestIngoreBufferedWhenNoTimes(t *testing.T) { +func TestIgnoreBufferedWhenNoTimes(t *testing.T) { m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") if err != nil { t.Fatal(err) diff --git a/fn/vendor/github.com/docker/docker/daemon/events/filter.go b/fn/vendor/github.com/docker/docker/daemon/events/filter.go index d10051600..7f1a5fda1 100644 --- a/fn/vendor/github.com/docker/docker/daemon/events/filter.go +++ b/fn/vendor/github.com/docker/docker/daemon/events/filter.go @@ -20,6 
+20,7 @@ func NewFilter(filter filters.Args) *Filter { func (ef *Filter) Include(ev events.Message) bool { return ef.matchEvent(ev) && ef.filter.ExactMatch("type", ev.Type) && + ef.matchScope(ev.Scope) && ef.matchDaemon(ev) && ef.matchContainer(ev) && ef.matchPlugin(ev) && @@ -47,6 +48,13 @@ func (ef *Filter) filterContains(field string, values map[string]struct{}) bool return false } +func (ef *Filter) matchScope(scope string) bool { + if !ef.filter.Include("scope") { + return true + } + return ef.filter.ExactMatch("scope", scope) +} + func (ef *Filter) matchLabels(attributes map[string]string) bool { if !ef.filter.Include("label") { return true @@ -74,6 +82,18 @@ func (ef *Filter) matchNetwork(ev events.Message) bool { return ef.fuzzyMatchName(ev, events.NetworkEventType) } +func (ef *Filter) matchService(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.ServiceEventType) +} + +func (ef *Filter) matchNode(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.NodeEventType) +} + +func (ef *Filter) matchSecret(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.SecretEventType) +} + func (ef *Filter) fuzzyMatchName(ev events.Message, eventType string) bool { return ef.filter.FuzzyMatch(eventType, ev.Actor.ID) || ef.filter.FuzzyMatch(eventType, ev.Actor.Attributes["name"]) diff --git a/fn/vendor/github.com/docker/docker/daemon/events_test.go b/fn/vendor/github.com/docker/docker/daemon/events_test.go index aa78664b2..7048de292 100644 --- a/fn/vendor/github.com/docker/docker/daemon/events_test.go +++ b/fn/vendor/github.com/docker/docker/daemon/events_test.go @@ -16,15 +16,13 @@ func TestLogContainerEventCopyLabels(t *testing.T) { defer e.Evict(l) container := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "container_id", - Name: "container_name", - Config: &containertypes.Config{ - Image: "image_name", - Labels: map[string]string{ - "node": "1", - "os": "alpine", - }, + ID: "container_id", + Name: 
"container_name", + Config: &containertypes.Config{ + Image: "image_name", + Labels: map[string]string{ + "node": "1", + "os": "alpine", }, }, } @@ -49,14 +47,12 @@ func TestLogContainerEventWithAttributes(t *testing.T) { defer e.Evict(l) container := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "container_id", - Name: "container_name", - Config: &containertypes.Config{ - Labels: map[string]string{ - "node": "1", - "os": "alpine", - }, + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Labels: map[string]string{ + "node": "1", + "os": "alpine", }, }, } diff --git a/fn/vendor/github.com/docker/docker/daemon/exec.go b/fn/vendor/github.com/docker/docker/daemon/exec.go index 1622a9cd3..72d01c8c2 100644 --- a/fn/vendor/github.com/docker/docker/daemon/exec.go +++ b/fn/vendor/github.com/docker/docker/daemon/exec.go @@ -253,7 +253,7 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R return fmt.Errorf("context cancelled") case err := <-attachErr: if err != nil { - if _, ok := err.(stream.DetachError); !ok { + if _, ok := err.(term.EscapeError); !ok { return fmt.Errorf("exec attach failed with error: %v", err) } d.LogContainerEvent(c, "exec_detach") diff --git a/fn/vendor/github.com/docker/docker/daemon/exec_windows.go b/fn/vendor/github.com/docker/docker/daemon/exec_windows.go index 1d6974cda..b7b45149c 100644 --- a/fn/vendor/github.com/docker/docker/daemon/exec_windows.go +++ b/fn/vendor/github.com/docker/docker/daemon/exec_windows.go @@ -8,7 +8,9 @@ import ( func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { // Process arguments need to be escaped before sending to OCI. 
- p.Args = escapeArgs(p.Args) - p.User.Username = ec.User + if c.Platform == "windows" { + p.Args = escapeArgs(p.Args) + p.User.Username = ec.User + } return nil } diff --git a/fn/vendor/github.com/docker/docker/daemon/export.go b/fn/vendor/github.com/docker/docker/daemon/export.go index 5ef6dbb0e..402e67583 100644 --- a/fn/vendor/github.com/docker/docker/daemon/export.go +++ b/fn/vendor/github.com/docker/docker/daemon/export.go @@ -40,11 +40,10 @@ func (daemon *Daemon) containerExport(container *container.Container) (io.ReadCl return nil, err } - uidMaps, gidMaps := daemon.GetUIDGIDMaps() archive, err := archive.TarWithOptions(container.BaseFS, &archive.TarOptions{ Compression: archive.Uncompressed, - UIDMaps: uidMaps, - GIDMaps: gidMaps, + UIDMaps: daemon.idMappings.UIDs(), + GIDMaps: daemon.idMappings.GIDs(), }) if err != nil { daemon.Unmount(container) diff --git a/fn/vendor/github.com/docker/docker/daemon/getsize_unix.go b/fn/vendor/github.com/docker/docker/daemon/getsize_unix.go index bd088fd41..434fa4388 100644 --- a/fn/vendor/github.com/docker/docker/daemon/getsize_unix.go +++ b/fn/vendor/github.com/docker/docker/daemon/getsize_unix.go @@ -3,6 +3,8 @@ package daemon import ( + "runtime" + "github.com/Sirupsen/logrus" ) @@ -13,17 +15,17 @@ func (daemon *Daemon) getSize(containerID string) (int64, int64) { err error ) - rwlayer, err := daemon.layerStore.GetRWLayer(containerID) + rwlayer, err := daemon.stores[runtime.GOOS].layerStore.GetRWLayer(containerID) if err != nil { logrus.Errorf("Failed to compute size of container rootfs %v: %v", containerID, err) return sizeRw, sizeRootfs } - defer daemon.layerStore.ReleaseRWLayer(rwlayer) + defer daemon.stores[runtime.GOOS].layerStore.ReleaseRWLayer(rwlayer) sizeRw, err = rwlayer.Size() if err != nil { logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", - daemon.GraphDriverName(), containerID, err) + daemon.GraphDriverName(runtime.GOOS), containerID, err) // FIXME: GetSize should return an 
error. Not changing it now in case // there is a side-effect. sizeRw = -1 diff --git a/fn/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go b/fn/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go index b85596278..b245143e1 100644 --- a/fn/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go +++ b/fn/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go @@ -46,9 +46,10 @@ import ( "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/locker" mountpk "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/system" - "github.com/opencontainers/runc/libcontainer/label" rsystem "github.com/opencontainers/runc/libcontainer/system" + "github.com/opencontainers/selinux/go-selinux/label" ) var ( @@ -319,13 +320,13 @@ func (a *Driver) Remove(id string) error { } return err } - defer os.RemoveAll(tmpMntPath) + defer system.EnsureRemoveAll(tmpMntPath) tmpDiffpath := path.Join(a.diffPath(), fmt.Sprintf("%s-removing", id)) if err := os.Rename(a.getDiffPath(id), tmpDiffpath); err != nil && !os.IsNotExist(err) { return err } - defer os.RemoveAll(tmpDiffpath) + defer system.EnsureRemoveAll(tmpDiffpath) // Remove the layers file for the id if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { @@ -572,7 +573,7 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro offset := 54 if useDirperm() { - offset += len("dirperm1") + offset += len(",dirperm1") } b := make([]byte, syscall.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) diff --git a/fn/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go b/fn/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go index 0d149c96a..3eb4ce8ed 100644 --- a/fn/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go +++ b/fn/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go @@ -16,19 +16,25 @@ 
import "C" import ( "fmt" + "io/ioutil" + "math" "os" "path" "path/filepath" + "strconv" "strings" + "sync" "syscall" "unsafe" + "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/system" "github.com/docker/go-units" - "github.com/opencontainers/runc/libcontainer/label" + "github.com/opencontainers/selinux/go-selinux/label" ) func init() { @@ -118,6 +124,7 @@ type Driver struct { gidMaps []idtools.IDMap options btrfsOptions quotaEnabled bool + once sync.Once } // String prints the name of the driver (btrfs). @@ -236,7 +243,7 @@ func isSubvolume(p string) (bool, error) { return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil } -func subvolDelete(dirpath, name string) error { +func subvolDelete(dirpath, name string, quotaEnabled bool) error { dir, err := openDir(dirpath) if err != nil { return err @@ -264,7 +271,7 @@ func subvolDelete(dirpath, name string) error { return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) } if sv { - if err := subvolDelete(path.Dir(p), f.Name()); err != nil { + if err := subvolDelete(path.Dir(p), f.Name(), quotaEnabled); err != nil { return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err) } } @@ -275,6 +282,21 @@ func subvolDelete(dirpath, name string) error { return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) } + if quotaEnabled { + if qgroupid, err := subvolLookupQgroup(fullPath); err == nil { + var args C.struct_btrfs_ioctl_qgroup_create_args + args.qgroupid = C.__u64(qgroupid) + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + logrus.Errorf("Failed to delete btrfs qgroup %v for %s: %v", qgroupid, fullPath, errno.Error()) + } + } else { + logrus.Errorf("Failed to lookup 
btrfs qgroup for %s: %v", fullPath, err.Error()) + } + } + // all subvolumes have been removed // now remove the one originally passed in for i, c := range []byte(name) { @@ -288,15 +310,25 @@ func subvolDelete(dirpath, name string) error { return nil } +func (d *Driver) updateQuotaStatus() { + d.once.Do(func() { + if !d.quotaEnabled { + // In case quotaEnabled is not set, check qgroup and update quotaEnabled as needed + if err := subvolQgroupStatus(d.home); err != nil { + // quota is still not enabled + return + } + d.quotaEnabled = true + } + }) +} + func (d *Driver) subvolEnableQuota() error { + d.updateQuotaStatus() + if d.quotaEnabled { return nil } - // In case quotaEnabled is not set, check qgroup and update quotaEnabled as needed - if _, err := subvolLookupQgroup(d.home); err == nil { - d.quotaEnabled = true - return nil - } dir, err := openDir(d.home) if err != nil { @@ -318,13 +350,10 @@ func (d *Driver) subvolEnableQuota() error { } func (d *Driver) subvolDisableQuota() error { + d.updateQuotaStatus() + if !d.quotaEnabled { - // In case quotaEnabled is not set, check qgroup and update quotaEnabled as needed - if _, err := subvolLookupQgroup(d.home); err != nil { - // quota is still not enabled - return nil - } - d.quotaEnabled = true + return nil } dir, err := openDir(d.home) @@ -347,13 +376,10 @@ func (d *Driver) subvolDisableQuota() error { } func (d *Driver) subvolRescanQuota() error { + d.updateQuotaStatus() + if !d.quotaEnabled { - // In case quotaEnabled is not set, check qgroup and update quotaEnabled as needed - if _, err := subvolLookupQgroup(d.home); err != nil { - // quota is still not enabled - return nil - } - d.quotaEnabled = true + return nil } dir, err := openDir(d.home) @@ -391,6 +417,38 @@ func subvolLimitQgroup(path string, size uint64) error { return nil } +// subvolQgroupStatus performs a BTRFS_IOC_TREE_SEARCH on the root path +// with search key of BTRFS_QGROUP_STATUS_KEY. 
+// In case qgroup is enabled, the retuned key type will match BTRFS_QGROUP_STATUS_KEY. +// For more details please see https://github.com/kdave/btrfs-progs/blob/v4.9/qgroup.c#L1035 +func subvolQgroupStatus(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_search_args + args.key.tree_id = C.BTRFS_QUOTA_TREE_OBJECTID + args.key.min_type = C.BTRFS_QGROUP_STATUS_KEY + args.key.max_type = C.BTRFS_QGROUP_STATUS_KEY + args.key.max_objectid = C.__u64(math.MaxUint64) + args.key.max_offset = C.__u64(math.MaxUint64) + args.key.max_transid = C.__u64(math.MaxUint64) + args.key.nr_items = 4096 + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_TREE_SEARCH, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to search qgroup for %s: %v", path, errno.Error()) + } + sh := (*C.struct_btrfs_ioctl_search_header)(unsafe.Pointer(&args.buf)) + if sh._type != C.BTRFS_QGROUP_STATUS_KEY { + return fmt.Errorf("Invalid qgroup search header type for %s: %v", path, sh._type) + } + return nil +} + func subvolLookupQgroup(path string) (uint64, error) { dir, err := openDir(path) if err != nil { @@ -421,6 +479,14 @@ func (d *Driver) subvolumesDirID(id string) string { return path.Join(d.subvolumesDir(), id) } +func (d *Driver) quotasDir() string { + return path.Join(d.home, "quotas") +} + +func (d *Driver) quotasDirID(id string) string { + return path.Join(d.quotasDir(), id) +} + // CreateReadWrite creates a layer that is writable for use as a container // file system. func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { @@ -429,6 +495,7 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts // Create the filesystem with given id. 
func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + quotas := path.Join(d.home, "quotas") subvolumes := path.Join(d.home, "subvolumes") rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { @@ -465,9 +532,16 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { if err := d.parseStorageOpt(storageOpt, driver); err != nil { return err } + if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { return err } + if err := idtools.MkdirAllAs(quotas, 0700, rootUID, rootGID); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil { + return err + } } // if we have a remapped root (user namespaces enabled), change the created snapshot @@ -532,10 +606,22 @@ func (d *Driver) Remove(id string) error { if _, err := os.Stat(dir); err != nil { return err } - if err := subvolDelete(d.subvolumesDir(), id); err != nil { + quotasDir := d.quotasDirID(id) + if _, err := os.Stat(quotasDir); err == nil { + if err := os.Remove(quotasDir); err != nil { + return err + } + } else if !os.IsNotExist(err) { return err } - if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { + + // Call updateQuotaStatus() to invoke status update + d.updateQuotaStatus() + + if err := subvolDelete(d.subvolumesDir(), id, d.quotaEnabled); err != nil { + return err + } + if err := system.EnsureRemoveAll(dir); err != nil { return err } if err := d.subvolRescanQuota(); err != nil { @@ -556,6 +642,17 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { return "", fmt.Errorf("%s: not a directory", dir) } + if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil { + if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace { + if err := d.subvolEnableQuota(); err != nil { + return "", err + } + if err := subvolLimitQgroup(dir, size); err != nil { + return 
"", err + } + } + } + return dir, nil } diff --git a/fn/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/README.md b/fn/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/README.md index bed07869a..6594fa65f 100644 --- a/fn/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/README.md +++ b/fn/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/README.md @@ -5,7 +5,9 @@ The device mapper graphdriver uses the device mapper thin provisioning module (dm-thinp) to implement CoW snapshots. The preferred model is to have a thin pool reserved outside of Docker and passed to the -daemon via the `--storage-opt dm.thinpooldev` option. +daemon via the `--storage-opt dm.thinpooldev` option. Alternatively, +the device mapper graphdriver can setup a block device to handle this +for you via the `--storage-opt dm.directlvm_device` option. As a fallback if no thin pool is provided, loopback files will be created. Loopback is very slow, but can be used without any diff --git a/fn/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/device_setup.go b/fn/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/device_setup.go new file mode 100644 index 000000000..ef6cffbf2 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/device_setup.go @@ -0,0 +1,247 @@ +package devmapper + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "reflect" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/pkg/errors" +) + +type directLVMConfig struct { + Device string + ThinpPercent uint64 + ThinpMetaPercent uint64 + AutoExtendPercent uint64 + AutoExtendThreshold uint64 +} + +var ( + errThinpPercentMissing = errors.New("must set both `dm.thinp_percent` and `dm.thinp_metapercent` if either is specified") + errThinpPercentTooBig = errors.New("combined `dm.thinp_percent` and `dm.thinp_metapercent` must not be greater than 100") + errMissingSetupDevice = 
errors.New("must provide device path in `dm.setup_device` in order to configure direct-lvm") +) + +func validateLVMConfig(cfg directLVMConfig) error { + if reflect.DeepEqual(cfg, directLVMConfig{}) { + return nil + } + if cfg.Device == "" { + return errMissingSetupDevice + } + if (cfg.ThinpPercent > 0 && cfg.ThinpMetaPercent == 0) || cfg.ThinpMetaPercent > 0 && cfg.ThinpPercent == 0 { + return errThinpPercentMissing + } + + if cfg.ThinpPercent+cfg.ThinpMetaPercent > 100 { + return errThinpPercentTooBig + } + return nil +} + +func checkDevAvailable(dev string) error { + lvmScan, err := exec.LookPath("lvmdiskscan") + if err != nil { + logrus.Debug("could not find lvmdiskscan") + return nil + } + + out, err := exec.Command(lvmScan).CombinedOutput() + if err != nil { + logrus.WithError(err).Error(string(out)) + return nil + } + + if !bytes.Contains(out, []byte(dev)) { + return errors.Errorf("%s is not available for use with devicemapper", dev) + } + return nil +} + +func checkDevInVG(dev string) error { + pvDisplay, err := exec.LookPath("pvdisplay") + if err != nil { + logrus.Debug("could not find pvdisplay") + return nil + } + + out, err := exec.Command(pvDisplay, dev).CombinedOutput() + if err != nil { + logrus.WithError(err).Error(string(out)) + return nil + } + + scanner := bufio.NewScanner(bytes.NewReader(bytes.TrimSpace(out))) + for scanner.Scan() { + fields := strings.SplitAfter(strings.TrimSpace(scanner.Text()), "VG Name") + if len(fields) > 1 { + // got "VG Name" line" + vg := strings.TrimSpace(fields[1]) + if len(vg) > 0 { + return errors.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg) + } + logrus.Error(fields) + break + } + } + return nil +} + +func checkDevHasFS(dev string) error { + blkid, err := exec.LookPath("blkid") + if err != nil { + logrus.Debug("could not find blkid") + return nil + } + + out, err := exec.Command(blkid, dev).CombinedOutput() + if err != nil { + 
logrus.WithError(err).Error(string(out)) + return nil + } + + fields := bytes.Fields(out) + for _, f := range fields { + kv := bytes.Split(f, []byte{'='}) + if bytes.Equal(kv[0], []byte("TYPE")) { + v := bytes.Trim(kv[1], "\"") + if len(v) > 0 { + return errors.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev) + } + return nil + } + } + return nil +} + +func verifyBlockDevice(dev string, force bool) error { + if err := checkDevAvailable(dev); err != nil { + return err + } + if err := checkDevInVG(dev); err != nil { + return err + } + + if force { + return nil + } + + if err := checkDevHasFS(dev); err != nil { + return err + } + return nil +} + +func readLVMConfig(root string) (directLVMConfig, error) { + var cfg directLVMConfig + + p := filepath.Join(root, "setup-config.json") + b, err := ioutil.ReadFile(p) + if err != nil { + if os.IsNotExist(err) { + return cfg, nil + } + return cfg, errors.Wrap(err, "error reading existing setup config") + } + + // check if this is just an empty file, no need to produce a json error later if so + if len(b) == 0 { + return cfg, nil + } + + err = json.Unmarshal(b, &cfg) + return cfg, errors.Wrap(err, "error unmarshaling previous device setup config") +} + +func writeLVMConfig(root string, cfg directLVMConfig) error { + p := filepath.Join(root, "setup-config.json") + b, err := json.Marshal(cfg) + if err != nil { + return errors.Wrap(err, "error marshalling direct lvm config") + } + err = ioutil.WriteFile(p, b, 0600) + return errors.Wrap(err, "error writing direct lvm config to file") +} + +func setupDirectLVM(cfg directLVMConfig) error { + pvCreate, err := exec.LookPath("pvcreate") + if err != nil { + return errors.Wrap(err, "error looking up command `pvcreate` while setting up direct lvm") + } + + vgCreate, err := exec.LookPath("vgcreate") + if err != nil { + return errors.Wrap(err, "error looking up command `vgcreate` while setting up direct lvm") + } + + lvCreate, err 
:= exec.LookPath("lvcreate") + if err != nil { + return errors.Wrap(err, "error looking up command `lvcreate` while setting up direct lvm") + } + + lvConvert, err := exec.LookPath("lvconvert") + if err != nil { + return errors.Wrap(err, "error looking up command `lvconvert` while setting up direct lvm") + } + + lvChange, err := exec.LookPath("lvchange") + if err != nil { + return errors.Wrap(err, "error looking up command `lvchange` while setting up direct lvm") + } + + if cfg.AutoExtendPercent == 0 { + cfg.AutoExtendPercent = 20 + } + + if cfg.AutoExtendThreshold == 0 { + cfg.AutoExtendThreshold = 80 + } + + if cfg.ThinpPercent == 0 { + cfg.ThinpPercent = 95 + } + if cfg.ThinpMetaPercent == 0 { + cfg.ThinpMetaPercent = 1 + } + + out, err := exec.Command(pvCreate, "-f", cfg.Device).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + out, err = exec.Command(vgCreate, "docker", cfg.Device).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + out, err = exec.Command(lvCreate, "--wipesignatures", "y", "-n", "thinpool", "docker", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + out, err = exec.Command(lvCreate, "--wipesignatures", "y", "-n", "thinpoolmeta", "docker", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + out, err = exec.Command(lvConvert, "-y", "--zero", "n", "-c", "512K", "--thinpool", "docker/thinpool", "--poolmetadata", "docker/thinpoolmeta").CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent) + err = ioutil.WriteFile("/etc/lvm/profile/docker-thinpool.profile", []byte(profile), 0600) + if err != nil { + return errors.Wrap(err, "error writing 
docker thinp autoextend profile") + } + + out, err = exec.Command(lvChange, "--metadataprofile", "docker-thinpool", "docker/thinpool").CombinedOutput() + return errors.Wrap(err, string(out)) +} diff --git a/fn/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go b/fn/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go index ba845d4d0..c0116c799 100644 --- a/fn/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go +++ b/fn/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go @@ -5,7 +5,6 @@ package devmapper import ( "bufio" "encoding/json" - "errors" "fmt" "io" "io/ioutil" @@ -13,6 +12,7 @@ import ( "os/exec" "path" "path/filepath" + "reflect" "strconv" "strings" "sync" @@ -29,8 +29,9 @@ import ( "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/parsers" units "github.com/docker/go-units" + "github.com/pkg/errors" - "github.com/opencontainers/runc/libcontainer/label" + "github.com/opencontainers/selinux/go-selinux/label" ) var ( @@ -50,6 +51,7 @@ var ( enableDeferredDeletion = false userBaseSize = false defaultMinFreeSpacePercent uint32 = 10 + lvmSetupConfigForce bool ) const deviceSetMetaFile string = "deviceset-metadata" @@ -123,6 +125,7 @@ type DeviceSet struct { gidMaps []idtools.IDMap minFreeSpacePercent uint32 //min free space percentage in thinpool xfsNospaceRetries string // max retries when xfs receives ENOSPC + lvmSetupConfig directLVMConfig } // DiskUsage contains information about disk usage and is used when reporting Status of a device. 
@@ -1686,20 +1689,10 @@ func (devices *DeviceSet) enableDeferredRemovalDeletion() error { return nil } -func (devices *DeviceSet) initDevmapper(doInit bool) error { +func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { // give ourselves to libdm as a log handler devicemapper.LogInit(devices) - version, err := devicemapper.GetDriverVersion() - if err != nil { - // Can't even get driver version, assume not supported - return graphdriver.ErrNotSupported - } - - if err := determineDriverCapabilities(version); err != nil { - return graphdriver.ErrNotSupported - } - if err := devices.enableDeferredRemovalDeletion(); err != nil { return err } @@ -1730,8 +1723,36 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { return err } - // Set the device prefix from the device id and inode of the docker root dir + prevSetupConfig, err := readLVMConfig(devices.root) + if err != nil { + return err + } + if !reflect.DeepEqual(devices.lvmSetupConfig, directLVMConfig{}) { + if devices.thinPoolDevice != "" { + return errors.New("cannot setup direct-lvm when `dm.thinpooldev` is also specified") + } + + if !reflect.DeepEqual(prevSetupConfig, devices.lvmSetupConfig) { + if !reflect.DeepEqual(prevSetupConfig, directLVMConfig{}) { + return errors.New("changing direct-lvm config is not supported") + } + logrus.WithField("storage-driver", "devicemapper").WithField("direct-lvm-config", devices.lvmSetupConfig).Debugf("Setting up direct lvm mode") + if err := verifyBlockDevice(devices.lvmSetupConfig.Device, lvmSetupConfigForce); err != nil { + return err + } + if err := setupDirectLVM(devices.lvmSetupConfig); err != nil { + return err + } + if err := writeLVMConfig(devices.root, devices.lvmSetupConfig); err != nil { + return err + } + } + devices.thinPoolDevice = "docker-thinpool" + logrus.WithField("storage-driver", "devicemapper").Debugf("Setting dm.thinpooldev to %q", devices.thinPoolDevice) + } + + // Set the device prefix from the device id and inode of the 
docker root dir st, err := os.Stat(devices.root) if err != nil { return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err) @@ -1840,6 +1861,14 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { return err } + defer func() { + if retErr != nil { + err = devices.deactivatePool() + if err != nil { + logrus.Warnf("devmapper: Failed to deactivatePool: %v", err) + } + } + }() } // Pool already exists and caller did not pass us a pool. That means @@ -2604,7 +2633,24 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [ minFreeSpacePercent: defaultMinFreeSpacePercent, } + version, err := devicemapper.GetDriverVersion() + if err != nil { + // Can't even get driver version, assume not supported + return nil, graphdriver.ErrNotSupported + } + + if err := determineDriverCapabilities(version); err != nil { + return nil, graphdriver.ErrNotSupported + } + + if driverDeferredRemovalSupport && devicemapper.LibraryDeferredRemovalSupport { + // enable deferred stuff by default + enableDeferredDeletion = true + enableDeferredRemoval = true + } + foundBlkDiscard := false + var lvmSetupConfig directLVMConfig for _, option := range options { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { @@ -2699,11 +2745,60 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [ return nil, err } devices.xfsNospaceRetries = val + case "dm.directlvm_device": + lvmSetupConfig.Device = val + case "dm.directlvm_device_force": + lvmSetupConfigForce, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "dm.thinp_percent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_percent=%s`", val) + } + if per >= 100 { + return nil, errors.New("dm.thinp_percent must be 
greater than 0 and less than 100") + } + lvmSetupConfig.ThinpPercent = per + case "dm.thinp_metapercent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_metapercent=%s`", val) + } + if per >= 100 { + return nil, errors.New("dm.thinp_metapercent must be greater than 0 and less than 100") + } + lvmSetupConfig.ThinpMetaPercent = per + case "dm.thinp_autoextend_percent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_percent=%s`", val) + } + if per > 100 { + return nil, errors.New("dm.thinp_autoextend_percent must be greater than 0 and less than 100") + } + lvmSetupConfig.AutoExtendPercent = per + case "dm.thinp_autoextend_threshold": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_threshold=%s`", val) + } + if per > 100 { + return nil, errors.New("dm.thinp_autoextend_threshold must be greater than 0 and less than 100") + } + lvmSetupConfig.AutoExtendThreshold = per default: return nil, fmt.Errorf("devmapper: Unknown option %s\n", key) } } + if err := validateLVMConfig(lvmSetupConfig); err != nil { + return nil, err + } + + devices.lvmSetupConfig = lvmSetupConfig + // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { devices.doBlkDiscard = false diff --git a/fn/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_test.go b/fn/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_test.go index c5be97ae3..7501397fd 100644 --- a/fn/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_test.go +++ b/fn/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_test.go @@ 
-4,6 +4,8 @@ package devmapper import ( "fmt" + "os" + "syscall" "testing" "time" @@ -17,11 +19,51 @@ func init() { defaultMetaDataLoopbackSize = 200 * 1024 * 1024 defaultBaseFsSize = 300 * 1024 * 1024 defaultUdevSyncOverride = true - if err := graphtest.InitLoopbacks(); err != nil { + if err := initLoopbacks(); err != nil { panic(err) } } +// initLoopbacks ensures that the loopback devices are properly created within +// the system running the device mapper tests. +func initLoopbacks() error { + statT, err := getBaseLoopStats() + if err != nil { + return err + } + // create at least 8 loopback files, ya, that is a good number + for i := 0; i < 8; i++ { + loopPath := fmt.Sprintf("/dev/loop%d", i) + // only create new loopback files if they don't exist + if _, err := os.Stat(loopPath); err != nil { + if mkerr := syscall.Mknod(loopPath, + uint32(statT.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil { + return mkerr + } + os.Chown(loopPath, int(statT.Uid), int(statT.Gid)) + } + } + return nil +} + +// getBaseLoopStats inspects /dev/loop0 to collect uid,gid, and mode for the +// loop0 device on the system. 
If it does not exist we assume 0,0,0660 for the +// stat data +func getBaseLoopStats() (*syscall.Stat_t, error) { + loop0, err := os.Stat("/dev/loop0") + if err != nil { + if os.IsNotExist(err) { + return &syscall.Stat_t{ + Uid: 0, + Gid: 0, + Mode: 0660, + }, nil + } + return nil, err + } + return loop0.Sys().(*syscall.Stat_t), nil +} + // This avoids creating a new driver for each test if all tests are run // Make sure to put new tests between TestDevmapperSetup and TestDevmapperTeardown func TestDevmapperSetup(t *testing.T) { diff --git a/fn/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go b/fn/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go index 91de5cd12..243d88a8b 100644 --- a/fn/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go +++ b/fn/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go @@ -16,6 +16,7 @@ import ( "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/locker" "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/system" units "github.com/docker/go-units" ) @@ -160,7 +161,7 @@ func (d *Driver) Remove(id string) error { } mp := path.Join(d.home, "mnt", id) - if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) { + if err := system.EnsureRemoveAll(mp); err != nil { return err } diff --git a/fn/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go b/fn/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go index 5c8d0e230..46b6eec09 100644 --- a/fn/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go +++ b/fn/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go @@ -95,7 +95,7 @@ func GetFSMagic(rootpath string) (FsMagic, error) { return FsMagic(buf.Type), nil } -// NewFsChecker returns a checker configured for the provied FsMagic +// NewFsChecker returns a checker configured for the provided FsMagic func NewFsChecker(t FsMagic) Checker { return &fsChecker{ t: t, diff 
--git a/fn/vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go b/fn/vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go index 7daf01c32..06dc360cf 100644 --- a/fn/vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go +++ b/fn/vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go @@ -54,7 +54,7 @@ func (c *fsChecker) IsMounted(path string) bool { return m } -// NewFsChecker returns a checker configured for the provied FsMagic +// NewFsChecker returns a checker configured for the provided FsMagic func NewFsChecker(t FsMagic) Checker { return &fsChecker{ t: t, diff --git a/fn/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go b/fn/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go index 6e952de78..6852ca9f4 100644 --- a/fn/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go +++ b/fn/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go @@ -16,6 +16,8 @@ import ( "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/stringid" "github.com/docker/go-units" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var ( @@ -33,14 +35,9 @@ type Driver struct { func newDriver(t testing.TB, name string, options []string) *Driver { root, err := ioutil.TempDir("", "docker-graphtest-") - if err != nil { - t.Fatal(err) - } - - if err := os.MkdirAll(root, 0755); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, os.MkdirAll(root, 0755)) d, err := graphdriver.GetDriver(name, nil, graphdriver.Options{DriverOptions: options, Root: root}) if err != nil { t.Logf("graphdriver: %v\n", err) @@ -86,14 +83,11 @@ func DriverTestCreateEmpty(t testing.TB, drivername string, driverOptions ...str driver := GetDriver(t, drivername, driverOptions...) 
defer PutDriver(t) - if err := driver.Create("empty", "", nil); err != nil { - t.Fatal(err) - } + err := driver.Create("empty", "", nil) + require.NoError(t, err) defer func() { - if err := driver.Remove("empty"); err != nil { - t.Fatal(err) - } + require.NoError(t, driver.Remove("empty")) }() if !driver.Exists("empty") { @@ -101,21 +95,14 @@ func DriverTestCreateEmpty(t testing.TB, drivername string, driverOptions ...str } dir, err := driver.Get("empty", "") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) verifyFile(t, dir, 0755|os.ModeDir, 0, 0) // Verify that the directory is empty fis, err := readDir(dir) - if err != nil { - t.Fatal(err) - } - - if len(fis) != 0 { - t.Fatal("New directory not empty") - } + require.NoError(t, err) + assert.Len(t, fis, 0) driver.Put("empty") } @@ -127,9 +114,7 @@ func DriverTestCreateBase(t testing.TB, drivername string, driverOptions ...stri createBase(t, driver, "Base") defer func() { - if err := driver.Remove("Base"); err != nil { - t.Fatal(err) - } + require.NoError(t, driver.Remove("Base")) }() verifyBase(t, driver, "Base") } @@ -140,21 +125,14 @@ func DriverTestCreateSnap(t testing.TB, drivername string, driverOptions ...stri defer PutDriver(t) createBase(t, driver, "Base") - defer func() { - if err := driver.Remove("Base"); err != nil { - t.Fatal(err) - } + require.NoError(t, driver.Remove("Base")) }() - if err := driver.Create("Snap", "Base", nil); err != nil { - t.Fatal(err) - } - + err := driver.Create("Snap", "Base", nil) + require.NoError(t, err) defer func() { - if err := driver.Remove("Snap"); err != nil { - t.Fatal(err) - } + require.NoError(t, driver.Remove("Snap")) }() verifyBase(t, driver, "Snap") diff --git a/fn/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go b/fn/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go index 49b0c2cc3..63a934176 100644 --- a/fn/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go +++ 
b/fn/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go @@ -3,7 +3,6 @@ package graphtest import ( - "fmt" "io/ioutil" "os" "path" @@ -11,81 +10,24 @@ import ( "testing" "github.com/docker/docker/daemon/graphdriver" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -// InitLoopbacks ensures that the loopback devices are properly created within -// the system running the device mapper tests. -func InitLoopbacks() error { - statT, err := getBaseLoopStats() - if err != nil { - return err - } - // create at least 8 loopback files, ya, that is a good number - for i := 0; i < 8; i++ { - loopPath := fmt.Sprintf("/dev/loop%d", i) - // only create new loopback files if they don't exist - if _, err := os.Stat(loopPath); err != nil { - if mkerr := syscall.Mknod(loopPath, - uint32(statT.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil { - return mkerr - } - os.Chown(loopPath, int(statT.Uid), int(statT.Gid)) - } - } - return nil -} - -// getBaseLoopStats inspects /dev/loop0 to collect uid,gid, and mode for the -// loop0 device on the system. 
If it does not exist we assume 0,0,0660 for the -// stat data -func getBaseLoopStats() (*syscall.Stat_t, error) { - loop0, err := os.Stat("/dev/loop0") - if err != nil { - if os.IsNotExist(err) { - return &syscall.Stat_t{ - Uid: 0, - Gid: 0, - Mode: 0660, - }, nil - } - return nil, err - } - return loop0.Sys().(*syscall.Stat_t), nil -} - func verifyFile(t testing.TB, path string, mode os.FileMode, uid, gid uint32) { fi, err := os.Stat(path) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if fi.Mode()&os.ModeType != mode&os.ModeType { - t.Fatalf("Expected %s type 0x%x, got 0x%x", path, mode&os.ModeType, fi.Mode()&os.ModeType) - } - - if fi.Mode()&os.ModePerm != mode&os.ModePerm { - t.Fatalf("Expected %s mode %o, got %o", path, mode&os.ModePerm, fi.Mode()&os.ModePerm) - } - - if fi.Mode()&os.ModeSticky != mode&os.ModeSticky { - t.Fatalf("Expected %s sticky 0x%x, got 0x%x", path, mode&os.ModeSticky, fi.Mode()&os.ModeSticky) - } - - if fi.Mode()&os.ModeSetuid != mode&os.ModeSetuid { - t.Fatalf("Expected %s setuid 0x%x, got 0x%x", path, mode&os.ModeSetuid, fi.Mode()&os.ModeSetuid) - } - - if fi.Mode()&os.ModeSetgid != mode&os.ModeSetgid { - t.Fatalf("Expected %s setgid 0x%x, got 0x%x", path, mode&os.ModeSetgid, fi.Mode()&os.ModeSetgid) - } + actual := fi.Mode() + assert.Equal(t, mode&os.ModeType, actual&os.ModeType, path) + assert.Equal(t, mode&os.ModePerm, actual&os.ModePerm, path) + assert.Equal(t, mode&os.ModeSticky, actual&os.ModeSticky, path) + assert.Equal(t, mode&os.ModeSetuid, actual&os.ModeSetuid, path) + assert.Equal(t, mode&os.ModeSetgid, actual&os.ModeSetgid, path) if stat, ok := fi.Sys().(*syscall.Stat_t); ok { - if stat.Uid != uid { - t.Fatalf("%s no owned by uid %d", path, uid) - } - if stat.Gid != gid { - t.Fatalf("%s not owned by gid %d", path, gid) - } + assert.Equal(t, uid, stat.Uid, path) + assert.Equal(t, gid, stat.Gid, path) } } @@ -94,35 +36,25 @@ func createBase(t testing.TB, driver graphdriver.Driver, name string) { oldmask := 
syscall.Umask(0) defer syscall.Umask(oldmask) - if err := driver.CreateReadWrite(name, "", nil); err != nil { - t.Fatal(err) - } + err := driver.CreateReadWrite(name, "", nil) + require.NoError(t, err) dir, err := driver.Get(name, "") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer driver.Put(name) subdir := path.Join(dir, "a subdir") - if err := os.Mkdir(subdir, 0705|os.ModeSticky); err != nil { - t.Fatal(err) - } - if err := os.Chown(subdir, 1, 2); err != nil { - t.Fatal(err) - } + require.NoError(t, os.Mkdir(subdir, 0705|os.ModeSticky)) + require.NoError(t, os.Chown(subdir, 1, 2)) file := path.Join(dir, "a file") - if err := ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid); err != nil { - t.Fatal(err) - } + err = ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid) + require.NoError(t, err) } func verifyBase(t testing.TB, driver graphdriver.Driver, name string) { dir, err := driver.Get(name, "") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer driver.Put(name) subdir := path.Join(dir, "a subdir") @@ -131,13 +63,7 @@ func verifyBase(t testing.TB, driver graphdriver.Driver, name string) { file := path.Join(dir, "a file") verifyFile(t, file, 0222|os.ModeSetuid, 0, 0) - fis, err := readDir(dir) - if err != nil { - t.Fatal(err) - } - - if len(fis) != 2 { - t.Fatal("Unexpected files in base image") - } - + files, err := readDir(dir) + require.NoError(t, err) + assert.Len(t, files, 2) } diff --git a/fn/vendor/github.com/docker/docker/daemon/graphdriver/lcow/lcow.go b/fn/vendor/github.com/docker/docker/daemon/graphdriver/lcow/lcow.go new file mode 100644 index 000000000..079252ecb --- /dev/null +++ b/fn/vendor/github.com/docker/docker/daemon/graphdriver/lcow/lcow.go @@ -0,0 +1,539 @@ +// +build windows + +package lcow + +// Maintainer: @jhowardmsft +// Graph-driver for Linux Containers On Windows (LCOW) + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + 
"sync" + "time" + + "github.com/Microsoft/hcsshim" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/system" + "github.com/jhowardmsft/opengcs/gogcs/client" +) + +// init registers the LCOW driver to the register. +func init() { + graphdriver.Register("lcow", InitLCOW) +} + +const ( + // sandboxFilename is the name of the file containing a layers sandbox (read-write layer) + sandboxFilename = "sandbox.vhdx" + + // svmScratchFilename is the name of the scratch-space used by an SVM to avoid running out of memory + svmScratchFilename = "scratch.vhdx" +) + +// cacheType is our internal structure representing an item in our local cache +// of things that have been mounted. +type cacheType struct { + uvmPath string // Path in utility VM + hostPath string // Path on host + refCount int // How many times its been mounted + isSandbox bool // True if a sandbox +} + +// Driver represents an LCOW graph driver. +type Driver struct { + // homeDir is the hostpath where we're storing everything + homeDir string + // cachedSandboxFile is the location of the local default-sized cached sandbox + cachedSandboxFile string + // options are the graphdriver options we are initialised with + options []string + // config is the representation of the SVM. + // @jhowardmsft LIFETIME TODO - For now, a global service utility-VM + config client.Config + // svmScratchSpaceFile is a host location for a dedicated scratch space + // that the SVM utilities can use as a scratch-space to avoid OOMs + // @jhowardmsft LIFETIME TODO - For now, a global service utility-VM + svmScratchSpaceFile string + + // it is safe for windows to use a cache here because it does not support + // restoring containers when the daemon dies. 
+ + // cacheMu is the mutex protection add/update/deletes to our cache + cacheMu sync.Mutex + // cache is the cache of all the IDs we've mounted/unmounted. + cache map[string]cacheType +} + +// InitLCOW returns a new LCOW storage driver. +func InitLCOW(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + title := "lcowdriver: init:" + logrus.Debugf("%s %s", title, home) + + d := &Driver{ + homeDir: home, + options: options, + cachedSandboxFile: filepath.Join(home, "cache", sandboxFilename), + svmScratchSpaceFile: filepath.Join(home, "svmscratch", svmScratchFilename), + cache: make(map[string]cacheType), + } + + if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil { + return nil, fmt.Errorf("%s failed to create '%s': %v", title, home, err) + } + + // Cache directory for blank sandbox so don't have to pull it from the service VM each time + if err := idtools.MkdirAllAs(filepath.Dir(d.cachedSandboxFile), 0700, 0, 0); err != nil { + return nil, fmt.Errorf("%s failed to create '%s': %v", title, home, err) + } + + // Location for the SVM scratch + if err := idtools.MkdirAllAs(filepath.Dir(d.svmScratchSpaceFile), 0700, 0, 0); err != nil { + return nil, fmt.Errorf("%s failed to create '%s': %v", title, home, err) + } + + return d, nil +} + +// startUvm starts the service utility VM if it isn't running. +// TODO @jhowardmsft. This will change before RS3 ships as we move to a model of one +// service VM globally to a service VM per container (or offline operation). However, +// for the initial bring-up of LCOW, this is acceptable. +func (d *Driver) startUvm(context string) error { + const toolsScratchPath = "/mnt/gcs/LinuxServiceVM/scratch" + + // Nothing to do if it's already running + if d.config.Uvm != nil { + return nil + } + + // So we need to start it. 
Generate a default configuration + if err := d.config.GenerateDefault(d.options); err != nil { + return fmt.Errorf("failed to generate default gogcs configuration (%s): %s", context, err) + } + + scratchAttached := false + if _, err := os.Stat(d.svmScratchSpaceFile); err == nil { + // We have a scratch space already, so just attach it as a mapped virtual disk + logrus.Debugf("lcowdriver: startuvm: (%s) attaching pre-existing scratch", context) + mvd := hcsshim.MappedVirtualDisk{ + HostPath: d.svmScratchSpaceFile, + ContainerPath: toolsScratchPath, + CreateInUtilityVM: true, + } + d.config.MappedVirtualDisks = append(d.config.MappedVirtualDisks, mvd) + scratchAttached = true + } + + d.config.Name = "LinuxServiceVM" // TODO @jhowardmsft - This requires an in-flight platform change. Can't hard code it to this longer term + if err := d.config.Create(); err != nil { + return fmt.Errorf("failed to start utility VM (%s): %s", context, err) + } + + // Hot-add the scratch-space if not already attached + if !scratchAttached { + logrus.Debugf("lcowdriver: startuvm: (%s) creating an SVM scratch", context) + if err := d.config.CreateSandbox(d.svmScratchSpaceFile, client.DefaultSandboxSizeMB, d.cachedSandboxFile); err != nil { + return fmt.Errorf("failed to create SVM scratch VHDX (%s): %s", context, err) + } + logrus.Debugf("lcowdriver: startuvm: (%s) hot-adding an SVM scratch", context) + if err := d.config.HotAddVhd(d.svmScratchSpaceFile, toolsScratchPath); err != nil { + return fmt.Errorf("failed to hot-add %s failed: %s", d.svmScratchSpaceFile, err) + } + } + logrus.Debugf("lcowdriver: startuvm: (%s) successful", context) + return nil +} + +// terminateUvm terminates the service utility VM if its running. 
+func (d *Driver) terminateUvm(context string) error { + // Nothing to do if it's not running + if d.config.Uvm == nil { + return nil + } + + // FIXME: @jhowardmsft + // This isn't thread-safe yet, but will change anyway with the lifetime + // changes and multiple instances. Deferring that work for now. + uvm := d.config.Uvm + d.config.Uvm = nil + + if err := uvm.Terminate(); err != nil { + return fmt.Errorf("failed to terminate utility VM (%s): %s", context, err) + } + + if err := uvm.WaitTimeout(time.Duration(d.config.UvmTimeoutSeconds) * time.Second); err != nil { + return fmt.Errorf("failed waiting for utility VM to terminate (%s): %s", context, err) + } + + return nil +} + +// String returns the string representation of a driver. This should match +// the name the graph driver has been registered with. +func (d *Driver) String() string { + return "lcow" +} + +// Status returns the status of the driver. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"LCOW", ""}, + } +} + +// Exists returns true if the given id is registered with this driver. +func (d *Driver) Exists(id string) bool { + _, err := os.Lstat(d.dir(id)) + logrus.Debugf("lcowdriver: exists: id %s %t", id, err == nil) + return err == nil +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. That equates to creating a sandbox VHDx. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + logrus.Debugf("lcowdriver: createreadwrite: id %s", id) + + if err := d.startUvm("createreadwrite"); err != nil { + return err + } + + if err := d.Create(id, parent, opts); err != nil { + return err + } + + return d.config.CreateSandbox(filepath.Join(d.dir(id), sandboxFilename), client.DefaultSandboxSizeMB, d.cachedSandboxFile) +} + +// Create creates a new read-only layer with the given id. 
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + logrus.Debugf("lcowdriver: create: id %s parent: %s", id, parent) + + parentChain, err := d.getLayerChain(parent) + if err != nil { + return err + } + + var layerChain []string + if parent != "" { + if !d.Exists(parent) { + return fmt.Errorf("lcowdriver: cannot create read-only layer with missing parent %s", parent) + } + layerChain = []string{d.dir(parent)} + } + layerChain = append(layerChain, parentChain...) + + // Make sure layers are created with the correct ACL so that VMs can access them. + layerPath := d.dir(id) + logrus.Debugf("lcowdriver: create: id %s: creating layerPath %s", id, layerPath) + if err := system.MkdirAllWithACL(layerPath, 755, system.SddlNtvmAdministratorsLocalSystem); err != nil { + return err + } + + if err := d.setLayerChain(id, layerChain); err != nil { + if err2 := os.RemoveAll(layerPath); err2 != nil { + logrus.Warnf("Failed to remove layer %s: %s", layerPath, err2) + } + return err + } + logrus.Debugf("lcowdriver: createreadwrite: id %s: success", id) + + return nil +} + +// Remove unmounts and removes the dir information. +func (d *Driver) Remove(id string) error { + logrus.Debugf("lcowdriver: remove: id %s", id) + tmpID := fmt.Sprintf("%s-removing", id) + tmpLayerPath := d.dir(tmpID) + layerPath := d.dir(id) + + logrus.Debugf("lcowdriver: remove: id %s: layerPath %s", id, layerPath) + if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) { + return err + } + + if err := os.RemoveAll(tmpLayerPath); err != nil { + return err + } + + logrus.Debugf("lcowdriver: remove: id %s: layerPath %s succeeded", id, layerPath) + return nil +} + +// Get returns the rootfs path for the id. 
It is reference counted and +// effectively can be thought of as a "mount the layer into the utility +// vm if it isn't already" +func (d *Driver) Get(id, mountLabel string) (string, error) { + dir, _, _, err := d.getEx(id) + return dir, err +} + +// getEx is Get, but also returns the cache-entry and the size of the VHD +func (d *Driver) getEx(id string) (string, cacheType, int64, error) { + title := "lcowdriver: getEx" + logrus.Debugf("%s %s", title, id) + + if err := d.startUvm(fmt.Sprintf("getex %s", id)); err != nil { + logrus.Debugf("%s failed to start utility vm: %s", title, err) + return "", cacheType{}, 0, err + } + + // Work out what we are working on + vhdFilename, vhdSize, isSandbox, err := client.LayerVhdDetails(d.dir(id)) + if err != nil { + logrus.Debugf("%s failed to get LayerVhdDetails from %s: %s", title, d.dir(id), err) + return "", cacheType{}, 0, fmt.Errorf("%s failed to open layer or sandbox VHD to open in %s: %s", title, d.dir(id), err) + } + logrus.Debugf("%s %s, size %d, isSandbox %t", title, vhdFilename, vhdSize, isSandbox) + + hotAddRequired := false + d.cacheMu.Lock() + var cacheEntry cacheType + if _, ok := d.cache[id]; !ok { + // The item is not currently in the cache. + // + // Sandboxes need hot-adding in the case that there is a single global utility VM + // This will change for multiple instances with the lifetime changes. + if isSandbox { + hotAddRequired = true + } + d.cache[id] = cacheType{ + uvmPath: fmt.Sprintf("/mnt/%s", id), + refCount: 1, + isSandbox: isSandbox, + hostPath: vhdFilename, + } + } else { + // Increment the reference counter in the cache. 
+ cacheEntry = d.cache[id] + cacheEntry.refCount++ + d.cache[id] = cacheEntry + } + + cacheEntry = d.cache[id] + logrus.Debugf("%s %s: isSandbox %t, refCount %d", title, id, cacheEntry.isSandbox, cacheEntry.refCount) + d.cacheMu.Unlock() + + if hotAddRequired { + logrus.Debugf("%s %s: Hot-Adding %s", title, id, vhdFilename) + if err := d.config.HotAddVhd(vhdFilename, cacheEntry.uvmPath); err != nil { + return "", cacheType{}, 0, fmt.Errorf("%s hot add %s failed: %s", title, vhdFilename, err) + } + } + + logrus.Debugf("%s %s success. %s: %+v: size %d", title, id, d.dir(id), cacheEntry, vhdSize) + return d.dir(id), cacheEntry, vhdSize, nil +} + +// Put does the reverse of get. If there are no more references to +// the layer, it unmounts it from the utility VM. +func (d *Driver) Put(id string) error { + title := "lcowdriver: put" + logrus.Debugf("%s %s", title, id) + + if err := d.startUvm(fmt.Sprintf("put %s", id)); err != nil { + return err + } + + d.cacheMu.Lock() + // Bad-news if unmounting something that isn't in the cache. + entry, ok := d.cache[id] + if !ok { + d.cacheMu.Unlock() + return fmt.Errorf("%s possible ref-count error, or invalid id was passed to the graphdriver. 
Cannot handle id %s as it's not in the cache", title, id) + } + + // Are we just decrementing the reference count + if entry.refCount > 1 { + entry.refCount-- + d.cache[id] = entry + logrus.Debugf("%s %s: refCount decremented to %d", title, id, entry.refCount) + d.cacheMu.Unlock() + return nil + } + + // No more references, so tear it down if previously hot-added + if entry.isSandbox { + logrus.Debugf("%s %s: Hot-Removing %s", title, id, entry.hostPath) + if err := d.config.HotRemoveVhd(entry.hostPath); err != nil { + d.cacheMu.Unlock() + return fmt.Errorf("%s failed to hot-remove %s from service utility VM: %s", title, entry.hostPath, err) + } + } + + // @jhowardmsft TEMPORARY FIX WHILE WAITING FOR HOT-REMOVE TO BE FIXED IN PLATFORM + //d.terminateUvm(fmt.Sprintf("put %s", id)) + + // Remove from the cache map. + delete(d.cache, id) + d.cacheMu.Unlock() + + logrus.Debugf("%s %s: refCount 0. %s (%s) completed successfully", title, id, entry.hostPath, entry.uvmPath) + return nil +} + +// Cleanup ensures the information the driver stores is properly removed. +// We use this opportunity to cleanup any -removing folders which may be +// still left if the daemon was killed while it was removing a layer. +func (d *Driver) Cleanup() error { + title := "lcowdriver: cleanup" + logrus.Debugf(title) + + d.cacheMu.Lock() + for k, v := range d.cache { + logrus.Debugf("%s cache entry: %s: %+v", title, k, v) + if v.refCount > 0 { + logrus.Warnf("%s leaked %s: %+v", title, k, v) + } + } + d.cacheMu.Unlock() + + items, err := ioutil.ReadDir(d.homeDir) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + // Note we don't return an error below - it's possible the files + // are locked. However, next time around after the daemon exits, + // we likely will be able to to cleanup successfully. Instead we log + // warnings if there are errors. 
+ for _, item := range items { + if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") { + if err := os.RemoveAll(filepath.Join(d.homeDir, item.Name())); err != nil { + logrus.Warnf("%s failed to cleanup %s: %s", title, item.Name(), err) + } else { + logrus.Infof("%s cleaned up %s", title, item.Name()) + } + } + } + return nil +} + +// Diff takes a layer (and it's parent layer which may be null, but +// is ignored by this implementation below) and returns a reader for +// a tarstream representing the layers contents. The id could be +// a read-only "layer.vhd" or a read-write "sandbox.vhdx". The semantics +// of this function dictate that the layer is already mounted. +func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { + title := "lcowdriver: diff:" + logrus.Debugf("%s id %s", title, id) + + if err := d.startUvm(fmt.Sprintf("diff %s", id)); err != nil { + return nil, err + } + + d.cacheMu.Lock() + if _, ok := d.cache[id]; !ok { + d.cacheMu.Unlock() + return nil, fmt.Errorf("%s fail as %s is not in the cache", title, id) + } + cacheEntry := d.cache[id] + d.cacheMu.Unlock() + + // Stat to get size + fileInfo, err := os.Stat(cacheEntry.hostPath) + if err != nil { + return nil, fmt.Errorf("%s failed to stat %s: %s", title, cacheEntry.hostPath, err) + } + + // Then obtain the tar stream for it + logrus.Debugf("%s %s, size %d, isSandbox %t", title, cacheEntry.hostPath, fileInfo.Size(), cacheEntry.isSandbox) + tarReadCloser, err := d.config.VhdToTar(cacheEntry.hostPath, cacheEntry.uvmPath, cacheEntry.isSandbox, fileInfo.Size()) + if err != nil { + return nil, fmt.Errorf("%s failed to export layer to tar stream for id: %s, parent: %s : %s", title, id, parent, err) + } + logrus.Debugf("%s id %s parent %s completed successfully", title, id, parent) + return tarReadCloser, nil +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. 
The layer should not be mounted when calling +// this function. Another way of describing this is that ApplyDiff writes +// to a new layer (a VHD in LCOW) the contents of a tarstream it's given. +func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + logrus.Debugf("lcowdriver: applydiff: id %s", id) + + if err := d.startUvm(fmt.Sprintf("applydiff %s", id)); err != nil { + return 0, err + } + + return d.config.TarToVhd(filepath.Join(d.homeDir, id, "layer.vhd"), diff) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +// The layer should not be mounted when calling this function. +func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { + logrus.Debugf("lcowdriver: changes: id %s parent %s", id, parent) + // TODO @gupta-ak. Needs implementation with assistance from service VM + return nil, nil +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (d *Driver) DiffSize(id, parent string) (size int64, err error) { + logrus.Debugf("lcowdriver: diffsize: id %s", id) + // TODO @gupta-ak. Needs implementation with assistance from service VM + return 0, nil +} + +// GetMetadata returns custom driver information. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + logrus.Debugf("lcowdriver: getmetadata: id %s", id) + m := make(map[string]string) + m["dir"] = d.dir(id) + return m, nil +} + +// dir returns the absolute path to the layer. +func (d *Driver) dir(id string) string { + return filepath.Join(d.homeDir, filepath.Base(id)) +} + +// getLayerChain returns the layer chain information. 
+func (d *Driver) getLayerChain(id string) ([]string, error) { + jPath := filepath.Join(d.dir(id), "layerchain.json") + logrus.Debugf("lcowdriver: getlayerchain: id %s json %s", id, jPath) + content, err := ioutil.ReadFile(jPath) + if os.IsNotExist(err) { + return nil, nil + } else if err != nil { + return nil, fmt.Errorf("lcowdriver: getlayerchain: %s unable to read layerchain file %s: %s", id, jPath, err) + } + + var layerChain []string + err = json.Unmarshal(content, &layerChain) + if err != nil { + return nil, fmt.Errorf("lcowdriver: getlayerchain: %s failed to unmarshall layerchain file %s: %s", id, jPath, err) + } + return layerChain, nil +} + +// setLayerChain stores the layer chain information on disk. +func (d *Driver) setLayerChain(id string, chain []string) error { + content, err := json.Marshal(&chain) + if err != nil { + return fmt.Errorf("lcowdriver: setlayerchain: %s failed to marshall layerchain json: %s", id, err) + } + + jPath := filepath.Join(d.dir(id), "layerchain.json") + logrus.Debugf("lcowdriver: setlayerchain: id %s json %s", id, jPath) + err = ioutil.WriteFile(jPath, content, 0600) + if err != nil { + return fmt.Errorf("lcowdriver: setlayerchain: %s failed to write layerchain file: %s", id, err) + } + return nil +} diff --git a/fn/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go b/fn/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go index 357b5952d..7f849c96f 100644 --- a/fn/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go +++ b/fn/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go @@ -21,7 +21,8 @@ import ( "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/locker" "github.com/docker/docker/pkg/mount" - "github.com/opencontainers/runc/libcontainer/label" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/selinux/go-selinux/label" ) // This is a small wrapper over the NaiveDiffWriter that lets us have a custom @@ -339,10 
+340,7 @@ func (d *Driver) dir(id string) string { func (d *Driver) Remove(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) - if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { - return err - } - return nil + return system.EnsureRemoveAll(d.dir(id)) } // Get creates and mounts the required file system for the given id and returns the mount path. @@ -406,7 +404,7 @@ func (d *Driver) Put(id string) error { if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } - if err := syscall.Unmount(mountpoint, 0); err != nil { + if err := syscall.Unmount(mountpoint, syscall.MNT_DETACH); err != nil { logrus.Debugf("Failed to unmount %s overlay: %v", id, err) } return nil diff --git a/fn/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay.go b/fn/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay.go index 74865ba16..deea3e017 100644 --- a/fn/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay.go +++ b/fn/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay.go @@ -31,9 +31,10 @@ import ( "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/system" units "github.com/docker/go-units" - "github.com/opencontainers/runc/libcontainer/label" + "github.com/opencontainers/selinux/go-selinux/label" ) var ( @@ -149,9 +150,19 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs switch fsMagic { - case graphdriver.FsMagicBtrfs, graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: + case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: logrus.Errorf("'overlay2' is not supported over %s", backingFs) return nil, graphdriver.ErrIncompatibleFS + case graphdriver.FsMagicBtrfs: + // 
Support for OverlayFS on BTRFS was added in kernel 4.7 + // See https://btrfs.wiki.kernel.org/index.php/Changelog + if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 7, Minor: 0}) < 0 { + if !opts.overrideKernelCheck { + logrus.Errorf("'overlay2' requires kernel 4.7 to use on %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + } + logrus.Warn("Using pre-4.7.0 kernel for overlay2 on btrfs, may require kernel update") + } } rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) @@ -183,6 +194,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), supportsDType: supportsDType, locker: locker.New(), + options: *opts, } d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) @@ -191,7 +203,12 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap // Try to enable project quota support over xfs. if d.quotaCtl, err = quota.NewControl(home); err == nil { projectQuotaSupported = true + } else if opts.quota.Size > 0 { + return nil, fmt.Errorf("Storage option overlay2.size not supported. Filesystem does not support Project Quota: %v", err) } + } else if opts.quota.Size > 0 { + // if xfs is not the backing fs then error out if the storage-opt overlay2.size is used. + return nil, fmt.Errorf("Storage Option overlay2.size only supported for backingFS XFS. 
Found %v", backingFs) } logrus.Debugf("backingFs=%s, projectQuotaSupported=%v", backingFs, projectQuotaSupported) @@ -213,9 +230,14 @@ func parseOptions(options []string) (*overlayOptions, error) { if err != nil { return nil, err } - + case "overlay2.size": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + o.quota.Size = uint64(size) default: - return nil, fmt.Errorf("overlay2: Unknown option %s\n", key) + return nil, fmt.Errorf("overlay2: unknown option %s", key) } } return o, nil @@ -301,17 +323,38 @@ func (d *Driver) Cleanup() error { // CreateReadWrite creates a layer that is writable for use as a container // file system. func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - return d.Create(id, parent, opts) + if opts != nil && len(opts.StorageOpt) != 0 && !projectQuotaSupported { + return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") + } + + if opts == nil { + opts = &graphdriver.CreateOpts{ + StorageOpt: map[string]string{}, + } + } + + if _, ok := opts.StorageOpt["size"]; !ok { + if opts.StorageOpt == nil { + opts.StorageOpt = map[string]string{} + } + opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) + } + + return d.create(id, parent, opts) } // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. // The parent filesystem is used to configure these directories for the overlay. 
func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { - - if opts != nil && len(opts.StorageOpt) != 0 && !projectQuotaSupported { - return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") + if opts != nil && len(opts.StorageOpt) != 0 { + if _, ok := opts.StorageOpt["size"]; ok { + return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") + } } + return d.create(id, parent, opts) +} +func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { dir := d.dir(id) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) @@ -464,7 +507,7 @@ func (d *Driver) Remove(id string) error { } } - if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { + if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { return err } return nil @@ -576,7 +619,7 @@ func (d *Driver) Put(id string) error { if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } - if err := syscall.Unmount(mountpoint, 0); err != nil { + if err := syscall.Unmount(mountpoint, syscall.MNT_DETACH); err != nil { logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) } return nil diff --git a/fn/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota.go b/fn/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota.go index e408d5f90..48ade1034 100644 --- a/fn/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota.go +++ b/fn/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota.go @@ -328,7 +328,7 @@ func makeBackingFsDev(home string) (string, error) { } backingFsBlockDev := path.Join(home, "backingFsBlockDev") - // Re-create just in case comeone copied the home directory over to a new device + // Re-create just in case someone copied the home directory over to a new device syscall.Unlink(backingFsBlockDev) stat := fileinfo.Sys().(*syscall.Stat_t) if err := 
syscall.Mknod(backingFsBlockDev, syscall.S_IFBLK|0600, int(stat.Dev)); err != nil { diff --git a/fn/vendor/github.com/docker/docker/daemon/graphdriver/register/register_windows.go b/fn/vendor/github.com/docker/docker/daemon/graphdriver/register/register_windows.go index efaa5005e..5bb1fd62a 100644 --- a/fn/vendor/github.com/docker/docker/daemon/graphdriver/register/register_windows.go +++ b/fn/vendor/github.com/docker/docker/daemon/graphdriver/register/register_windows.go @@ -1,6 +1,7 @@ package register import ( - // register the windows graph driver + // register the windows graph drivers + _ "github.com/docker/docker/daemon/graphdriver/lcow" _ "github.com/docker/docker/daemon/graphdriver/windows" ) diff --git a/fn/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go b/fn/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go index 8832d1153..15a4de360 100644 --- a/fn/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go +++ b/fn/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go @@ -8,13 +8,13 @@ import ( "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/idtools" - - "github.com/opencontainers/runc/libcontainer/label" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/selinux/go-selinux/label" ) var ( // CopyWithTar defines the copy method to use. - CopyWithTar = chrootarchive.CopyWithTar + CopyWithTar = chrootarchive.NewArchiver(nil).CopyWithTar ) func init() { @@ -25,15 +25,11 @@ func init() { // This sets the home directory for the driver and returns NaiveDiffDriver. 
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { d := &Driver{ - home: home, - uidMaps: uidMaps, - gidMaps: gidMaps, + home: home, + idMappings: idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), } - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return nil, err - } - if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + rootIDs := d.idMappings.RootPair() + if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil { return nil, err } return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil @@ -44,9 +40,8 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap // In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. // Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver type Driver struct { - home string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap + home string + idMappings *idtools.IDMappings } func (d *Driver) String() string { @@ -81,14 +76,11 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { } dir := d.dir(id) - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { + rootIDs := d.idMappings.RootPair() + if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0700, rootIDs); err != nil { return err } - if err := idtools.MkdirAllAs(filepath.Dir(dir), 0700, rootUID, rootGID); err != nil { - return err - } - if err := idtools.MkdirAs(dir, 0755, rootUID, rootGID); err != nil { + if err := idtools.MkdirAndChown(dir, 0755, rootIDs); err != nil { return err } labelOpts := []string{"level:s0"} @@ -102,10 +94,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { if err != nil { return fmt.Errorf("%s: %s", parent, err) } - if err := CopyWithTar(parentDir, dir); err != nil { - return err - } - return nil + return 
CopyWithTar(parentDir, dir) } func (d *Driver) dir(id string) string { @@ -114,7 +103,7 @@ func (d *Driver) dir(id string) string { // Remove deletes the content from the directory for a given id. func (d *Driver) Remove(id string) error { - if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { + if err := system.EnsureRemoveAll(d.dir(id)); err != nil { return err } return nil diff --git a/fn/vendor/github.com/docker/docker/daemon/graphdriver/windows/windows.go b/fn/vendor/github.com/docker/docker/daemon/graphdriver/windows/windows.go index 3079cbc53..6b18c8c24 100644 --- a/fn/vendor/github.com/docker/docker/daemon/graphdriver/windows/windows.go +++ b/fn/vendor/github.com/docker/docker/daemon/graphdriver/windows/windows.go @@ -94,6 +94,10 @@ func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home) } + if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil { + return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err) + } + d := &Driver{ info: hcsshim.DriverInfo{ HomeDir: home, @@ -149,8 +153,19 @@ func (d *Driver) Status() [][2]string { } } +// panicIfUsedByLcow does exactly what it says. +// TODO @jhowardmsft - this is a temporary measure for the bring-up of +// Linux containers on Windows. It is a failsafe to ensure that the right +// graphdriver is used. +func panicIfUsedByLcow() { + if system.LCOWSupported() { + panic("inconsistency - windowsfilter graphdriver should not be used when in LCOW mode") + } +} + // Exists returns true if the given id is registered with this driver. func (d *Driver) Exists(id string) bool { + panicIfUsedByLcow() rID, err := d.resolveID(id) if err != nil { return false @@ -165,6 +180,7 @@ func (d *Driver) Exists(id string) bool { // CreateReadWrite creates a layer that is writable for use as a container // file system. 
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + panicIfUsedByLcow() if opts != nil { return d.create(id, parent, opts.MountLabel, false, opts.StorageOpt) } @@ -173,6 +189,7 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts // Create creates a new read-only layer with the given id. func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + panicIfUsedByLcow() if opts != nil { return d.create(id, parent, opts.MountLabel, true, opts.StorageOpt) } @@ -256,6 +273,7 @@ func (d *Driver) dir(id string) string { // Remove unmounts and removes the dir information. func (d *Driver) Remove(id string) error { + panicIfUsedByLcow() rID, err := d.resolveID(id) if err != nil { return err @@ -282,7 +300,7 @@ func (d *Driver) Remove(id string) error { // // TODO @jhowardmsft - For RS3, we can remove the retries. Also consider // using platform APIs (if available) to get this more succinctly. Also - // consider enlighting the Remove() interface to have context of why + // consider enhancing the Remove() interface to have context of why // the remove is being called - that could improve efficiency by not // enumerating compute systems during a remove of a container as it's // not required. @@ -337,6 +355,7 @@ func (d *Driver) Remove(id string) error { // Get returns the rootfs path for the id. This will mount the dir at its given path. func (d *Driver) Get(id, mountLabel string) (string, error) { + panicIfUsedByLcow() logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel) var dir string @@ -395,6 +414,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { // Put adds a new layer to the driver. 
func (d *Driver) Put(id string) error { + panicIfUsedByLcow() logrus.Debugf("WindowsGraphDriver Put() id %s", id) rID, err := d.resolveID(id) @@ -424,9 +444,11 @@ func (d *Driver) Put(id string) error { // We use this opportunity to cleanup any -removing folders which may be // still left if the daemon was killed while it was removing a layer. func (d *Driver) Cleanup() error { - items, err := ioutil.ReadDir(d.info.HomeDir) if err != nil { + if os.IsNotExist(err) { + return nil + } return err } @@ -451,6 +473,7 @@ func (d *Driver) Cleanup() error { // layer and its parent layer which may be "". // The layer should be mounted when calling this function func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) { + panicIfUsedByLcow() rID, err := d.resolveID(id) if err != nil { return @@ -487,6 +510,7 @@ func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) { // and its parent layer. If parent is "", then all changes will be ADD changes. // The layer should not be mounted when calling this function. func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { + panicIfUsedByLcow() rID, err := d.resolveID(id) if err != nil { return nil, err @@ -542,6 +566,7 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { // new layer in bytes. // The layer should not be mounted when calling this function func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + panicIfUsedByLcow() var layerChain []string if parent != "" { rPId, err := d.resolveID(parent) @@ -576,6 +601,7 @@ func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. 
func (d *Driver) DiffSize(id, parent string) (size int64, err error) { + panicIfUsedByLcow() rPId, err := d.resolveID(parent) if err != nil { return @@ -597,6 +623,7 @@ func (d *Driver) DiffSize(id, parent string) (size int64, err error) { // GetMetadata returns custom driver information. func (d *Driver) GetMetadata(id string) (map[string]string, error) { + panicIfUsedByLcow() m := make(map[string]string) m["dir"] = d.dir(id) return m, nil @@ -899,6 +926,7 @@ func (fg *fileGetCloserWithBackupPrivileges) Close() error { // DiffGetter returns a FileGetCloser that can read files from the directory that // contains files for the layer differences. Used for direct access for tar-split. func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + panicIfUsedByLcow() id, err := d.resolveID(id) if err != nil { return nil, err diff --git a/fn/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go b/fn/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go index 9ad473363..bc2b4192a 100644 --- a/fn/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go +++ b/fn/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go @@ -19,7 +19,7 @@ import ( "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/parsers" zfs "github.com/mistifyio/go-zfs" - "github.com/opencontainers/runc/libcontainer/label" + "github.com/opencontainers/selinux/go-selinux/label" ) type zfsOptions struct { diff --git a/fn/vendor/github.com/docker/docker/daemon/health.go b/fn/vendor/github.com/docker/docker/daemon/health.go index caa8db844..61b531484 100644 --- a/fn/vendor/github.com/docker/docker/daemon/health.go +++ b/fn/vendor/github.com/docker/docker/daemon/health.go @@ -64,31 +64,35 @@ type cmdProbe struct { // exec the healthcheck command in the container. 
// Returns the exit code and probe output (if any) -func (p *cmdProbe) run(ctx context.Context, d *Daemon, container *container.Container) (*types.HealthcheckResult, error) { - - cmdSlice := strslice.StrSlice(container.Config.Healthcheck.Test)[1:] +func (p *cmdProbe) run(ctx context.Context, d *Daemon, cntr *container.Container) (*types.HealthcheckResult, error) { + cmdSlice := strslice.StrSlice(cntr.Config.Healthcheck.Test)[1:] if p.shell { - cmdSlice = append(getShell(container.Config), cmdSlice...) + cmdSlice = append(getShell(cntr.Config), cmdSlice...) } entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmdSlice) execConfig := exec.NewConfig() execConfig.OpenStdin = false execConfig.OpenStdout = true execConfig.OpenStderr = true - execConfig.ContainerID = container.ID + execConfig.ContainerID = cntr.ID execConfig.DetachKeys = []byte{} execConfig.Entrypoint = entrypoint execConfig.Args = args execConfig.Tty = false execConfig.Privileged = false - execConfig.User = container.Config.User - execConfig.Env = container.Config.Env + execConfig.User = cntr.Config.User - d.registerExecCommand(container, execConfig) - d.LogContainerEvent(container, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) + linkedEnv, err := d.setupLinkedContainers(cntr) + if err != nil { + return nil, err + } + execConfig.Env = container.ReplaceOrAppendEnvValues(cntr.CreateDaemonEnvironment(execConfig.Tty, linkedEnv), execConfig.Env) + + d.registerExecCommand(cntr, execConfig) + d.LogContainerEvent(cntr, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) output := &limitedBuffer{} - err := d.ContainerExecStart(ctx, execConfig.ID, nil, output, output) + err = d.ContainerExecStart(ctx, execConfig.ID, nil, output, output) if err != nil { return nil, err } @@ -97,7 +101,7 @@ func (p *cmdProbe) run(ctx context.Context, d *Daemon, container *container.Cont return nil, err } if info.ExitCode == nil { - return nil, 
fmt.Errorf("Healthcheck for container %s has no exit code!", container.ID) + return nil, fmt.Errorf("Healthcheck for container %s has no exit code!", cntr.ID) } // Note: Go's json package will handle invalid UTF-8 for us out := output.String() @@ -163,6 +167,13 @@ func handleProbeResult(d *Daemon, c *container.Container, result *types.Healthch // Else we're starting or healthy. Stay in that state. } + // replicate Health status changes + if err := c.CheckpointTo(d.containersReplica); err != nil { + // queries will be inconsistent until the next probe runs or other state mutations + // checkpoint the container + logrus.Errorf("Error replicating health state for container %s: %v", c.ID, err) + } + if oldStatus != h.Status { d.LogContainerEvent(c, "health_status: "+h.Status) } @@ -182,7 +193,7 @@ func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe) logrus.Debugf("Running health check for container %s ...", c.ID) startTime := time.Now() ctx, cancelProbe := context.WithTimeout(context.Background(), probeTimeout) - results := make(chan *types.HealthcheckResult) + results := make(chan *types.HealthcheckResult, 1) go func() { healthChecksCounter.Inc() result, err := probe.run(ctx, d, c) @@ -205,8 +216,10 @@ func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe) select { case <-stop: logrus.Debugf("Stop healthcheck monitoring for container %s (received while probing)", c.ID) - // Stop timeout and kill probe, but don't wait for probe to exit. cancelProbe() + // Wait for probe to exit (it might take a while to respond to the TERM + // signal and we don't want dying probes to pile up). 
+ <-results return case result := <-results: handleProbeResult(d, c, result, stop) diff --git a/fn/vendor/github.com/docker/docker/daemon/health_test.go b/fn/vendor/github.com/docker/docker/daemon/health_test.go index fbbdf06c5..4fd89140d 100644 --- a/fn/vendor/github.com/docker/docker/daemon/health_test.go +++ b/fn/vendor/github.com/docker/docker/daemon/health_test.go @@ -19,19 +19,23 @@ func reset(c *container.Container) { func TestNoneHealthcheck(t *testing.T) { c := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "container_id", - Name: "container_name", - Config: &containertypes.Config{ - Image: "image_name", - Healthcheck: &containertypes.HealthConfig{ - Test: []string{"NONE"}, - }, + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Image: "image_name", + Healthcheck: &containertypes.HealthConfig{ + Test: []string{"NONE"}, }, - State: &container.State{}, }, + State: &container.State{}, + } + store, err := container.NewViewDB() + if err != nil { + t.Fatal(err) + } + daemon := &Daemon{ + containersReplica: store, } - daemon := &Daemon{} daemon.initHealthMonitor(c) if c.State.Health != nil { @@ -58,16 +62,21 @@ func TestHealthStates(t *testing.T) { } c := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "container_id", - Name: "container_name", - Config: &containertypes.Config{ - Image: "image_name", - }, + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Image: "image_name", }, } + + store, err := container.NewViewDB() + if err != nil { + t.Fatal(err) + } + daemon := &Daemon{ - EventsService: e, + EventsService: e, + containersReplica: store, } c.Config.Healthcheck = &containertypes.HealthConfig{ diff --git a/fn/vendor/github.com/docker/docker/daemon/image.go b/fn/vendor/github.com/docker/docker/daemon/image.go index 43ee483ff..a51049dbb 100644 --- a/fn/vendor/github.com/docker/docker/daemon/image.go +++ 
b/fn/vendor/github.com/docker/docker/daemon/image.go @@ -4,7 +4,6 @@ import ( "fmt" "github.com/docker/distribution/reference" - "github.com/docker/docker/builder" "github.com/docker/docker/image" "github.com/docker/docker/pkg/stringid" ) @@ -22,37 +21,43 @@ func (e ErrImageDoesNotExist) Error() string { return fmt.Sprintf("No such image: %s", reference.FamiliarString(ref)) } -// GetImageID returns an image ID corresponding to the image referred to by +// GetImageIDAndPlatform returns an image ID and platform corresponding to the image referred to by // refOrID. -func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) { +func (daemon *Daemon) GetImageIDAndPlatform(refOrID string) (image.ID, string, error) { ref, err := reference.ParseAnyReference(refOrID) if err != nil { - return "", err + return "", "", err } namedRef, ok := ref.(reference.Named) if !ok { digested, ok := ref.(reference.Digested) if !ok { - return "", ErrImageDoesNotExist{ref} + return "", "", ErrImageDoesNotExist{ref} } id := image.IDFromDigest(digested.Digest()) - if _, err := daemon.imageStore.Get(id); err != nil { - return "", ErrImageDoesNotExist{ref} + for platform := range daemon.stores { + if _, err = daemon.stores[platform].imageStore.Get(id); err == nil { + return id, platform, nil + } } - return id, nil + return "", "", ErrImageDoesNotExist{ref} } - if id, err := daemon.referenceStore.Get(namedRef); err == nil { - return image.IDFromDigest(id), nil + for platform := range daemon.stores { + if id, err := daemon.stores[platform].referenceStore.Get(namedRef); err == nil { + return image.IDFromDigest(id), platform, nil + } } // deprecated: repo:shortid https://github.com/docker/docker/pull/799 if tagged, ok := namedRef.(reference.Tagged); ok { if tag := tagged.Tag(); stringid.IsShortID(stringid.TruncateID(tag)) { - if id, err := daemon.imageStore.Search(tag); err == nil { - for _, storeRef := range daemon.referenceStore.References(id.Digest()) { - if storeRef.Name() == 
namedRef.Name() { - return id, nil + for platform := range daemon.stores { + if id, err := daemon.stores[platform].imageStore.Search(tag); err == nil { + for _, storeRef := range daemon.stores[platform].referenceStore.References(id.Digest()) { + if storeRef.Name() == namedRef.Name() { + return id, platform, nil + } } } } @@ -60,27 +65,20 @@ func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) { } // Search based on ID - if id, err := daemon.imageStore.Search(refOrID); err == nil { - return id, nil + for platform := range daemon.stores { + if id, err := daemon.stores[platform].imageStore.Search(refOrID); err == nil { + return id, platform, nil + } } - return "", ErrImageDoesNotExist{ref} + return "", "", ErrImageDoesNotExist{ref} } // GetImage returns an image corresponding to the image referred to by refOrID. func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) { - imgID, err := daemon.GetImageID(refOrID) + imgID, platform, err := daemon.GetImageIDAndPlatform(refOrID) if err != nil { return nil, err } - return daemon.imageStore.Get(imgID) -} - -// GetImageOnBuild looks up a Docker image referenced by `name`. 
-func (daemon *Daemon) GetImageOnBuild(name string) (builder.Image, error) { - img, err := daemon.GetImage(name) - if err != nil { - return nil, err - } - return img, nil + return daemon.stores[platform].imageStore.Get(imgID) } diff --git a/fn/vendor/github.com/docker/docker/daemon/image_delete.go b/fn/vendor/github.com/docker/docker/daemon/image_delete.go index b7dbd249e..4e228594b 100644 --- a/fn/vendor/github.com/docker/docker/daemon/image_delete.go +++ b/fn/vendor/github.com/docker/docker/daemon/image_delete.go @@ -65,12 +65,12 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I start := time.Now() records := []types.ImageDeleteResponseItem{} - imgID, err := daemon.GetImageID(imageRef) + imgID, platform, err := daemon.GetImageIDAndPlatform(imageRef) if err != nil { return nil, daemon.imageNotExistToErrcode(err) } - repoRefs := daemon.referenceStore.References(imgID.Digest()) + repoRefs := daemon.stores[platform].referenceStore.References(imgID.Digest()) var removedRepositoryRef bool if !isImageIDPrefix(imgID.String(), imageRef) { @@ -94,7 +94,7 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I return nil, err } - parsedRef, err = daemon.removeImageRef(parsedRef) + parsedRef, err = daemon.removeImageRef(platform, parsedRef) if err != nil { return nil, err } @@ -104,7 +104,7 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") records = append(records, untaggedRecord) - repoRefs = daemon.referenceStore.References(imgID.Digest()) + repoRefs = daemon.stores[platform].referenceStore.References(imgID.Digest()) // If a tag reference was removed and the only remaining // references to the same repository are digest references, @@ -122,7 +122,7 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I remainingRefs := []reference.Named{} for _, repoRef := range repoRefs { if _, 
repoRefIsCanonical := repoRef.(reference.Canonical); repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { - if _, err := daemon.removeImageRef(repoRef); err != nil { + if _, err := daemon.removeImageRef(platform, repoRef); err != nil { return records, err } @@ -152,12 +152,12 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I if !force { c |= conflictSoft &^ conflictActiveReference } - if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { + if conflict := daemon.checkImageDeleteConflict(imgID, platform, c); conflict != nil { return nil, conflict } for _, repoRef := range repoRefs { - parsedRef, err := daemon.removeImageRef(repoRef) + parsedRef, err := daemon.removeImageRef(platform, repoRef) if err != nil { return nil, err } @@ -170,7 +170,7 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I } } - if err := daemon.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef); err != nil { + if err := daemon.imageDeleteHelper(imgID, platform, &records, force, prune, removedRepositoryRef); err != nil { return nil, err } @@ -231,13 +231,13 @@ func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *container.Contai // repositoryRef must not be an image ID but a repository name followed by an // optional tag or digest reference. If tag or digest is omitted, the default // tag is used. Returns the resolved image reference and an error. -func (daemon *Daemon) removeImageRef(ref reference.Named) (reference.Named, error) { +func (daemon *Daemon) removeImageRef(platform string, ref reference.Named) (reference.Named, error) { ref = reference.TagNameOnly(ref) // Ignore the boolean value returned, as far as we're concerned, this // is an idempotent operation and it's okay if the reference didn't // exist in the first place. 
- _, err := daemon.referenceStore.Delete(ref) + _, err := daemon.stores[platform].referenceStore.Delete(ref) return ref, err } @@ -247,11 +247,11 @@ func (daemon *Daemon) removeImageRef(ref reference.Named) (reference.Named, erro // on the first encountered error. Removed references are logged to this // daemon's event service. An "Untagged" types.ImageDeleteResponseItem is added to the // given list of records. -func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDeleteResponseItem) error { - imageRefs := daemon.referenceStore.References(imgID.Digest()) +func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, platform string, records *[]types.ImageDeleteResponseItem) error { + imageRefs := daemon.stores[platform].referenceStore.References(imgID.Digest()) for _, imageRef := range imageRefs { - parsedRef, err := daemon.removeImageRef(imageRef) + parsedRef, err := daemon.removeImageRef(platform, imageRef) if err != nil { return err } @@ -296,15 +296,15 @@ func (idc *imageDeleteConflict) Error() string { // conflict is encountered, it will be returned immediately without deleting // the image. If quiet is true, any encountered conflicts will be ignored and // the function will return nil immediately without deleting the image. -func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDeleteResponseItem, force, prune, quiet bool) error { +func (daemon *Daemon) imageDeleteHelper(imgID image.ID, platform string, records *[]types.ImageDeleteResponseItem, force, prune, quiet bool) error { // First, determine if this image has any conflicts. Ignore soft conflicts // if force is true. 
c := conflictHard if !force { c |= conflictSoft } - if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { - if quiet && (!daemon.imageIsDangling(imgID) || conflict.used) { + if conflict := daemon.checkImageDeleteConflict(imgID, platform, c); conflict != nil { + if quiet && (!daemon.imageIsDangling(imgID, platform) || conflict.used) { // Ignore conflicts UNLESS the image is "dangling" or not being used in // which case we want the user to know. return nil @@ -315,18 +315,18 @@ func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDe return conflict } - parent, err := daemon.imageStore.GetParent(imgID) + parent, err := daemon.stores[platform].imageStore.GetParent(imgID) if err != nil { // There may be no parent parent = "" } // Delete all repository tag/digest references to this image. - if err := daemon.removeAllReferencesToImageID(imgID, records); err != nil { + if err := daemon.removeAllReferencesToImageID(imgID, platform, records); err != nil { return err } - removedLayers, err := daemon.imageStore.Delete(imgID) + removedLayers, err := daemon.stores[platform].imageStore.Delete(imgID) if err != nil { return err } @@ -346,7 +346,7 @@ func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDe // either running or stopped). // Do not force prunings, but do so quietly (stopping on any encountered // conflicts). - return daemon.imageDeleteHelper(parent, records, false, true, true) + return daemon.imageDeleteHelper(parent, platform, records, false, true, true) } // checkImageDeleteConflict determines whether there are any conflicts @@ -355,9 +355,9 @@ func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDe // using the image. A soft conflict is any tags/digest referencing the given // image or any stopped container using the image. If ignoreSoftConflicts is // true, this function will not check for soft conflict conditions. 
-func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict { +func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, platform string, mask conflictType) *imageDeleteConflict { // Check if the image has any descendant images. - if mask&conflictDependentChild != 0 && len(daemon.imageStore.Children(imgID)) > 0 { + if mask&conflictDependentChild != 0 && len(daemon.stores[platform].imageStore.Children(imgID)) > 0 { return &imageDeleteConflict{ hard: true, imgID: imgID, @@ -381,7 +381,7 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType } // Check if any repository tags/digest reference this image. - if mask&conflictActiveReference != 0 && len(daemon.referenceStore.References(imgID.Digest())) > 0 { + if mask&conflictActiveReference != 0 && len(daemon.stores[platform].referenceStore.References(imgID.Digest())) > 0 { return &imageDeleteConflict{ imgID: imgID, message: "image is referenced in multiple repositories", @@ -408,6 +408,6 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType // imageIsDangling returns whether the given image is "dangling" which means // that there are no repository references to the given image and it has no // child images. 
-func (daemon *Daemon) imageIsDangling(imgID image.ID) bool { - return !(len(daemon.referenceStore.References(imgID.Digest())) > 0 || len(daemon.imageStore.Children(imgID)) > 0) +func (daemon *Daemon) imageIsDangling(imgID image.ID, platform string) bool { + return !(len(daemon.stores[platform].referenceStore.References(imgID.Digest())) > 0 || len(daemon.stores[platform].imageStore.Children(imgID)) > 0) } diff --git a/fn/vendor/github.com/docker/docker/daemon/image_exporter.go b/fn/vendor/github.com/docker/docker/daemon/image_exporter.go index 95d1d3dcd..a7b0be64c 100644 --- a/fn/vendor/github.com/docker/docker/daemon/image_exporter.go +++ b/fn/vendor/github.com/docker/docker/daemon/image_exporter.go @@ -2,8 +2,10 @@ package daemon import ( "io" + "runtime" "github.com/docker/docker/image/tarexport" + "github.com/docker/docker/pkg/system" ) // ExportImage exports a list of images to the given output stream. The @@ -12,7 +14,12 @@ import ( // the same tag are exported. names is the set of tags to export, and // outStream is the writer which the images are written to. func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error { - imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon) + // TODO @jhowardmsft LCOW. This will need revisiting later. + platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + imageExporter := tarexport.NewTarExporter(daemon.stores[platform].imageStore, daemon.stores[platform].layerStore, daemon.stores[platform].referenceStore, daemon) return imageExporter.Save(names, outStream) } @@ -20,6 +27,11 @@ func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error { // complement of ImageExport. The input stream is an uncompressed tar // ball containing images and metadata. 
func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { - imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon) + // TODO @jhowardmsft LCOW. This will need revisiting later. + platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + imageExporter := tarexport.NewTarExporter(daemon.stores[platform].imageStore, daemon.stores[platform].layerStore, daemon.stores[platform].referenceStore, daemon) return imageExporter.Load(inTar, outStream, quiet) } diff --git a/fn/vendor/github.com/docker/docker/daemon/image_history.go b/fn/vendor/github.com/docker/docker/daemon/image_history.go index b763c86c0..c9e81554e 100644 --- a/fn/vendor/github.com/docker/docker/daemon/image_history.go +++ b/fn/vendor/github.com/docker/docker/daemon/image_history.go @@ -2,6 +2,7 @@ package daemon import ( "fmt" + "runtime" "time" "github.com/docker/distribution/reference" @@ -18,6 +19,12 @@ func (daemon *Daemon) ImageHistory(name string) ([]*image.HistoryResponseItem, e return nil, err } + // If the image OS isn't set, assume it's the host OS + platform := img.OS + if platform == "" { + platform = runtime.GOOS + } + history := []*image.HistoryResponseItem{} layerCounter := 0 @@ -33,12 +40,12 @@ func (daemon *Daemon) ImageHistory(name string) ([]*image.HistoryResponseItem, e } rootFS.Append(img.RootFS.DiffIDs[layerCounter]) - l, err := daemon.layerStore.Get(rootFS.ChainID()) + l, err := daemon.stores[platform].layerStore.Get(rootFS.ChainID()) if err != nil { return nil, err } layerSize, err = l.DiffSize() - layer.ReleaseAndLog(daemon.layerStore, l) + layer.ReleaseAndLog(daemon.stores[platform].layerStore, l) if err != nil { return nil, err } @@ -62,7 +69,7 @@ func (daemon *Daemon) ImageHistory(name string) ([]*image.HistoryResponseItem, e h.ID = id.String() var tags []string - for _, r := range daemon.referenceStore.References(id.Digest()) { + for _, r := range 
daemon.stores[platform].referenceStore.References(id.Digest()) { if _, ok := r.(reference.NamedTagged); ok { tags = append(tags, reference.FamiliarString(r)) } diff --git a/fn/vendor/github.com/docker/docker/daemon/image_inspect.go b/fn/vendor/github.com/docker/docker/daemon/image_inspect.go index 267a41946..3baf265da 100644 --- a/fn/vendor/github.com/docker/docker/daemon/image_inspect.go +++ b/fn/vendor/github.com/docker/docker/daemon/image_inspect.go @@ -1,6 +1,7 @@ package daemon import ( + "runtime" "time" "github.com/docker/distribution/reference" @@ -17,7 +18,13 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) { return nil, errors.Wrapf(err, "no such image: %s", name) } - refs := daemon.referenceStore.References(img.ID().Digest()) + // If the image OS isn't set, assume it's the host OS + platform := img.OS + if platform == "" { + platform = runtime.GOOS + } + + refs := daemon.stores[platform].referenceStore.References(img.ID().Digest()) repoTags := []string{} repoDigests := []string{} for _, ref := range refs { @@ -33,11 +40,11 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) { var layerMetadata map[string]string layerID := img.RootFS.ChainID() if layerID != "" { - l, err := daemon.layerStore.Get(layerID) + l, err := daemon.stores[platform].layerStore.Get(layerID) if err != nil { return nil, err } - defer layer.ReleaseAndLog(daemon.layerStore, l) + defer layer.ReleaseAndLog(daemon.stores[platform].layerStore, l) size, err = l.Size() if err != nil { return nil, err @@ -54,6 +61,11 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) { comment = img.History[len(img.History)-1].Comment } + lastUpdated, err := daemon.stores[platform].imageStore.GetLastUpdated(img.ID()) + if err != nil { + return nil, err + } + imageInspect := &types.ImageInspect{ ID: img.ID().String(), RepoTags: repoTags, @@ -67,15 +79,17 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, 
error) { Author: img.Author, Config: img.Config, Architecture: img.Architecture, - Os: img.OS, + Os: platform, OsVersion: img.OSVersion, Size: size, VirtualSize: size, // TODO: field unused, deprecate RootFS: rootFSToAPIType(img.RootFS), + Metadata: types.ImageMetadata{ + LastTagTime: lastUpdated, + }, } - imageInspect.GraphDriver.Name = daemon.GraphDriverName() - + imageInspect.GraphDriver.Name = daemon.GraphDriverName(platform) imageInspect.GraphDriver.Data = layerMetadata return imageInspect, nil diff --git a/fn/vendor/github.com/docker/docker/daemon/image_pull.go b/fn/vendor/github.com/docker/docker/daemon/image_pull.go index 5cbd7ba42..abc81ec67 100644 --- a/fn/vendor/github.com/docker/docker/daemon/image_pull.go +++ b/fn/vendor/github.com/docker/docker/daemon/image_pull.go @@ -2,12 +2,12 @@ package daemon import ( "io" + "runtime" "strings" dist "github.com/docker/distribution" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" - "github.com/docker/docker/builder" "github.com/docker/docker/distribution" progressutils "github.com/docker/docker/distribution/utils" "github.com/docker/docker/pkg/progress" @@ -18,7 +18,7 @@ import ( // PullImage initiates a pull operation. image is the repository name to pull, and // tag may be either empty, or indicate a specific tag to pull. -func (daemon *Daemon) PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { +func (daemon *Daemon) PullImage(ctx context.Context, image, tag, platform string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { // Special case: "pull -a" may send an image name with a // trailing :. This is ugly, but let's not break API // compatibility. 
@@ -43,39 +43,10 @@ func (daemon *Daemon) PullImage(ctx context.Context, image, tag string, metaHead } } - return daemon.pullImageWithReference(ctx, ref, metaHeaders, authConfig, outStream) + return daemon.pullImageWithReference(ctx, ref, platform, metaHeaders, authConfig, outStream) } -// PullOnBuild tells Docker to pull image referenced by `name`. -func (daemon *Daemon) PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (builder.Image, error) { - ref, err := reference.ParseNormalizedNamed(name) - if err != nil { - return nil, err - } - ref = reference.TagNameOnly(ref) - - pullRegistryAuth := &types.AuthConfig{} - if len(authConfigs) > 0 { - // The request came with a full auth config file, we prefer to use that - repoInfo, err := daemon.RegistryService.ResolveRepository(ref) - if err != nil { - return nil, err - } - - resolvedConfig := registry.ResolveAuthConfig( - authConfigs, - repoInfo.Index, - ) - pullRegistryAuth = &resolvedConfig - } - - if err := daemon.pullImageWithReference(ctx, ref, nil, pullRegistryAuth, output); err != nil { - return nil, err - } - return daemon.GetImage(name) -} - -func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { +func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.Named, platform string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { // Include a buffer so that slow client connections don't affect // transfer performance. progressChan := make(chan progress.Progress, 100) @@ -89,6 +60,11 @@ func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference. close(writesDone) }() + // Default to the host OS platform in case it hasn't been populated with an explicit value. 
+ if platform == "" { + platform = runtime.GOOS + } + imagePullConfig := &distribution.ImagePullConfig{ Config: distribution.Config{ MetaHeaders: metaHeaders, @@ -96,12 +72,13 @@ func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference. ProgressOutput: progress.ChanOutput(progressChan), RegistryService: daemon.RegistryService, ImageEventLogger: daemon.LogImageEvent, - MetadataStore: daemon.distributionMetadataStore, - ImageStore: distribution.NewImageConfigStoreFromStore(daemon.imageStore), - ReferenceStore: daemon.referenceStore, + MetadataStore: daemon.stores[platform].distributionMetadataStore, + ImageStore: distribution.NewImageConfigStoreFromStore(daemon.stores[platform].imageStore), + ReferenceStore: daemon.stores[platform].referenceStore, }, DownloadManager: daemon.downloadManager, Schema2Types: distribution.ImageTypes, + Platform: platform, } err := distribution.Pull(ctx, ref, imagePullConfig) @@ -111,7 +88,7 @@ func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference. } // GetRepository returns a repository from the registry. 
-func (daemon *Daemon) GetRepository(ctx context.Context, ref reference.NamedTagged, authConfig *types.AuthConfig) (dist.Repository, bool, error) { +func (daemon *Daemon) GetRepository(ctx context.Context, ref reference.Named, authConfig *types.AuthConfig) (dist.Repository, bool, error) { // get repository info repoInfo, err := daemon.RegistryService.ResolveRepository(ref) if err != nil { diff --git a/fn/vendor/github.com/docker/docker/daemon/image_push.go b/fn/vendor/github.com/docker/docker/daemon/image_push.go index 0f060d117..c2e5967b1 100644 --- a/fn/vendor/github.com/docker/docker/daemon/image_push.go +++ b/fn/vendor/github.com/docker/docker/daemon/image_push.go @@ -2,6 +2,7 @@ package daemon import ( "io" + "runtime" "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/reference" @@ -9,6 +10,7 @@ import ( "github.com/docker/docker/distribution" progressutils "github.com/docker/docker/distribution/utils" "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/system" "golang.org/x/net/context" ) @@ -39,6 +41,12 @@ func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHead close(writesDone) }() + // TODO @jhowardmsft LCOW Support. This will require revisiting. For now, hard-code. 
+ platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + imagePushConfig := &distribution.ImagePushConfig{ Config: distribution.Config{ MetaHeaders: metaHeaders, @@ -46,12 +54,12 @@ func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHead ProgressOutput: progress.ChanOutput(progressChan), RegistryService: daemon.RegistryService, ImageEventLogger: daemon.LogImageEvent, - MetadataStore: daemon.distributionMetadataStore, - ImageStore: distribution.NewImageConfigStoreFromStore(daemon.imageStore), - ReferenceStore: daemon.referenceStore, + MetadataStore: daemon.stores[platform].distributionMetadataStore, + ImageStore: distribution.NewImageConfigStoreFromStore(daemon.stores[platform].imageStore), + ReferenceStore: daemon.stores[platform].referenceStore, }, ConfigMediaType: schema2.MediaTypeImageConfig, - LayerStore: distribution.NewLayerProviderFromStore(daemon.layerStore), + LayerStore: distribution.NewLayerProviderFromStore(daemon.stores[platform].layerStore), TrustKey: daemon.trustKey, UploadManager: daemon.uploadManager, } diff --git a/fn/vendor/github.com/docker/docker/daemon/image_tag.go b/fn/vendor/github.com/docker/docker/daemon/image_tag.go index 10a584b36..5f28daed0 100644 --- a/fn/vendor/github.com/docker/docker/daemon/image_tag.go +++ b/fn/vendor/github.com/docker/docker/daemon/image_tag.go @@ -8,7 +8,7 @@ import ( // TagImage creates the tag specified by newTag, pointing to the image named // imageName (alternatively, imageName can also be an image ID). 
func (daemon *Daemon) TagImage(imageName, repository, tag string) error { - imageID, err := daemon.GetImageID(imageName) + imageID, platform, err := daemon.GetImageIDAndPlatform(imageName) if err != nil { return err } @@ -23,15 +23,18 @@ func (daemon *Daemon) TagImage(imageName, repository, tag string) error { } } - return daemon.TagImageWithReference(imageID, newTag) + return daemon.TagImageWithReference(imageID, platform, newTag) } // TagImageWithReference adds the given reference to the image ID provided. -func (daemon *Daemon) TagImageWithReference(imageID image.ID, newTag reference.Named) error { - if err := daemon.referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil { +func (daemon *Daemon) TagImageWithReference(imageID image.ID, platform string, newTag reference.Named) error { + if err := daemon.stores[platform].referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil { return err } + if err := daemon.stores[platform].imageStore.SetLastUpdated(imageID); err != nil { + return err + } daemon.LogImageEvent(imageID.String(), reference.FamiliarString(newTag), "tag") return nil } diff --git a/fn/vendor/github.com/docker/docker/daemon/images.go b/fn/vendor/github.com/docker/docker/daemon/images.go index b014fd488..4baf70371 100644 --- a/fn/vendor/github.com/docker/docker/daemon/images.go +++ b/fn/vendor/github.com/docker/docker/daemon/images.go @@ -3,6 +3,7 @@ package daemon import ( "encoding/json" "fmt" + "runtime" "sort" "time" @@ -14,6 +15,7 @@ import ( "github.com/docker/docker/container" "github.com/docker/docker/image" "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/system" ) var acceptedImageFilterTags = map[string]bool{ @@ -34,7 +36,12 @@ func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created } // Map returns a map of all images in the ImageStore func (daemon *Daemon) Map() map[image.ID]*image.Image { - return daemon.imageStore.Map() + // TODO @jhowardmsft LCOW. 
This will need work to enumerate the stores for all platforms. + platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + return daemon.stores[platform].imageStore.Map() } // Images returns a filtered list of images. filterArgs is a JSON-encoded set @@ -43,6 +50,13 @@ func (daemon *Daemon) Map() map[image.ID]*image.Image { // named all controls whether all images in the graph are filtered, or just // the heads. func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) { + + // TODO @jhowardmsft LCOW. This will need work to enumerate the stores for all platforms. + platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + var ( allImages map[image.ID]*image.Image err error @@ -61,9 +75,9 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs } } if danglingOnly { - allImages = daemon.imageStore.Heads() + allImages = daemon.stores[platform].imageStore.Heads() } else { - allImages = daemon.imageStore.Map() + allImages = daemon.stores[platform].imageStore.Map() } var beforeFilter, sinceFilter *image.Image @@ -116,7 +130,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs layerID := img.RootFS.ChainID() var size int64 if layerID != "" { - l, err := daemon.layerStore.Get(layerID) + l, err := daemon.stores[platform].layerStore.Get(layerID) if err != nil { // The layer may have been deleted between the call to `Map()` or // `Heads()` and the call to `Get()`, so we just ignore this error @@ -127,7 +141,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs } size, err = l.Size() - layer.ReleaseAndLog(daemon.layerStore, l) + layer.ReleaseAndLog(daemon.stores[platform].layerStore, l) if err != nil { return nil, err } @@ -135,7 +149,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs newImage := newImage(img, size) - for _, ref := range 
daemon.referenceStore.References(id.Digest()) { + for _, ref := range daemon.stores[platform].referenceStore.References(id.Digest()) { if imageFilters.Include("reference") { var found bool var matchErr error @@ -157,7 +171,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs } } if newImage.RepoDigests == nil && newImage.RepoTags == nil { - if all || len(daemon.imageStore.Children(id)) == 0 { + if all || len(daemon.stores[platform].imageStore.Children(id)) == 0 { if imageFilters.Include("dangling") && !danglingOnly { //dangling=false case, so dangling image is not needed @@ -179,7 +193,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs // lazily init variables if imagesMap == nil { allContainers = daemon.List() - allLayers = daemon.layerStore.Map() + allLayers = daemon.stores[platform].layerStore.Map() imagesMap = make(map[*image.Image]*types.ImageSummary) layerRefs = make(map[layer.ChainID]int) } @@ -242,7 +256,16 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs // The existing image(s) is not destroyed. // If no parent is specified, a new image with the diff of all the specified image's layers merged into a new layer that has no parents. 
func (daemon *Daemon) SquashImage(id, parent string) (string, error) { - img, err := daemon.imageStore.Get(image.ID(id)) + + var ( + img *image.Image + err error + ) + for _, ds := range daemon.stores { + if img, err = ds.imageStore.Get(image.ID(id)); err == nil { + break + } + } if err != nil { return "", err } @@ -250,7 +273,7 @@ func (daemon *Daemon) SquashImage(id, parent string) (string, error) { var parentImg *image.Image var parentChainID layer.ChainID if len(parent) != 0 { - parentImg, err = daemon.imageStore.Get(image.ID(parent)) + parentImg, err = daemon.stores[img.Platform()].imageStore.Get(image.ID(parent)) if err != nil { return "", errors.Wrap(err, "error getting specified parent layer") } @@ -260,11 +283,11 @@ func (daemon *Daemon) SquashImage(id, parent string) (string, error) { parentImg = &image.Image{RootFS: rootFS} } - l, err := daemon.layerStore.Get(img.RootFS.ChainID()) + l, err := daemon.stores[img.Platform()].layerStore.Get(img.RootFS.ChainID()) if err != nil { return "", errors.Wrap(err, "error getting image layer") } - defer daemon.layerStore.Release(l) + defer daemon.stores[img.Platform()].layerStore.Release(l) ts, err := l.TarStreamFrom(parentChainID) if err != nil { @@ -272,11 +295,11 @@ func (daemon *Daemon) SquashImage(id, parent string) (string, error) { } defer ts.Close() - newL, err := daemon.layerStore.Register(ts, parentChainID) + newL, err := daemon.stores[img.Platform()].layerStore.Register(ts, parentChainID, layer.Platform(img.Platform())) if err != nil { return "", errors.Wrap(err, "error registering layer") } - defer daemon.layerStore.Release(newL) + defer daemon.stores[img.Platform()].layerStore.Release(newL) var newImage image.Image newImage = *img @@ -313,7 +336,7 @@ func (daemon *Daemon) SquashImage(id, parent string) (string, error) { return "", errors.Wrap(err, "error marshalling image config") } - newImgID, err := daemon.imageStore.Create(b) + newImgID, err := daemon.stores[img.Platform()].imageStore.Create(b) if err 
!= nil { return "", errors.Wrap(err, "error creating new image after squash") } diff --git a/fn/vendor/github.com/docker/docker/daemon/import.go b/fn/vendor/github.com/docker/docker/daemon/import.go index fc9f2682c..0409cd6bd 100644 --- a/fn/vendor/github.com/docker/docker/daemon/import.go +++ b/fn/vendor/github.com/docker/docker/daemon/import.go @@ -12,11 +12,11 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/docker/api/types/container" "github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/builder/remotecontext" "github.com/docker/docker/dockerversion" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/httputils" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/streamformatter" "github.com/pkg/errors" @@ -26,14 +26,18 @@ import ( // inConfig (if src is "-"), or from a URI specified in src. Progress output is // written to outStream. Repository and tag names can optionally be given in // the repo and tag arguments, respectively. -func (daemon *Daemon) ImportImage(src string, repository, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { +func (daemon *Daemon) ImportImage(src string, repository, platform string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { var ( - sf = streamformatter.NewJSONStreamFormatter() rc io.ReadCloser resp *http.Response newRef reference.Named ) + // Default the platform if not supplied. 
+ if platform == "" { + platform = runtime.GOOS + } + if repository != "" { var err error newRef, err = reference.ParseNormalizedNamed(repository) @@ -68,12 +72,12 @@ func (daemon *Daemon) ImportImage(src string, repository, tag string, msg string return err } - resp, err = httputils.Download(u.String()) + resp, err = remotecontext.GetWithStatusError(u.String()) if err != nil { return err } - outStream.Write(sf.FormatStatus("", "Downloading from %s", u)) - progressOutput := sf.NewProgressOutput(outStream, true) + outStream.Write(streamformatter.FormatStatus("", "Downloading from %s", u)) + progressOutput := streamformatter.NewJSONProgressOutput(outStream, true) rc = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing") } @@ -86,12 +90,11 @@ func (daemon *Daemon) ImportImage(src string, repository, tag string, msg string if err != nil { return err } - // TODO: support windows baselayer? - l, err := daemon.layerStore.Register(inflatedLayerData, "") + l, err := daemon.stores[platform].layerStore.Register(inflatedLayerData, "", layer.Platform(platform)) if err != nil { return err } - defer layer.ReleaseAndLog(daemon.layerStore, l) + defer layer.ReleaseAndLog(daemon.stores[platform].layerStore, l) created := time.Now().UTC() imgConfig, err := json.Marshal(&image.Image{ @@ -99,7 +102,7 @@ func (daemon *Daemon) ImportImage(src string, repository, tag string, msg string DockerVersion: dockerversion.Version, Config: config, Architecture: runtime.GOARCH, - OS: runtime.GOOS, + OS: platform, Created: created, Comment: msg, }, @@ -116,19 +119,19 @@ func (daemon *Daemon) ImportImage(src string, repository, tag string, msg string return err } - id, err := daemon.imageStore.Create(imgConfig) + id, err := daemon.stores[platform].imageStore.Create(imgConfig) if err != nil { return err } // FIXME: connect with commit code and call refstore directly if newRef != nil { - if err := daemon.TagImageWithReference(id, newRef); err != nil { + if err := 
daemon.TagImageWithReference(id, platform, newRef); err != nil { return err } } daemon.LogImageEvent(id.String(), id.String(), "import") - outStream.Write(sf.FormatStatus("", id.String())) + outStream.Write(streamformatter.FormatStatus("", id.String())) return nil } diff --git a/fn/vendor/github.com/docker/docker/daemon/info.go b/fn/vendor/github.com/docker/docker/daemon/info.go index 919e8ed3d..2cb3e8479 100644 --- a/fn/vendor/github.com/docker/docker/daemon/info.go +++ b/fn/vendor/github.com/docker/docker/daemon/info.go @@ -4,14 +4,14 @@ import ( "fmt" "os" "runtime" - "sync/atomic" + "strings" "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/api/types" "github.com/docker/docker/cli/debug" - "github.com/docker/docker/container" + "github.com/docker/docker/daemon/logger" "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/parsers/kernel" @@ -57,18 +57,7 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) { } sysInfo := sysinfo.New(true) - - var cRunning, cPaused, cStopped int32 - daemon.containers.ApplyAll(func(c *container.Container) { - switch c.StateString() { - case "paused": - atomic.AddInt32(&cPaused, 1) - case "running": - atomic.AddInt32(&cRunning, 1) - default: - atomic.AddInt32(&cStopped, 1) - } - }) + cRunning, cPaused, cStopped := stateCtr.get() securityOptions := []string{} if sysInfo.AppArmor { @@ -84,20 +73,37 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) { if selinuxEnabled() { securityOptions = append(securityOptions, "name=selinux") } - uid, gid := daemon.GetRemappedUIDGID() - if uid != 0 || gid != 0 { + rootIDs := daemon.idMappings.RootPair() + if rootIDs.UID != 0 || rootIDs.GID != 0 { securityOptions = append(securityOptions, "name=userns") } + imageCount := 0 + drivers := "" + for p, ds := range daemon.stores { + imageCount += len(ds.imageStore.Map()) + drivers += daemon.GraphDriverName(p) + if len(daemon.stores) > 1 { + 
drivers += fmt.Sprintf(" (%s) ", p) + } + } + + // TODO @jhowardmsft LCOW support. For now, hard-code the platform shown for the driver status + p := runtime.GOOS + if system.LCOWSupported() { + p = "linux" + } + + drivers = strings.TrimSpace(drivers) v := &types.Info{ ID: daemon.ID, Containers: int(cRunning + cPaused + cStopped), ContainersRunning: int(cRunning), ContainersPaused: int(cPaused), ContainersStopped: int(cStopped), - Images: len(daemon.imageStore.Map()), - Driver: daemon.GraphDriverName(), - DriverStatus: daemon.layerStore.DriverStatus(), + Images: imageCount, + Driver: drivers, + DriverStatus: daemon.stores[p].layerStore.DriverStatus(), Plugins: daemon.showPluginsInfo(), IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled, BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled, @@ -174,7 +180,10 @@ func (daemon *Daemon) showPluginsInfo() types.PluginsInfo { pluginsInfo.Volume = volumedrivers.GetDriverList() pluginsInfo.Network = daemon.GetNetworkDriverList() - pluginsInfo.Authorization = daemon.configStore.GetAuthorizationPlugins() + // The authorization plugins are returned in the order they are + // used as they constitute a request/response modification chain. 
+ pluginsInfo.Authorization = daemon.configStore.AuthorizationPlugins + pluginsInfo.Log = logger.ListDrivers() return pluginsInfo } diff --git a/fn/vendor/github.com/docker/docker/daemon/info_unix.go b/fn/vendor/github.com/docker/docker/daemon/info_unix.go index 5c387541b..e816f8dff 100644 --- a/fn/vendor/github.com/docker/docker/daemon/info_unix.go +++ b/fn/vendor/github.com/docker/docker/daemon/info_unix.go @@ -9,7 +9,6 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/api/types" - daemonconfig "github.com/docker/docker/daemon/config" "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/sysinfo" "github.com/pkg/errors" @@ -38,7 +37,8 @@ func (daemon *Daemon) FillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) } v.RuncCommit.Expected = dockerversion.RuncCommitID - if rv, err := exec.Command(DefaultRuntimeBinary, "--version").Output(); err == nil { + defaultRuntimeBinary := daemon.configStore.GetRuntime(daemon.configStore.GetDefaultRuntimeName()).Path + if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil { parts := strings.Split(strings.TrimSpace(string(rv)), "\n") if len(parts) == 3 { parts = strings.Split(parts[1], ": ") @@ -48,23 +48,24 @@ func (daemon *Daemon) FillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) } if v.RuncCommit.ID == "" { - logrus.Warnf("failed to retrieve %s version: unknown output format: %s", DefaultRuntimeBinary, string(rv)) + logrus.Warnf("failed to retrieve %s version: unknown output format: %s", defaultRuntimeBinary, string(rv)) v.RuncCommit.ID = "N/A" } } else { - logrus.Warnf("failed to retrieve %s version: %v", DefaultRuntimeBinary, err) + logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err) v.RuncCommit.ID = "N/A" } - if rv, err := exec.Command(daemonconfig.DefaultInitBinary, "--version").Output(); err == nil { + defaultInitBinary := daemon.configStore.GetInitPath() + if rv, err := exec.Command(defaultInitBinary, 
"--version").Output(); err == nil { ver, err := parseInitVersion(string(rv)) if err != nil { - logrus.Warnf("failed to retrieve %s version: %s", daemonconfig.DefaultInitBinary, err) + logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) } v.InitCommit = ver } else { - logrus.Warnf("failed to retrieve %s version: %s", daemonconfig.DefaultInitBinary, err) + logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) v.InitCommit.ID = "N/A" } } diff --git a/fn/vendor/github.com/docker/docker/daemon/initlayer/setup_unix.go b/fn/vendor/github.com/docker/docker/daemon/initlayer/setup_unix.go index e83c2751e..cdd897348 100644 --- a/fn/vendor/github.com/docker/docker/daemon/initlayer/setup_unix.go +++ b/fn/vendor/github.com/docker/docker/daemon/initlayer/setup_unix.go @@ -16,7 +16,7 @@ import ( // // This extra layer is used by all containers as the top-most ro layer. It protects // the container from unwanted side-effects on the rw layer. -func Setup(initLayer string, rootUID, rootGID int) error { +func Setup(initLayer string, rootIDs idtools.IDPair) error { for pth, typ := range map[string]string{ "/dev/pts": "dir", "/dev/shm": "dir", @@ -38,12 +38,12 @@ func Setup(initLayer string, rootUID, rootGID int) error { if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil { if os.IsNotExist(err) { - if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChownNew(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootIDs); err != nil { return err } switch typ { case "dir": - if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, pth), 0755, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChownNew(filepath.Join(initLayer, pth), 0755, rootIDs); err != nil { return err } case "file": @@ -51,7 +51,7 @@ func Setup(initLayer string, rootUID, rootGID int) error { if err != nil { return err } - f.Chown(rootUID, rootGID) + 
f.Chown(rootIDs.UID, rootIDs.GID) f.Close() default: if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil { diff --git a/fn/vendor/github.com/docker/docker/daemon/initlayer/setup_windows.go b/fn/vendor/github.com/docker/docker/daemon/initlayer/setup_windows.go index 48a9d71aa..2b22f58b5 100644 --- a/fn/vendor/github.com/docker/docker/daemon/initlayer/setup_windows.go +++ b/fn/vendor/github.com/docker/docker/daemon/initlayer/setup_windows.go @@ -2,12 +2,16 @@ package initlayer +import ( + "github.com/docker/docker/pkg/idtools" +) + // Setup populates a directory with mountpoints suitable // for bind-mounting dockerinit into the container. The mountpoint is simply an // empty file at /.dockerinit // // This extra layer is used by all containers as the top-most ro layer. It protects // the container from unwanted side-effects on the rw layer. -func Setup(initLayer string, rootUID, rootGID int) error { +func Setup(initLayer string, rootIDs idtools.IDPair) error { return nil } diff --git a/fn/vendor/github.com/docker/docker/daemon/inspect.go b/fn/vendor/github.com/docker/docker/daemon/inspect.go index 06858223f..47c1ba418 100644 --- a/fn/vendor/github.com/docker/docker/daemon/inspect.go +++ b/fn/vendor/github.com/docker/docker/daemon/inspect.go @@ -51,7 +51,7 @@ func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.Co } } - mountPoints := addMountPoints(container) + mountPoints := container.GetMountPoints() networkSettings := &types.NetworkSettings{ NetworkSettingsBase: types.NetworkSettingsBase{ Bridge: container.NetworkSettings.Bridge, @@ -104,7 +104,7 @@ func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, er return nil, err } - mountPoints := addMountPoints(container) + mountPoints := container.GetMountPoints() config := &v1p20.ContainerConfig{ Config: container.Config, MacAddress: container.Config.MacAddress, @@ -153,7 +153,7 @@ func (daemon *Daemon) getInspectData(container *container.Container) 
(*types.Con Dead: container.State.Dead, Pid: container.State.Pid, ExitCode: container.State.ExitCode(), - Error: container.State.Error(), + Error: container.State.ErrorMsg, StartedAt: container.State.StartedAt.Format(time.RFC3339Nano), FinishedAt: container.State.FinishedAt.Format(time.RFC3339Nano), Health: containerHealth, @@ -170,6 +170,7 @@ func (daemon *Daemon) getInspectData(container *container.Container) (*types.Con Name: container.Name, RestartCount: container.RestartCount, Driver: container.Driver, + Platform: container.Platform, MountLabel: container.MountLabel, ProcessLabel: container.ProcessLabel, ExecIDs: container.GetExecIDs(), diff --git a/fn/vendor/github.com/docker/docker/daemon/inspect_solaris.go b/fn/vendor/github.com/docker/docker/daemon/inspect_solaris.go index 0e3dcc111..0b275c141 100644 --- a/fn/vendor/github.com/docker/docker/daemon/inspect_solaris.go +++ b/fn/vendor/github.com/docker/docker/daemon/inspect_solaris.go @@ -18,20 +18,6 @@ func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, return &v1p19.ContainerJSON{}, nil } -func addMountPoints(container *container.Container) []types.MountPoint { - mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) - for _, m := range container.MountPoints { - mountPoints = append(mountPoints, types.MountPoint{ - Name: m.Name, - Source: m.Path(), - Destination: m.Destination, - Driver: m.Driver, - RW: m.RW, - }) - } - return mountPoints -} - func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { return &backend.ExecProcessConfig{ Tty: e.Tty, diff --git a/fn/vendor/github.com/docker/docker/daemon/inspect_unix.go b/fn/vendor/github.com/docker/docker/daemon/inspect_unix.go index 8342f7cf9..bd28481e6 100644 --- a/fn/vendor/github.com/docker/docker/daemon/inspect_unix.go +++ b/fn/vendor/github.com/docker/docker/daemon/inspect_unix.go @@ -64,23 +64,6 @@ func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, }, nil } -func 
addMountPoints(container *container.Container) []types.MountPoint { - mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) - for _, m := range container.MountPoints { - mountPoints = append(mountPoints, types.MountPoint{ - Type: m.Type, - Name: m.Name, - Source: m.Path(), - Destination: m.Destination, - Driver: m.Driver, - Mode: m.Mode, - RW: m.RW, - Propagation: m.Propagation, - }) - } - return mountPoints -} - func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { return &backend.ExecProcessConfig{ Tty: e.Tty, diff --git a/fn/vendor/github.com/docker/docker/daemon/inspect_windows.go b/fn/vendor/github.com/docker/docker/daemon/inspect_windows.go index b331c83ca..5b12902db 100644 --- a/fn/vendor/github.com/docker/docker/daemon/inspect_windows.go +++ b/fn/vendor/github.com/docker/docker/daemon/inspect_windows.go @@ -12,21 +12,6 @@ func setPlatformSpecificContainerFields(container *container.Container, contJSON return contJSONBase } -func addMountPoints(container *container.Container) []types.MountPoint { - mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) - for _, m := range container.MountPoints { - mountPoints = append(mountPoints, types.MountPoint{ - Type: m.Type, - Name: m.Name, - Source: m.Path(), - Destination: m.Destination, - Driver: m.Driver, - RW: m.RW, - }) - } - return mountPoints -} - // containerInspectPre120 get containers for pre 1.20 APIs. 
func (daemon *Daemon) containerInspectPre120(name string) (*types.ContainerJSON, error) { return daemon.ContainerInspectCurrent(name, false) diff --git a/fn/vendor/github.com/docker/docker/daemon/kill.go b/fn/vendor/github.com/docker/docker/daemon/kill.go index ba0dbe1f5..a2220715a 100644 --- a/fn/vendor/github.com/docker/docker/daemon/kill.go +++ b/fn/vendor/github.com/docker/docker/daemon/kill.go @@ -1,6 +1,7 @@ package daemon import ( + "context" "fmt" "runtime" "strings" @@ -8,7 +9,7 @@ import ( "time" "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" + containerpkg "github.com/docker/docker/container" "github.com/docker/docker/pkg/signal" ) @@ -54,7 +55,7 @@ func (daemon *Daemon) ContainerKill(name string, sig uint64) error { // to send the signal. An error is returned if the container is paused // or not running, or if there is a problem returned from the // underlying kill command. -func (daemon *Daemon) killWithSignal(container *container.Container, sig int) error { +func (daemon *Daemon) killWithSignal(container *containerpkg.Container, sig int) error { logrus.Debugf("Sending kill signal %d to container %s", sig, container.ID) container.Lock() defer container.Unlock() @@ -68,7 +69,7 @@ func (daemon *Daemon) killWithSignal(container *container.Container, sig int) er return errNotRunning{container.ID} } - if container.Config.StopSignal != "" { + if container.Config.StopSignal != "" && syscall.Signal(sig) != syscall.SIGKILL { containerStopSignal, err := signal.ParseSignal(container.Config.StopSignal) if err != nil { return err @@ -110,7 +111,7 @@ func (daemon *Daemon) killWithSignal(container *container.Container, sig int) er } // Kill forcefully terminates a container. 
-func (daemon *Daemon) Kill(container *container.Container) error { +func (daemon *Daemon) Kill(container *containerpkg.Container) error { if !container.IsRunning() { return errNotRunning{container.ID} } @@ -131,7 +132,10 @@ func (daemon *Daemon) Kill(container *container.Container) error { return nil } - if _, err2 := container.WaitStop(2 * time.Second); err2 != nil { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil { return err } } @@ -144,12 +148,15 @@ func (daemon *Daemon) Kill(container *container.Container) error { return err } - container.WaitStop(-1 * time.Second) + // Wait for exit with no timeout. + // Ignore returned status. + _ = <-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning) + return nil } // killPossibleDeadProcess is a wrapper around killSig() suppressing "no such process" error. -func (daemon *Daemon) killPossiblyDeadProcess(container *container.Container, sig int) error { +func (daemon *Daemon) killPossiblyDeadProcess(container *containerpkg.Container, sig int) error { err := daemon.killWithSignal(container, sig) if err == syscall.ESRCH { e := errNoSuchProcess{container.GetPID(), sig} @@ -159,6 +166,6 @@ func (daemon *Daemon) killPossiblyDeadProcess(container *container.Container, si return err } -func (daemon *Daemon) kill(c *container.Container, sig int) error { +func (daemon *Daemon) kill(c *containerpkg.Container, sig int) error { return daemon.containerd.Signal(c.ID, sig) } diff --git a/fn/vendor/github.com/docker/docker/daemon/list.go b/fn/vendor/github.com/docker/docker/daemon/list.go index 4d831460f..b854be754 100644 --- a/fn/vendor/github.com/docker/docker/daemon/list.go +++ b/fn/vendor/github.com/docker/docker/daemon/list.go @@ -10,7 +10,6 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - 
networktypes "github.com/docker/docker/api/types/network" "github.com/docker/docker/container" "github.com/docker/docker/image" "github.com/docker/docker/volume" @@ -47,7 +46,7 @@ type iterationAction int // containerReducer represents a reducer for a container. // Returns the object to serialize by the api. -type containerReducer func(*container.Container, *listContext) (*types.Container, error) +type containerReducer func(*container.Snapshot, *listContext) (*types.Container, error) const ( // includeContainer is the action to include a container in the reducer. @@ -83,9 +82,9 @@ type listContext struct { exitAllowed []int // beforeFilter is a filter to ignore containers that appear before the one given - beforeFilter *container.Container + beforeFilter *container.Snapshot // sinceFilter is a filter to stop the filtering when the iterator arrive to the given container - sinceFilter *container.Container + sinceFilter *container.Snapshot // taskFilter tells if we should filter based on wether a container is part of a task taskFilter bool @@ -101,21 +100,21 @@ type listContext struct { *types.ContainerListOptions } -// byContainerCreated is a temporary type used to sort a list of containers by creation time. -type byContainerCreated []*container.Container +// byCreatedDescending is a temporary type used to sort a list of containers by creation time. +type byCreatedDescending []container.Snapshot -func (r byContainerCreated) Len() int { return len(r) } -func (r byContainerCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r byContainerCreated) Less(i, j int) bool { - return r[i].Created.UnixNano() < r[j].Created.UnixNano() +func (r byCreatedDescending) Len() int { return len(r) } +func (r byCreatedDescending) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byCreatedDescending) Less(i, j int) bool { + return r[j].CreatedAt.UnixNano() < r[i].CreatedAt.UnixNano() } // Containers returns the list of containers to show given the user's filtering. 
func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.Container, error) { - return daemon.reduceContainers(config, daemon.transformContainer) + return daemon.reduceContainers(config, daemon.refreshImage) } -func (daemon *Daemon) filterByNameIDMatches(ctx *listContext) []*container.Container { +func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContext) ([]container.Snapshot, error) { idSearch := false names := ctx.filters.Get("name") ids := ctx.filters.Get("id") @@ -123,7 +122,9 @@ func (daemon *Daemon) filterByNameIDMatches(ctx *listContext) []*container.Conta // if name or ID filters are not in use, return to // standard behavior of walking the entire container // list from the daemon's in-memory store - return daemon.List() + all, err := view.All() + sort.Sort(byCreatedDescending(all)) + return all, err } // idSearch will determine if we limit name matching to the IDs @@ -158,38 +159,48 @@ func (daemon *Daemon) filterByNameIDMatches(ctx *listContext) []*container.Conta } } - cntrs := make([]*container.Container, 0, len(matches)) + cntrs := make([]container.Snapshot, 0, len(matches)) for id := range matches { - if c := daemon.containers.Get(id); c != nil { - cntrs = append(cntrs, c) + c, err := view.Get(id) + switch err.(type) { + case nil: + cntrs = append(cntrs, *c) + case container.NoSuchContainerError: + // ignore error + default: + return nil, err } } // Restore sort-order after filtering // Created gives us nanosec resolution for sorting - sort.Sort(sort.Reverse(byContainerCreated(cntrs))) + sort.Sort(byCreatedDescending(cntrs)) - return cntrs + return cntrs, nil } // reduceContainers parses the user's filtering options and generates the list of containers to return based on a reducer. 
func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reducer containerReducer) ([]*types.Container, error) { var ( + view = daemon.containersReplica.Snapshot(daemon.nameIndex) containers = []*types.Container{} ) - ctx, err := daemon.foldFilter(config) + ctx, err := daemon.foldFilter(view, config) if err != nil { return nil, err } // fastpath to only look at a subset of containers if specific name // or ID matches were provided by the user--otherwise we potentially - // end up locking and querying many more containers than intended - containerList := daemon.filterByNameIDMatches(ctx) + // end up querying many more containers than intended + containerList, err := daemon.filterByNameIDMatches(view, ctx) + if err != nil { + return nil, err + } - for _, container := range containerList { - t, err := daemon.reducePsContainer(container, ctx, reducer) + for i := range containerList { + t, err := daemon.reducePsContainer(&containerList[i], ctx, reducer) if err != nil { if err != errStopIteration { return nil, err @@ -206,23 +217,17 @@ func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reduc } // reducePsContainer is the basic representation for a container as expected by the ps command. 
-func (daemon *Daemon) reducePsContainer(container *container.Container, ctx *listContext, reducer containerReducer) (*types.Container, error) { - container.Lock() - +func (daemon *Daemon) reducePsContainer(container *container.Snapshot, ctx *listContext, reducer containerReducer) (*types.Container, error) { // filter containers to return - action := includeContainerInList(container, ctx) - switch action { + switch includeContainerInList(container, ctx) { case excludeContainer: - container.Unlock() return nil, nil case stopIteration: - container.Unlock() return nil, errStopIteration } // transform internal container struct into api structs newC, err := reducer(container, ctx) - container.Unlock() if err != nil { return nil, err } @@ -237,7 +242,7 @@ func (daemon *Daemon) reducePsContainer(container *container.Container, ctx *lis } // foldFilter generates the container filter based on the user's filtering options. -func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listContext, error) { +func (daemon *Daemon) foldFilter(view container.View, config *types.ContainerListOptions) (*listContext, error) { psFilters := config.Filters if err := psFilters.Validate(acceptedPsFilterTags); err != nil { @@ -294,10 +299,10 @@ func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listConte return nil, err } - var beforeContFilter, sinceContFilter *container.Container + var beforeContFilter, sinceContFilter *container.Snapshot err = psFilters.WalkValues("before", func(value string) error { - beforeContFilter, err = daemon.GetContainer(value) + beforeContFilter, err = view.Get(value) return err }) if err != nil { @@ -305,7 +310,7 @@ func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listConte } err = psFilters.WalkValues("since", func(value string) error { - sinceContFilter, err = daemon.GetContainer(value) + sinceContFilter, err = view.Get(value) return err }) if err != nil { @@ -317,7 +322,7 @@ func (daemon *Daemon) 
foldFilter(config *types.ContainerListOptions) (*listConte if psFilters.Include("ancestor") { ancestorFilter = true psFilters.WalkValues("ancestor", func(ancestor string) error { - id, err := daemon.GetImageID(ancestor) + id, platform, err := daemon.GetImageIDAndPlatform(ancestor) if err != nil { logrus.Warnf("Error while looking up for image %v", ancestor) return nil @@ -327,7 +332,7 @@ func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listConte return nil } // Then walk down the graph and put the imageIds in imagesFilter - populateImageFilterByParents(imagesFilter, id, daemon.imageStore.Children) + populateImageFilterByParents(imagesFilter, id, daemon.stores[platform].imageStore.Children) return nil }) } @@ -383,7 +388,7 @@ func portOp(key string, filter map[nat.Port]bool) func(value string) error { // includeContainerInList decides whether a container should be included in the output or not based in the filter. // It also decides if the iteration should be stopped or not. -func includeContainerInList(container *container.Container, ctx *listContext) iterationAction { +func includeContainerInList(container *container.Snapshot, ctx *listContext) iterationAction { // Do not include container if it's in the list before the filter container. // Set the filter container to nil to include the rest of containers after this one. 
if ctx.beforeFilter != nil { @@ -422,7 +427,7 @@ func includeContainerInList(container *container.Container, ctx *listContext) it } // Do not include container if any of the labels don't match - if !ctx.filters.MatchKVList("label", container.Config.Labels) { + if !ctx.filters.MatchKVList("label", container.Labels) { return excludeContainer } @@ -440,7 +445,7 @@ func includeContainerInList(container *container.Container, ctx *listContext) it if len(ctx.exitAllowed) > 0 { shouldSkip := true for _, code := range ctx.exitAllowed { - if code == container.ExitCode() && !container.Running && !container.StartedAt.IsZero() { + if code == container.ExitCode && !container.Running && !container.StartedAt.IsZero() { shouldSkip = false break } @@ -451,28 +456,34 @@ func includeContainerInList(container *container.Container, ctx *listContext) it } // Do not include container if its status doesn't match the filter - if !ctx.filters.Match("status", container.State.StateString()) { + if !ctx.filters.Match("status", container.State) { return excludeContainer } // Do not include container if its health doesn't match the filter - if !ctx.filters.ExactMatch("health", container.State.HealthString()) { + if !ctx.filters.ExactMatch("health", container.Health) { return excludeContainer } if ctx.filters.Include("volume") { - volumesByName := make(map[string]*volume.MountPoint) - for _, m := range container.MountPoints { + volumesByName := make(map[string]types.MountPoint) + for _, m := range container.Mounts { if m.Name != "" { volumesByName[m.Name] = m } else { volumesByName[m.Source] = m } } + volumesByDestination := make(map[string]types.MountPoint) + for _, m := range container.Mounts { + if m.Destination != "" { + volumesByDestination[m.Destination] = m + } + } volumeExist := fmt.Errorf("volume mounted in container") err := ctx.filters.WalkValues("volume", func(value string) error { - if _, exist := container.MountPoints[value]; exist { + if _, exist := volumesByDestination[value]; 
exist { return volumeExist } if _, exist := volumesByName[value]; exist { @@ -489,19 +500,25 @@ func includeContainerInList(container *container.Container, ctx *listContext) it if len(ctx.images) == 0 { return excludeContainer } - if !ctx.images[container.ImageID] { + if !ctx.images[image.ID(container.ImageID)] { return excludeContainer } } - networkExist := fmt.Errorf("container part of network") + var ( + networkExist = errors.New("container part of network") + noNetworks = errors.New("container is not part of any networks") + ) if ctx.filters.Include("network") { err := ctx.filters.WalkValues("network", func(value string) error { + if container.NetworkSettings == nil { + return noNetworks + } if _, ok := container.NetworkSettings.Networks[value]; ok { return networkExist } for _, nw := range container.NetworkSettings.Networks { - if nw.EndpointSettings == nil { + if nw == nil { continue } if strings.HasPrefix(nw.NetworkID, value) { @@ -518,7 +535,7 @@ func includeContainerInList(container *container.Container, ctx *listContext) it if len(ctx.publish) > 0 { shouldSkip := true for port := range ctx.publish { - if _, ok := container.HostConfig.PortBindings[port]; ok { + if _, ok := container.PortBindings[port]; ok { shouldSkip = false break } @@ -531,7 +548,7 @@ func includeContainerInList(container *container.Container, ctx *listContext) it if len(ctx.expose) > 0 { shouldSkip := true for port := range ctx.expose { - if _, ok := container.Config.ExposedPorts[port]; ok { + if _, ok := container.ExposedPorts[port]; ok { shouldSkip = false break } @@ -544,106 +561,22 @@ func includeContainerInList(container *container.Container, ctx *listContext) it return includeContainer } -// transformContainer generates the container type expected by the docker ps command. 
-func (daemon *Daemon) transformContainer(container *container.Container, ctx *listContext) (*types.Container, error) { - newC := &types.Container{ - ID: container.ID, - Names: ctx.names[container.ID], - ImageID: container.ImageID.String(), - } - if newC.Names == nil { - // Dead containers will often have no name, so make sure the response isn't null - newC.Names = []string{} - } - - image := container.Config.Image // if possible keep the original ref - if image != container.ImageID.String() { - id, err := daemon.GetImageID(image) +// refreshImage checks if the Image ref still points to the correct ID, and updates the ref to the actual ID when it doesn't +func (daemon *Daemon) refreshImage(s *container.Snapshot, ctx *listContext) (*types.Container, error) { + c := s.Container + image := s.Image // keep the original ref if still valid (hasn't changed) + if image != s.ImageID { + id, _, err := daemon.GetImageIDAndPlatform(image) if _, isDNE := err.(ErrImageDoesNotExist); err != nil && !isDNE { return nil, err } - if err != nil || id != container.ImageID { - image = container.ImageID.String() + if err != nil || id.String() != s.ImageID { + // ref changed, we need to use original ID + image = s.ImageID } } - newC.Image = image - - if len(container.Args) > 0 { - args := []string{} - for _, arg := range container.Args { - if strings.Contains(arg, " ") { - args = append(args, fmt.Sprintf("'%s'", arg)) - } else { - args = append(args, arg) - } - } - argsAsString := strings.Join(args, " ") - - newC.Command = fmt.Sprintf("%s %s", container.Path, argsAsString) - } else { - newC.Command = container.Path - } - newC.Created = container.Created.Unix() - newC.State = container.State.StateString() - newC.Status = container.State.String() - newC.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode) - // copy networks to avoid races - networks := make(map[string]*networktypes.EndpointSettings) - for name, network := range container.NetworkSettings.Networks { - if network 
== nil || network.EndpointSettings == nil { - continue - } - networks[name] = &networktypes.EndpointSettings{ - EndpointID: network.EndpointID, - Gateway: network.Gateway, - IPAddress: network.IPAddress, - IPPrefixLen: network.IPPrefixLen, - IPv6Gateway: network.IPv6Gateway, - GlobalIPv6Address: network.GlobalIPv6Address, - GlobalIPv6PrefixLen: network.GlobalIPv6PrefixLen, - MacAddress: network.MacAddress, - NetworkID: network.NetworkID, - } - if network.IPAMConfig != nil { - networks[name].IPAMConfig = &networktypes.EndpointIPAMConfig{ - IPv4Address: network.IPAMConfig.IPv4Address, - IPv6Address: network.IPAMConfig.IPv6Address, - } - } - } - newC.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks} - - newC.Ports = []types.Port{} - for port, bindings := range container.NetworkSettings.Ports { - p, err := nat.ParsePort(port.Port()) - if err != nil { - return nil, err - } - if len(bindings) == 0 { - newC.Ports = append(newC.Ports, types.Port{ - PrivatePort: uint16(p), - Type: port.Proto(), - }) - continue - } - for _, binding := range bindings { - h, err := nat.ParsePort(binding.HostPort) - if err != nil { - return nil, err - } - newC.Ports = append(newC.Ports, types.Port{ - PrivatePort: uint16(p), - PublicPort: uint16(h), - Type: port.Proto(), - IP: binding.HostIP, - }) - } - } - - newC.Labels = container.Config.Labels - newC.Mounts = addMountPoints(container) - - return newC, nil + c.Image = image + return &c, nil } // Volumes lists known volumes, using the filter to restrict the range diff --git a/fn/vendor/github.com/docker/docker/daemon/list_unix.go b/fn/vendor/github.com/docker/docker/daemon/list_unix.go index 91c9caccf..ebaae4560 100644 --- a/fn/vendor/github.com/docker/docker/daemon/list_unix.go +++ b/fn/vendor/github.com/docker/docker/daemon/list_unix.go @@ -6,6 +6,6 @@ import "github.com/docker/docker/container" // excludeByIsolation is a platform specific helper function to support PS // filtering by Isolation. 
This is a Windows-only concept, so is a no-op on Unix. -func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction { +func excludeByIsolation(container *container.Snapshot, ctx *listContext) iterationAction { return includeContainer } diff --git a/fn/vendor/github.com/docker/docker/daemon/list_windows.go b/fn/vendor/github.com/docker/docker/daemon/list_windows.go index 7fbcd3af2..ab563c535 100644 --- a/fn/vendor/github.com/docker/docker/daemon/list_windows.go +++ b/fn/vendor/github.com/docker/docker/daemon/list_windows.go @@ -8,7 +8,7 @@ import ( // excludeByIsolation is a platform specific helper function to support PS // filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. -func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction { +func excludeByIsolation(container *container.Snapshot, ctx *listContext) iterationAction { i := strings.ToLower(string(container.HostConfig.Isolation)) if i == "" { i = "default" diff --git a/fn/vendor/github.com/docker/docker/daemon/logger/adapter.go b/fn/vendor/github.com/docker/docker/daemon/logger/adapter.go index e6d7598b4..a187b30fd 100644 --- a/fn/vendor/github.com/docker/docker/daemon/logger/adapter.go +++ b/fn/vendor/github.com/docker/docker/daemon/logger/adapter.go @@ -3,6 +3,7 @@ package logger import ( "io" "os" + "strings" "sync" "time" @@ -18,6 +19,7 @@ type pluginAdapter struct { driverName string id string plugin logPlugin + basePath string fifoPath string capabilities Capability logInfo Info @@ -56,7 +58,7 @@ func (a *pluginAdapter) Close() error { a.mu.Lock() defer a.mu.Unlock() - if err := a.plugin.StopLogging(a.fifoPath); err != nil { + if err := a.plugin.StopLogging(strings.TrimPrefix(a.fifoPath, a.basePath)); err != nil { return err } diff --git a/fn/vendor/github.com/docker/docker/daemon/logger/adapter_test.go b/fn/vendor/github.com/docker/docker/daemon/logger/adapter_test.go index 707550e7e..b8c069ffb 100644 --- 
a/fn/vendor/github.com/docker/docker/daemon/logger/adapter_test.go +++ b/fn/vendor/github.com/docker/docker/daemon/logger/adapter_test.go @@ -1,17 +1,16 @@ package logger import ( - "bytes" "encoding/binary" "io" "io/ioutil" "os" - "runtime" "testing" "time" "github.com/docker/docker/api/types/plugins/logdriver" protoio "github.com/gogo/protobuf/io" + "github.com/stretchr/testify/assert" ) // mockLoggingPlugin implements the loggingPlugin interface for testing purposes @@ -89,9 +88,8 @@ func (l *mockLoggingPlugin) ReadLogs(info Info, config ReadConfig) (io.ReadClose func newMockPluginAdapter(t *testing.T) Logger { r, w := io.Pipe() f, err := ioutil.TempFile("", "mock-plugin-adapter") - if err != nil { - t.Fatal(err) - } + assert.NoError(t, err) + enc := logdriver.NewLogEntryEncoder(w) a := &pluginAdapterWithRead{ &pluginAdapter{ @@ -118,15 +116,11 @@ func TestAdapterReadLogs(t *testing.T) { } for _, msg := range testMsg { m := msg.copy() - if err := l.Log(m); err != nil { - t.Fatal(err) - } + assert.NoError(t, l.Log(m)) } lr, ok := l.(LogReader) - if !ok { - t.Fatal("expected log reader") - } + assert.NotNil(t, ok) lw := lr.ReadLogs(ReadConfig{}) @@ -134,16 +128,14 @@ func TestAdapterReadLogs(t *testing.T) { select { case msg := <-lw.Msg: testMessageEqual(t, &x, msg) - case <-time.After(10 * time.Millisecond): + case <-time.After(10 * time.Second): t.Fatal("timeout reading logs") } } select { case _, ok := <-lw.Msg: - if ok { - t.Fatal("expected message channel to be closed") - } + assert.False(t, ok, "expected message channel to be closed") case <-time.After(10 * time.Second): t.Fatal("timeout waiting for message channel to close") @@ -161,16 +153,11 @@ func TestAdapterReadLogs(t *testing.T) { } x := Message{Line: []byte("Too infinity and beyond!"), Timestamp: time.Now()} - - if err := l.Log(x.copy()); err != nil { - t.Fatal(err) - } + assert.NoError(t, l.Log(x.copy())) select { case msg, ok := <-lw.Msg: - if !ok { - t.Fatal("message channel unexpectedly closed") 
- } + assert.NotNil(t, ok, "message channel unexpectedly closed") testMessageEqual(t, &x, msg) case <-time.After(10 * time.Second): t.Fatal("timeout reading logs") @@ -179,30 +166,15 @@ func TestAdapterReadLogs(t *testing.T) { l.Close() select { case msg, ok := <-lw.Msg: - if ok { - t.Fatal("expected message channel to be closed") - } - if msg != nil { - t.Fatal("expected nil message") - } + assert.False(t, ok, "expected message channel to be closed") + assert.Nil(t, msg) case <-time.After(10 * time.Second): t.Fatal("timeout waiting for logger to close") } } func testMessageEqual(t *testing.T, a, b *Message) { - _, _, n, _ := runtime.Caller(1) - errFmt := "line %d: expected same messages:\nwant: %+v\nhave: %+v" - - if !bytes.Equal(a.Line, b.Line) { - t.Fatalf(errFmt, n, *a, *b) - } - - if a.Timestamp.UnixNano() != b.Timestamp.UnixNano() { - t.Fatalf(errFmt, n, *a, *b) - } - - if a.Source != b.Source { - t.Fatalf(errFmt, n, *a, *b) - } + assert.Equal(t, a.Line, b.Line) + assert.Equal(t, a.Timestamp.UnixNano(), b.Timestamp.UnixNano()) + assert.Equal(t, a.Source, b.Source) } diff --git a/fn/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go b/fn/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go index ba9455e6a..4d98468a7 100644 --- a/fn/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go +++ b/fn/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go @@ -3,9 +3,9 @@ package awslogs import ( "bytes" - "errors" "fmt" "os" + "regexp" "runtime" "sort" "strconv" @@ -24,6 +24,7 @@ import ( "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/templates" + "github.com/pkg/errors" ) const ( @@ -34,6 +35,8 @@ const ( logStreamKey = "awslogs-stream" logCreateGroupKey = "awslogs-create-group" tagKey = "tag" + datetimeFormatKey = "awslogs-datetime-format" + multilinePatternKey = "awslogs-multiline-pattern" batchPublishFrequency = 5 
* time.Second // See: http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html @@ -53,14 +56,15 @@ const ( ) type logStream struct { - logStreamName string - logGroupName string - logCreateGroup bool - client api - messages chan *logger.Message - lock sync.RWMutex - closed bool - sequenceToken *string + logStreamName string + logGroupName string + logCreateGroup bool + multilinePattern *regexp.Regexp + client api + messages chan *logger.Message + lock sync.RWMutex + closed bool + sequenceToken *string } type api interface { @@ -91,7 +95,8 @@ func init() { // New creates an awslogs logger using the configuration passed in on the // context. Supported context configuration variables are awslogs-region, -// awslogs-group, awslogs-stream, and awslogs-create-group. When available, configuration is +// awslogs-group, awslogs-stream, awslogs-create-group, awslogs-multiline-pattern +// and awslogs-datetime-format. When available, configuration is // also taken from environment variables AWS_REGION, AWS_ACCESS_KEY_ID, // AWS_SECRET_ACCESS_KEY, the shared credentials file (~/.aws/credentials), and // the EC2 Instance Metadata Service. 
@@ -112,16 +117,23 @@ func New(info logger.Info) (logger.Logger, error) { if info.Config[logStreamKey] != "" { logStreamName = info.Config[logStreamKey] } + + multilinePattern, err := parseMultilineOptions(info) + if err != nil { + return nil, err + } + client, err := newAWSLogsClient(info) if err != nil { return nil, err } containerStream := &logStream{ - logStreamName: logStreamName, - logGroupName: logGroupName, - logCreateGroup: logCreateGroup, - client: client, - messages: make(chan *logger.Message, 4096), + logStreamName: logStreamName, + logGroupName: logGroupName, + logCreateGroup: logCreateGroup, + multilinePattern: multilinePattern, + client: client, + messages: make(chan *logger.Message, 4096), } err = containerStream.create() if err != nil { @@ -132,6 +144,56 @@ func New(info logger.Info) (logger.Logger, error) { return containerStream, nil } +// Parses awslogs-multiline-pattern and awslogs-datetime-format options +// If awslogs-datetime-format is present, convert the format from strftime +// to regexp and return. +// If awslogs-multiline-pattern is present, compile regexp and return +func parseMultilineOptions(info logger.Info) (*regexp.Regexp, error) { + dateTimeFormat := info.Config[datetimeFormatKey] + multilinePatternKey := info.Config[multilinePatternKey] + // strftime input is parsed into a regular expression + if dateTimeFormat != "" { + // %. 
matches each strftime format sequence and ReplaceAllStringFunc + // looks up each format sequence in the conversion table strftimeToRegex + // to replace with a defined regular expression + r := regexp.MustCompile("%.") + multilinePatternKey = r.ReplaceAllStringFunc(dateTimeFormat, func(s string) string { + return strftimeToRegex[s] + }) + } + if multilinePatternKey != "" { + multilinePattern, err := regexp.Compile(multilinePatternKey) + if err != nil { + return nil, errors.Wrapf(err, "awslogs could not parse multiline pattern key %q", multilinePatternKey) + } + return multilinePattern, nil + } + return nil, nil +} + +// Maps strftime format strings to regex +var strftimeToRegex = map[string]string{ + /*weekdayShort */ `%a`: `(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun)`, + /*weekdayFull */ `%A`: `(?:Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)`, + /*weekdayZeroIndex */ `%w`: `[0-6]`, + /*dayZeroPadded */ `%d`: `(?:0[1-9]|[1,2][0-9]|3[0,1])`, + /*monthShort */ `%b`: `(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)`, + /*monthFull */ `%B`: `(?:January|February|March|April|May|June|July|August|September|October|November|December)`, + /*monthZeroPadded */ `%m`: `(?:0[1-9]|1[0-2])`, + /*yearCentury */ `%Y`: `\d{4}`, + /*yearZeroPadded */ `%y`: `\d{2}`, + /*hour24ZeroPadded */ `%H`: `(?:[0,1][0-9]|2[0-3])`, + /*hour12ZeroPadded */ `%I`: `(?:0[0-9]|1[0-2])`, + /*AM or PM */ `%p`: "[A,P]M", + /*minuteZeroPadded */ `%M`: `[0-5][0-9]`, + /*secondZeroPadded */ `%S`: `[0-5][0-9]`, + /*microsecondZeroPadded */ `%f`: `\d{6}`, + /*utcOffset */ `%z`: `[+-]\d{4}`, + /*tzName */ `%Z`: `[A-Z]{1,4}T`, + /*dayOfYearZeroPadded */ `%j`: `(?:0[0-9][1-9]|[1,2][0-9][0-9]|3[0-5][0-9]|36[0-6])`, + /*milliseconds */ `%L`: `\.\d{3}`, +} + func parseLogGroup(info logger.Info, groupTemplate string) (string, error) { tmpl, err := templates.NewParse("log-group", groupTemplate) if err != nil { @@ -297,60 +359,108 @@ var newTicker = func(freq time.Duration) *time.Ticker { } // collectBatch 
executes as a goroutine to perform batching of log events for -// submission to the log stream. Batching is performed on time- and size- -// bases. Time-based batching occurs at a 5 second interval (defined in the -// batchPublishFrequency const). Size-based batching is performed on the -// maximum number of events per batch (defined in maximumLogEventsPerPut) and -// the maximum number of total bytes in a batch (defined in -// maximumBytesPerPut). Log messages are split by the maximum bytes per event -// (defined in maximumBytesPerEvent). There is a fixed per-event byte overhead -// (defined in perEventBytes) which is accounted for in split- and batch- -// calculations. +// submission to the log stream. If the awslogs-multiline-pattern or +// awslogs-datetime-format options have been configured, multiline processing +// is enabled, where log messages are stored in an event buffer until a multiline +// pattern match is found, at which point the messages in the event buffer are +// pushed to CloudWatch logs as a single log event. Multiline messages are processed +// according to the maximumBytesPerPut constraint, and the implementation only +// allows for messages to be buffered for a maximum of 2*batchPublishFrequency +// seconds. When events are ready to be processed for submission to CloudWatch +// Logs, the processEvents method is called. If a multiline pattern is not +// configured, log events are submitted to the processEvents method immediately. 
func (l *logStream) collectBatch() { timer := newTicker(batchPublishFrequency) var events []wrappedEvent - bytes := 0 + var eventBuffer []byte + var eventBufferTimestamp int64 for { select { - case <-timer.C: + case t := <-timer.C: + // If event buffer is older than batch publish frequency flush the event buffer + if eventBufferTimestamp > 0 && len(eventBuffer) > 0 { + eventBufferAge := t.UnixNano()/int64(time.Millisecond) - eventBufferTimestamp + eventBufferExpired := eventBufferAge > int64(batchPublishFrequency)/int64(time.Millisecond) + eventBufferNegative := eventBufferAge < 0 + if eventBufferExpired || eventBufferNegative { + events = l.processEvent(events, eventBuffer, eventBufferTimestamp) + } + } l.publishBatch(events) events = events[:0] - bytes = 0 case msg, more := <-l.messages: if !more { + // Flush event buffer + events = l.processEvent(events, eventBuffer, eventBufferTimestamp) l.publishBatch(events) return } - unprocessedLine := msg.Line - for len(unprocessedLine) > 0 { - // Split line length so it does not exceed the maximum - lineBytes := len(unprocessedLine) - if lineBytes > maximumBytesPerEvent { - lineBytes = maximumBytesPerEvent - } - line := unprocessedLine[:lineBytes] - unprocessedLine = unprocessedLine[lineBytes:] - if (len(events) >= maximumLogEventsPerPut) || (bytes+lineBytes+perEventBytes > maximumBytesPerPut) { - // Publish an existing batch if it's already over the maximum number of events or if adding this - // event would push it over the maximum number of total bytes. 
- l.publishBatch(events) - events = events[:0] - bytes = 0 - } - events = append(events, wrappedEvent{ - inputLogEvent: &cloudwatchlogs.InputLogEvent{ - Message: aws.String(string(line)), - Timestamp: aws.Int64(msg.Timestamp.UnixNano() / int64(time.Millisecond)), - }, - insertOrder: len(events), - }) - bytes += (lineBytes + perEventBytes) + if eventBufferTimestamp == 0 { + eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) + } + unprocessedLine := msg.Line + if l.multilinePattern != nil { + if l.multilinePattern.Match(unprocessedLine) { + // This is a new log event so flush the current eventBuffer to events + events = l.processEvent(events, eventBuffer, eventBufferTimestamp) + eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) + eventBuffer = eventBuffer[:0] + } + // If we will exceed max bytes per event flush the current event buffer before appending + if len(eventBuffer)+len(unprocessedLine) > maximumBytesPerEvent { + events = l.processEvent(events, eventBuffer, eventBufferTimestamp) + eventBuffer = eventBuffer[:0] + } + // Append new line + processedLine := append(unprocessedLine, "\n"...) + eventBuffer = append(eventBuffer, processedLine...) + logger.PutMessage(msg) + } else { + events = l.processEvent(events, unprocessedLine, msg.Timestamp.UnixNano()/int64(time.Millisecond)) + logger.PutMessage(msg) } - logger.PutMessage(msg) } } } +// processEvent processes log events that are ready for submission to CloudWatch +// logs. Batching is performed on time- and size-bases. Time-based batching +// occurs at a 5 second interval (defined in the batchPublishFrequency const). +// Size-based batching is performed on the maximum number of events per batch +// (defined in maximumLogEventsPerPut) and the maximum number of total bytes in a +// batch (defined in maximumBytesPerPut). Log messages are split by the maximum +// bytes per event (defined in maximumBytesPerEvent). 
There is a fixed per-event +// byte overhead (defined in perEventBytes) which is accounted for in split- and +// batch-calculations. +func (l *logStream) processEvent(events []wrappedEvent, unprocessedLine []byte, timestamp int64) []wrappedEvent { + bytes := 0 + for len(unprocessedLine) > 0 { + // Split line length so it does not exceed the maximum + lineBytes := len(unprocessedLine) + if lineBytes > maximumBytesPerEvent { + lineBytes = maximumBytesPerEvent + } + line := unprocessedLine[:lineBytes] + unprocessedLine = unprocessedLine[lineBytes:] + if (len(events) >= maximumLogEventsPerPut) || (bytes+lineBytes+perEventBytes > maximumBytesPerPut) { + // Publish an existing batch if it's already over the maximum number of events or if adding this + // event would push it over the maximum number of total bytes. + l.publishBatch(events) + events = events[:0] + bytes = 0 + } + events = append(events, wrappedEvent{ + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(string(line)), + Timestamp: aws.Int64(timestamp), + }, + insertOrder: len(events), + }) + bytes += (lineBytes + perEventBytes) + } + return events +} + // publishBatch calls PutLogEvents for a given set of InputLogEvents, // accounting for sequencing requirements (each request must reference the // sequence token returned by the previous request). 
@@ -419,7 +529,8 @@ func (l *logStream) putLogEvents(events []*cloudwatchlogs.InputLogEvent, sequenc } // ValidateLogOpt looks for awslogs-specific log options awslogs-region, -// awslogs-group, awslogs-stream, awslogs-create-group +// awslogs-group, awslogs-stream, awslogs-create-group, awslogs-datetime-format, +// awslogs-multiline-pattern func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { @@ -428,6 +539,8 @@ func ValidateLogOpt(cfg map[string]string) error { case logCreateGroupKey: case regionKey: case tagKey: + case datetimeFormatKey: + case multilinePatternKey: default: return fmt.Errorf("unknown log opt '%s' for %s log driver", key, name) } @@ -440,6 +553,11 @@ func ValidateLogOpt(cfg map[string]string) error { return fmt.Errorf("must specify valid value for log opt '%s': %v", logCreateGroupKey, err) } } + _, datetimeFormatKeyExists := cfg[datetimeFormatKey] + _, multilinePatternKeyExists := cfg[multilinePatternKey] + if datetimeFormatKeyExists && multilinePatternKeyExists { + return fmt.Errorf("you cannot configure log opt '%s' and '%s' at the same time", datetimeFormatKey, multilinePatternKey) + } return nil } @@ -472,9 +590,9 @@ func (slice byTimestamp) Swap(i, j int) { } func unwrapEvents(events []wrappedEvent) []*cloudwatchlogs.InputLogEvent { - cwEvents := []*cloudwatchlogs.InputLogEvent{} - for _, input := range events { - cwEvents = append(cwEvents, input.inputLogEvent) + cwEvents := make([]*cloudwatchlogs.InputLogEvent, len(events)) + for i, input := range events { + cwEvents[i] = input.inputLogEvent } return cwEvents } diff --git a/fn/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs_test.go b/fn/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs_test.go index ac0bb09c3..e3862ffeb 100644 --- a/fn/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs_test.go +++ b/fn/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs_test.go @@ -5,6 +5,7 @@ 
import ( "fmt" "net/http" "reflect" + "regexp" "runtime" "strings" "testing" @@ -17,6 +18,7 @@ import ( "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/dockerversion" + "github.com/stretchr/testify/assert" ) const ( @@ -24,9 +26,26 @@ const ( streamName = "streamName" sequenceToken = "sequenceToken" nextSequenceToken = "nextSequenceToken" - logline = "this is a log line" + logline = "this is a log line\r" + multilineLogline = "2017-01-01 01:01:44 This is a multiline log entry\r" ) +// Generates i multi-line events each with j lines +func (l *logStream) logGenerator(lineCount int, multilineCount int) { + for i := 0; i < multilineCount; i++ { + l.Log(&logger.Message{ + Line: []byte(multilineLogline), + Timestamp: time.Time{}, + }) + for j := 0; j < lineCount; j++ { + l.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Time{}, + }) + } + } +} + func TestNewAWSLogsClientUserAgentHandler(t *testing.T) { info := logger.Info{ Config: map[string]string{ @@ -471,6 +490,216 @@ func TestCollectBatchTicker(t *testing.T) { } +func TestCollectBatchMultilinePattern(t *testing.T) { + mockClient := newMockClient() + multilinePattern := regexp.MustCompile("xxxx") + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + multilinePattern: multilinePattern, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Now(), + }) + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Now(), + }) + 
stream.Log(&logger.Message{ + Line: []byte("xxxx " + logline), + Timestamp: time.Now(), + }) + + ticks <- time.Now() + + // Verify single multiline event + argument := <-mockClient.putLogEventsArgument + assert.NotNil(t, argument, "Expected non-nil PutLogEventsInput") + assert.Equal(t, 1, len(argument.LogEvents), "Expected single multiline event") + assert.Equal(t, logline+"\n"+logline+"\n", *argument.LogEvents[0].Message, "Received incorrect multiline message") + + stream.Close() + + // Verify single event + argument = <-mockClient.putLogEventsArgument + assert.NotNil(t, argument, "Expected non-nil PutLogEventsInput") + assert.Equal(t, 1, len(argument.LogEvents), "Expected single multiline event") + assert.Equal(t, "xxxx "+logline+"\n", *argument.LogEvents[0].Message, "Received incorrect multiline message") +} + +func BenchmarkCollectBatch(b *testing.B) { + for i := 0; i < b.N; i++ { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + stream.logGenerator(10, 100) + ticks <- time.Time{} + stream.Close() + } +} + +func BenchmarkCollectBatchMultilinePattern(b *testing.B) { + for i := 0; i < b.N; i++ { + mockClient := newMockClient() + multilinePattern := regexp.MustCompile(`\d{4}-(?:0[1-9]|1[0-2])-(?:0[1-9]|[1,2][0-9]|3[0,1]) (?:[0,1][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9]`) + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + multilinePattern: multilinePattern, + sequenceToken: aws.String(sequenceToken), + messages: make(chan 
*logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + go stream.collectBatch() + stream.logGenerator(10, 100) + ticks <- time.Time{} + stream.Close() + } +} + +func TestCollectBatchMultilinePatternMaxEventAge(t *testing.T) { + mockClient := newMockClient() + multilinePattern := regexp.MustCompile("xxxx") + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + multilinePattern: multilinePattern, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Now(), + }) + + // Log an event 1 second later + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Now().Add(time.Second), + }) + + // Fire ticker batchPublishFrequency seconds later + ticks <- time.Now().Add(batchPublishFrequency * time.Second) + + // Verify single multiline event is flushed after maximum event buffer age (batchPublishFrequency) + argument := <-mockClient.putLogEventsArgument + assert.NotNil(t, argument, "Expected non-nil PutLogEventsInput") + assert.Equal(t, 1, len(argument.LogEvents), "Expected single multiline event") + assert.Equal(t, logline+"\n"+logline+"\n", *argument.LogEvents[0].Message, "Received incorrect multiline message") + + stream.Close() +} + +func TestCollectBatchMultilinePatternNegativeEventAge(t *testing.T) { + mockClient := 
newMockClient() + multilinePattern := regexp.MustCompile("xxxx") + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + multilinePattern: multilinePattern, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Now(), + }) + + // Log an event 1 second later + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Now().Add(time.Second), + }) + + // Fire ticker in past to simulate negative event buffer age + ticks <- time.Now().Add(-time.Second) + + // Verify single multiline event is flushed with a negative event buffer age + argument := <-mockClient.putLogEventsArgument + assert.NotNil(t, argument, "Expected non-nil PutLogEventsInput") + assert.Equal(t, 1, len(argument.LogEvents), "Expected single multiline event") + assert.Equal(t, logline+"\n"+logline+"\n", *argument.LogEvents[0].Message, "Received incorrect multiline message") + + stream.Close() +} + func TestCollectBatchClose(t *testing.T) { mockClient := newMockClient() stream := &logStream{ @@ -724,6 +953,58 @@ func TestCollectBatchWithDuplicateTimestamps(t *testing.T) { } } +func TestParseLogOptionsMultilinePattern(t *testing.T) { + info := logger.Info{ + Config: map[string]string{ + multilinePatternKey: "^xxxx", + }, + } + + multilinePattern, err := parseMultilineOptions(info) + assert.Nil(t, err, "Received unexpected error") + assert.True(t, multilinePattern.MatchString("xxxx"), "No multiline pattern match found") +} + +func TestParseLogOptionsDatetimeFormat(t *testing.T) { + datetimeFormatTests := []struct { + 
format string + match string + }{ + {"%d/%m/%y %a %H:%M:%S%L %Z", "31/12/10 Mon 08:42:44.345 NZDT"}, + {"%Y-%m-%d %A %I:%M:%S.%f%p%z", "2007-12-04 Monday 08:42:44.123456AM+1200"}, + {"%b|%b|%b|%b|%b|%b|%b|%b|%b|%b|%b|%b", "Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec"}, + {"%B|%B|%B|%B|%B|%B|%B|%B|%B|%B|%B|%B", "January|February|March|April|May|June|July|August|September|October|November|December"}, + {"%A|%A|%A|%A|%A|%A|%A", "Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday"}, + {"%a|%a|%a|%a|%a|%a|%a", "Mon|Tue|Wed|Thu|Fri|Sat|Sun"}, + {"Day of the week: %w, Day of the year: %j", "Day of the week: 4, Day of the year: 091"}, + } + for _, dt := range datetimeFormatTests { + t.Run(dt.match, func(t *testing.T) { + info := logger.Info{ + Config: map[string]string{ + datetimeFormatKey: dt.format, + }, + } + multilinePattern, err := parseMultilineOptions(info) + assert.Nil(t, err, "Received unexpected error") + assert.True(t, multilinePattern.MatchString(dt.match), "No multiline pattern match found") + }) + } +} + +func TestValidateLogOptionsDatetimeFormatAndMultilinePattern(t *testing.T) { + cfg := map[string]string{ + multilinePatternKey: "^xxxx", + datetimeFormatKey: "%Y-%m-%d", + logGroupKey: groupName, + } + conflictingLogOptionsError := "you cannot configure log opt 'awslogs-datetime-format' and 'awslogs-multiline-pattern' at the same time" + + err := ValidateLogOpt(cfg) + assert.NotNil(t, err, "Expected an error") + assert.Equal(t, err.Error(), conflictingLogOptionsError, "Received invalid error") +} + func TestCreateTagSuccess(t *testing.T) { mockClient := newMockClient() info := logger.Info{ @@ -753,3 +1034,20 @@ func TestCreateTagSuccess(t *testing.T) { t.Errorf("Expected LogStreamName to be %s", "test-container/container-abcdefghijklmnopqrstuvwxyz01234567890") } } + +func BenchmarkUnwrapEvents(b *testing.B) { + events := make([]wrappedEvent, maximumLogEventsPerPut) + for i := 0; i < maximumLogEventsPerPut; i++ { + mes := strings.Repeat("0", 
maximumBytesPerEvent) + events[i].inputLogEvent = &cloudwatchlogs.InputLogEvent{ + Message: &mes, + } + } + + as := assert.New(b) + b.ResetTimer() + for i := 0; i < b.N; i++ { + res := unwrapEvents(events) + as.Len(res, maximumLogEventsPerPut) + } +} diff --git a/fn/vendor/github.com/docker/docker/daemon/logger/factory.go b/fn/vendor/github.com/docker/docker/daemon/logger/factory.go index 32d51effa..32001590d 100644 --- a/fn/vendor/github.com/docker/docker/daemon/logger/factory.go +++ b/fn/vendor/github.com/docker/docker/daemon/logger/factory.go @@ -2,6 +2,7 @@ package logger import ( "fmt" + "sort" "sync" containertypes "github.com/docker/docker/api/types/container" @@ -23,6 +24,22 @@ type logdriverFactory struct { m sync.Mutex } +func (lf *logdriverFactory) list() []string { + ls := make([]string, 0, len(lf.registry)) + lf.m.Lock() + for name := range lf.registry { + ls = append(ls, name) + } + lf.m.Unlock() + sort.Strings(ls) + return ls +} + +// ListDrivers gets the list of registered log driver names +func ListDrivers() []string { + return factory.list() +} + func (lf *logdriverFactory) register(name string, c Creator) error { if lf.driverRegistered(name) { return fmt.Errorf("logger: log driver named '%s' is already registered", name) diff --git a/fn/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging.go b/fn/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging.go index 7d6475bf4..a33566ae1 100644 --- a/fn/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging.go +++ b/fn/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging.go @@ -12,6 +12,7 @@ import ( "cloud.google.com/go/logging" "github.com/Sirupsen/logrus" "golang.org/x/net/context" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" ) const ( @@ -60,7 +61,7 @@ type gcplogs struct { type dockerLogEntry struct { Instance *instanceInfo `json:"instance,omitempty"` Container *containerInfo `json:"container,omitempty"` - Data string 
`json:"data,omitempty"` + Message string `json:"message,omitempty"` } type instanceInfo struct { @@ -128,7 +129,35 @@ func New(info logger.Info) (logger.Logger, error) { if err != nil { return nil, err } - lg := c.Logger("gcplogs-docker-driver") + var instanceResource *instanceInfo + if onGCE { + instanceResource = &instanceInfo{ + Zone: zone, + Name: instanceName, + ID: instanceID, + } + } else if info.Config[logZoneKey] != "" || info.Config[logNameKey] != "" || info.Config[logIDKey] != "" { + instanceResource = &instanceInfo{ + Zone: info.Config[logZoneKey], + Name: info.Config[logNameKey], + ID: info.Config[logIDKey], + } + } + + options := []logging.LoggerOption{} + if instanceResource != nil { + vmMrpb := logging.CommonResource( + &mrpb.MonitoredResource{ + Type: "gce_instance", + Labels: map[string]string{ + "instance_id": instanceResource.ID, + "zone": instanceResource.Zone, + }, + }, + ) + options = []logging.LoggerOption{vmMrpb} + } + lg := c.Logger("gcplogs-docker-driver", options...) 
if err := c.Ping(context.Background()); err != nil { return nil, fmt.Errorf("unable to connect or authenticate with Google Cloud Logging: %v", err) @@ -155,18 +184,8 @@ func New(info logger.Info) (logger.Logger, error) { l.container.Command = info.Command() } - if onGCE { - l.instance = &instanceInfo{ - Zone: zone, - Name: instanceName, - ID: instanceID, - } - } else if info.Config[logZoneKey] != "" || info.Config[logNameKey] != "" || info.Config[logIDKey] != "" { - l.instance = &instanceInfo{ - Zone: info.Config[logZoneKey], - Name: info.Config[logNameKey], - ID: info.Config[logIDKey], - } + if instanceResource != nil { + l.instance = instanceResource } // The logger "overflows" at a rate of 10,000 logs per second and this @@ -200,7 +219,7 @@ func ValidateLogOpts(cfg map[string]string) error { } func (l *gcplogs) Log(m *logger.Message) error { - data := string(m.Line) + message := string(m.Line) ts := m.Timestamp logger.PutMessage(m) @@ -209,7 +228,7 @@ func (l *gcplogs) Log(m *logger.Message) error { Payload: &dockerLogEntry{ Instance: l.instance, Container: l.container, - Data: data, + Message: message, }, }) return nil diff --git a/fn/vendor/github.com/docker/docker/daemon/logger/journald/journald.go b/fn/vendor/github.com/docker/docker/daemon/logger/journald/journald.go index 04ae84b6d..86d7378b5 100644 --- a/fn/vendor/github.com/docker/docker/daemon/logger/journald/journald.go +++ b/fn/vendor/github.com/docker/docker/daemon/logger/journald/journald.go @@ -18,12 +18,13 @@ import ( const name = "journald" type journald struct { + mu sync.Mutex vars map[string]string // additional variables and values to send to the journal along with the log message readers readerList + closed bool } type readerList struct { - mu sync.Mutex readers map[*logger.LogWatcher]*logger.LogWatcher } @@ -111,9 +112,10 @@ func (s *journald) Log(msg *logger.Message) error { } line := string(msg.Line) + source := msg.Source logger.PutMessage(msg) - if msg.Source == "stderr" { + if source 
== "stderr" { return journal.Send(line, journal.PriErr, vars) } return journal.Send(line, journal.PriInfo, vars) diff --git a/fn/vendor/github.com/docker/docker/daemon/logger/journald/read.go b/fn/vendor/github.com/docker/docker/daemon/logger/journald/read.go index 9b896e0dc..9ecc3b521 100644 --- a/fn/vendor/github.com/docker/docker/daemon/logger/journald/read.go +++ b/fn/vendor/github.com/docker/docker/daemon/logger/journald/read.go @@ -161,11 +161,12 @@ import ( ) func (s *journald) Close() error { - s.readers.mu.Lock() + s.mu.Lock() + s.closed = true for reader := range s.readers.readers { reader.Close() } - s.readers.mu.Unlock() + s.mu.Unlock() return nil } @@ -245,9 +246,16 @@ drain: } func (s *journald) followJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, pfd [2]C.int, cursor *C.char) *C.char { - s.readers.mu.Lock() + s.mu.Lock() s.readers.readers[logWatcher] = logWatcher - s.readers.mu.Unlock() + if s.closed { + // the journald Logger is closed, presumably because the container has been + // reset. So we shouldn't follow, because we'll never be woken up. But we + // should make one more drainJournal call to be sure we've got all the logs. + // Close pfd[1] so that one drainJournal happens, then cleanup, then return. + C.close(pfd[1]) + } + s.mu.Unlock() newCursor := make(chan *C.char) @@ -274,22 +282,22 @@ func (s *journald) followJournal(logWatcher *logger.LogWatcher, config logger.Re // Clean up. C.close(pfd[0]) - s.readers.mu.Lock() + s.mu.Lock() delete(s.readers.readers, logWatcher) - s.readers.mu.Unlock() + s.mu.Unlock() close(logWatcher.Msg) newCursor <- cursor }() // Wait until we're told to stop. select { + case cursor = <-newCursor: case <-logWatcher.WatchClose(): // Notify the other goroutine that its work is done. 
C.close(pfd[1]) + cursor = <-newCursor } - cursor = <-newCursor - return cursor } diff --git a/fn/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go b/fn/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go index 5ad701a0d..e8df0ecbd 100644 --- a/fn/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go +++ b/fn/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go @@ -7,6 +7,7 @@ import ( "bytes" "encoding/json" "fmt" + "io" "strconv" "sync" @@ -15,6 +16,7 @@ import ( "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/pkg/jsonlog" "github.com/docker/go-units" + "github.com/pkg/errors" ) // Name is the name of the file that the jsonlogger logs to. @@ -22,11 +24,13 @@ const Name = "json-file" // JSONFileLogger is Logger implementation for default Docker logging. type JSONFileLogger struct { - buf *bytes.Buffer + extra []byte // json-encoded extra attributes + + mu sync.RWMutex + buf *bytes.Buffer // avoids allocating a new buffer on each call to `Log()` + closed bool writer *loggerutils.RotateFileWriter - mu sync.Mutex readers map[*logger.LogWatcher]struct{} // stores the active log followers - extra []byte // json-encoded extra attributes } func init() { @@ -89,33 +93,45 @@ func New(info logger.Info) (logger.Logger, error) { // Log converts logger.Message to jsonlog.JSONLog and serializes it to file. 
func (l *JSONFileLogger) Log(msg *logger.Message) error { + l.mu.Lock() + err := writeMessageBuf(l.writer, msg, l.extra, l.buf) + l.buf.Reset() + l.mu.Unlock() + return err +} + +func writeMessageBuf(w io.Writer, m *logger.Message, extra json.RawMessage, buf *bytes.Buffer) error { + if err := marshalMessage(m, extra, buf); err != nil { + logger.PutMessage(m) + return err + } + logger.PutMessage(m) + if _, err := w.Write(buf.Bytes()); err != nil { + return errors.Wrap(err, "error writing log entry") + } + return nil +} + +func marshalMessage(msg *logger.Message, extra json.RawMessage, buf *bytes.Buffer) error { timestamp, err := jsonlog.FastTimeMarshalJSON(msg.Timestamp) if err != nil { return err } - l.mu.Lock() - logline := msg.Line + logLine := msg.Line if !msg.Partial { - logline = append(msg.Line, '\n') + logLine = append(msg.Line, '\n') } err = (&jsonlog.JSONLogs{ - Log: logline, + Log: logLine, Stream: msg.Source, Created: timestamp, - RawAttrs: l.extra, - }).MarshalJSONBuf(l.buf) - logger.PutMessage(msg) + RawAttrs: extra, + }).MarshalJSONBuf(buf) if err != nil { - l.mu.Unlock() - return err + return errors.Wrap(err, "error writing log message to buffer") } - - l.buf.WriteByte('\n') - _, err = l.writer.Write(l.buf.Bytes()) - l.buf.Reset() - l.mu.Unlock() - - return err + err = buf.WriteByte('\n') + return errors.Wrap(err, "error finalizing log buffer") } // ValidateLogOpt looks for json specific log options max-file & max-size. @@ -142,6 +158,7 @@ func (l *JSONFileLogger) LogPath() string { // Close closes underlying file and signals all readers to stop. 
func (l *JSONFileLogger) Close() error { l.mu.Lock() + l.closed = true err := l.writer.Close() for r := range l.readers { r.Close() diff --git a/fn/vendor/github.com/docker/docker/pkg/ioutils/multireader.go b/fn/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/multireader/multireader.go similarity index 96% rename from fn/vendor/github.com/docker/docker/pkg/ioutils/multireader.go rename to fn/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/multireader/multireader.go index edb043ddc..1993f1d76 100644 --- a/fn/vendor/github.com/docker/docker/pkg/ioutils/multireader.go +++ b/fn/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/multireader/multireader.go @@ -1,4 +1,4 @@ -package ioutils +package multireader import ( "bytes" @@ -46,7 +46,9 @@ func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { rdrOffset := offset - tmpOffset idx := i - rdr.Seek(rdrOffset, os.SEEK_SET) + if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { + return -1, err + } // make sure all following readers are at 0 for _, rdr := range r.readers[i+1:] { rdr.Seek(0, os.SEEK_SET) @@ -67,7 +69,9 @@ func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { } tmpOffset += s } - r.Seek(tmpOffset+offset, os.SEEK_SET) + if _, err := r.Seek(tmpOffset+offset, os.SEEK_SET); err != nil { + return -1, err + } return tmpOffset + offset, nil case os.SEEK_CUR: if r.pos == nil { diff --git a/fn/vendor/github.com/docker/docker/pkg/ioutils/multireader_test.go b/fn/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/multireader/multireader_test.go similarity index 99% rename from fn/vendor/github.com/docker/docker/pkg/ioutils/multireader_test.go rename to fn/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/multireader/multireader_test.go index 060473989..bd59b78ca 100644 --- a/fn/vendor/github.com/docker/docker/pkg/ioutils/multireader_test.go +++ 
b/fn/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/multireader/multireader_test.go @@ -1,4 +1,4 @@ -package ioutils +package multireader import ( "bytes" diff --git a/fn/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go b/fn/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go index 30d533fc1..3fe596724 100644 --- a/fn/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go +++ b/fn/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go @@ -3,7 +3,6 @@ package jsonfilelog import ( "bytes" "encoding/json" - "errors" "fmt" "io" "os" @@ -14,10 +13,11 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog/multireader" "github.com/docker/docker/pkg/filenotify" - "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/jsonlog" "github.com/docker/docker/pkg/tailfile" + "github.com/pkg/errors" ) const maxJSONDecodeRetry = 20000 @@ -48,10 +48,11 @@ func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { defer close(logWatcher.Msg) - // lock so the read stream doesn't get corrupted due to rotations or other log data written while we read + // lock so the read stream doesn't get corrupted due to rotations or other log data written while we open these files // This will block writes!!! 
- l.mu.Lock() + l.mu.RLock() + // TODO it would be nice to move a lot of this reader implementation to the rotate logger object pth := l.writer.LogPath() var files []io.ReadSeeker for i := l.writer.MaxFiles(); i > 1; i-- { @@ -59,25 +60,36 @@ func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.R if err != nil { if !os.IsNotExist(err) { logWatcher.Err <- err - break + l.mu.RUnlock() + return } continue } defer f.Close() - files = append(files, f) } latestFile, err := os.Open(pth) if err != nil { - logWatcher.Err <- err - l.mu.Unlock() + logWatcher.Err <- errors.Wrap(err, "error opening latest log file") + l.mu.RUnlock() return } defer latestFile.Close() + latestChunk, err := newSectionReader(latestFile) + + // Now we have the reader sectioned, all fd's opened, we can unlock. + // New writes/rotates will not affect seeking through these files + l.mu.RUnlock() + + if err != nil { + logWatcher.Err <- err + return + } + if config.Tail != 0 { - tailer := ioutils.MultiReadSeeker(append(files, latestFile)...) + tailer := multireader.MultiReadSeeker(append(files, latestChunk)...) 
tailFile(tailer, logWatcher, config.Tail, config.Since) } @@ -88,29 +100,32 @@ func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.R } } - if !config.Follow { - if err := latestFile.Close(); err != nil { - logrus.Errorf("Error closing file: %v", err) - } - l.mu.Unlock() + if !config.Follow || l.closed { return } - if config.Tail >= 0 { - latestFile.Seek(0, os.SEEK_END) - } + notifyRotate := l.writer.NotifyRotate() + defer l.writer.NotifyRotateEvict(notifyRotate) + l.mu.Lock() l.readers[logWatcher] = struct{}{} l.mu.Unlock() - notifyRotate := l.writer.NotifyRotate() followLogs(latestFile, logWatcher, notifyRotate, config.Since) l.mu.Lock() delete(l.readers, logWatcher) l.mu.Unlock() +} - l.writer.NotifyRotateEvict(notifyRotate) +func newSectionReader(f *os.File) (*io.SectionReader, error) { + // seek to the end to get the size + // we'll leave this at the end of the file since section reader does not advance the reader + size, err := f.Seek(0, os.SEEK_END) + if err != nil { + return nil, errors.Wrap(err, "error getting current file size") + } + return io.NewSectionReader(f, 0, size), nil } func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) { diff --git a/fn/vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go b/fn/vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go index 99e0964ae..457a39b5a 100644 --- a/fn/vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go +++ b/fn/vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go @@ -1,6 +1,7 @@ package loggerutils import ( + "errors" "os" "strconv" "sync" @@ -11,6 +12,7 @@ import ( // RotateFileWriter is Logger implementation for default Docker logging. 
type RotateFileWriter struct { f *os.File // store for closing + closed bool mu sync.Mutex capacity int64 //maximum size of each file currentSize int64 // current size of the latest file @@ -42,6 +44,10 @@ func NewRotateFileWriter(logPath string, capacity int64, maxFiles int) (*RotateF //WriteLog write log message to File func (w *RotateFileWriter) Write(message []byte) (int, error) { w.mu.Lock() + if w.closed { + w.mu.Unlock() + return -1, errors.New("cannot write because the output file was closed") + } if err := w.checkCapacityAndRotate(); err != nil { w.mu.Unlock() return -1, err @@ -68,7 +74,7 @@ func (w *RotateFileWriter) checkCapacityAndRotate() error { if err := rotate(name, w.maxFiles); err != nil { return err } - file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 06400) + file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0640) if err != nil { return err } @@ -100,6 +106,8 @@ func rotate(name string, maxFiles int) error { // LogPath returns the location the given writer logs to. func (w *RotateFileWriter) LogPath() string { + w.mu.Lock() + defer w.mu.Unlock() return w.f.Name() } @@ -120,5 +128,14 @@ func (w *RotateFileWriter) NotifyRotateEvict(sub chan interface{}) { // Close closes underlying file and signals all readers to stop. 
func (w *RotateFileWriter) Close() error { - return w.f.Close() + w.mu.Lock() + defer w.mu.Unlock() + if w.closed { + return nil + } + if err := w.f.Close(); err != nil { + return err + } + w.closed = true + return nil } diff --git a/fn/vendor/github.com/docker/docker/daemon/logger/plugin.go b/fn/vendor/github.com/docker/docker/daemon/logger/plugin.go index de618c5ae..bdccea5b2 100644 --- a/fn/vendor/github.com/docker/docker/daemon/logger/plugin.go +++ b/fn/vendor/github.com/docker/docker/daemon/logger/plugin.go @@ -59,6 +59,7 @@ func makePluginCreator(name string, l *logPluginProxy, basePath string) Creator driverName: name, id: id, plugin: l, + basePath: basePath, fifoPath: filepath.Join(root, id), logInfo: logCtx, } diff --git a/fn/vendor/github.com/docker/docker/daemon/logger/ring.go b/fn/vendor/github.com/docker/docker/daemon/logger/ring.go index 90769d71e..5c5595547 100644 --- a/fn/vendor/github.com/docker/docker/daemon/logger/ring.go +++ b/fn/vendor/github.com/docker/docker/daemon/logger/ring.go @@ -121,7 +121,7 @@ func (r *RingLogger) run() { type messageRing struct { mu sync.Mutex - // singals callers of `Dequeue` to wake up either on `Close` or when a new `Message` is added + // signals callers of `Dequeue` to wake up either on `Close` or when a new `Message` is added wait *sync.Cond sizeBytes int64 // current buffer size diff --git a/fn/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go b/fn/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go index 925473aee..42855e117 100644 --- a/fn/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go +++ b/fn/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go @@ -133,8 +133,9 @@ func New(info logger.Info) (logger.Logger, error) { func (s *syslogger) Log(msg *logger.Message) error { line := string(msg.Line) + source := msg.Source logger.PutMessage(msg) - if msg.Source == "stderr" { + if source == "stderr" { return s.writer.Err(line) } return s.writer.Info(line) diff --git 
a/fn/vendor/github.com/docker/docker/daemon/logs.go b/fn/vendor/github.com/docker/docker/daemon/logs.go index b207fb693..96e1b8a49 100644 --- a/fn/vendor/github.com/docker/docker/daemon/logs.go +++ b/fn/vendor/github.com/docker/docker/daemon/logs.go @@ -45,17 +45,24 @@ func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, c return nil, logger.ErrReadLogsNotSupported } - cLog, err := daemon.getLogger(container) + cLog, cLogCreated, err := daemon.getLogger(container) if err != nil { return nil, err } + if cLogCreated { + defer func() { + if err = cLog.Close(); err != nil { + logrus.Errorf("Error closing logger: %v", err) + } + }() + } logReader, ok := cLog.(logger.LogReader) if !ok { return nil, logger.ErrReadLogsNotSupported } - follow := config.Follow && container.IsRunning() + follow := config.Follow && !cLogCreated tailLines, err := strconv.Atoi(config.Tail) if err != nil { tailLines = -1 @@ -85,23 +92,8 @@ func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, c messageChan := make(chan *backend.LogMessage, 1) go func() { // set up some defers - defer func() { - // ok so this function, originally, was placed right after that - // logger.ReadLogs call above. I THINK that means it sets off the - // chain of events that results in the logger needing to be closed. - // i do not know if an error in time parsing above causing an early - // return will result in leaking the logger. if that is the case, - // it would also have been a bug in the original code - logs.Close() - if cLog != container.LogDriver { - // Since the logger isn't cached in the container, which - // occurs if it is running, it must get explicitly closed - // here to avoid leaking it and any file handles it has. - if err := cLog.Close(); err != nil { - logrus.Errorf("Error closing logger: %v", err) - } - } - }() + defer logs.Close() + // close the messages channel. 
closing is the only way to signal above // that we're doing with logs (other than context cancel i guess). defer close(messageChan) @@ -148,11 +140,17 @@ func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, c return messageChan, nil } -func (daemon *Daemon) getLogger(container *container.Container) (logger.Logger, error) { - if container.LogDriver != nil && container.IsRunning() { - return container.LogDriver, nil +func (daemon *Daemon) getLogger(container *container.Container) (l logger.Logger, created bool, err error) { + container.Lock() + if container.State.Running { + l = container.LogDriver } - return container.StartLogger() + container.Unlock() + if l == nil { + created = true + l, err = container.StartLogger() + } + return } // mergeLogConfig merges the daemon log config to the container's log config if the container's log driver is not specified. diff --git a/fn/vendor/github.com/docker/docker/daemon/metrics.go b/fn/vendor/github.com/docker/docker/daemon/metrics.go index 69dbfd937..bf9e49d04 100644 --- a/fn/vendor/github.com/docker/docker/daemon/metrics.go +++ b/fn/vendor/github.com/docker/docker/daemon/metrics.go @@ -1,16 +1,31 @@ package daemon -import "github.com/docker/go-metrics" +import ( + "path/filepath" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/go-metrics" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" +) + +const metricsPluginType = "MetricsCollector" var ( containerActions metrics.LabeledTimer + containerStates metrics.LabeledGauge imageActions metrics.LabeledTimer networkActions metrics.LabeledTimer - engineVersion metrics.LabeledGauge + engineInfo metrics.LabeledGauge engineCpus metrics.Gauge engineMemory metrics.Gauge healthChecksCounter metrics.Counter healthChecksFailedCounter metrics.Counter + + stateCtr *stateCounter ) func init() { @@ -25,18 +40,135 @@ func init() { } { 
containerActions.WithValues(a).Update(0) } + networkActions = ns.NewLabeledTimer("network_actions", "The number of seconds it takes to process each network action", "action") - engineVersion = ns.NewLabeledGauge("engine", "The version and commit information for the engine process", metrics.Unit("info"), + engineInfo = ns.NewLabeledGauge("engine", "The information related to the engine and the OS it is running on", metrics.Unit("info"), "version", "commit", "architecture", - "graph_driver", "kernel", - "os", + "graphdriver", + "kernel", "os", + "os_type", + "daemon_id", // ID is a randomly generated unique identifier (e.g. UUID4) ) engineCpus = ns.NewGauge("engine_cpus", "The number of cpus that the host system of the engine has", metrics.Unit("cpus")) engineMemory = ns.NewGauge("engine_memory", "The number of bytes of memory that the host system of the engine has", metrics.Bytes) healthChecksCounter = ns.NewCounter("health_checks", "The total number of health checks") healthChecksFailedCounter = ns.NewCounter("health_checks_failed", "The total number of failed health checks") imageActions = ns.NewLabeledTimer("image_actions", "The number of seconds it takes to process each image action", "action") + + stateCtr = newStateCounter(ns.NewDesc("container_states", "The count of containers in various states", metrics.Unit("containers"), "state")) + ns.Add(stateCtr) + metrics.Register(ns) } + +type stateCounter struct { + mu sync.Mutex + states map[string]string + desc *prometheus.Desc +} + +func newStateCounter(desc *prometheus.Desc) *stateCounter { + return &stateCounter{ + states: make(map[string]string), + desc: desc, + } +} + +func (ctr *stateCounter) get() (running int, paused int, stopped int) { + ctr.mu.Lock() + defer ctr.mu.Unlock() + + states := map[string]int{ + "running": 0, + "paused": 0, + "stopped": 0, + } + for _, state := range ctr.states { + states[state]++ + } + return states["running"], states["paused"], states["stopped"] +} + +func (ctr *stateCounter) 
set(id, label string) { + ctr.mu.Lock() + ctr.states[id] = label + ctr.mu.Unlock() +} + +func (ctr *stateCounter) del(id string) { + ctr.mu.Lock() + delete(ctr.states, id) + ctr.mu.Unlock() +} + +func (ctr *stateCounter) Describe(ch chan<- *prometheus.Desc) { + ch <- ctr.desc +} + +func (ctr *stateCounter) Collect(ch chan<- prometheus.Metric) { + running, paused, stopped := ctr.get() + ch <- prometheus.MustNewConstMetric(ctr.desc, prometheus.GaugeValue, float64(running), "running") + ch <- prometheus.MustNewConstMetric(ctr.desc, prometheus.GaugeValue, float64(paused), "paused") + ch <- prometheus.MustNewConstMetric(ctr.desc, prometheus.GaugeValue, float64(stopped), "stopped") +} + +func (d *Daemon) cleanupMetricsPlugins() { + ls := d.PluginStore.GetAllManagedPluginsByCap(metricsPluginType) + var wg sync.WaitGroup + wg.Add(len(ls)) + + for _, p := range ls { + go func() { + defer wg.Done() + pluginStopMetricsCollection(p) + }() + } + wg.Wait() + + if d.metricsPluginListener != nil { + d.metricsPluginListener.Close() + } +} + +type metricsPlugin struct { + plugingetter.CompatPlugin +} + +func (p metricsPlugin) sock() string { + return "metrics.sock" +} + +func (p metricsPlugin) sockBase() string { + return filepath.Join(p.BasePath(), "run", "docker") +} + +func pluginStartMetricsCollection(p plugingetter.CompatPlugin) error { + type metricsPluginResponse struct { + Err string + } + var res metricsPluginResponse + if err := p.Client().Call(metricsPluginType+".StartMetrics", nil, &res); err != nil { + return errors.Wrap(err, "could not start metrics plugin") + } + if res.Err != "" { + return errors.New(res.Err) + } + return nil +} + +func pluginStopMetricsCollection(p plugingetter.CompatPlugin) { + if err := p.Client().Call(metricsPluginType+".StopMetrics", nil, nil); err != nil { + logrus.WithError(err).WithField("name", p.Name()).Error("error stopping metrics collector") + } + + mp := metricsPlugin{p} + sockPath := filepath.Join(mp.sockBase(), mp.sock()) + if err := 
mount.Unmount(sockPath); err != nil { + if mounted, _ := mount.Mounted(sockPath); mounted { + logrus.WithError(err).WithField("name", p.Name()).WithField("socket", sockPath).Error("error unmounting metrics socket for plugin") + } + } + return +} diff --git a/fn/vendor/github.com/docker/docker/daemon/metrics_unix.go b/fn/vendor/github.com/docker/docker/daemon/metrics_unix.go new file mode 100644 index 000000000..cda7355e8 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/daemon/metrics_unix.go @@ -0,0 +1,86 @@ +// +build !windows + +package daemon + +import ( + "net" + "net/http" + "os" + "path/filepath" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" + metrics "github.com/docker/go-metrics" + "github.com/pkg/errors" +) + +func (daemon *Daemon) listenMetricsSock() (string, error) { + path := filepath.Join(daemon.configStore.ExecRoot, "metrics.sock") + syscall.Unlink(path) + l, err := net.Listen("unix", path) + if err != nil { + return "", errors.Wrap(err, "error setting up metrics plugin listener") + } + + mux := http.NewServeMux() + mux.Handle("/metrics", metrics.Handler()) + go func() { + http.Serve(l, mux) + }() + daemon.metricsPluginListener = l + return path, nil +} + +func registerMetricsPluginCallback(getter plugingetter.PluginGetter, sockPath string) { + getter.Handle(metricsPluginType, func(name string, client *plugins.Client) { + // Use lookup since nothing in the system can really reference it, no need + // to protect against removal + p, err := getter.Get(name, metricsPluginType, plugingetter.Lookup) + if err != nil { + return + } + + mp := metricsPlugin{p} + sockBase := mp.sockBase() + if err := os.MkdirAll(sockBase, 0755); err != nil { + logrus.WithError(err).WithField("name", name).WithField("path", sockBase).Error("error creating metrics plugin base path") + return + } + + defer func() { + if err != nil { + 
os.RemoveAll(sockBase) + } + }() + + pluginSockPath := filepath.Join(sockBase, mp.sock()) + _, err = os.Stat(pluginSockPath) + if err == nil { + mount.Unmount(pluginSockPath) + } else { + logrus.WithField("path", pluginSockPath).Debugf("creating plugin socket") + f, err := os.OpenFile(pluginSockPath, os.O_CREATE, 0600) + if err != nil { + return + } + f.Close() + } + + if err := mount.Mount(sockPath, pluginSockPath, "none", "bind,ro"); err != nil { + logrus.WithError(err).WithField("name", name).Error("could not mount metrics socket to plugin") + return + } + + if err := pluginStartMetricsCollection(p); err != nil { + if err := mount.Unmount(pluginSockPath); err != nil { + if mounted, _ := mount.Mounted(pluginSockPath); mounted { + logrus.WithError(err).WithField("sock_path", pluginSockPath).Error("error unmounting metrics socket from plugin during cleanup") + } + } + logrus.WithError(err).WithField("name", name).Error("error while initializing metrics plugin") + } + }) +} diff --git a/fn/vendor/github.com/docker/docker/daemon/metrics_unsupported.go b/fn/vendor/github.com/docker/docker/daemon/metrics_unsupported.go new file mode 100644 index 000000000..64dc1817a --- /dev/null +++ b/fn/vendor/github.com/docker/docker/daemon/metrics_unsupported.go @@ -0,0 +1,12 @@ +// +build windows + +package daemon + +import "github.com/docker/docker/pkg/plugingetter" + +func registerMetricsPluginCallback(getter plugingetter.PluginGetter, sockPath string) { +} + +func (daemon *Daemon) listenMetricsSock() (string, error) { + return "", nil +} diff --git a/fn/vendor/github.com/docker/docker/daemon/monitor.go b/fn/vendor/github.com/docker/docker/daemon/monitor.go index 9227525e7..5156d9a8e 100644 --- a/fn/vendor/github.com/docker/docker/daemon/monitor.go +++ b/fn/vendor/github.com/docker/docker/daemon/monitor.go @@ -9,10 +9,22 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/api/types" + "github.com/docker/docker/container" "github.com/docker/docker/libcontainerd" 
"github.com/docker/docker/restartmanager" ) +func (daemon *Daemon) setStateCounter(c *container.Container) { + switch c.StateString() { + case "paused": + stateCtr.set(c.ID, "paused") + case "running": + stateCtr.set(c.ID, "running") + default: + stateCtr.set(c.ID, "stopped") + } +} + // StateChanged updates daemon state changes from containerd func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error { c := daemon.containers.Get(id) @@ -27,31 +39,24 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error { return errors.New("Received StateOOM from libcontainerd on Windows. This should never happen.") } daemon.updateHealthMonitor(c) + if err := c.CheckpointTo(daemon.containersReplica); err != nil { + return err + } daemon.LogContainerEvent(c, "oom") case libcontainerd.StateExit: - // if container's AutoRemove flag is set, remove it after clean up - autoRemove := func() { - c.Lock() - ar := c.HostConfig.AutoRemove - c.Unlock() - if ar { - if err := daemon.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil { - logrus.Errorf("can't remove container %s: %v", c.ID, err) - } - } - } c.Lock() c.StreamConfig.Wait() c.Reset(false) - restart, wait, err := c.RestartManager().ShouldRestart(e.ExitCode, c.HasBeenManuallyStopped, time.Since(c.StartedAt)) + // If daemon is being shutdown, don't let the container restart + restart, wait, err := c.RestartManager().ShouldRestart(e.ExitCode, daemon.IsShuttingDown() || c.HasBeenManuallyStopped, time.Since(c.StartedAt)) if err == nil && restart { c.RestartCount++ c.SetRestarting(platformConstructExitStatus(e)) } else { c.SetStopped(platformConstructExitStatus(e)) - defer autoRemove() + defer daemon.autoRemove(c) } // cancel healthcheck here, they will be automatically @@ -67,13 +72,17 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error { go func() { err := <-wait if err == nil { + // daemon.netController is 
initialized when daemon is restoring containers. + // But containerStart will use daemon.netController segment. + // So to avoid panic at startup process, here must wait util daemon restore done. + daemon.waitForStartupDone() if err = daemon.containerStart(c, "", "", false); err != nil { logrus.Debugf("failed to restart container: %+v", err) } } if err != nil { c.SetStopped(platformConstructExitStatus(e)) - defer autoRemove() + defer daemon.autoRemove(c) if err != restartmanager.ErrRestartCanceled { logrus.Errorf("restartmanger wait error: %+v", err) } @@ -81,8 +90,10 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error { }() } + daemon.setStateCounter(c) + defer c.Unlock() - if err := c.ToDisk(); err != nil { + if err := c.CheckpointTo(daemon.containersReplica); err != nil { return err } return daemon.postRunProcessing(c, e) @@ -109,29 +120,54 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error { c.SetRunning(int(e.Pid), e.State == libcontainerd.StateStart) c.HasBeenManuallyStopped = false c.HasBeenStartedBefore = true - if err := c.ToDisk(); err != nil { + daemon.setStateCounter(c) + + daemon.initHealthMonitor(c) + if err := c.CheckpointTo(daemon.containersReplica); err != nil { c.Reset(false) return err } - daemon.initHealthMonitor(c) + daemon.LogContainerEvent(c, "start") case libcontainerd.StatePause: // Container is already locked in this case c.Paused = true - if err := c.ToDisk(); err != nil { + daemon.setStateCounter(c) + daemon.updateHealthMonitor(c) + if err := c.CheckpointTo(daemon.containersReplica); err != nil { return err } - daemon.updateHealthMonitor(c) daemon.LogContainerEvent(c, "pause") case libcontainerd.StateResume: // Container is already locked in this case c.Paused = false - if err := c.ToDisk(); err != nil { + daemon.setStateCounter(c) + daemon.updateHealthMonitor(c) + if err := c.CheckpointTo(daemon.containersReplica); err != nil { return err } - daemon.updateHealthMonitor(c) 
daemon.LogContainerEvent(c, "unpause") } - return nil } + +func (daemon *Daemon) autoRemove(c *container.Container) { + c.Lock() + ar := c.HostConfig.AutoRemove + c.Unlock() + if !ar { + return + } + + var err error + if err = daemon.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err == nil { + return + } + if c := daemon.containers.Get(c.ID); c == nil { + return + } + + if err != nil { + logrus.WithError(err).WithField("container", c.ID).Error("error removing container") + } +} diff --git a/fn/vendor/github.com/docker/docker/daemon/mounts.go b/fn/vendor/github.com/docker/docker/daemon/mounts.go index 1c11f86a8..35c6ed59a 100644 --- a/fn/vendor/github.com/docker/docker/daemon/mounts.go +++ b/fn/vendor/github.com/docker/docker/daemon/mounts.go @@ -4,6 +4,7 @@ import ( "fmt" "strings" + mounttypes "github.com/docker/docker/api/types/mount" "github.com/docker/docker/container" volumestore "github.com/docker/docker/volume/store" ) @@ -20,27 +21,31 @@ func (daemon *Daemon) prepareMountPoints(container *container.Container) error { func (daemon *Daemon) removeMountPoints(container *container.Container, rm bool) error { var rmErrors []string for _, m := range container.MountPoints { - if m.Volume == nil { + if m.Type != mounttypes.TypeVolume || m.Volume == nil { continue } daemon.volumes.Dereference(m.Volume, container.ID) - if rm { - // Do not remove named mountpoints - // these are mountpoints specified like `docker run -v :/foo` - if m.Spec.Source != "" { - continue - } - err := daemon.volumes.Remove(m.Volume) - // Ignore volume in use errors because having this - // volume being referenced by other container is - // not an error, but an implementation detail. - // This prevents docker from logging "ERROR: Volume in use" - // where there is another container using the volume. 
- if err != nil && !volumestore.IsInUse(err) { - rmErrors = append(rmErrors, err.Error()) - } + if !rm { + continue + } + + // Do not remove named mountpoints + // these are mountpoints specified like `docker run -v :/foo` + if m.Spec.Source != "" { + continue + } + + err := daemon.volumes.Remove(m.Volume) + // Ignore volume in use errors because having this + // volume being referenced by other container is + // not an error, but an implementation detail. + // This prevents docker from logging "ERROR: Volume in use" + // where there is another container using the volume. + if err != nil && !volumestore.IsInUse(err) { + rmErrors = append(rmErrors, err.Error()) } } + if len(rmErrors) > 0 { return fmt.Errorf("Error removing volumes:\n%v", strings.Join(rmErrors, "\n")) } diff --git a/fn/vendor/github.com/docker/docker/daemon/names.go b/fn/vendor/github.com/docker/docker/daemon/names.go index 5ce16624a..ec6ac2924 100644 --- a/fn/vendor/github.com/docker/docker/daemon/names.go +++ b/fn/vendor/github.com/docker/docker/daemon/names.go @@ -30,10 +30,6 @@ func (daemon *Daemon) registerName(container *container.Container) error { return err } container.Name = name - - if err := container.ToDiskLocking(); err != nil { - logrus.Errorf("Error saving container name to disk: %v", err) - } } return daemon.nameIndex.Reserve(container.Name, container.ID) } diff --git a/fn/vendor/github.com/docker/docker/daemon/network.go b/fn/vendor/github.com/docker/docker/daemon/network.go index 06d3b3eb8..366c2a59e 100644 --- a/fn/vendor/github.com/docker/docker/daemon/network.go +++ b/fn/vendor/github.com/docker/docker/daemon/network.go @@ -16,6 +16,7 @@ import ( "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/runconfig" "github.com/docker/libnetwork" + lncluster "github.com/docker/libnetwork/cluster" "github.com/docker/libnetwork/driverapi" "github.com/docker/libnetwork/ipamapi" networktypes "github.com/docker/libnetwork/types" @@ -207,7 +208,6 @@ func (daemon *Daemon) 
setupIngress(create *clustertypes.NetworkCreateRequest, ip func (daemon *Daemon) releaseIngress(id string) { controller := daemon.netController - if err := controller.SandboxDestroy("ingress-sbox"); err != nil { logrus.Errorf("Failed to delete ingress sandbox: %v", err) } @@ -233,13 +233,17 @@ func (daemon *Daemon) releaseIngress(id string) { logrus.Errorf("Failed to delete ingress network %s: %v", n.ID(), err) return } - return } // SetNetworkBootstrapKeys sets the bootstrap keys. func (daemon *Daemon) SetNetworkBootstrapKeys(keys []*networktypes.EncryptionKey) error { - return daemon.netController.SetKeys(keys) + err := daemon.netController.SetKeys(keys) + if err == nil { + // Upon successful key setting dispatch the keys available event + daemon.cluster.SendClusterEvent(lncluster.EventNetworkKeysAvailable) + } + return err } // UpdateAttachment notifies the attacher about the attachment config. @@ -314,6 +318,11 @@ func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string libnetwork.NetworkOptionLabels(create.Labels), libnetwork.NetworkOptionAttachable(create.Attachable), libnetwork.NetworkOptionIngress(create.Ingress), + libnetwork.NetworkOptionScope(create.Scope), + } + + if create.ConfigOnly { + nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigOnly()) } if create.IPAM != nil { @@ -333,8 +342,15 @@ func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string nwOptions = append(nwOptions, libnetwork.NetworkOptionPersist(false)) } + if create.ConfigFrom != nil { + nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigFrom(create.ConfigFrom.Network)) + } + n, err := c.NewNetwork(driver, create.Name, id, nwOptions...) if err != nil { + if _, ok := err.(libnetwork.ErrDataStoreNotInitialized); ok { + return nil, errors.New("This node is not a swarm manager. 
Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.") + } return nil, err } @@ -491,13 +507,29 @@ func (daemon *Daemon) deleteNetwork(networkID string, dynamic bool) error { return apierrors.NewRequestForbiddenError(err) } + if dynamic && !nw.Info().Dynamic() { + if runconfig.IsPreDefinedNetwork(nw.Name()) { + // Predefined networks now support swarm services. Make this + // a no-op when cluster requests to remove the predefined network. + return nil + } + err := fmt.Errorf("%s is not a dynamic network", nw.Name()) + return apierrors.NewRequestForbiddenError(err) + } + if err := nw.Delete(); err != nil { return err } - daemon.pluginRefCount(nw.Type(), driverapi.NetworkPluginEndpointType, plugingetter.Release) - ipamType, _, _, _ := nw.Info().IpamConfig() - daemon.pluginRefCount(ipamType, ipamapi.PluginEndpointType, plugingetter.Release) - daemon.LogNetworkEvent(nw, "destroy") + + // If this is not a configuration only network, we need to + // update the corresponding remote drivers' reference counts + if !nw.Info().ConfigOnly() { + daemon.pluginRefCount(nw.Type(), driverapi.NetworkPluginEndpointType, plugingetter.Release) + ipamType, _, _, _ := nw.Info().IpamConfig() + daemon.pluginRefCount(ipamType, ipamapi.PluginEndpointType, plugingetter.Release) + daemon.LogNetworkEvent(nw, "destroy") + } + return nil } diff --git a/fn/vendor/github.com/docker/docker/daemon/oci_linux.go b/fn/vendor/github.com/docker/docker/daemon/oci_linux.go index 73a9590c8..6d74301a0 100644 --- a/fn/vendor/github.com/docker/docker/daemon/oci_linux.go +++ b/fn/vendor/github.com/docker/docker/daemon/oci_linux.go @@ -56,13 +56,16 @@ func setResources(s *specs.Spec, r containertypes.Resources) error { } memoryRes := getMemoryResources(r) - cpuRes := getCPUResources(r) + cpuRes, err := getCPUResources(r) + if err != nil { + return err + } blkioWeight := r.BlkioWeight - specResources := &specs.Resources{ + specResources := &specs.LinuxResources{ Memory: 
memoryRes, CPU: cpuRes, - BlockIO: &specs.BlockIO{ + BlockIO: &specs.LinuxBlockIO{ Weight: &blkioWeight, WeightDevice: weightDevices, ThrottleReadBpsDevice: readBpsDevice, @@ -71,8 +74,8 @@ func setResources(s *specs.Spec, r containertypes.Resources) error { ThrottleWriteIOPSDevice: writeIOpsDevice, }, DisableOOMKiller: r.OomKillDisable, - Pids: &specs.Pids{ - Limit: &r.PidsLimit, + Pids: &specs.LinuxPids{ + Limit: r.PidsLimit, }, } @@ -86,7 +89,7 @@ func setResources(s *specs.Spec, r containertypes.Resources) error { func setDevices(s *specs.Spec, c *container.Container) error { // Build lists of devices allowed and created within the container. - var devs []specs.Device + var devs []specs.LinuxDevice devPermissions := s.Linux.Resources.Devices if c.HostConfig.Privileged { hostDevices, err := devices.HostDevices() @@ -96,11 +99,10 @@ func setDevices(s *specs.Spec, c *container.Container) error { for _, d := range hostDevices { devs = append(devs, oci.Device(d)) } - rwm := "rwm" - devPermissions = []specs.DeviceCgroup{ + devPermissions = []specs.LinuxDeviceCgroup{ { Allow: true, - Access: &rwm, + Access: "rwm", }, } } else { @@ -120,10 +122,10 @@ func setDevices(s *specs.Spec, c *container.Container) error { } matches := ss[0] - dPermissions := specs.DeviceCgroup{ + dPermissions := specs.LinuxDeviceCgroup{ Allow: true, - Type: &matches[1], - Access: &matches[4], + Type: matches[1], + Access: matches[4], } if matches[2] == "*" { major := int64(-1) @@ -155,14 +157,14 @@ func setDevices(s *specs.Spec, c *container.Container) error { } func setRlimits(daemon *Daemon, s *specs.Spec, c *container.Container) error { - var rlimits []specs.Rlimit + var rlimits []specs.LinuxRlimit // We want to leave the original HostConfig alone so make a copy here hostConfig := *c.HostConfig // Merge with the daemon defaults daemon.mergeUlimits(&hostConfig) for _, ul := range hostConfig.Ulimits { - rlimits = append(rlimits, specs.Rlimit{ + rlimits = append(rlimits, specs.LinuxRlimit{ Type: 
"RLIMIT_" + strings.ToUpper(ul.Name), Soft: uint64(ul.Soft), Hard: uint64(ul.Hard), @@ -237,7 +239,7 @@ func getUser(c *container.Container, username string) (uint32, uint32, []uint32, return uid, gid, additionalGids, nil } -func setNamespace(s *specs.Spec, ns specs.Namespace) { +func setNamespace(s *specs.Spec, ns specs.LinuxNamespace) { for i, n := range s.Linux.Namespaces { if n.Type == ns.Type { s.Linux.Namespaces[i] = ns @@ -253,12 +255,15 @@ func setCapabilities(s *specs.Spec, c *container.Container) error { if c.HostConfig.Privileged { caplist = caps.GetAllCapabilities() } else { - caplist, err = caps.TweakCapabilities(s.Process.Capabilities, c.HostConfig.CapAdd, c.HostConfig.CapDrop) + caplist, err = caps.TweakCapabilities(s.Process.Capabilities.Effective, c.HostConfig.CapAdd, c.HostConfig.CapDrop) if err != nil { return err } } - s.Process.Capabilities = caplist + s.Process.Capabilities.Effective = caplist + s.Process.Capabilities.Bounding = caplist + s.Process.Capabilities.Permitted = caplist + s.Process.Capabilities.Inheritable = caplist return nil } @@ -266,18 +271,18 @@ func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error userNS := false // user if c.HostConfig.UsernsMode.IsPrivate() { - uidMap, gidMap := daemon.GetUIDGIDMaps() + uidMap := daemon.idMappings.UIDs() if uidMap != nil { userNS = true - ns := specs.Namespace{Type: "user"} + ns := specs.LinuxNamespace{Type: "user"} setNamespace(s, ns) s.Linux.UIDMappings = specMapping(uidMap) - s.Linux.GIDMappings = specMapping(gidMap) + s.Linux.GIDMappings = specMapping(daemon.idMappings.GIDs()) } } // network if !c.Config.NetworkDisabled { - ns := specs.Namespace{Type: "network"} + ns := specs.LinuxNamespace{Type: "network"} parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2) if parts[0] == "container" { nc, err := daemon.getNetworkedContainer(c.ID, c.HostConfig.NetworkMode.ConnectedContainer()) @@ -287,7 +292,7 @@ func setNamespaces(daemon *Daemon, s *specs.Spec, c 
*container.Container) error ns.Path = fmt.Sprintf("/proc/%d/ns/net", nc.State.GetPID()) if userNS { // to share a net namespace, they must also share a user namespace - nsUser := specs.Namespace{Type: "user"} + nsUser := specs.LinuxNamespace{Type: "user"} nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", nc.State.GetPID()) setNamespace(s, nsUser) } @@ -298,7 +303,7 @@ func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error } // ipc if c.HostConfig.IpcMode.IsContainer() { - ns := specs.Namespace{Type: "ipc"} + ns := specs.LinuxNamespace{Type: "ipc"} ic, err := daemon.getIpcContainer(c) if err != nil { return err @@ -307,19 +312,19 @@ func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error setNamespace(s, ns) if userNS { // to share an IPC namespace, they must also share a user namespace - nsUser := specs.Namespace{Type: "user"} + nsUser := specs.LinuxNamespace{Type: "user"} nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", ic.State.GetPID()) setNamespace(s, nsUser) } } else if c.HostConfig.IpcMode.IsHost() { - oci.RemoveNamespace(s, specs.NamespaceType("ipc")) + oci.RemoveNamespace(s, specs.LinuxNamespaceType("ipc")) } else { - ns := specs.Namespace{Type: "ipc"} + ns := specs.LinuxNamespace{Type: "ipc"} setNamespace(s, ns) } // pid if c.HostConfig.PidMode.IsContainer() { - ns := specs.Namespace{Type: "pid"} + ns := specs.LinuxNamespace{Type: "pid"} pc, err := daemon.getPidContainer(c) if err != nil { return err @@ -328,29 +333,29 @@ func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error setNamespace(s, ns) if userNS { // to share a PID namespace, they must also share a user namespace - nsUser := specs.Namespace{Type: "user"} + nsUser := specs.LinuxNamespace{Type: "user"} nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", pc.State.GetPID()) setNamespace(s, nsUser) } } else if c.HostConfig.PidMode.IsHost() { - oci.RemoveNamespace(s, specs.NamespaceType("pid")) + oci.RemoveNamespace(s, 
specs.LinuxNamespaceType("pid")) } else { - ns := specs.Namespace{Type: "pid"} + ns := specs.LinuxNamespace{Type: "pid"} setNamespace(s, ns) } // uts if c.HostConfig.UTSMode.IsHost() { - oci.RemoveNamespace(s, specs.NamespaceType("uts")) + oci.RemoveNamespace(s, specs.LinuxNamespaceType("uts")) s.Hostname = "" } return nil } -func specMapping(s []idtools.IDMap) []specs.IDMapping { - var ids []specs.IDMapping +func specMapping(s []idtools.IDMap) []specs.LinuxIDMapping { + var ids []specs.LinuxIDMapping for _, item := range s { - ids = append(ids, specs.IDMapping{ + ids = append(ids, specs.LinuxIDMapping{ HostID: uint32(item.HostID), ContainerID: uint32(item.ContainerID), Size: uint32(item.Size), @@ -586,7 +591,7 @@ func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []c // TODO: until a kernel/mount solution exists for handling remount in a user namespace, // we must clear the readonly flag for the cgroups mount (@mrunalp concurs) - if uidMap, _ := daemon.GetUIDGIDMaps(); uidMap != nil || c.HostConfig.Privileged { + if uidMap := daemon.idMappings.UIDs(); uidMap != nil || c.HostConfig.Privileged { for i, m := range s.Mounts { if m.Type == "cgroup" { clearReadOnly(&s.Mounts[i]) @@ -606,8 +611,7 @@ func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) Path: c.BaseFS, Readonly: c.HostConfig.ReadonlyRootfs, } - rootUID, rootGID := daemon.GetRemappedUIDGID() - if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil { + if err := c.SetupWorkingDirectory(daemon.idMappings.RootPair()); err != nil { return err } cwd := c.Config.WorkingDir @@ -675,7 +679,7 @@ func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { } else { cgroupsPath = filepath.Join(parent, c.ID) } - s.Linux.CgroupsPath = &cgroupsPath + s.Linux.CgroupsPath = cgroupsPath if err := setResources(&s, c.HostConfig.Resources); err != nil { return nil, fmt.Errorf("linux runtime spec resources: %v", err) @@ -683,13 +687,13 @@ func 
(daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { s.Linux.Resources.OOMScoreAdj = &c.HostConfig.OomScoreAdj s.Linux.Sysctl = c.HostConfig.Sysctls - p := *s.Linux.CgroupsPath + p := s.Linux.CgroupsPath if useSystemd { - initPath, err := cgroups.GetInitCgroupDir("cpu") + initPath, err := cgroups.GetInitCgroup("cpu") if err != nil { return nil, err } - p, _ = cgroups.GetThisCgroupDir("cpu") + p, _ = cgroups.GetOwnCgroup("cpu") if err != nil { return nil, err } @@ -732,6 +736,10 @@ func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { return nil, err } + if err := daemon.setupConfigDir(c); err != nil { + return nil, err + } + ms, err := daemon.setupMounts(c) if err != nil { return nil, err @@ -745,10 +753,12 @@ func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { } ms = append(ms, tmpfsMounts...) - if m := c.SecretMount(); m != nil { - ms = append(ms, *m) + if m := c.SecretMounts(); m != nil { + ms = append(ms, m...) } + ms = append(ms, c.ConfigMounts()...) 
+ sort.Sort(mounts(ms)) if err := setMounts(daemon, &s, c, ms); err != nil { return nil, fmt.Errorf("linux mounts: %v", err) @@ -761,7 +771,7 @@ func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { return nil, err } - s.Hooks = specs.Hooks{ + s.Hooks = &specs.Hooks{ Prestart: []specs.Hook{{ Path: target, // FIXME: cross-platform Args: []string{"libnetwork-setkey", c.ID, daemon.netController.ID()}, diff --git a/fn/vendor/github.com/docker/docker/daemon/oci_solaris.go b/fn/vendor/github.com/docker/docker/daemon/oci_solaris.go index 0c757f919..610efe10a 100644 --- a/fn/vendor/github.com/docker/docker/daemon/oci_solaris.go +++ b/fn/vendor/github.com/docker/docker/daemon/oci_solaris.go @@ -130,8 +130,7 @@ func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) Path: filepath.Dir(c.BaseFS), Readonly: c.HostConfig.ReadonlyRootfs, } - rootUID, rootGID := daemon.GetRemappedUIDGID() - if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil { + if err := c.SetupWorkingDirectory(daemon.idMappings.RootPair()); err != nil { return err } cwd := c.Config.WorkingDir diff --git a/fn/vendor/github.com/docker/docker/daemon/oci_windows.go b/fn/vendor/github.com/docker/docker/daemon/oci_windows.go index b225d4550..f114230ef 100644 --- a/fn/vendor/github.com/docker/docker/daemon/oci_windows.go +++ b/fn/vendor/github.com/docker/docker/daemon/oci_windows.go @@ -7,11 +7,17 @@ import ( "github.com/docker/docker/container" "github.com/docker/docker/oci" "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" "github.com/opencontainers/runtime-spec/specs-go" ) func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { - s := oci.DefaultSpec() + img, err := daemon.GetImage(string(c.ImageID)) + if err != nil { + return nil, err + } + + s := oci.DefaultOSSpec(img.OS) linkedEnv, err := daemon.setupLinkedContainers(c) if err != nil { @@ -25,11 +31,60 @@ func (daemon *Daemon) createSpec(c 
*container.Container) (*specs.Spec, error) { // In base spec s.Hostname = c.FullHostname() + if err := daemon.setupSecretDir(c); err != nil { + return nil, err + } + + if err := daemon.setupConfigDir(c); err != nil { + return nil, err + } + // In s.Mounts mounts, err := daemon.setupMounts(c) if err != nil { return nil, err } + + var isHyperV bool + if c.HostConfig.Isolation.IsDefault() { + // Container using default isolation, so take the default from the daemon configuration + isHyperV = daemon.defaultIsolation.IsHyperV() + } else { + // Container may be requesting an explicit isolation mode. + isHyperV = c.HostConfig.Isolation.IsHyperV() + } + + // If the container has not been started, and has configs or secrets + // secrets, create symlinks to each config and secret. If it has been + // started before, the symlinks should have already been created. Also, it + // is important to not mount a Hyper-V container that has been started + // before, to protect the host from the container; for example, from + // malicious mutation of NTFS data structures. + if !c.HasBeenStartedBefore && (len(c.SecretReferences) > 0 || len(c.ConfigReferences) > 0) { + // The container file system is mounted before this function is called, + // except for Hyper-V containers, so mount it here in that case. + if isHyperV { + if err := daemon.Mount(c); err != nil { + return nil, err + } + defer daemon.Unmount(c) + } + if err := c.CreateSecretSymlinks(); err != nil { + return nil, err + } + if err := c.CreateConfigSymlinks(); err != nil { + return nil, err + } + } + + if m := c.SecretMounts(); m != nil { + mounts = append(mounts, m...) + } + + if m := c.ConfigMounts(); m != nil { + mounts = append(mounts, m...) + } + for _, mount := range mounts { m := specs.Mount{ Source: mount.Source, @@ -43,10 +98,33 @@ func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { // In s.Process s.Process.Args = append([]string{c.Path}, c.Args...) 
- if !c.Config.ArgsEscaped { + if !c.Config.ArgsEscaped && img.OS == "windows" { s.Process.Args = escapeArgs(s.Process.Args) } + s.Process.Cwd = c.Config.WorkingDir + s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv) + if c.Config.Tty { + s.Process.Terminal = c.Config.Tty + s.Process.ConsoleSize.Height = c.HostConfig.ConsoleSize[0] + s.Process.ConsoleSize.Width = c.HostConfig.ConsoleSize[1] + } + s.Process.User.Username = c.Config.User + + if img.OS == "windows" { + daemon.createSpecWindowsFields(c, &s, isHyperV) + } else { + // TODO @jhowardmsft LCOW Support. Modify this check when running in dual-mode + if system.LCOWSupported() && img.OS == "linux" { + daemon.createSpecLinuxFields(c, &s) + } + } + + return (*specs.Spec)(&s), nil +} + +// Sets the Windows-specific fields of the OCI spec +func (daemon *Daemon) createSpecWindowsFields(c *container.Container, s *specs.Spec, isHyperV bool) { if len(s.Process.Cwd) == 0 { // We default to C:\ to workaround the oddity of the case that the // default directory for cmd running as LocalSystem (or @@ -57,54 +135,62 @@ func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { // as c:\. Hence, setting it to default of c:\ makes for consistency. s.Process.Cwd = `C:\` } - s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv) - s.Process.ConsoleSize.Height = c.HostConfig.ConsoleSize[0] - s.Process.ConsoleSize.Width = c.HostConfig.ConsoleSize[1] - s.Process.Terminal = c.Config.Tty - s.Process.User.Username = c.Config.User - // In spec.Root. This is not set for Hyper-V containers - var isHyperV bool - if c.HostConfig.Isolation.IsDefault() { - // Container using default isolation, so take the default from the daemon configuration - isHyperV = daemon.defaultIsolation.IsHyperV() - } else { - // Container may be requesting an explicit isolation mode. 
- isHyperV = c.HostConfig.Isolation.IsHyperV() - } - if !isHyperV { - s.Root.Path = c.BaseFS - } s.Root.Readonly = false // Windows does not support a read-only root filesystem + if !isHyperV { + s.Root.Path = c.BaseFS // This is not set for Hyper-V containers + } // In s.Windows.Resources - // @darrenstahlmsft implement these resources cpuShares := uint16(c.HostConfig.CPUShares) - cpuPercent := uint8(c.HostConfig.CPUPercent) - if c.HostConfig.NanoCPUs > 0 { - cpuPercent = uint8(c.HostConfig.NanoCPUs * 100 / int64(sysinfo.NumCPU()) / 1e9) - } + cpuMaximum := uint16(c.HostConfig.CPUPercent) * 100 cpuCount := uint64(c.HostConfig.CPUCount) + if c.HostConfig.NanoCPUs > 0 { + if isHyperV { + cpuCount = uint64(c.HostConfig.NanoCPUs / 1e9) + leftoverNanoCPUs := c.HostConfig.NanoCPUs % 1e9 + if leftoverNanoCPUs != 0 { + cpuCount++ + cpuMaximum = uint16(c.HostConfig.NanoCPUs / int64(cpuCount) / (1e9 / 10000)) + if cpuMaximum < 1 { + // The requested NanoCPUs is so small that we rounded to 0, use 1 instead + cpuMaximum = 1 + } + } + } else { + cpuMaximum = uint16(c.HostConfig.NanoCPUs / int64(sysinfo.NumCPU()) / (1e9 / 10000)) + if cpuMaximum < 1 { + // The requested NanoCPUs is so small that we rounded to 0, use 1 instead + cpuMaximum = 1 + } + } + } memoryLimit := uint64(c.HostConfig.Memory) s.Windows.Resources = &specs.WindowsResources{ CPU: &specs.WindowsCPUResources{ - Percent: &cpuPercent, + Maximum: &cpuMaximum, Shares: &cpuShares, Count: &cpuCount, }, Memory: &specs.WindowsMemoryResources{ Limit: &memoryLimit, - //TODO Reservation: ..., - }, - Network: &specs.WindowsNetworkResources{ - //TODO Bandwidth: ..., }, Storage: &specs.WindowsStorageResources{ Bps: &c.HostConfig.IOMaximumBandwidth, Iops: &c.HostConfig.IOMaximumIOps, }, } - return (*specs.Spec)(&s), nil +} + +// Sets the Linux-specific fields of the OCI spec +// TODO: @jhowardmsft LCOW Support. We need to do a lot more pulling in what can +// be pulled in from oci_linux.go. 
+func (daemon *Daemon) createSpecLinuxFields(c *container.Container, s *specs.Spec) { + if len(s.Process.Cwd) == 0 { + s.Process.Cwd = `/` + } + s.Root.Path = "rootfs" + s.Root.Readonly = c.HostConfig.ReadonlyRootfs } func escapeArgs(args []string) []string { diff --git a/fn/vendor/github.com/docker/docker/daemon/prune.go b/fn/vendor/github.com/docker/docker/daemon/prune.go index 2611d0c7a..1d8686b36 100644 --- a/fn/vendor/github.com/docker/docker/daemon/prune.go +++ b/fn/vendor/github.com/docker/docker/daemon/prune.go @@ -3,6 +3,8 @@ package daemon import ( "fmt" "regexp" + "runtime" + "sync/atomic" "time" "github.com/Sirupsen/logrus" @@ -13,16 +15,56 @@ import ( "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/pkg/system" "github.com/docker/docker/runconfig" "github.com/docker/docker/volume" "github.com/docker/libnetwork" digest "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +var ( + // errPruneRunning is returned when a prune request is received while + // one is in progress + errPruneRunning = fmt.Errorf("a prune operation is already running") + + containersAcceptedFilters = map[string]bool{ + "label": true, + "label!": true, + "until": true, + } + volumesAcceptedFilters = map[string]bool{ + "label": true, + "label!": true, + } + imagesAcceptedFilters = map[string]bool{ + "dangling": true, + "label": true, + "label!": true, + "until": true, + } + networksAcceptedFilters = map[string]bool{ + "label": true, + "label!": true, + "until": true, + } ) // ContainersPrune removes unused containers -func (daemon *Daemon) ContainersPrune(pruneFilters filters.Args) (*types.ContainersPruneReport, error) { +func (daemon *Daemon) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (*types.ContainersPruneReport, error) { + if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) { + return nil, errPruneRunning + } + defer 
atomic.StoreInt32(&daemon.pruneRunning, 0) + rep := &types.ContainersPruneReport{} + // make sure that only accepted filters have been received + err := pruneFilters.Validate(containersAcceptedFilters) + if err != nil { + return nil, err + } + until, err := getUntilFromPruneFilters(pruneFilters) if err != nil { return nil, err @@ -30,6 +72,13 @@ func (daemon *Daemon) ContainersPrune(pruneFilters filters.Args) (*types.Contain allContainers := daemon.List() for _, c := range allContainers { + select { + case <-ctx.Done(): + logrus.Warnf("ContainersPrune operation cancelled: %#v", *rep) + return rep, ctx.Err() + default: + } + if !c.IsRunning() { if !until.IsZero() && c.Created.After(until) { continue @@ -55,10 +104,28 @@ func (daemon *Daemon) ContainersPrune(pruneFilters filters.Args) (*types.Contain } // VolumesPrune removes unused local volumes -func (daemon *Daemon) VolumesPrune(pruneFilters filters.Args) (*types.VolumesPruneReport, error) { +func (daemon *Daemon) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (*types.VolumesPruneReport, error) { + if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) { + return nil, errPruneRunning + } + defer atomic.StoreInt32(&daemon.pruneRunning, 0) + + // make sure that only accepted filters have been received + err := pruneFilters.Validate(volumesAcceptedFilters) + if err != nil { + return nil, err + } + rep := &types.VolumesPruneReport{} pruneVols := func(v volume.Volume) error { + select { + case <-ctx.Done(): + logrus.Warnf("VolumesPrune operation cancelled: %#v", *rep) + return ctx.Err() + default: + } + name := v.Name() refs := daemon.volumes.Refs(v) @@ -85,13 +152,30 @@ func (daemon *Daemon) VolumesPrune(pruneFilters filters.Args) (*types.VolumesPru return nil } - err := daemon.traverseLocalVolumes(pruneVols) + err = daemon.traverseLocalVolumes(pruneVols) return rep, err } // ImagesPrune removes unused images -func (daemon *Daemon) ImagesPrune(pruneFilters filters.Args) (*types.ImagesPruneReport, 
error) { +func (daemon *Daemon) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) { + // TODO @jhowardmsft LCOW Support: This will need revisiting later. + platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + + if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) { + return nil, errPruneRunning + } + defer atomic.StoreInt32(&daemon.pruneRunning, 0) + + // make sure that only accepted filters have been received + err := pruneFilters.Validate(imagesAcceptedFilters) + if err != nil { + return nil, err + } + rep := &types.ImagesPruneReport{} danglingOnly := true @@ -110,34 +194,54 @@ func (daemon *Daemon) ImagesPrune(pruneFilters filters.Args) (*types.ImagesPrune var allImages map[image.ID]*image.Image if danglingOnly { - allImages = daemon.imageStore.Heads() + allImages = daemon.stores[platform].imageStore.Heads() } else { - allImages = daemon.imageStore.Map() + allImages = daemon.stores[platform].imageStore.Map() } allContainers := daemon.List() imageRefs := map[string]bool{} for _, c := range allContainers { - imageRefs[c.ID] = true + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + imageRefs[c.ID] = true + } } // Filter intermediary images and get their unique size - allLayers := daemon.layerStore.Map() + allLayers := daemon.stores[platform].layerStore.Map() topImages := map[image.ID]*image.Image{} for id, img := range allImages { - dgst := digest.Digest(id) - if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 { - continue + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + dgst := digest.Digest(id) + if len(daemon.stores[platform].referenceStore.References(dgst)) == 0 && len(daemon.stores[platform].imageStore.Children(id)) != 0 { + continue + } + if !until.IsZero() && img.Created.After(until) { + continue + } + if img.Config != nil && !matchLabels(pruneFilters, img.Config.Labels) { + continue + } + 
topImages[id] = img } - if !until.IsZero() && img.Created.After(until) { - continue - } - if !matchLabels(pruneFilters, img.Config.Labels) { - continue - } - topImages[id] = img } + canceled := false +deleteImagesLoop: for id := range topImages { + select { + case <-ctx.Done(): + // we still want to calculate freed size and return the data + canceled = true + break deleteImagesLoop + default: + } + dgst := digest.Digest(id) hex := dgst.Hex() if _, ok := imageRefs[hex]; ok { @@ -145,7 +249,7 @@ func (daemon *Daemon) ImagesPrune(pruneFilters filters.Args) (*types.ImagesPrune } deletedImages := []types.ImageDeleteResponseItem{} - refs := daemon.referenceStore.References(dgst) + refs := daemon.stores[platform].referenceStore.References(dgst) if len(refs) > 0 { shouldDelete := !danglingOnly if !shouldDelete { @@ -198,17 +302,30 @@ func (daemon *Daemon) ImagesPrune(pruneFilters filters.Args) (*types.ImagesPrune } } + if canceled { + logrus.Warnf("ImagesPrune operation cancelled: %#v", *rep) + return nil, ctx.Err() + } + return rep, nil } // localNetworksPrune removes unused local networks -func (daemon *Daemon) localNetworksPrune(pruneFilters filters.Args) *types.NetworksPruneReport { +func (daemon *Daemon) localNetworksPrune(ctx context.Context, pruneFilters filters.Args) *types.NetworksPruneReport { rep := &types.NetworksPruneReport{} until, _ := getUntilFromPruneFilters(pruneFilters) // When the function returns true, the walk will stop. 
l := func(nw libnetwork.Network) bool { + select { + case <-ctx.Done(): + return true + default: + } + if nw.Info().ConfigOnly() { + return false + } if !until.IsZero() && nw.Info().Created().After(until) { return false } @@ -234,7 +351,7 @@ func (daemon *Daemon) localNetworksPrune(pruneFilters filters.Args) *types.Netwo } // clusterNetworksPrune removes unused cluster networks -func (daemon *Daemon) clusterNetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error) { +func (daemon *Daemon) clusterNetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error) { rep := &types.NetworksPruneReport{} until, _ := getUntilFromPruneFilters(pruneFilters) @@ -251,46 +368,70 @@ func (daemon *Daemon) clusterNetworksPrune(pruneFilters filters.Args) (*types.Ne } networkIsInUse := regexp.MustCompile(`network ([[:alnum:]]+) is in use`) for _, nw := range networks { - if nw.Ingress { - // Routing-mesh network removal has to be explicitly invoked by user - continue - } - if !until.IsZero() && nw.Created.After(until) { - continue - } - if !matchLabels(pruneFilters, nw.Labels) { - continue - } - // https://github.com/docker/docker/issues/24186 - // `docker network inspect` unfortunately displays ONLY those containers that are local to that node. - // So we try to remove it anyway and check the error - err = cluster.RemoveNetwork(nw.ID) - if err != nil { - // we can safely ignore the "network .. 
is in use" error - match := networkIsInUse.FindStringSubmatch(err.Error()) - if len(match) != 2 || match[1] != nw.ID { - logrus.Warnf("could not remove cluster network %s: %v", nw.Name, err) + select { + case <-ctx.Done(): + return rep, ctx.Err() + default: + if nw.Ingress { + // Routing-mesh network removal has to be explicitly invoked by user + continue } - continue + if !until.IsZero() && nw.Created.After(until) { + continue + } + if !matchLabels(pruneFilters, nw.Labels) { + continue + } + // https://github.com/docker/docker/issues/24186 + // `docker network inspect` unfortunately displays ONLY those containers that are local to that node. + // So we try to remove it anyway and check the error + err = cluster.RemoveNetwork(nw.ID) + if err != nil { + // we can safely ignore the "network .. is in use" error + match := networkIsInUse.FindStringSubmatch(err.Error()) + if len(match) != 2 || match[1] != nw.ID { + logrus.Warnf("could not remove cluster network %s: %v", nw.Name, err) + } + continue + } + rep.NetworksDeleted = append(rep.NetworksDeleted, nw.Name) } - rep.NetworksDeleted = append(rep.NetworksDeleted, nw.Name) } return rep, nil } // NetworksPrune removes unused networks -func (daemon *Daemon) NetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error) { +func (daemon *Daemon) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error) { + if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) { + return nil, errPruneRunning + } + defer atomic.StoreInt32(&daemon.pruneRunning, 0) + + // make sure that only accepted filters have been received + err := pruneFilters.Validate(networksAcceptedFilters) + if err != nil { + return nil, err + } + if _, err := getUntilFromPruneFilters(pruneFilters); err != nil { return nil, err } rep := &types.NetworksPruneReport{} - if clusterRep, err := daemon.clusterNetworksPrune(pruneFilters); err == nil { + if clusterRep, err := daemon.clusterNetworksPrune(ctx, 
pruneFilters); err == nil { rep.NetworksDeleted = append(rep.NetworksDeleted, clusterRep.NetworksDeleted...) } - localRep := daemon.localNetworksPrune(pruneFilters) + localRep := daemon.localNetworksPrune(ctx, pruneFilters) rep.NetworksDeleted = append(rep.NetworksDeleted, localRep.NetworksDeleted...) + + select { + case <-ctx.Done(): + logrus.Warnf("NetworksPrune operation cancelled: %#v", *rep) + return nil, ctx.Err() + default: + } + return rep, nil } diff --git a/fn/vendor/github.com/docker/docker/daemon/reload.go b/fn/vendor/github.com/docker/docker/daemon/reload.go index deb13f54b..0200bcf06 100644 --- a/fn/vendor/github.com/docker/docker/daemon/reload.go +++ b/fn/vendor/github.com/docker/docker/daemon/reload.go @@ -39,7 +39,7 @@ func (daemon *Daemon) Reload(conf *config.Config) (err error) { daemon.reloadPlatform(conf, attributes) daemon.reloadDebug(conf, attributes) - daemon.reloadMaxConcurrentDowloadsAndUploads(conf, attributes) + daemon.reloadMaxConcurrentDownloadsAndUploads(conf, attributes) daemon.reloadShutdownTimeout(conf, attributes) if err := daemon.reloadClusterDiscovery(conf, attributes); err != nil { @@ -48,6 +48,9 @@ func (daemon *Daemon) Reload(conf *config.Config) (err error) { if err := daemon.reloadLabels(conf, attributes); err != nil { return err } + if err := daemon.reloadAllowNondistributableArtifacts(conf, attributes); err != nil { + return err + } if err := daemon.reloadInsecureRegistries(conf, attributes); err != nil { return err } @@ -71,9 +74,9 @@ func (daemon *Daemon) reloadDebug(conf *config.Config, attributes map[string]str attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug) } -// reloadMaxConcurrentDowloadsAndUploads updates configuration with max concurrent +// reloadMaxConcurrentDownloadsAndUploads updates configuration with max concurrent // download and upload options and updates the passed attributes -func (daemon *Daemon) reloadMaxConcurrentDowloadsAndUploads(conf *config.Config, attributes map[string]string) 
{ +func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(conf *config.Config, attributes map[string]string) { // If no value is set for max-concurrent-downloads we assume it is the default value // We always "reset" as the cost is lightweight and easy to maintain. if conf.IsValueSet("max-concurrent-downloads") && conf.MaxConcurrentDownloads != nil { @@ -217,6 +220,31 @@ func (daemon *Daemon) reloadLabels(conf *config.Config, attributes map[string]st return nil } +// reloadAllowNondistributableArtifacts updates the configuration with allow-nondistributable-artifacts options +// and updates the passed attributes. +func (daemon *Daemon) reloadAllowNondistributableArtifacts(conf *config.Config, attributes map[string]string) error { + // Update corresponding configuration. + if conf.IsValueSet("allow-nondistributable-artifacts") { + daemon.configStore.AllowNondistributableArtifacts = conf.AllowNondistributableArtifacts + if err := daemon.RegistryService.LoadAllowNondistributableArtifacts(conf.AllowNondistributableArtifacts); err != nil { + return err + } + } + + // Prepare reload event attributes with updatable configurations. 
+ if daemon.configStore.AllowNondistributableArtifacts != nil { + v, err := json.Marshal(daemon.configStore.AllowNondistributableArtifacts) + if err != nil { + return err + } + attributes["allow-nondistributable-artifacts"] = string(v) + } else { + attributes["allow-nondistributable-artifacts"] = "[]" + } + + return nil +} + // reloadInsecureRegistries updates configuration with insecure registry option // and updates the passed attributes func (daemon *Daemon) reloadInsecureRegistries(conf *config.Config, attributes map[string]string) error { diff --git a/fn/vendor/github.com/docker/docker/daemon/reload_test.go b/fn/vendor/github.com/docker/docker/daemon/reload_test.go index ba1fd0247..bf11b6bd5 100644 --- a/fn/vendor/github.com/docker/docker/daemon/reload_test.go +++ b/fn/vendor/github.com/docker/docker/daemon/reload_test.go @@ -4,6 +4,7 @@ package daemon import ( "reflect" + "sort" "testing" "time" @@ -40,6 +41,61 @@ func TestDaemonReloadLabels(t *testing.T) { } } +func TestDaemonReloadAllowNondistributableArtifacts(t *testing.T) { + daemon := &Daemon{ + configStore: &config.Config{}, + } + + // Initialize daemon with some registries. + daemon.RegistryService = registry.NewService(registry.ServiceOptions{ + AllowNondistributableArtifacts: []string{ + "127.0.0.0/8", + "10.10.1.11:5000", + "10.10.1.22:5000", // This will be removed during reload. + "docker1.com", + "docker2.com", // This will be removed during reload. + }, + }) + + registries := []string{ + "127.0.0.0/8", + "10.10.1.11:5000", + "10.10.1.33:5000", // This will be added during reload. + "docker1.com", + "docker3.com", // This will be added during reload. 
+ } + + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + ServiceOptions: registry.ServiceOptions{ + AllowNondistributableArtifacts: registries, + }, + ValuesSet: map[string]interface{}{ + "allow-nondistributable-artifacts": registries, + }, + }, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + actual := []string{} + serviceConfig := daemon.RegistryService.ServiceConfig() + for _, value := range serviceConfig.AllowNondistributableArtifactsCIDRs { + actual = append(actual, value.String()) + } + for _, value := range serviceConfig.AllowNondistributableArtifactsHostnames { + actual = append(actual, value) + } + + sort.Strings(registries) + sort.Strings(actual) + if !reflect.DeepEqual(registries, actual) { + t.Fatalf("expected %v, got %v\n", registries, actual) + } +} + func TestDaemonReloadMirrors(t *testing.T) { daemon := &Daemon{} daemon.RegistryService = registry.NewService(registry.ServiceOptions{ diff --git a/fn/vendor/github.com/docker/docker/daemon/rename.go b/fn/vendor/github.com/docker/docker/daemon/rename.go index 2770ead80..2a8d0b22c 100644 --- a/fn/vendor/github.com/docker/docker/daemon/rename.go +++ b/fn/vendor/github.com/docker/docker/daemon/rename.go @@ -32,6 +32,9 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error { return err } + container.Lock() + defer container.Unlock() + oldName = container.Name oldIsAnonymousEndpoint := container.NetworkSettings.IsAnonymousEndpoint @@ -39,9 +42,6 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error { return errors.New("Renaming a container with the same name as its current name") } - container.Lock() - defer container.Unlock() - links := map[string]*dockercontainer.Container{} for k, v := range daemon.linkIndex.children(container) { if !strings.HasPrefix(k, oldName) { @@ -82,7 +82,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error { daemon.nameIndex.Release(oldName + k) } daemon.releaseName(oldName) - 
if err = container.ToDisk(); err != nil { + if err = container.CheckpointTo(daemon.containersReplica); err != nil { return err } @@ -99,7 +99,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error { if err != nil { container.Name = oldName container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint - if e := container.ToDisk(); e != nil { + if e := container.CheckpointTo(daemon.containersReplica); e != nil { logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e) } } diff --git a/fn/vendor/github.com/docker/docker/daemon/restart.go b/fn/vendor/github.com/docker/docker/daemon/restart.go index 79292f375..9f2ef569a 100644 --- a/fn/vendor/github.com/docker/docker/daemon/restart.go +++ b/fn/vendor/github.com/docker/docker/daemon/restart.go @@ -52,7 +52,7 @@ func (daemon *Daemon) containerRestart(container *container.Container, seconds i container.HostConfig.AutoRemove = autoRemove // containerStop will write HostConfig to disk, we shall restore AutoRemove // in disk too - if toDiskErr := container.ToDiskLocking(); toDiskErr != nil { + if toDiskErr := daemon.checkpointAndSave(container); toDiskErr != nil { logrus.Errorf("Write container to disk error: %v", toDiskErr) } diff --git a/fn/vendor/github.com/docker/docker/daemon/seccomp_linux.go b/fn/vendor/github.com/docker/docker/daemon/seccomp_linux.go index 7f16733d9..472e3133c 100644 --- a/fn/vendor/github.com/docker/docker/daemon/seccomp_linux.go +++ b/fn/vendor/github.com/docker/docker/daemon/seccomp_linux.go @@ -14,7 +14,7 @@ import ( var supportsSeccomp = true func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { - var profile *specs.Seccomp + var profile *specs.LinuxSeccomp var err error if c.HostConfig.Privileged { diff --git a/fn/vendor/github.com/docker/docker/daemon/secrets.go b/fn/vendor/github.com/docker/docker/daemon/secrets.go index 355cb1e13..90fa99e98 100644 --- a/fn/vendor/github.com/docker/docker/daemon/secrets.go +++ 
b/fn/vendor/github.com/docker/docker/daemon/secrets.go @@ -3,21 +3,8 @@ package daemon import ( "github.com/Sirupsen/logrus" swarmtypes "github.com/docker/docker/api/types/swarm" - "github.com/docker/swarmkit/agent/exec" ) -// SetContainerSecretStore sets the secret store backend for the container -func (daemon *Daemon) SetContainerSecretStore(name string, store exec.SecretGetter) error { - c, err := daemon.GetContainer(name) - if err != nil { - return err - } - - c.SecretStore = store - - return nil -} - // SetContainerSecretReferences sets the container secret references needed func (daemon *Daemon) SetContainerSecretReferences(name string, refs []*swarmtypes.SecretReference) error { if !secretsSupported() && len(refs) > 0 { diff --git a/fn/vendor/github.com/docker/docker/daemon/secrets_unsupported.go b/fn/vendor/github.com/docker/docker/daemon/secrets_unsupported.go index d6f36fda1..d55e8624d 100644 --- a/fn/vendor/github.com/docker/docker/daemon/secrets_unsupported.go +++ b/fn/vendor/github.com/docker/docker/daemon/secrets_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux +// +build !linux,!windows package daemon diff --git a/fn/vendor/github.com/docker/docker/daemon/secrets_windows.go b/fn/vendor/github.com/docker/docker/daemon/secrets_windows.go new file mode 100644 index 000000000..9054354c8 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/daemon/secrets_windows.go @@ -0,0 +1,7 @@ +// +build windows + +package daemon + +func secretsSupported() bool { + return true +} diff --git a/fn/vendor/github.com/docker/docker/daemon/selinux_linux.go b/fn/vendor/github.com/docker/docker/daemon/selinux_linux.go index 83a344711..fb2578bf4 100644 --- a/fn/vendor/github.com/docker/docker/daemon/selinux_linux.go +++ b/fn/vendor/github.com/docker/docker/daemon/selinux_linux.go @@ -2,16 +2,16 @@ package daemon -import "github.com/opencontainers/runc/libcontainer/selinux" +import "github.com/opencontainers/selinux/go-selinux" func selinuxSetDisabled() { 
selinux.SetDisabled() } func selinuxFreeLxcContexts(label string) { - selinux.FreeLxcContexts(label) + selinux.ReleaseLabel(label) } func selinuxEnabled() bool { - return selinux.SelinuxEnabled() + return selinux.GetEnabled() } diff --git a/fn/vendor/github.com/docker/docker/daemon/start.go b/fn/vendor/github.com/docker/docker/daemon/start.go index eddb5d3d5..8d938519c 100644 --- a/fn/vendor/github.com/docker/docker/daemon/start.go +++ b/fn/vendor/github.com/docker/docker/daemon/start.go @@ -58,7 +58,7 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.Hos // if user has change the network mode on starting, clean up the // old networks. It is a deprecated feature and has been removed in Docker 1.12 container.NetworkSettings.Networks = nil - if err := container.ToDisk(); err != nil { + if err := container.CheckpointTo(daemon.containersReplica); err != nil { return err } } @@ -86,11 +86,6 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.Hos return daemon.containerStart(container, checkpoint, checkpointDir, true) } -// Start starts a container -func (daemon *Daemon) Start(container *container.Container) error { - return daemon.containerStart(container, "", "", true) -} - // containerStart prepares the container to run by setting up everything the // container needs, such as storage and networking, as well as links // between containers. 
The container is left waiting for a signal to @@ -117,8 +112,9 @@ func (daemon *Daemon) containerStart(container *container.Container, checkpoint if container.ExitCode() == 0 { container.SetExitCode(128) } - container.ToDisk() - + if err := container.CheckpointTo(daemon.containersReplica); err != nil { + logrus.Errorf("%s: failed saving state on start failure: %v", container.ID, err) + } container.Reset(false) daemon.Cleanup(container) @@ -207,7 +203,7 @@ func (daemon *Daemon) Cleanup(container *container.Container) { if err := daemon.conditionalUnmountOnCleanup(container); err != nil { // FIXME: remove once reference counting for graphdrivers has been refactored // Ensure that all the mounts are gone - if mountid, err := daemon.layerStore.GetMountID(container.ID); err == nil { + if mountid, err := daemon.stores[container.Platform].layerStore.GetMountID(container.ID); err == nil { daemon.cleanupMountsByID(mountid) } } diff --git a/fn/vendor/github.com/docker/docker/daemon/start_unix.go b/fn/vendor/github.com/docker/docker/daemon/start_unix.go index 103cc73b8..12ecdab2d 100644 --- a/fn/vendor/github.com/docker/docker/daemon/start_unix.go +++ b/fn/vendor/github.com/docker/docker/daemon/start_unix.go @@ -9,13 +9,14 @@ import ( "github.com/docker/docker/libcontainerd" ) +// getLibcontainerdCreateOptions callers must hold a lock on the container func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) ([]libcontainerd.CreateOption, error) { createOptions := []libcontainerd.CreateOption{} // Ensure a runtime has been assigned to this container if container.HostConfig.Runtime == "" { container.HostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName() - container.ToDisk() + container.CheckpointTo(daemon.containersReplica) } rt := daemon.configStore.GetRuntime(container.HostConfig.Runtime) diff --git a/fn/vendor/github.com/docker/docker/daemon/start_windows.go b/fn/vendor/github.com/docker/docker/daemon/start_windows.go index 
0f9739091..74129bd61 100644 --- a/fn/vendor/github.com/docker/docker/daemon/start_windows.go +++ b/fn/vendor/github.com/docker/docker/daemon/start_windows.go @@ -9,7 +9,6 @@ import ( "github.com/docker/docker/container" "github.com/docker/docker/layer" "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/pkg/system" "golang.org/x/sys/windows/registry" ) @@ -32,12 +31,6 @@ func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Contain } dnsSearch := daemon.getDNSSearchSettings(container) - if dnsSearch != nil { - osv := system.GetOSVersion() - if osv.Build < 14997 { - return nil, fmt.Errorf("dns-search option is not supported on the current platform") - } - } // Generate the layer folder of the layer options layerOpts := &libcontainerd.LayerOption{} @@ -45,14 +38,10 @@ func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Contain if err != nil { return nil, fmt.Errorf("failed to get layer metadata - %s", err) } - if hvOpts.IsHyperV { - hvOpts.SandboxPath = filepath.Dir(m["dir"]) - } - layerOpts.LayerFolderPath = m["dir"] // Generate the layer paths of the layer options - img, err := daemon.imageStore.Get(container.ImageID) + img, err := daemon.stores[container.Platform].imageStore.Get(container.ImageID) if err != nil { return nil, fmt.Errorf("failed to graph.Get on ImageID %s - %s", container.ImageID, err) } @@ -60,9 +49,9 @@ func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Contain max := len(img.RootFS.DiffIDs) for i := 1; i <= max; i++ { img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i] - layerPath, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID()) + layerPath, err := layer.GetLayerPath(daemon.stores[container.Platform].layerStore, img.RootFS.ChainID()) if err != nil { - return nil, fmt.Errorf("failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.layerStore, img.RootFS.ChainID(), err) + return nil, fmt.Errorf("failed to get layer path from graphdriver %s 
for ImageID %s - %s", daemon.stores[container.Platform].layerStore, img.RootFS.ChainID(), err) } // Reverse order, expecting parent most first layerOpts.LayerPaths = append([]string{layerPath}, layerOpts.LayerPaths...) @@ -161,7 +150,11 @@ func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Contain var networkSharedContainerID string if container.HostConfig.NetworkMode.IsContainer() { networkSharedContainerID = container.NetworkSharedContainerID + for _, ep := range container.SharedEndpointList { + epList = append(epList, ep) + } } + createOptions = append(createOptions, &libcontainerd.NetworkEndpointsOption{ Endpoints: epList, AllowUnqualifiedDNSQuery: AllowUnqualifiedDNSQuery, diff --git a/fn/vendor/github.com/docker/docker/daemon/stats/collector_unix.go b/fn/vendor/github.com/docker/docker/daemon/stats/collector_unix.go index 5ad965869..cd522e07c 100644 --- a/fn/vendor/github.com/docker/docker/daemon/stats/collector_unix.go +++ b/fn/vendor/github.com/docker/docker/daemon/stats/collector_unix.go @@ -72,7 +72,11 @@ func (s *Collector) getSystemCPUUsage() (uint64, error) { func (s *Collector) getNumberOnlineCPUs() (uint32, error) { i, err := C.sysconf(C._SC_NPROCESSORS_ONLN) - if err != nil { + // According to POSIX - errno is undefined after successful + // sysconf, and can be non-zero in several cases, so look for + // error in returned value not in errno. 
+ // (https://sourceware.org/bugzilla/show_bug.cgi?id=21536) + if i == -1 { return 0, err } return uint32(i), nil diff --git a/fn/vendor/github.com/docker/docker/daemon/stop.go b/fn/vendor/github.com/docker/docker/daemon/stop.go index a17e8c8dd..6a4776d15 100644 --- a/fn/vendor/github.com/docker/docker/daemon/stop.go +++ b/fn/vendor/github.com/docker/docker/daemon/stop.go @@ -1,13 +1,14 @@ package daemon import ( + "context" "fmt" "net/http" "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/api/errors" - "github.com/docker/docker/container" + containerpkg "github.com/docker/docker/container" ) // ContainerStop looks for the given container and terminates it, @@ -40,7 +41,7 @@ func (daemon *Daemon) ContainerStop(name string, seconds *int) error { // process to exit. If a negative duration is given, Stop will wait // for the initial signal forever. If the container is not running Stop returns // immediately. -func (daemon *Daemon) containerStop(container *container.Container, seconds int) error { +func (daemon *Daemon) containerStop(container *containerpkg.Container, seconds int) error { if !container.IsRunning() { return nil } @@ -60,7 +61,10 @@ func (daemon *Daemon) containerStop(container *container.Container, seconds int) // So, instead we'll give it up to 2 more seconds to complete and if // by that time the container is still running, then the error // we got is probably valid and so we force kill it. - if _, err := container.WaitStop(2 * time.Second); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil { logrus.Infof("Container failed to stop after sending signal %d to the process, force killing", stopSignal) if err := daemon.killPossiblyDeadProcess(container, 9); err != nil { return err @@ -69,11 +73,15 @@ func (daemon *Daemon) containerStop(container *container.Container, seconds int) } // 2. 
Wait for the process to exit on its own - if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(seconds)*time.Second) + defer cancel() + + if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil { logrus.Infof("Container %v failed to exit within %d seconds of signal %d - using the force", container.ID, seconds, stopSignal) // 3. If it doesn't, then send SIGKILL if err := daemon.Kill(container); err != nil { - container.WaitStop(-1 * time.Second) + // Wait without a timeout, ignore result. + _ = <-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning) logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it } } diff --git a/fn/vendor/github.com/docker/docker/daemon/update.go b/fn/vendor/github.com/docker/docker/daemon/update.go index 6e26eeb96..a65cbd51b 100644 --- a/fn/vendor/github.com/docker/docker/daemon/update.go +++ b/fn/vendor/github.com/docker/docker/daemon/update.go @@ -22,20 +22,6 @@ func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostCon return container.ContainerUpdateOKBody{Warnings: warnings}, nil } -// ContainerUpdateCmdOnBuild updates Path and Args for the container with ID cID. 
-func (daemon *Daemon) ContainerUpdateCmdOnBuild(cID string, cmd []string) error { - if len(cmd) == 0 { - return nil - } - c, err := daemon.GetContainer(cID) - if err != nil { - return err - } - c.Path = cmd[0] - c.Args = cmd[1:] - return nil -} - func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error { if hostConfig == nil { return nil @@ -52,7 +38,7 @@ func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) erro if restoreConfig { container.Lock() container.HostConfig = &backupHostConfig - container.ToDisk() + container.CheckpointTo(daemon.containersReplica) container.Unlock() } }() @@ -61,10 +47,18 @@ func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) erro return errCannotUpdate(container.ID, fmt.Errorf("Container is marked for removal and cannot be \"update\".")) } + container.Lock() if err := container.UpdateContainer(hostConfig); err != nil { restoreConfig = true + container.Unlock() return errCannotUpdate(container.ID, err) } + if err := container.CheckpointTo(daemon.containersReplica); err != nil { + restoreConfig = true + container.Unlock() + return errCannotUpdate(container.ID, err) + } + container.Unlock() // if Restart Policy changed, we need to update container monitor if hostConfig.RestartPolicy.Name != "" { diff --git a/fn/vendor/github.com/docker/docker/daemon/volumes.go b/fn/vendor/github.com/docker/docker/daemon/volumes.go index 9f0468e1a..6f24f0591 100644 --- a/fn/vendor/github.com/docker/docker/daemon/volumes.go +++ b/fn/vendor/github.com/docker/docker/daemon/volumes.go @@ -5,7 +5,9 @@ import ( "fmt" "os" "path/filepath" + "reflect" "strings" + "time" "github.com/Sirupsen/logrus" dockererrors "github.com/docker/docker/api/errors" @@ -27,9 +29,11 @@ type mounts []container.Mount // volumeToAPIType converts a volume.Volume to the type used by the Engine API func volumeToAPIType(v volume.Volume) *types.Volume { + createdAt, _ := v.CreatedAt() tv := &types.Volume{ - Name: 
v.Name(), - Driver: v.DriverName(), + Name: v.Name(), + Driver: v.DriverName(), + CreatedAt: createdAt.Format(time.RFC3339), } if v, ok := v.(volume.DetailedVolume); ok { tv.Labels = v.Labels() @@ -112,6 +116,7 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo for _, m := range c.MountPoints { cp := &volume.MountPoint{ + Type: m.Type, Name: m.Name, Source: m.Source, RW: m.RW && volume.ReadWrite(mode), @@ -239,48 +244,125 @@ func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volume.MountPo return nil } -func backportMountSpec(container *container.Container) error { - for target, m := range container.MountPoints { - if m.Spec.Type != "" { - // if type is set on even one mount, no need to migrate - return nil - } - if m.Name != "" { - m.Type = mounttypes.TypeVolume - m.Spec.Type = mounttypes.TypeVolume +// backportMountSpec resolves mount specs (introduced in 1.13) from pre-1.13 +// mount configurations +// The container lock should not be held when calling this function. 
+// Changes are only made in-memory and may make changes to containers referenced +// by `container.HostConfig.VolumesFrom` +func (daemon *Daemon) backportMountSpec(container *container.Container) { + container.Lock() + defer container.Unlock() - // make sure this is not an anonymous volume before setting the spec source - if _, exists := container.Config.Volumes[target]; !exists { - m.Spec.Source = m.Name - } - if container.HostConfig.VolumeDriver != "" { - m.Spec.VolumeOptions = &mounttypes.VolumeOptions{ - DriverConfig: &mounttypes.Driver{Name: container.HostConfig.VolumeDriver}, - } - } - if strings.Contains(m.Mode, "nocopy") { - if m.Spec.VolumeOptions == nil { - m.Spec.VolumeOptions = &mounttypes.VolumeOptions{} - } - m.Spec.VolumeOptions.NoCopy = true - } - } else { - m.Type = mounttypes.TypeBind - m.Spec.Type = mounttypes.TypeBind - m.Spec.Source = m.Source - if m.Propagation != "" { - m.Spec.BindOptions = &mounttypes.BindOptions{ - Propagation: m.Propagation, - } - } - } - - m.Spec.Target = m.Destination - if !m.RW { - m.Spec.ReadOnly = true + maybeUpdate := make(map[string]bool) + for _, mp := range container.MountPoints { + if mp.Spec.Source != "" && mp.Type != "" { + continue } + maybeUpdate[mp.Destination] = true + } + if len(maybeUpdate) == 0 { + return + } + + mountSpecs := make(map[string]bool, len(container.HostConfig.Mounts)) + for _, m := range container.HostConfig.Mounts { + mountSpecs[m.Target] = true + } + + binds := make(map[string]*volume.MountPoint, len(container.HostConfig.Binds)) + for _, rawSpec := range container.HostConfig.Binds { + mp, err := volume.ParseMountRaw(rawSpec, container.HostConfig.VolumeDriver) + if err != nil { + logrus.WithError(err).Error("Got unexpected error while re-parsing raw volume spec during spec backport") + continue + } + binds[mp.Destination] = mp + } + + volumesFrom := make(map[string]volume.MountPoint) + for _, fromSpec := range container.HostConfig.VolumesFrom { + from, _, err := 
volume.ParseVolumesFrom(fromSpec) + if err != nil { + logrus.WithError(err).WithField("id", container.ID).Error("Error reading volumes-from spec during mount spec backport") + continue + } + fromC, err := daemon.GetContainer(from) + if err != nil { + logrus.WithError(err).WithField("from-container", from).Error("Error looking up volumes-from container") + continue + } + + // make sure from container's specs have been backported + daemon.backportMountSpec(fromC) + + fromC.Lock() + for t, mp := range fromC.MountPoints { + volumesFrom[t] = *mp + } + fromC.Unlock() + } + + needsUpdate := func(containerMount, other *volume.MountPoint) bool { + if containerMount.Type != other.Type || !reflect.DeepEqual(containerMount.Spec, other.Spec) { + return true + } + return false + } + + // main + for _, cm := range container.MountPoints { + if !maybeUpdate[cm.Destination] { + continue + } + // nothing to backport if from hostconfig.Mounts + if mountSpecs[cm.Destination] { + continue + } + + if mp, exists := binds[cm.Destination]; exists { + if needsUpdate(cm, mp) { + cm.Spec = mp.Spec + cm.Type = mp.Type + } + continue + } + + if cm.Name != "" { + if mp, exists := volumesFrom[cm.Destination]; exists { + if needsUpdate(cm, &mp) { + cm.Spec = mp.Spec + cm.Type = mp.Type + } + continue + } + + if cm.Type != "" { + // probably specified via the hostconfig.Mounts + continue + } + + // anon volume + cm.Type = mounttypes.TypeVolume + cm.Spec.Type = mounttypes.TypeVolume + } else { + if cm.Type != "" { + // already updated + continue + } + + cm.Type = mounttypes.TypeBind + cm.Spec.Type = mounttypes.TypeBind + cm.Spec.Source = cm.Source + if cm.Propagation != "" { + cm.Spec.BindOptions = &mounttypes.BindOptions{ + Propagation: cm.Propagation, + } + } + } + + cm.Spec.Target = cm.Destination + cm.Spec.ReadOnly = !cm.RW } - return container.ToDiskLocking() } func (daemon *Daemon) traverseLocalVolumes(fn func(volume.Volume) error) error { diff --git 
a/fn/vendor/github.com/docker/docker/daemon/volumes_unix.go b/fn/vendor/github.com/docker/docker/daemon/volumes_unix.go index 29dffa9ea..0a4cbf849 100644 --- a/fn/vendor/github.com/docker/docker/daemon/volumes_unix.go +++ b/fn/vendor/github.com/docker/docker/daemon/volumes_unix.go @@ -6,6 +6,7 @@ package daemon import ( "encoding/json" + "fmt" "os" "path/filepath" "sort" @@ -42,8 +43,18 @@ func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, er if err := daemon.lazyInitializeVolume(c.ID, m); err != nil { return nil, err } - rootUID, rootGID := daemon.GetRemappedUIDGID() - path, err := m.Setup(c.MountLabel, rootUID, rootGID) + // If the daemon is being shut down, we should not let a container start if it is trying to + mount the socket the daemon is listening on. During daemon shutdown, the socket + // (/var/run/docker.sock by default) doesn't exist anymore causing the call to m.Setup to + create a directory instead. This in turn will prevent the daemon from restarting. 
+ checkfunc := func(m *volume.MountPoint) error { + if _, exist := daemon.hosts[m.Source]; exist && daemon.IsShuttingDown() { + return fmt.Errorf("Could not mount %q to container while the daemon is shutting down", m.Source) + } + return nil + } + + path, err := m.Setup(c.MountLabel, daemon.idMappings.RootPair(), checkfunc) if err != nil { return nil, err } @@ -73,9 +84,9 @@ func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, er // if we are going to mount any of the network files from container // metadata, the ownership must be set properly for potential container // remapped root (user namespaces) - rootUID, rootGID := daemon.GetRemappedUIDGID() + rootIDs := daemon.idMappings.RootPair() for _, mount := range netMounts { - if err := os.Chown(mount.Source, rootUID, rootGID); err != nil { + if err := os.Chown(mount.Source, rootIDs.UID, rootIDs.GID); err != nil { return nil, err } } @@ -126,6 +137,9 @@ func migrateVolume(id, vfs string) error { // verifyVolumesInfo ports volumes configured for the containers pre docker 1.7. // It reads the container configuration and creates valid mount points for the old volumes. func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error { + container.Lock() + defer container.Unlock() + // Inspect old structures only when we're upgrading from old versions // to versions >= 1.7 and the MountPoints has not been populated with volumes data. 
type volumes struct { @@ -166,7 +180,6 @@ func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error { container.MountPoints[destination] = &m } } - return container.ToDisk() } return nil } diff --git a/fn/vendor/github.com/docker/docker/daemon/volumes_unix_test.go b/fn/vendor/github.com/docker/docker/daemon/volumes_unix_test.go new file mode 100644 index 000000000..3a81eeeb7 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/daemon/volumes_unix_test.go @@ -0,0 +1,256 @@ +// +build !windows + +package daemon + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/container" + "github.com/docker/docker/volume" +) + +func TestBackportMountSpec(t *testing.T) { + d := Daemon{containers: container.NewMemoryStore()} + + c := &container.Container{ + State: &container.State{}, + MountPoints: map[string]*volume.MountPoint{ + "/apple": {Destination: "/apple", Source: "/var/lib/docker/volumes/12345678", Name: "12345678", RW: true, CopyData: true}, // anonymous volume + "/banana": {Destination: "/banana", Source: "/var/lib/docker/volumes/data", Name: "data", RW: true, CopyData: true}, // named volume + "/cherry": {Destination: "/cherry", Source: "/var/lib/docker/volumes/data", Name: "data", CopyData: true}, // RO named volume + "/dates": {Destination: "/dates", Source: "/var/lib/docker/volumes/data", Name: "data"}, // named volume nocopy + "/elderberry": {Destination: "/elderberry", Source: "/var/lib/docker/volumes/data", Name: "data"}, // masks anon vol + "/fig": {Destination: "/fig", Source: "/data", RW: true}, // RW bind + "/guava": {Destination: "/guava", Source: "/data", RW: false, Propagation: "shared"}, // RO bind + propagation + "/kumquat": {Destination: "/kumquat", Name: "data", RW: false, CopyData: true}, // volumes-from + + // partially configured mountpoint due to #32613 + // specifically, 
`mp.Spec.Source` is not set + "/honeydew": { + Type: mounttypes.TypeVolume, + Destination: "/honeydew", + Name: "data", + Source: "/var/lib/docker/volumes/data", + Spec: mounttypes.Mount{Type: mounttypes.TypeVolume, Target: "/honeydew", VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, + }, + + // from hostconfig.Mounts + "/jambolan": { + Type: mounttypes.TypeVolume, + Destination: "/jambolan", + Source: "/var/lib/docker/volumes/data", + RW: true, + Name: "data", + Spec: mounttypes.Mount{Type: mounttypes.TypeVolume, Target: "/jambolan", Source: "data"}, + }, + }, + HostConfig: &containertypes.HostConfig{ + Binds: []string{ + "data:/banana", + "data:/cherry:ro", + "data:/dates:ro,nocopy", + "data:/elderberry:ro,nocopy", + "/data:/fig", + "/data:/guava:ro,shared", + "data:/honeydew:nocopy", + }, + VolumesFrom: []string{"1:ro"}, + Mounts: []mounttypes.Mount{ + {Type: mounttypes.TypeVolume, Target: "/jambolan"}, + }, + }, + Config: &containertypes.Config{Volumes: map[string]struct{}{ + "/apple": {}, + "/elderberry": {}, + }}, + } + + d.containers.Add("1", &container.Container{ + State: &container.State{}, + ID: "1", + MountPoints: map[string]*volume.MountPoint{ + "/kumquat": {Destination: "/kumquat", Name: "data", RW: false, CopyData: true}, + }, + HostConfig: &containertypes.HostConfig{ + Binds: []string{ + "data:/kumquat:ro", + }, + }, + }) + + type expected struct { + mp *volume.MountPoint + comment string + } + + pretty := func(mp *volume.MountPoint) string { + b, err := json.MarshalIndent(mp, "\t", " ") + if err != nil { + return fmt.Sprintf("%#v", mp) + } + return string(b) + } + + for _, x := range []expected{ + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/apple", + RW: true, + Name: "12345678", + Source: "/var/lib/docker/volumes/12345678", + CopyData: true, + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "", + Target: "/apple", + }, + }, + comment: "anonymous volume", + }, + { + mp: &volume.MountPoint{ + 
Type: mounttypes.TypeVolume, + Destination: "/banana", + RW: true, + Name: "data", + Source: "/var/lib/docker/volumes/data", + CopyData: true, + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/banana", + }, + }, + comment: "named volume", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/cherry", + Name: "data", + Source: "/var/lib/docker/volumes/data", + CopyData: true, + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/cherry", + ReadOnly: true, + }, + }, + comment: "read-only named volume", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/dates", + Name: "data", + Source: "/var/lib/docker/volumes/data", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/dates", + ReadOnly: true, + VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}, + }, + }, + comment: "named volume with nocopy", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/elderberry", + Name: "data", + Source: "/var/lib/docker/volumes/data", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/elderberry", + ReadOnly: true, + VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}, + }, + }, + comment: "masks an anonymous volume", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeBind, + Destination: "/fig", + Source: "/data", + RW: true, + Spec: mounttypes.Mount{ + Type: mounttypes.TypeBind, + Source: "/data", + Target: "/fig", + }, + }, + comment: "bind mount with read/write", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeBind, + Destination: "/guava", + Source: "/data", + RW: false, + Propagation: "shared", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeBind, + Source: "/data", + Target: "/guava", + ReadOnly: true, + BindOptions: &mounttypes.BindOptions{Propagation: "shared"}, + }, + }, + comment: "bind mount with read/write + shared 
propagation", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/honeydew", + Source: "/var/lib/docker/volumes/data", + RW: true, + Propagation: "shared", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/honeydew", + VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}, + }, + }, + comment: "partially configured named volume caused by #32613", + }, + { + mp: &(*c.MountPoints["/jambolan"]), // copy the mountpoint, expect no changes + comment: "volume defined in mounts API", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/kumquat", + Source: "/var/lib/docker/volumes/data", + RW: false, + Name: "data", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/kumquat", + ReadOnly: true, + }, + }, + comment: "partially configured named volume caused by #32613", + }, + } { + + mp := c.MountPoints[x.mp.Destination] + d.backportMountSpec(c) + + if !reflect.DeepEqual(mp.Spec, x.mp.Spec) { + t.Fatalf("%s\nexpected:\n\t%s\n\ngot:\n\t%s", x.comment, pretty(x.mp), pretty(mp)) + } + } +} diff --git a/fn/vendor/github.com/docker/docker/daemon/volumes_windows.go b/fn/vendor/github.com/docker/docker/daemon/volumes_windows.go index b43de47b6..62c9e23ac 100644 --- a/fn/vendor/github.com/docker/docker/daemon/volumes_windows.go +++ b/fn/vendor/github.com/docker/docker/daemon/volumes_windows.go @@ -6,6 +6,7 @@ import ( "sort" "github.com/docker/docker/container" + "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/volume" ) @@ -24,7 +25,7 @@ func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, er if err := daemon.lazyInitializeVolume(c.ID, mount); err != nil { return nil, err } - s, err := mount.Setup(c.MountLabel, 0, 0) + s, err := mount.Setup(c.MountLabel, idtools.IDPair{0, 0}, nil) if err != nil { return nil, err } diff --git a/fn/vendor/github.com/docker/docker/daemon/wait.go 
b/fn/vendor/github.com/docker/docker/daemon/wait.go index 2dab22e99..76c16b9ef 100644 --- a/fn/vendor/github.com/docker/docker/daemon/wait.go +++ b/fn/vendor/github.com/docker/docker/daemon/wait.go @@ -1,32 +1,22 @@ package daemon import ( - "time" - + "github.com/docker/docker/container" "golang.org/x/net/context" ) -// ContainerWait stops processing until the given container is -// stopped. If the container is not found, an error is returned. On a -// successful stop, the exit code of the container is returned. On a -// timeout, an error is returned. If you want to wait forever, supply -// a negative duration for the timeout. -func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, error) { - container, err := daemon.GetContainer(name) +// ContainerWait waits until the given container is in a certain state +// indicated by the given condition. If the container is not found, a nil +// channel and non-nil error is returned immediately. If the container is +// found, a status result will be sent on the returned channel once the wait +// condition is met or if an error occurs waiting for the container (such as a +// context timeout or cancellation). On a successful wait, the exit code of the +// container is returned in the status with a non-nil Err() value. +func (daemon *Daemon) ContainerWait(ctx context.Context, name string, condition container.WaitCondition) (<-chan container.StateStatus, error) { + cntr, err := daemon.GetContainer(name) if err != nil { - return -1, err + return nil, err } - return container.WaitStop(timeout) -} - -// ContainerWaitWithContext returns a channel where exit code is sent -// when container stops. Channel can be cancelled with a context. 
-func (daemon *Daemon) ContainerWaitWithContext(ctx context.Context, name string) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - return container.WaitWithContext(ctx) + return cntr.Wait(ctx, condition), nil } diff --git a/fn/vendor/github.com/docker/docker/daemon/workdir.go b/fn/vendor/github.com/docker/docker/daemon/workdir.go index 99a2a8ea5..6360f2413 100644 --- a/fn/vendor/github.com/docker/docker/daemon/workdir.go +++ b/fn/vendor/github.com/docker/docker/daemon/workdir.go @@ -16,6 +16,5 @@ func (daemon *Daemon) ContainerCreateWorkdir(cID string) error { return err } defer daemon.Unmount(container) - rootUID, rootGID := daemon.GetRemappedUIDGID() - return container.SetupWorkingDirectory(rootUID, rootGID) + return container.SetupWorkingDirectory(daemon.idMappings.RootPair()) } diff --git a/fn/vendor/github.com/docker/docker/distribution/config.go b/fn/vendor/github.com/docker/docker/distribution/config.go index f24678d0e..1c10533f6 100644 --- a/fn/vendor/github.com/docker/docker/distribution/config.go +++ b/fn/vendor/github.com/docker/docker/distribution/config.go @@ -14,6 +14,7 @@ import ( "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/system" refstore "github.com/docker/docker/reference" "github.com/docker/docker/registry" "github.com/docker/libtrust" @@ -58,6 +59,9 @@ type ImagePullConfig struct { // Schema2Types is the valid schema2 configuration types allowed // by the pull operation. Schema2Types []string + // Platform is the requested platform of the image being pulled to ensure it can be validated + // when the host platform supports multiple image operating systems. + Platform string } // ImagePushConfig stores push configuration. 
@@ -82,7 +86,7 @@ type ImagePushConfig struct { type ImageConfigStore interface { Put([]byte) (digest.Digest, error) Get(digest.Digest) ([]byte, error) - RootFSFromConfig([]byte) (*image.RootFS, error) + RootFSAndPlatformFromConfig([]byte) (*image.RootFS, layer.Platform, error) } // PushLayerProvider provides layers to be pushed by ChainID. @@ -108,7 +112,7 @@ type RootFSDownloadManager interface { // returns the final rootfs. // Given progress output to track download progress // Returns function to release download resources - Download(ctx context.Context, initialRootFS image.RootFS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) + Download(ctx context.Context, initialRootFS image.RootFS, platform layer.Platform, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) } type imageConfigStore struct { @@ -136,21 +140,25 @@ func (s *imageConfigStore) Get(d digest.Digest) ([]byte, error) { return img.RawJSON(), nil } -func (s *imageConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { +func (s *imageConfigStore) RootFSAndPlatformFromConfig(c []byte) (*image.RootFS, layer.Platform, error) { var unmarshalledConfig image.Image if err := json.Unmarshal(c, &unmarshalledConfig); err != nil { - return nil, err + return nil, "", err } // fail immediately on Windows when downloading a non-Windows image - // and vice versa - if runtime.GOOS == "windows" && unmarshalledConfig.OS == "linux" { - return nil, fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS) + // and vice versa. Exception on Windows if Linux Containers are enabled. 
+ if runtime.GOOS == "windows" && unmarshalledConfig.OS == "linux" && !system.LCOWSupported() { + return nil, "", fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS) } else if runtime.GOOS != "windows" && unmarshalledConfig.OS == "windows" { - return nil, fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS) + return nil, "", fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS) } - return unmarshalledConfig.RootFS, nil + platform := "" + if runtime.GOOS == "windows" { + platform = unmarshalledConfig.OS + } + return unmarshalledConfig.RootFS, layer.Platform(platform), nil } type storeLayerProvider struct { diff --git a/fn/vendor/github.com/docker/docker/distribution/errors.go b/fn/vendor/github.com/docker/docker/distribution/errors.go index adf272a5c..7f97c1d5e 100644 --- a/fn/vendor/github.com/docker/docker/distribution/errors.go +++ b/fn/vendor/github.com/docker/docker/distribution/errors.go @@ -78,7 +78,7 @@ func TranslatePullError(err error, ref reference.Named) error { switch v.Code { case errcode.ErrorCodeDenied: // ErrorCodeDenied is used when access to the repository was denied - newErr = errors.Errorf("repository %s not found: does not exist or no pull access", reference.FamiliarName(ref)) + newErr = errors.Errorf("pull access denied for %s, repository does not exist or may require 'docker login'", reference.FamiliarName(ref)) case v2.ErrorCodeManifestUnknown: newErr = errors.Errorf("manifest for %s not found", reference.FamiliarString(ref)) case v2.ErrorCodeNameUnknown: diff --git a/fn/vendor/github.com/docker/docker/distribution/metadata/metadata.go b/fn/vendor/github.com/docker/docker/distribution/metadata/metadata.go index 05ba4f817..3dae79555 100644 --- a/fn/vendor/github.com/docker/docker/distribution/metadata/metadata.go +++ b/fn/vendor/github.com/docker/docker/distribution/metadata/metadata.go @@ -26,15 +26,17 @@ type Store 
interface { type FSMetadataStore struct { sync.RWMutex basePath string + platform string } // NewFSMetadataStore creates a new filesystem-based metadata store. -func NewFSMetadataStore(basePath string) (*FSMetadataStore, error) { +func NewFSMetadataStore(basePath, platform string) (*FSMetadataStore, error) { if err := os.MkdirAll(basePath, 0700); err != nil { return nil, err } return &FSMetadataStore{ basePath: basePath, + platform: platform, }, nil } diff --git a/fn/vendor/github.com/docker/docker/distribution/metadata/v1_id_service_test.go b/fn/vendor/github.com/docker/docker/distribution/metadata/v1_id_service_test.go index 556886581..385901ec4 100644 --- a/fn/vendor/github.com/docker/docker/distribution/metadata/v1_id_service_test.go +++ b/fn/vendor/github.com/docker/docker/distribution/metadata/v1_id_service_test.go @@ -3,6 +3,7 @@ package metadata import ( "io/ioutil" "os" + "runtime" "testing" "github.com/docker/docker/layer" @@ -15,7 +16,7 @@ func TestV1IDService(t *testing.T) { } defer os.RemoveAll(tmpDir) - metadataStore, err := NewFSMetadataStore(tmpDir) + metadataStore, err := NewFSMetadataStore(tmpDir, runtime.GOOS) if err != nil { t.Fatalf("could not create metadata store: %v", err) } diff --git a/fn/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go b/fn/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go index c31c7214e..7524f63ce 100644 --- a/fn/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go +++ b/fn/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go @@ -203,7 +203,7 @@ func (serv *v2MetadataService) TagAndAdd(diffID layer.DiffID, hmacKey []byte, me return serv.Add(diffID, meta) } -// Remove unassociates a metadata entry from a layer DiffID. +// Remove disassociates a metadata entry from a layer DiffID. 
func (serv *v2MetadataService) Remove(metadata V2Metadata) error { if serv.store == nil { // Support a service which has no backend storage, in this case diff --git a/fn/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service_test.go b/fn/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service_test.go index 8e3e4614c..b5d59b229 100644 --- a/fn/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service_test.go +++ b/fn/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service_test.go @@ -6,6 +6,7 @@ import ( "math/rand" "os" "reflect" + "runtime" "testing" "github.com/docker/docker/layer" @@ -19,7 +20,7 @@ func TestV2MetadataService(t *testing.T) { } defer os.RemoveAll(tmpDir) - metadataStore, err := NewFSMetadataStore(tmpDir) + metadataStore, err := NewFSMetadataStore(tmpDir, runtime.GOOS) if err != nil { t.Fatalf("could not create metadata store: %v", err) } diff --git a/fn/vendor/github.com/docker/docker/distribution/pull_v1.go b/fn/vendor/github.com/docker/docker/distribution/pull_v1.go index d873d338c..7151a7584 100644 --- a/fn/vendor/github.com/docker/docker/distribution/pull_v1.go +++ b/fn/vendor/github.com/docker/docker/distribution/pull_v1.go @@ -232,7 +232,7 @@ func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNa } rootFS := image.NewRootFS() - resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, "", descriptors, p.config.ProgressOutput) if err != nil { return err } diff --git a/fn/vendor/github.com/docker/docker/distribution/pull_v2.go b/fn/vendor/github.com/docker/docker/distribution/pull_v2.go index 93760a399..50257f5cb 100644 --- a/fn/vendor/github.com/docker/docker/distribution/pull_v2.go +++ b/fn/vendor/github.com/docker/docker/distribution/pull_v2.go @@ -27,6 +27,7 @@ import ( "github.com/docker/docker/pkg/ioutils" 
"github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" refstore "github.com/docker/docker/reference" "github.com/docker/docker/registry" "github.com/opencontainers/go-digest" @@ -131,6 +132,7 @@ func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (e type v2LayerDescriptor struct { digest digest.Digest + diffID layer.DiffID repoInfo *registry.RepositoryInfo repo distribution.Repository V2MetadataService metadata.V2MetadataService @@ -148,6 +150,9 @@ func (ld *v2LayerDescriptor) ID() string { } func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) { + if ld.diffID != "" { + return ld.diffID, nil + } return ld.V2MetadataService.GetDiffID(ld.digest) } @@ -330,18 +335,18 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdat manifest distribution.Manifest tagOrDigest string // Used for logging/progress only ) - if tagged, isTagged := ref.(reference.NamedTagged); isTagged { - manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag())) - if err != nil { - return false, allowV1Fallback(err) - } - tagOrDigest = tagged.Tag() - } else if digested, isDigested := ref.(reference.Canonical); isDigested { + if digested, isDigested := ref.(reference.Canonical); isDigested { manifest, err = manSvc.Get(ctx, digested.Digest()) if err != nil { return false, err } tagOrDigest = digested.Digest().String() + } else if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag())) + if err != nil { + return false, allowV1Fallback(err) + } + tagOrDigest = tagged.Tag() } else { return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref)) } @@ -482,7 +487,26 @@ func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverif descriptors = append(descriptors, layerDescriptor) } - resultRootFS, release, err := 
p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) + // The v1 manifest itself doesn't directly contain a platform. However, + // the history does, but unfortunately that's a string, so search through + // all the history until hopefully we find one which indicates the os. + platform := runtime.GOOS + if system.LCOWSupported() { + type config struct { + Os string `json:"os,omitempty"` + } + for _, v := range verifiedManifest.History { + var c config + if err := json.Unmarshal([]byte(v.V1Compatibility), &c); err == nil { + if c.Os != "" { + platform = c.Os + break + } + } + } + } + + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, layer.Platform(platform), descriptors, p.config.ProgressOutput) if err != nil { return "", "", err } @@ -552,10 +576,11 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s }() var ( - configJSON []byte // raw serialized image config - downloadedRootFS *image.RootFS // rootFS from registered layers - configRootFS *image.RootFS // rootFS from configuration - release func() // release resources from rootFS download + configJSON []byte // raw serialized image config + downloadedRootFS *image.RootFS // rootFS from registered layers + configRootFS *image.RootFS // rootFS from configuration + release func() // release resources from rootFS download + platform layer.Platform // for LCOW when registering downloaded layers ) // https://github.com/docker/docker/issues/24766 - Err on the side of caution, @@ -567,7 +592,7 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s // check to block Windows images being pulled on Linux is implemented, it // may be necessary to perform the same type of serialisation. 
if runtime.GOOS == "windows" { - configJSON, configRootFS, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) + configJSON, configRootFS, platform, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) if err != nil { return "", "", err } @@ -575,6 +600,16 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s if configRootFS == nil { return "", "", errRootFSInvalid } + + if len(descriptors) != len(configRootFS.DiffIDs) { + return "", "", errRootFSMismatch + } + + // Populate diff ids in descriptors to avoid downloading foreign layers + // which have been side loaded + for i := range descriptors { + descriptors[i].(*v2LayerDescriptor).diffID = configRootFS.DiffIDs[i] + } } if p.config.DownloadManager != nil { @@ -584,7 +619,7 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s rootFS image.RootFS ) downloadRootFS := *image.NewRootFS() - rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput) + rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, platform, descriptors, p.config.ProgressOutput) if err != nil { // Intentionally do not cancel the config download here // as the error from config download (if there is one) @@ -602,7 +637,7 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s } if configJSON == nil { - configJSON, configRootFS, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) + configJSON, configRootFS, _, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) if err == nil && configRootFS == nil { err = errRootFSInvalid } @@ -649,16 +684,16 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s return imageID, manifestDigest, nil } -func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, error) { +func receiveConfig(s ImageConfigStore, 
configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, layer.Platform, error) { select { case configJSON := <-configChan: - rootfs, err := s.RootFSFromConfig(configJSON) + rootfs, platform, err := s.RootFSAndPlatformFromConfig(configJSON) if err != nil { - return nil, nil, err + return nil, nil, "", err } - return configJSON, rootfs, nil + return configJSON, rootfs, platform, nil case err := <-errChan: - return nil, nil, err + return nil, nil, "", err // Don't need a case for ctx.Done in the select because cancellation // will trigger an error in p.pullSchema2ImageConfig. } diff --git a/fn/vendor/github.com/docker/docker/distribution/pull_v2_windows.go b/fn/vendor/github.com/docker/docker/distribution/pull_v2_windows.go index aefed8660..543ecc10e 100644 --- a/fn/vendor/github.com/docker/docker/distribution/pull_v2_windows.go +++ b/fn/vendor/github.com/docker/docker/distribution/pull_v2_windows.go @@ -23,20 +23,28 @@ func (ld *v2LayerDescriptor) Descriptor() distribution.Descriptor { } func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { + blobs := ld.repo.Blobs(ctx) + rsc, err := blobs.Open(ctx, ld.digest) + if len(ld.src.URLs) == 0 { - blobs := ld.repo.Blobs(ctx) - return blobs.Open(ctx, ld.digest) + return rsc, err } - var ( - err error - rsc distribution.ReadSeekCloser - ) + // We're done if the registry has this blob. + if err == nil { + // Seek does an HTTP GET. If it succeeds, the blob really is accessible. + if _, err = rsc.Seek(0, os.SEEK_SET); err == nil { + return rsc, nil + } + rsc.Close() + } // Find the first URL that results in a 200 result code. for _, url := range ld.src.URLs { logrus.Debugf("Pulling %v from foreign URL %v", ld.digest, url) rsc = transport.NewHTTPReadSeeker(http.DefaultClient, url, nil) + + // Seek does an HTTP GET. If it succeeds, the blob really is accessible. 
_, err = rsc.Seek(0, os.SEEK_SET) if err == nil { break diff --git a/fn/vendor/github.com/docker/docker/distribution/push_v2.go b/fn/vendor/github.com/docker/docker/distribution/push_v2.go index d89416d2d..ffc7d6810 100644 --- a/fn/vendor/github.com/docker/docker/distribution/push_v2.go +++ b/fn/vendor/github.com/docker/docker/distribution/push_v2.go @@ -118,7 +118,7 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id return fmt.Errorf("could not find image from tag %s: %v", reference.FamiliarString(ref), err) } - rootfs, err := p.config.ImageStore.RootFSFromConfig(imgConfig) + rootfs, _, err := p.config.ImageStore.RootFSAndPlatformFromConfig(imgConfig) if err != nil { return fmt.Errorf("unable to get rootfs for image %s: %s", reference.FamiliarString(ref), err) } @@ -141,12 +141,13 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id hmacKey: hmacKey, repoInfo: p.repoInfo.Name, ref: p.ref, + endpoint: p.endpoint, repo: p.repo, pushState: &p.pushState, } // Loop bounds condition is to avoid pushing the base layer on Windows. - for i := 0; i < len(rootfs.DiffIDs); i++ { + for range rootfs.DiffIDs { descriptor := descriptorTemplate descriptor.layer = l descriptor.checkedDigests = make(map[digest.Digest]struct{}) @@ -239,6 +240,7 @@ type v2PushDescriptor struct { hmacKey []byte repoInfo reference.Named ref reference.Named + endpoint registry.APIEndpoint repo distribution.Repository pushState *pushState remoteDescriptor distribution.Descriptor @@ -259,10 +261,13 @@ func (pd *v2PushDescriptor) DiffID() layer.DiffID { } func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { - if fs, ok := pd.layer.(distribution.Describable); ok { - if d := fs.Descriptor(); len(d.URLs) > 0 { - progress.Update(progressOutput, pd.ID(), "Skipped foreign layer") - return d, nil + // Skip foreign layers unless this registry allows nondistributable artifacts. 
+ if !pd.endpoint.AllowNondistributableArtifacts { + if fs, ok := pd.layer.(distribution.Describable); ok { + if d := fs.Descriptor(); len(d.URLs) > 0 { + progress.Update(progressOutput, pd.ID(), "Skipped foreign layer") + return d, nil + } } } @@ -415,6 +420,10 @@ func (pd *v2PushDescriptor) uploadUsingSession( var reader io.ReadCloser contentReader, err := pd.layer.Open() + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + size, _ := pd.layer.Size() reader = progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, contentReader), progressOutput, size, pd.ID(), "Pushing") diff --git a/fn/vendor/github.com/docker/docker/distribution/push_v2_test.go b/fn/vendor/github.com/docker/docker/distribution/push_v2_test.go index e8e2b1af2..99f5acb0c 100644 --- a/fn/vendor/github.com/docker/docker/distribution/push_v2_test.go +++ b/fn/vendor/github.com/docker/docker/distribution/push_v2_test.go @@ -185,7 +185,7 @@ func TestLayerAlreadyExists(t *testing.T) { expectedRequests: []string{"apple"}, }, { - name: "not matching reposies", + name: "not matching repositories", targetRepo: "busybox", maxExistenceChecks: 3, metadata: []metadata.V2Metadata{ diff --git a/fn/vendor/github.com/docker/docker/distribution/registry_unit_test.go b/fn/vendor/github.com/docker/docker/distribution/registry_unit_test.go index ebe6ecad9..910061f45 100644 --- a/fn/vendor/github.com/docker/docker/distribution/registry_unit_test.go +++ b/fn/vendor/github.com/docker/docker/distribution/registry_unit_test.go @@ -152,7 +152,7 @@ func testDirectory(templateDir string) (dir string, err error) { return } if templateDir != "" { - if err = archive.CopyWithTar(templateDir, dir); err != nil { + if err = archive.NewDefaultArchiver().CopyWithTar(templateDir, dir); err != nil { return } } diff --git a/fn/vendor/github.com/docker/docker/distribution/utils/progress.go b/fn/vendor/github.com/docker/docker/distribution/utils/progress.go index ef8ecc89f..cc3632a53 100644 --- 
a/fn/vendor/github.com/docker/docker/distribution/utils/progress.go +++ b/fn/vendor/github.com/docker/docker/distribution/utils/progress.go @@ -14,7 +14,7 @@ import ( // WriteDistributionProgress is a helper for writing progress from chan to JSON // stream with an optional cancel function. func WriteDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) { - progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false) + progressOutput := streamformatter.NewJSONProgressOutput(outStream, false) operationCancelled := false for prog := range progressChan { diff --git a/fn/vendor/github.com/docker/docker/distribution/xfer/download.go b/fn/vendor/github.com/docker/docker/distribution/xfer/download.go index 8bd48646d..6769ee1cd 100644 --- a/fn/vendor/github.com/docker/docker/distribution/xfer/download.go +++ b/fn/vendor/github.com/docker/docker/distribution/xfer/download.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "io" + "runtime" "time" "github.com/Sirupsen/logrus" @@ -22,7 +23,7 @@ const maxDownloadAttempts = 5 // registers and downloads those, taking into account dependencies between // layers. type LayerDownloadManager struct { - layerStore layer.Store + layerStores map[string]layer.Store tm TransferManager waitDuration time.Duration } @@ -33,9 +34,9 @@ func (ldm *LayerDownloadManager) SetConcurrency(concurrency int) { } // NewLayerDownloadManager returns a new LayerDownloadManager. 
-func NewLayerDownloadManager(layerStore layer.Store, concurrencyLimit int, options ...func(*LayerDownloadManager)) *LayerDownloadManager { +func NewLayerDownloadManager(layerStores map[string]layer.Store, concurrencyLimit int, options ...func(*LayerDownloadManager)) *LayerDownloadManager { manager := LayerDownloadManager{ - layerStore: layerStore, + layerStores: layerStores, tm: NewTransferManager(concurrencyLimit), waitDuration: time.Second, } @@ -94,7 +95,7 @@ type DownloadDescriptorWithRegistered interface { // Download method is called to get the layer tar data. Layers are then // registered in the appropriate order. The caller must call the returned // release function once it is done with the returned RootFS object. -func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { +func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, platform layer.Platform, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { var ( topLayer layer.Layer topDownload *downloadTransfer @@ -104,6 +105,11 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima downloadsByKey = make(map[string]*downloadTransfer) ) + // Assume that the platform is the host OS if blank + if platform == "" { + platform = layer.Platform(runtime.GOOS) + } + rootFS := initialRootFS for _, descriptor := range layers { key := descriptor.Key() @@ -115,13 +121,13 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima if err == nil { getRootFS := rootFS getRootFS.Append(diffID) - l, err := ldm.layerStore.Get(getRootFS.ChainID()) + l, err := ldm.layerStores[string(platform)].Get(getRootFS.ChainID()) if err == nil { // Layer already exists. 
logrus.Debugf("Layer already exists: %s", descriptor.ID()) progress.Update(progressOutput, descriptor.ID(), "Already exists") if topLayer != nil { - layer.ReleaseAndLog(ldm.layerStore, topLayer) + layer.ReleaseAndLog(ldm.layerStores[string(platform)], topLayer) } topLayer = l missingLayer = false @@ -140,7 +146,7 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima // the stack? If so, avoid downloading it more than once. var topDownloadUncasted Transfer if existingDownload, ok := downloadsByKey[key]; ok { - xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload) + xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload, platform) defer topDownload.Transfer.Release(watcher) topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) topDownload = topDownloadUncasted.(*downloadTransfer) @@ -152,10 +158,10 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima var xferFunc DoFunc if topDownload != nil { - xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload) + xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload, platform) defer topDownload.Transfer.Release(watcher) } else { - xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil) + xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil, platform) } topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) topDownload = topDownloadUncasted.(*downloadTransfer) @@ -165,7 +171,7 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima if topDownload == nil { return rootFS, func() { if topLayer != nil { - layer.ReleaseAndLog(ldm.layerStore, topLayer) + layer.ReleaseAndLog(ldm.layerStores[string(platform)], topLayer) } }, nil } @@ -176,7 +182,7 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima defer func() { if topLayer != nil { - 
layer.ReleaseAndLog(ldm.layerStore, topLayer) + layer.ReleaseAndLog(ldm.layerStores[string(platform)], topLayer) } }() @@ -212,11 +218,11 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima // complete before the registration step, and registers the downloaded data // on top of parentDownload's resulting layer. Otherwise, it registers the // layer on top of the ChainID given by parentLayer. -func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer) DoFunc { +func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer, platform layer.Platform) DoFunc { return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { d := &downloadTransfer{ Transfer: NewTransfer(), - layerStore: ldm.layerStore, + layerStore: ldm.layerStores[string(platform)], } go func() { @@ -335,9 +341,9 @@ func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, src = fs.Descriptor() } if ds, ok := d.layerStore.(layer.DescribableStore); ok { - d.layer, err = ds.RegisterWithDescriptor(inflatedLayerData, parentLayer, src) + d.layer, err = ds.RegisterWithDescriptor(inflatedLayerData, parentLayer, platform, src) } else { - d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer) + d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer, platform) } if err != nil { select { @@ -376,11 +382,11 @@ func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, // parentDownload. This function does not log progress output because it would // interfere with the progress reporting for sourceDownload, which has the same // Key. 
-func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer) DoFunc { +func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer, platform layer.Platform) DoFunc { return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { d := &downloadTransfer{ Transfer: NewTransfer(), - layerStore: ldm.layerStore, + layerStore: ldm.layerStores[string(platform)], } go func() { @@ -434,9 +440,9 @@ func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor Downloa src = fs.Descriptor() } if ds, ok := d.layerStore.(layer.DescribableStore); ok { - d.layer, err = ds.RegisterWithDescriptor(layerReader, parentLayer, src) + d.layer, err = ds.RegisterWithDescriptor(layerReader, parentLayer, platform, src) } else { - d.layer, err = d.layerStore.Register(layerReader, parentLayer) + d.layer, err = d.layerStore.Register(layerReader, parentLayer, platform) } if err != nil { d.err = fmt.Errorf("failed to register layer: %v", err) diff --git a/fn/vendor/github.com/docker/docker/distribution/xfer/download_test.go b/fn/vendor/github.com/docker/docker/distribution/xfer/download_test.go index 69323bb86..e5aba02e3 100644 --- a/fn/vendor/github.com/docker/docker/distribution/xfer/download_test.go +++ b/fn/vendor/github.com/docker/docker/distribution/xfer/download_test.go @@ -26,6 +26,7 @@ type mockLayer struct { diffID layer.DiffID chainID layer.ChainID parent layer.Layer + platform layer.Platform } func (ml *mockLayer) TarStream() (io.ReadCloser, error) { @@ -56,6 +57,10 @@ func (ml *mockLayer) DiffSize() (size int64, err error) { return 0, nil } +func (ml *mockLayer) Platform() layer.Platform { + return ml.platform +} + func (ml *mockLayer) Metadata() (map[string]string, error) { return make(map[string]string), nil } @@ -86,7 +91,7 @@ 
func (ls *mockLayerStore) Map() map[layer.ChainID]layer.Layer { return layers } -func (ls *mockLayerStore) Register(reader io.Reader, parentID layer.ChainID) (layer.Layer, error) { +func (ls *mockLayerStore) Register(reader io.Reader, parentID layer.ChainID, platform layer.Platform) (layer.Layer, error) { return ls.RegisterWithDescriptor(reader, parentID, distribution.Descriptor{}) } @@ -267,7 +272,9 @@ func TestSuccessfulDownload(t *testing.T) { } layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)} - ldm := NewLayerDownloadManager(layerStore, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = time.Millisecond }) + lsMap := make(map[string]layer.Store) + lsMap[runtime.GOOS] = layerStore + ldm := NewLayerDownloadManager(lsMap, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = time.Millisecond }) progressChan := make(chan progress.Progress) progressDone := make(chan struct{}) @@ -286,13 +293,13 @@ func TestSuccessfulDownload(t *testing.T) { firstDescriptor := descriptors[0].(*mockDownloadDescriptor) // Pre-register the first layer to simulate an already-existing layer - l, err := layerStore.Register(firstDescriptor.mockTarStream(), "") + l, err := layerStore.Register(firstDescriptor.mockTarStream(), "", layer.Platform(runtime.GOOS)) if err != nil { t.Fatal(err) } firstDescriptor.diffID = l.DiffID() - rootFS, releaseFunc, err := ldm.Download(context.Background(), *image.NewRootFS(), descriptors, progress.ChanOutput(progressChan)) + rootFS, releaseFunc, err := ldm.Download(context.Background(), *image.NewRootFS(), layer.Platform(runtime.GOOS), descriptors, progress.ChanOutput(progressChan)) if err != nil { t.Fatalf("download error: %v", err) } @@ -328,7 +335,10 @@ func TestSuccessfulDownload(t *testing.T) { } func TestCancelledDownload(t *testing.T) { - ldm := NewLayerDownloadManager(&mockLayerStore{make(map[layer.ChainID]*mockLayer)}, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = 
time.Millisecond }) + layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)} + lsMap := make(map[string]layer.Store) + lsMap[runtime.GOOS] = layerStore + ldm := NewLayerDownloadManager(lsMap, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = time.Millisecond }) progressChan := make(chan progress.Progress) progressDone := make(chan struct{}) @@ -347,7 +357,7 @@ func TestCancelledDownload(t *testing.T) { }() descriptors := downloadDescriptors(nil) - _, _, err := ldm.Download(ctx, *image.NewRootFS(), descriptors, progress.ChanOutput(progressChan)) + _, _, err := ldm.Download(ctx, *image.NewRootFS(), layer.Platform(runtime.GOOS), descriptors, progress.ChanOutput(progressChan)) if err != context.Canceled { t.Fatal("expected download to be cancelled") } diff --git a/fn/vendor/github.com/docker/docker/dockerversion/useragent.go b/fn/vendor/github.com/docker/docker/dockerversion/useragent.go index 53632cbc3..c02d0fda1 100644 --- a/fn/vendor/github.com/docker/docker/dockerversion/useragent.go +++ b/fn/vendor/github.com/docker/docker/dockerversion/useragent.go @@ -52,8 +52,8 @@ func escapeStr(s string, charsToEscape string) string { var ret string for _, currRune := range s { appended := false - for _, escapeableRune := range charsToEscape { - if currRune == escapeableRune { + for _, escapableRune := range charsToEscape { + if currRune == escapableRune { ret += `\` + string(currRune) appended = true break diff --git a/fn/vendor/github.com/docker/docker/docs/README.md b/fn/vendor/github.com/docker/docker/docs/README.md deleted file mode 100644 index da9309307..000000000 --- a/fn/vendor/github.com/docker/docker/docs/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# The non-reference docs have been moved! - - - -The documentation for Docker Engine has been merged into -[the general documentation repo](https://github.com/docker/docker.github.io). 
- -See the [README](https://github.com/docker/docker.github.io/blob/master/README.md) -for instructions on contributing to and building the documentation. - -If you'd like to edit the current published version of the Engine docs, -do it in the master branch here: -https://github.com/docker/docker.github.io/tree/master/engine - -If you need to document the functionality of an upcoming Engine release, -use the `vnext-engine` branch: -https://github.com/docker/docker.github.io/tree/vnext-engine/engine - -The reference docs have been left in docker/docker (this repo), which remains -the place to edit them. - -The docs in the general repo are open-source and we appreciate -your feedback and pull requests! diff --git a/fn/vendor/github.com/docker/docker/docs/api/v1.21.md b/fn/vendor/github.com/docker/docker/docs/api/v1.21.md index cb72b6811..1d42fd0ec 100644 --- a/fn/vendor/github.com/docker/docker/docs/api/v1.21.md +++ b/fn/vendor/github.com/docker/docker/docs/api/v1.21.md @@ -2739,7 +2739,9 @@ Content-Type: application/json #### Inspect network -`GET /networks/` +`GET /networks/(id or name)` + +Return low-level information on the network `id` **Example request**: @@ -2787,6 +2789,7 @@ Content-Type: application/json - **200** - no error - **404** - network not found +- **500** - server error #### Create a network @@ -2853,7 +2856,7 @@ Content-Type: application/json #### Connect a container to a network -`POST /networks/(id)/connect` +`POST /networks/(id or name)/connect` Connect a container to a network @@ -2884,7 +2887,7 @@ Content-Type: application/json #### Disconnect a container from a network -`POST /networks/(id)/disconnect` +`POST /networks/(id or name)/disconnect` Disconnect a container from a network @@ -2915,7 +2918,7 @@ Content-Type: application/json #### Remove a network -`DELETE /networks/(id)` +`DELETE /networks/(id or name)` Instruct the driver to remove the network (`id`). @@ -2930,6 +2933,7 @@ Instruct the driver to remove the network (`id`). 
**Status codes**: - **200** - no error +- **403** - operation not supported for pre-defined networks - **404** - no such network - **500** - server error diff --git a/fn/vendor/github.com/docker/docker/docs/api/v1.22.md b/fn/vendor/github.com/docker/docker/docs/api/v1.22.md index 0c5a06127..9bf64b7e9 100644 --- a/fn/vendor/github.com/docker/docker/docs/api/v1.22.md +++ b/fn/vendor/github.com/docker/docker/docs/api/v1.22.md @@ -3055,7 +3055,9 @@ Content-Type: application/json #### Inspect network -`GET /networks/` +`GET /networks/(id or name)` + +Return low-level information on the network `id` **Example request**: @@ -3108,6 +3110,7 @@ Content-Type: application/json - **200** - no error - **404** - network not found +- **500** - server error #### Create a network @@ -3183,7 +3186,7 @@ Content-Type: application/json #### Connect a container to a network -`POST /networks/(id)/connect` +`POST /networks/(id or name)/connect` Connect a container to a network @@ -3220,7 +3223,7 @@ Content-Type: application/json #### Disconnect a container from a network -`POST /networks/(id)/disconnect` +`POST /networks/(id or name)/disconnect` Disconnect a container from a network @@ -3253,7 +3256,7 @@ Content-Type: application/json #### Remove a network -`DELETE /networks/(id)` +`DELETE /networks/(id or name)` Instruct the driver to remove the network (`id`). @@ -3268,6 +3271,7 @@ Instruct the driver to remove the network (`id`). 
**Status codes**: - **200** - no error +- **403** - operation not supported for pre-defined networks - **404** - no such network - **500** - server error diff --git a/fn/vendor/github.com/docker/docker/docs/api/v1.23.md b/fn/vendor/github.com/docker/docker/docs/api/v1.23.md index a5006eb33..508a721c7 100644 --- a/fn/vendor/github.com/docker/docker/docs/api/v1.23.md +++ b/fn/vendor/github.com/docker/docker/docs/api/v1.23.md @@ -3150,7 +3150,9 @@ Content-Type: application/json #### Inspect network -`GET /networks/` +`GET /networks/(id or name)` + +Return low-level information on the network `id` **Example request**: @@ -3209,6 +3211,7 @@ Content-Type: application/json - **200** - no error - **404** - network not found +- **500** - server error #### Create a network @@ -3300,7 +3303,7 @@ Content-Type: application/json #### Connect a container to a network -`POST /networks/(id)/connect` +`POST /networks/(id or name)/connect` Connect a container to a network @@ -3337,7 +3340,7 @@ Content-Type: application/json #### Disconnect a container from a network -`POST /networks/(id)/disconnect` +`POST /networks/(id or name)/disconnect` Disconnect a container from a network @@ -3370,7 +3373,7 @@ Content-Type: application/json #### Remove a network -`DELETE /networks/(id)` +`DELETE /networks/(id or name)` Instruct the driver to remove the network (`id`). @@ -3385,6 +3388,7 @@ Instruct the driver to remove the network (`id`). 
**Status codes**: - **204** - no error +- **403** - operation not supported for pre-defined networks - **404** - no such network - **500** - server error diff --git a/fn/vendor/github.com/docker/docker/docs/api/v1.24.md b/fn/vendor/github.com/docker/docker/docs/api/v1.24.md index b7586c93e..d07ea84b3 100644 --- a/fn/vendor/github.com/docker/docker/docs/api/v1.24.md +++ b/fn/vendor/github.com/docker/docker/docs/api/v1.24.md @@ -285,7 +285,8 @@ Create a container "Test": ["CMD-SHELL", "curl localhost:3000"], "Interval": 1000000000, "Timeout": 10000000000, - "Retries": 10 + "Retries": 10, + "StartPeriod": 60000000000 }, "WorkingDir": "", "NetworkDisabled": false, @@ -397,9 +398,10 @@ Create a container + `{"NONE"}` disable healthcheck + `{"CMD", args...}` exec arguments directly + `{"CMD-SHELL", command}` run command with system's default shell - - **Interval** - The time to wait between checks in nanoseconds. It should be 0 or not less than 1000000000(1s). 0 means inherit. - - **Timeout** - The time to wait before considering the check to have hung. It should be 0 or not less than 1000000000(1s). 0 means inherit. + - **Interval** - The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. + - **Timeout** - The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit. - **Retries** - The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit. + - **StartPeriod** - The time to wait for container initialization before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. - **WorkingDir** - A string specifying the working directory for commands to run in. 
- **NetworkDisabled** - Boolean value, when true disables networking for the @@ -824,7 +826,7 @@ Get `stdout` and `stderr` logs from the container ``id`` **Query parameters**: -- **details** - 1/True/true or 0/False/flase, Show extra details provided to logs. Default `false`. +- **details** - 1/True/true or 0/False/false, Show extra details provided to logs. Default `false`. - **follow** – 1/True/true or 0/False/false, return stream. Default `false`. - **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. - **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. @@ -3214,7 +3216,9 @@ Content-Type: application/json #### Inspect network -`GET /networks/` +`GET /networks/(id or name)` + +Return low-level information on the network `id` **Example request**: @@ -3273,6 +3277,7 @@ Content-Type: application/json - **200** - no error - **404** - network not found +- **500** - server error #### Create a network @@ -3365,7 +3370,7 @@ Content-Type: application/json #### Connect a container to a network -`POST /networks/(id)/connect` +`POST /networks/(id or name)/connect` Connect a container to a network @@ -3403,7 +3408,7 @@ Content-Type: application/json #### Disconnect a container from a network -`POST /networks/(id)/disconnect` +`POST /networks/(id or name)/disconnect` Disconnect a container from a network @@ -3437,7 +3442,7 @@ Content-Type: application/json #### Remove a network -`DELETE /networks/(id)` +`DELETE /networks/(id or name)` Instruct the driver to remove the network (`id`). @@ -3452,6 +3457,7 @@ Instruct the driver to remove the network (`id`). 
**Status codes**: - **204** - no error +- **403** - operation not supported for pre-defined networks - **404** - no such network - **500** - server error diff --git a/fn/vendor/github.com/docker/docker/docs/api/version-history.md b/fn/vendor/github.com/docker/docker/docs/api/version-history.md index d5a24e3dd..4a9dddbfd 100644 --- a/fn/vendor/github.com/docker/docker/docs/api/version-history.md +++ b/fn/vendor/github.com/docker/docker/docs/api/version-history.md @@ -13,17 +13,51 @@ keywords: "API, Docker, rcli, REST, documentation" will be rejected. --> +## v1.31 API changes + +[Docker Engine API v1.31](https://docs.docker.com/engine/api/v1.31/) documentation + +* `DELETE /secrets/(name)` now returns status code 404 instead of 500 when the secret does not exist. +* `POST /secrets/create` now returns status code 409 instead of 500 when creating an already existing secret. +* `POST /secrets/(name)/update` now returns status code 400 instead of 500 when updating a secret's content which is not the labels. +* `POST /nodes/(name)/update` now returns status code 400 instead of 500 when demoting last node fails. +* `GET /networks/(id or name)` now takes an optional query parameter `scope` that will filter the network based on the scope (`local`, `swarm`, or `global`). +* `POST /session` is a new endpoint that can be used for running interactive long-running protocols between client and + the daemon. This endpoint is experimental and only available if the daemon is started with experimental features + enabled. +* `GET /images/(name)/get` now includes an `ImageMetadata` field which contains image metadata that is local to the engine and not part of the image config. 
+* `POST /swarm/init` now accepts a `DataPathAddr` property to set the IP-address or network interface to use for data traffic +* `POST /swarm/join` now accepts a `DataPathAddr` property to set the IP-address or network interface to use for data traffic +* `POST /services/create` now accepts a `PluginSpec` when `TaskTemplate.Runtime` is set to `plugin` + +## v1.30 API changes + +[Docker Engine API v1.30](https://docs.docker.com/engine/api/v1.30/) documentation + +* `GET /info` now returns the list of supported logging drivers, including plugins. +* `GET /info` and `GET /swarm` now returns the cluster-wide swarm CA info if the node is in a swarm: the cluster root CA certificate, and the cluster TLS + leaf certificate issuer's subject and public key. It also displays the desired CA signing certificate, if any was provided as part of the spec. +* `POST /build/` now (when not silent) produces an `Aux` message in the JSON output stream with payload `types.BuildResult` for each image produced. The final such message will reference the image resulting from the build. +* `GET /nodes` and `GET /nodes/{id}` now returns additional information about swarm TLS info if the node is part of a swarm: the trusted root CA, and the + issuer's subject and public key. +* `GET /distribution/(name)/json` is a new endpoint that returns a JSON output stream with payload `types.DistributionInspect` for an image name. It includes a descriptor with the digest, and supported platforms retrieved from directly contacting the registry. +* `POST /swarm/update` now accepts 3 additional parameters as part of the swarm spec's CA configuration; the desired CA certificate for + the swarm, the desired CA key for the swarm (if not using an external certificate), and an optional parameter to force swarm to + generate and rotate to a new CA certificate/key pair. 
+* `POST /service/create` and `POST /services/(id or name)/update` now take the field `Platforms` as part of the service `Placement`, allowing to specify platforms supported by the service. +* `POST /containers/(name)/wait` now accepts a `condition` query parameter to indicate which state change condition to wait for. Also, response headers are now returned immediately to acknowledge that the server has registered a wait callback for the client. + ## v1.29 API changes [Docker Engine API v1.29](https://docs.docker.com/engine/api/v1.29/) documentation - * `DELETE /networks/(name)` now allows to remove the ingress network, the one used to provide the routing-mesh. * `POST /networks/create` now supports creating the ingress network, by specifying an `Ingress` boolean field. As of now this is supported only when using the overlay network driver. * `GET /networks/(name)` now returns an `Ingress` field showing whether the network is the ingress one. * `GET /networks/` now supports a `scope` filter to filter networks based on the network mode (`swarm`, `global`, or `local`). * `POST /containers/create`, `POST /service/create` and `POST /services/(id or name)/update` now takes the field `StartPeriod` as a part of the `HealthConfig` allowing for specification of a period during which the container should not be considered unhealthy even if health checks do not pass. -* `GET /services/(id)` now accepts an `insertDefaults` query-parameter to merge default values into the service inspect output. +* `GET /services/(id)` now accepts an `insertDefaults` query-parameter to merge default values into the service inspect output. +* `POST /containers/prune`, `POST /images/prune`, `POST /volumes/prune`, and `POST /networks/prune` now support a `label` filter to filter containers, images, volumes, or networks based on the label. 
The format of the label filter could be `label=`/`label==` to remove those with the specified labels, or `label!=`/`label!==` to remove those without the specified labels. ## v1.28 API changes diff --git a/fn/vendor/github.com/docker/docker/docs/deprecated.md b/fn/vendor/github.com/docker/docker/docs/deprecated.md deleted file mode 100644 index 7e0bfc0a6..000000000 --- a/fn/vendor/github.com/docker/docker/docs/deprecated.md +++ /dev/null @@ -1,321 +0,0 @@ ---- -aliases: ["/engine/misc/deprecated/"] -title: "Deprecated Engine Features" -description: "Deprecated Features." -keywords: "docker, documentation, about, technology, deprecate" ---- - - - -# Deprecated Engine Features - -The following list of features are deprecated in Engine. -To learn more about Docker Engine's deprecation policy, -see [Feature Deprecation Policy](https://docs.docker.com/engine/#feature-deprecation-policy). - -### Asynchronous `service create` and `service update` - -**Deprecated In Release: v17.05.0** - -**Disabled by default in release: v17.09** - -Docker 17.05.0 added an optional `--detach=false` option to make the -`docker service create` and `docker service update` work synchronously. This -option will be enable by default in Docker 17.09, at which point the `--detach` -flag can be used to use the previous (asynchronous) behavior. - -### `-g` and `--graph` flags on `dockerd` - -**Deprecated In Release: v17.05.0** - -The `-g` or `--graph` flag for the `dockerd` or `docker daemon` command was -used to indicate the directory in which to store persistent data and resource -configuration and has been replaced with the more descriptive `--data-root` -flag. - -These flags were added before Docker 1.0, so will not be _removed_, only -_hidden_, to discourage their use. 
- -### Top-level network properties in NetworkSettings - -**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** - -**Target For Removal In Release: v17.12** - -When inspecting a container, `NetworkSettings` contains top-level information -about the default ("bridge") network; - -`EndpointID`, `Gateway`, `GlobalIPv6Address`, `GlobalIPv6PrefixLen`, `IPAddress`, -`IPPrefixLen`, `IPv6Gateway`, and `MacAddress`. - -These properties are deprecated in favor of per-network properties in -`NetworkSettings.Networks`. These properties were already "deprecated" in -docker 1.9, but kept around for backward compatibility. - -Refer to [#17538](https://github.com/docker/docker/pull/17538) for further -information. - -### `filter` param for `/images/json` endpoint -**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** - -**Target For Removal In Release: v17.12** - -The `filter` param to filter the list of image by reference (name or name:tag) is now implemented as a regular filter, named `reference`. - -### `repository:shortid` image references -**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** - -**Target For Removal In Release: v17.12** - -`repository:shortid` syntax for referencing images is very little used, collides with tag references can be confused with digest references. - -### `docker daemon` subcommand -**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** - -**Target For Removal In Release: v17.12** - -The daemon is moved to a separate binary (`dockerd`), and should be used instead. - -### Duplicate keys with conflicting values in engine labels -**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** - -**Target For Removal In Release: v17.12** - -Duplicate keys with conflicting values have been deprecated. 
A warning is displayed -in the output, and an error will be returned in the future. - -### `MAINTAINER` in Dockerfile -**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** - -`MAINTAINER` was an early very limited form of `LABEL` which should be used instead. - -### API calls without a version -**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** - -**Target For Removal In Release: v17.12** - -API versions should be supplied to all API calls to ensure compatibility with -future Engine versions. Instead of just requesting, for example, the URL -`/containers/json`, you must now request `/v1.25/containers/json`. - -### Backing filesystem without `d_type` support for overlay/overlay2 -**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** - -**Target For Removal In Release: v17.12** - -The overlay and overlay2 storage driver does not work as expected if the backing -filesystem does not support `d_type`. For example, XFS does not support `d_type` -if it is formatted with the `ftype=0` option. - -Please also refer to [#27358](https://github.com/docker/docker/issues/27358) for -further information. - -### Three arguments form in `docker import` -**Deprecated In Release: [v0.6.7](https://github.com/docker/docker/releases/tag/v0.6.7)** - -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** - -The `docker import` command format `file|URL|- [REPOSITORY [TAG]]` is deprecated since November 2013. It's no more supported. - -### `-h` shorthand for `--help` - -**Deprecated In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** - -**Target For Removal In Release: v17.09** - -The shorthand (`-h`) is less common than `--help` on Linux and cannot be used -on all subcommands (due to it conflicting with, e.g. `-h` / `--hostname` on -`docker create`). 
For this reason, the `-h` shorthand was not printed in the -"usage" output of subcommands, nor documented, and is now marked "deprecated". - -### `-e` and `--email` flags on `docker login` -**Deprecated In Release: [v1.11.0](https://github.com/docker/docker/releases/tag/v1.11.0)** - -**Target For Removal In Release: v17.06** - -The docker login command is removing the ability to automatically register for an account with the target registry if the given username doesn't exist. Due to this change, the email flag is no longer required, and will be deprecated. - -### Separator (`:`) of `--security-opt` flag on `docker run` -**Deprecated In Release: [v1.11.0](https://github.com/docker/docker/releases/tag/v1.11.0)** - -**Target For Removal In Release: v17.06** - -The flag `--security-opt` doesn't use the colon separator(`:`) anymore to divide keys and values, it uses the equal symbol(`=`) for consistency with other similar flags, like `--storage-opt`. - -### `/containers/(id or name)/copy` endpoint - -**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** - -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** - -The endpoint `/containers/(id or name)/copy` is deprecated in favor of `/containers/(id or name)/archive`. - -### Ambiguous event fields in API -**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** - -The fields `ID`, `Status` and `From` in the events API have been deprecated in favor of a more rich structure. -See the events API documentation for the new format. - -### `-f` flag on `docker tag` -**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** - -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** - -To make tagging consistent across the various `docker` commands, the `-f` flag on the `docker tag` command is deprecated. 
It is not longer necessary to specify `-f` to move a tag from one image to another. Nor will `docker` generate an error if the `-f` flag is missing and the specified tag is already in use. - -### HostConfig at API container start -**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** - -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** - -Passing an `HostConfig` to `POST /containers/{name}/start` is deprecated in favor of -defining it at container creation (`POST /containers/create`). - -### `--before` and `--since` flags on `docker ps` - -**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** - -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** - -The `docker ps --before` and `docker ps --since` options are deprecated. -Use `docker ps --filter=before=...` and `docker ps --filter=since=...` instead. - -### `--automated` and `--stars` flags on `docker search` - -**Deprecated in Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** - -**Target For Removal In Release: v17.09** - -The `docker search --automated` and `docker search --stars` options are deprecated. -Use `docker search --filter=is-automated=...` and `docker search --filter=stars=...` instead. - -### Driver Specific Log Tags -**Deprecated In Release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)** - -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** - -Log tags are now generated in a standard way across different logging drivers. -Because of which, the driver specific log tag options `syslog-tag`, `gelf-tag` and -`fluentd-tag` have been deprecated in favor of the generic `tag` option. 
- -```bash -{% raw %} -docker --log-driver=syslog --log-opt tag="{{.ImageName}}/{{.Name}}/{{.ID}}" -{% endraw %} -``` - -### LXC built-in exec driver -**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** - -**Removed In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** - -The built-in LXC execution driver, the lxc-conf flag, and API fields have been removed. - -### Old Command Line Options -**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** - -**Removed In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** - -The flags `-d` and `--daemon` are deprecated in favor of the `daemon` subcommand: - - docker daemon -H ... - -The following single-dash (`-opt`) variant of certain command line options -are deprecated and replaced with double-dash options (`--opt`): - - docker attach -nostdin - docker attach -sig-proxy - docker build -no-cache - docker build -rm - docker commit -author - docker commit -run - docker events -since - docker history -notrunc - docker images -notrunc - docker inspect -format - docker ps -beforeId - docker ps -notrunc - docker ps -sinceId - docker rm -link - docker run -cidfile - docker run -dns - docker run -entrypoint - docker run -expose - docker run -link - docker run -lxc-conf - docker run -n - docker run -privileged - docker run -volumes-from - docker search -notrunc - docker search -stars - docker search -t - docker search -trusted - docker tag -force - -The following double-dash options are deprecated and have no replacement: - - docker run --cpuset - docker run --networking - docker ps --since-id - docker ps --before-id - docker search --trusted - -**Deprecated In Release: [v1.5.0](https://github.com/docker/docker/releases/tag/v1.5.0)** - -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** - -The single-dash (`-help`) was removed, in favor of the double-dash `--help` - - docker 
-help - docker [COMMAND] -help - -### `--run` flag on docker commit - -**Deprecated In Release: [v0.10.0](https://github.com/docker/docker/releases/tag/v0.10.0)** - -**Removed In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** - -The flag `--run` of the docker commit (and its short version `-run`) were deprecated in favor -of the `--changes` flag that allows to pass `Dockerfile` commands. - - -### Interacting with V1 registries - -**Disabled By Default In Release: v17.06** - -**Target For Removal In Release: v17.12** - -Version 1.9 adds a flag (`--disable-legacy-registry=false`) which prevents the -docker daemon from `pull`, `push`, and `login` operations against v1 -registries. Though enabled by default, this signals the intent to deprecate -the v1 protocol. - -Support for the v1 protocol to the public registry was removed in 1.13. Any -mirror configurations using v1 should be updated to use a -[v2 registry mirror](https://docs.docker.com/registry/recipes/mirror/). - -### Docker Content Trust ENV passphrase variables name change -**Deprecated In Release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)** - -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** - -Since 1.9, Docker Content Trust Offline key has been renamed to Root key and the Tagging key has been renamed to Repository key. Due to this renaming, we're also changing the corresponding environment variables - -- DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE is now named DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE -- DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE is now named DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE - -### `--api-enable-cors` flag on dockerd - -**Deprecated In Release: [v1.6.0](https://github.com/docker/docker/releases/tag/v1.6.0)** - -**Target For Removal In Release: v17.09** - -The flag `--api-enable-cors` is deprecated since v1.6.0. Use the flag -`--api-cors-header` instead. 
diff --git a/fn/vendor/github.com/docker/docker/docs/extend/EBS_volume.md b/fn/vendor/github.com/docker/docker/docs/extend/EBS_volume.md deleted file mode 100644 index 8c64efa16..000000000 --- a/fn/vendor/github.com/docker/docker/docs/extend/EBS_volume.md +++ /dev/null @@ -1,164 +0,0 @@ ---- -description: Volume plugin for Amazon EBS -keywords: "API, Usage, plugins, documentation, developer, amazon, ebs, rexray, volume" -title: Volume plugin for Amazon EBS ---- - - - -# A proof-of-concept Rexray plugin - -In this example, a simple Rexray plugin will be created for the purposes of using -it on an Amazon EC2 instance with EBS. It is not meant to be a complete Rexray plugin. - -The example source is available at [https://github.com/tiborvass/rexray-plugin](https://github.com/tiborvass/rexray-plugin). - -To learn more about Rexray: [https://github.com/codedellemc/rexray](https://github.com/codedellemc/rexray) - -## 1. Make a Docker image - -The following is the Dockerfile used to containerize rexray. - -```Dockerfile -FROM debian:jessie -RUN apt-get update && apt-get install -y --no-install-recommends wget ca-certificates -RUN wget https://dl.bintray.com/emccode/rexray/stable/0.6.4/rexray-Linux-x86_64-0.6.4.tar.gz -O rexray.tar.gz && tar -xvzf rexray.tar.gz -C /usr/bin && rm rexray.tar.gz -RUN mkdir -p /run/docker/plugins /var/lib/libstorage/volumes -ENTRYPOINT ["rexray"] -CMD ["--help"] -``` - -To build it you can run `image=$(cat Dockerfile | docker build -q -)` and `$image` -will reference the containerized rexray image. - -## 2. Extract rootfs - -```sh -$ TMPDIR=/tmp/rexray # for the purpose of this example -$ # create container without running it, to extract the rootfs from image -$ docker create --name rexray "$image" -$ # save the rootfs to a tar archive -$ docker export -o $TMPDIR/rexray.tar rexray -$ # extract rootfs from tar archive to a rootfs folder -$ ( mkdir -p $TMPDIR/rootfs; cd $TMPDIR/rootfs; tar xf ../rexray.tar ) -``` - -## 3. 
Add plugin configuration - -We have to put the following JSON to `$TMPDIR/config.json`: - -```json -{ - "Args": { - "Description": "", - "Name": "", - "Settable": null, - "Value": null - }, - "Description": "A proof-of-concept EBS plugin (using rexray) for Docker", - "Documentation": "https://github.com/tiborvass/rexray-plugin", - "Entrypoint": [ - "/usr/bin/rexray", "service", "start", "-f" - ], - "Env": [ - { - "Description": "", - "Name": "REXRAY_SERVICE", - "Settable": [ - "value" - ], - "Value": "ebs" - }, - { - "Description": "", - "Name": "EBS_ACCESSKEY", - "Settable": [ - "value" - ], - "Value": "" - }, - { - "Description": "", - "Name": "EBS_SECRETKEY", - "Settable": [ - "value" - ], - "Value": "" - } - ], - "Interface": { - "Socket": "rexray.sock", - "Types": [ - "docker.volumedriver/1.0" - ] - }, - "Linux": { - "AllowAllDevices": true, - "Capabilities": ["CAP_SYS_ADMIN"], - "Devices": null - }, - "Mounts": [ - { - "Source": "/dev", - "Destination": "/dev", - "Type": "bind", - "Options": ["rbind"] - } - ], - "Network": { - "Type": "host" - }, - "PropagatedMount": "/var/lib/libstorage/volumes", - "User": {}, - "WorkDir": "" -} -``` - -Please note a couple of points: -- `PropagatedMount` is needed so that the docker daemon can see mounts done by the -rexray plugin from within the container, otherwise the docker daemon is not able -to mount a docker volume. -- The rexray plugin needs dynamic access to host devices. For that reason, we -have to give it access to all devices under `/dev` and set `AllowAllDevices` to -true for proper access. -- The user of this simple plugin can change only 3 settings: `REXRAY_SERVICE`, -`EBS_ACCESSKEY` and `EBS_SECRETKEY`. This is because of the reduced scope of this -plugin. Ideally other rexray parameters could also be set. - -## 4. Create plugin - -`docker plugin create tiborvass/rexray-plugin "$TMPDIR"` will create the plugin. 
- -```sh -$ docker plugin ls -ID NAME DESCRIPTION ENABLED -2475a4bd0ca5 tiborvass/rexray-plugin:latest A rexray volume plugin for Docker false -``` - -## 5. Test plugin - -```sh -$ docker plugin set tiborvass/rexray-plugin EBS_ACCESSKEY=$AWS_ACCESSKEY EBS_SECRETKEY=$AWS_SECRETKEY` -$ docker plugin enable tiborvass/rexray-plugin -$ docker volume create -d tiborvass/rexray-plugin my-ebs-volume -$ docker volume ls -DRIVER VOLUME NAME -tiborvass/rexray-plugin:latest my-ebs-volume -$ docker run --rm -v my-ebs-volume:/volume busybox sh -c 'echo bye > /volume/hi' -$ docker run --rm -v my-ebs-volume:/volume busybox cat /volume/hi -bye -``` - -## 6. Push plugin - -First, ensure you are logged in with `docker login`. Then you can run: -`docker plugin push tiborvass/rexray-plugin` to push it like a regular docker -image to a registry, to make it available for others to install via -`docker plugin install tiborvass/rexray-plugin EBS_ACCESSKEY=$AWS_ACCESSKEY EBS_SECRETKEY=$AWS_SECRETKEY`. diff --git a/fn/vendor/github.com/docker/docker/docs/extend/config.md b/fn/vendor/github.com/docker/docker/docs/extend/config.md deleted file mode 100644 index 4feb6bf31..000000000 --- a/fn/vendor/github.com/docker/docker/docs/extend/config.md +++ /dev/null @@ -1,236 +0,0 @@ ---- -title: "Plugin config" -description: "How develop and use a plugin with the managed plugin system" -keywords: "API, Usage, plugins, documentation, developer" ---- - - - - -# Plugin Config Version 1 of Plugin V2 - -This document outlines the format of the V0 plugin configuration. The plugin -config described herein was introduced in the Docker daemon in the [v1.12.0 -release](https://github.com/docker/docker/commit/f37117045c5398fd3dca8016ea8ca0cb47e7312b). - -Plugin configs describe the various constituents of a docker plugin. 
Plugin -configs can be serialized to JSON format with the following media types: - -Config Type | Media Type -------------- | ------------- -config | "application/vnd.docker.plugin.v1+json" - - -## *Config* Field Descriptions - -Config provides the base accessible fields for working with V0 plugin format - in the registry. - -- **`description`** *string* - - description of the plugin - -- **`documentation`** *string* - - link to the documentation about the plugin - -- **`interface`** *PluginInterface* - - interface implemented by the plugins, struct consisting of the following fields - - - **`types`** *string array* - - types indicate what interface(s) the plugin currently implements. - - currently supported: - - - **docker.volumedriver/1.0** - - - **docker.networkdriver/1.0** - - - **docker.ipamdriver/1.0** - - - **docker.authz/1.0** - - - **docker.logdriver/1.0** - - - **`socket`** *string* - - socket is the name of the socket the engine should use to communicate with the plugins. - the socket will be created in `/run/docker/plugins`. - - -- **`entrypoint`** *string array* - - entrypoint of the plugin, see [`ENTRYPOINT`](../reference/builder.md#entrypoint) - -- **`workdir`** *string* - - workdir of the plugin, see [`WORKDIR`](../reference/builder.md#workdir) - -- **`network`** *PluginNetwork* - - network of the plugin, struct consisting of the following fields - - - **`type`** *string* - - network type. - - currently supported: - - - **bridge** - - **host** - - **none** - -- **`mounts`** *PluginMount array* - - mount of the plugin, struct consisting of the following fields, see [`MOUNTS`](https://github.com/opencontainers/runtime-spec/blob/master/config.md#mounts) - - - **`name`** *string* - - name of the mount. - - - **`description`** *string* - - description of the mount. - - - **`source`** *string* - - source of the mount. - - - **`destination`** *string* - - destination of the mount. - - - **`type`** *string* - - mount type. 
- - - **`options`** *string array* - - options of the mount. - -- **`ipchost`** *boolean* - Access to host ipc namespace. -- **`pidhost`** *boolean* - Access to host pid namespace. - -- **`propagatedMount`** *string* - - path to be mounted as rshared, so that mounts under that path are visible to docker. This is useful for volume plugins. - This path will be bind-mounted outisde of the plugin rootfs so it's contents - are preserved on upgrade. - -- **`env`** *PluginEnv array* - - env of the plugin, struct consisting of the following fields - - - **`name`** *string* - - name of the env. - - - **`description`** *string* - - description of the env. - - - **`value`** *string* - - value of the env. - -- **`args`** *PluginArgs* - - args of the plugin, struct consisting of the following fields - - - **`name`** *string* - - name of the args. - - - **`description`** *string* - - description of the args. - - - **`value`** *string array* - - values of the args. - -- **`linux`** *PluginLinux* - - - **`capabilities`** *string array* - - capabilities of the plugin (*Linux only*), see list [`here`](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md#security) - - - **`allowAllDevices`** *boolean* - - If `/dev` is bind mounted from the host, and allowAllDevices is set to true, the plugin will have `rwm` access to all devices on the host. - - - **`devices`** *PluginDevice array* - - device of the plugin, (*Linux only*), struct consisting of the following fields, see [`DEVICES`](https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#devices) - - - **`name`** *string* - - name of the device. - - - **`description`** *string* - - description of the device. - - - **`path`** *string* - - path of the device. 
- -## Example Config - -*Example showing the 'tiborvass/sample-volume-plugin' plugin config.* - -```json -{ - "Args": { - "Description": "", - "Name": "", - "Settable": null, - "Value": null - }, - "Description": "A sample volume plugin for Docker", - "Documentation": "https://docs.docker.com/engine/extend/plugins/", - "Entrypoint": [ - "/usr/bin/sample-volume-plugin", - "/data" - ], - "Env": [ - { - "Description": "", - "Name": "DEBUG", - "Settable": [ - "value" - ], - "Value": "0" - } - ], - "Interface": { - "Socket": "plugin.sock", - "Types": [ - "docker.volumedriver/1.0" - ] - }, - "Linux": { - "Capabilities": null, - "AllowAllDevices": false, - "Devices": null - }, - "Mounts": null, - "Network": { - "Type": "" - }, - "PropagatedMount": "/data", - "User": {}, - "Workdir": "" -} -``` diff --git a/fn/vendor/github.com/docker/docker/docs/extend/images/authz_additional_info.png b/fn/vendor/github.com/docker/docker/docs/extend/images/authz_additional_info.png deleted file mode 100644 index 1a6a6d01d2048fcb975b7d2b025cdfabd4c4f5f3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 45916 zcmd43bx@UE)He)x5RM?JfJle5lt_rQlp>N6($cM@(x9NyQUU^sbT>#NA)(SC4bm-w zNPg=w?&o>uo%!bb^P4&MJ;U}~wXeO`TEAH5*`3>RXU|Zc!N9;cs~|6?8mEj#}{hMznYINBu?&oe6G$A5M8Mr ztp2%?yOZk~NktGSO(E}t`um}Kj_Q>uvLRE#Vv_j#d5(dNzJ`Sk$N2q`=ttiPm#IZt z{P`34KQ{W{zd!!}_0JMA*zHq|{>*G_?Q1M*>9Dh#VM&LRcFg&rSx zqV8j@3jHsWhM6dpm8o)fUw9v_7<@n2sL#1iD7KdpOnpr=U%!@JyZC-ld^Fc=n`(Vf zM)_vzcntLu*&9zJ`cVqiE^F_shw^Vnd;OY=O}q7q&(Hq!q}M^-d*1oauSeN#GtfA# zk7;ij$t%$jUG;sZpjTY`IUxJC_u*EcN>L!KmzLE~zAaVT!R<_nr_a?GDsE&_@Fi0) zP6ZiKT(Jn3mgzU2sByQuS+|h%I9TZ5=Z4@6OjFa=YRMOo=jYgFK8Dl#&*oRJKRBG@ z_I8}AvEDoSIXO@!_2i5&pJ~T8x6MWzY6I`1vQ&e5FZPXG?Gj3b_pTduQ9oglMuoy& zCkj8zwnkoMpS!nhmFGdZ$xQRJ(B*-gYj@1vJ1tQaX2DAceSOW9(*3ISU{|5!cH?dv zt+%?PzS}hR?0d05y!T=k_|2RWuepBSI^6zp?fH3*`3A?ij%ezK^Ig0h-Pc%@lbn96 zj`T%{dwa>naB5Q#iXRr5w1rX=e8P0zWwDv48NsZ6LB_Q#pDb3(sGKAk`rdg(gp5-w 
zoc&h>qa4-vd*_GWI%B^?2tPE_xU{U2DtlIg`{84{qUgQ#VN{3O#X6@YRZ|m~C5;oz z&gl<<*Df2><)&uZ(j6Yn&FAaieo{^4_T0Wj_Aoi_AbG-jDJ?cpaXVJIGlomUYtLZ3 z(8xd4X{mSFTTob7doH8%1DPk$TFmvwE<2JshbOe6Zrwd;H_7GJ$Ev7d(LNOY{*f`% z`*=I~_U=>zZNAGodH(rc8>fqld3JaPHqKth<O{E%xZw@1v&Si_61$gmMUD=>Y9uD`AKSVZ);YO=~aOVtu2P{+n;3-nzPj= zq^!!r-LRq5JxbRW?`9|`52jQ4bsHROR#xT2suU(rD{t+Dw9#JN`J^&**M3L3o1ONk zTgD);yZhJ9VvqEIL@bVD@X41emc5x_p?8b#E6Dj`VXKRb5&m!w+Q%cmOmXgT&T7cZ zNWHl&&e>YFZ^qct{jSbU-k)lV* zTNPn%C^&YXL{>-d4t5*X^Y{~RrhF+$*>*De@*=JBezPuz6q>?|e%P3k1dY-sr|Py^FNW@{3|+gd_x@e} z!J}b1>N=H074qPY#Tp0x?nch=&9Fk3H65ID0(Di(d&jsWN6G;>XZP$z%RlkjjR`Rl z2ESk2{L!a;alv{ZdrqswEQGy8JxjIK{o!M+kd08K6j68qcEbTZ5%ouF_MvQQ=|Wb6 zxt59cJ#U}n$J5In&e=ansD5+-yLx2noWZ2qVUgodzFz3hN$<(b6*cASb9>)AxzE0_ ziw^U$cx$tX>V&N)a=aDhok?jyQ#tWYfs{G!OQP^><-vOO*^6h$FRu>QKCYN;4Y^2g zNK1cN3^p(y>!OFpj$HLFUMfR6wSn#Qi@bsIUVMLBJhk(|dIn>kJkO<;zhqI?Wc*4& zJhWzeT)+L3#)e|btZms-c6)1}?M~GB1E;#i2Nho@%;t|puZ_{+cV=axNydA|Y*z+y z78*j()BRa$trh76)FMKX-Gq*~q(>oWhX|JDhKbt8A$#>ly9x1pjRM=Jx;t>6o!2Fx z5;8pGf7565aesejr6Xyh{$v8P(U@SiElhC6`+Gaf{7nLHnyW<4B4bq#km9;|J0cNy}-r55^xow;zzj zB^tk6P=EYhjaTzm9BuqY?aw4NeIcI7;7>IWvyRQ6T#jTPRI$fAio%&1dy5V6QJO8T~s7rs(4oNw&7KK${cDNFc4aJ^?&w&$A} zyRzC(u>4)C=$OU%YA)UgN&aZl6f&wI>PDEp@kOIUS;FE6x^? 
z-S~TE@uMFv@6~x|6VQleex4pjn|8)%9PFvRce0GGcVzIP<|v9pD1^HXtjq1S1ybu8n#%bC+FPfSngyMl&03W^N?PvBi+_( z5*&97vHIivoQ^cO&)c5N(ogZfckzFrU^Z09wcc!@-GaCu@0uBURMexU-S{0-&HjyQ znqsTz_G8Pgs|z0^J_My62;2^MXq&M>p(Y1UV|1#mYa@@C+>{(TpmwMCq z*VVL^o*2&2thSTo{AB(%Yfp>US|?G3GddBmn40vpX~&~qbEGRlP`R-8G&d&9L4wXY zo=TDXwwWbtI-?AIzaUaK;g=cYSEh}ucNJZKPN24>hglm+{pZ@|Q)qMd6VK~9Y&Txh zxFdM9@ z&)XYnrlw-9@T%2^Jh$YqysOeWL}geGVU|=Hzw^FO8RDbcbMkHx822u;FTZf2CbH-i zGVAc*Z25%Qfl*F=#)y|*hC~HSRFUv-Ic_O|2z6cQEC2bl`Y4?$oP=%O4V(u^+{Lys z;2HC`w0C$WLo2ZTvz&H-n55=)nWX<|D|IH5*5HoYH9rlz;&`8a8YwXs+}%pQd&pRQ zAbPwv8_92ed#6>LN$BBxaK!BmNLw-+l)BqW1cYkTc9V5^6ZI)!MPccZ+oEC{LKW79oZM>U8E(-R)DA}A5 zIV7#!Y{DysP}?Z|9s(#k-jRgl<3)$#L-$&18|7p%+f|b`E02t3Nt!rp&N|7e(+?*d zS@J!$Dw;l(t=^$NDDKe}Zv5V!C|uIET04?<5Sh_DJx^&>h1L zIK8w?y%dESx5y$JrRU*tPJv>**U>kBLa{q)bVNdrhM~;VGxiEHj57X`bX_E?XaEb7 zrHgQm*88w0j=eV`Z`iN+((Oc6H?Hq3FUYaWy|!A&t-ikVI@)KyDf({b!7`rYaalX3Equj=e#2 zyz3Re>5s*p@5culoxvCQQO2n0c9vvy-EUjNCSmPwZ@jp_|6^%6dUe^alQ!6R(TBXG zanhva`S}hxspKJD2x4WU6xZWK-HX4AOVmNFs_xuP=!CCq6=6sw3z6|nFs-=$?#5)n z3xaksMm}7_zDh?ENnuEQC0A_kC$+UDFztMr8+V;Ne|^S?j3%yFssUFqS6^ zTLv&bd8wSDQPeSDcf`>B=s?lDqWnxib+rg=vg(@!U$m>fXf2t%DOMg`+8DH3TPqmP zntAd`Z&0bVZbBMM*{tT{t;fA4ujrZ}y=3cuOZ$>gq1EE6!69<5_R+waj!O*==1ts) z!J3Zw;UW_P&N;q?XP%rlTkv`c?Y(&8&+DEhq)B8qcqpuem3wO0uEjwPC_vi)r*fh42NZj=S&#j7%PinPJBl)_@ zTY1AlqP2Oeg&l1*Bq(kUr?ZcqVCsLqPoOoP`YPkyJBrxCbNb0`FP4dNAC&3V^RO?N zwdv~a8C#d-N%fG~Wg8}bZyg_#+o^u#B<1>(m*c15>#&u<4$oFA+7e@f%t~Ebd;W(8 zKQG^PnHDYogo}WRypKNR|HTv+B{qp?mqo3<}UUF>%+W3}v!+a0E z$^ZKHQn}wkCW7Hcl=MX^MWZhZ$=>-k{ktgy_O&}Pl)S$7vQN&OZ_e#E=-heL(}b=HqOsc@BV32x_V&wbFhQ`mPk z(E}?lLnGg1h1_>=^sY#CnFih1^?@EJAk=68m1rJlv9504ugKVTv9?!s*%wJhmCbUf z*4%5`c+lXD^&xOxgJnxq++b%9iqdGhhC<`^NapCmKFdNBxfhdwRXe?IUm8Lm{%u_bi0U2D8)xEB_HOGbkY`631^n91a87 z8ok*X92KA66`tuMj-!0&X{7yw)2#guL-S0kK~s$B8P;{3oo2j+!2$zrH*-7>olOS* zy&S{+(_(^?-Y3^vC=cmOpwG3 zQ3{p&Z33Z!>!3O7P!{k_2L4E6kJ%SS$X$XgRZlr15C_8>7F(Q@9Wu zL6VWDt@ih#gq0_iVr? 
zg7^pCZpg5WWZ(yS^2_&v(6b{$#&2KQSl{Q|aUPYzFEaW;kLJB&Cijm}GkgtG5kR^Sh!qLnC{QPZ2t{N-GUd=Eb)r^9}DN>495(<;>G?qAQ1$#TmS`Y z3FF^aMpAg&$FEt0KmGR$M5Yjz7xPRc(J51x|Cqf&I7Z97_1u30^p_+gNsLpvLjTc$ z4I-FlNLISy{QeElHN#tx!jBsH`=T+>S}gG1USClAyUsDt0deqVgM9g}{e5qAm@6n7 zHu<`fZ*Et8!H3A_dzTLp0 zXX!4rR2x{JdAwQ_KDV}>)B~GGkCkLYsn~!tMUxzU zID~`?)S3%Iz0%L7>-U<3~m&gnk-Nb>Afk*2&C{j z+PTZDD!#wSs9kCifmD7m-|yci*_6RBuz+1$P)Fo>{A=OCw~xfBC+>=~D;})t7l5`* z9(p`EWZ=yuXg3xPsb1uj$?-|!lhF$MPADYjXixS6_g6xXYYNGFy{GlT+s(Xa^FYHlhMcRn}pjL;ztUac{38b`5xJzDXF`2!QRwD2UHjU!02xKa=l!|beOYEu z;l(P8AE_%%-bxl5pA}aIj`ZD~H&T8^o=^8K`KS~(`r~xMZvEB~&9R(LIVoVa#&E+B zYX8V-AWmG{w5TevdW&vSNzX3oh;G`6^R08&FJM4T2==C%0EKXbOXxBE?NQonauV^O$xeayQs;++Y`Fi{4g(R_qA@UzcjR*bqcc_)IUZh+>ROBY*q zy=N~QF`*xWk5`HH&}Ga1%!sKIT3d&y2FwaV8|7m%irNpJn3lwcRpy4MG}n!Z?2QJw zq|i&y7iLz4uNk zKYMLPiqX9%K#+Fie=W0Kv6y*OU}hlL&9oY7i+biY^@00?t`8|=^9`sIYHsl7Pq$)7 zV$%C39OH4He-b=(V27l|5_2yK4>Q%hlsT9CZ8Wu(6LCXhyTEffbo{e+HbiZ;c6Kn!ckPb`v6 z?b0hD$G1>JOY{5)5mT-A@)UO4Oo@LaWAI^9$U}GYta_kyAb|qRxas$A?W)=8Y)hjR z1wejDrYj{}0tTsm>Zu*U@ehTE9_u)A7^#f7)Yn|pbwyW+d0L1pHW!jGRjMD%Df;qr zc`S6_-Qqdf9ksts_QI0+)?MTG*nXPcD&ON8K0O()cFifd@q*0x;lNk6A9{!O9*0|P zR#6vsTGiPT9rZTK)*qM|xXLm6eip{UXP|h8tM@TkT>Pmh6@-JrsHM~}vFu*G+D95` z)}!TlB4S>Ll_Klo)fFOM5CC3(*DEr9A~Kn!#xm;x#P2{=M$)4Wp}^P37J9t_kK^bC z1oHA2;OLNtZ@!`hM(>QwDT^`6g*xHWhz=)bdbI${9XuoIb?8>G_4vo)o0+_ORjMu4 zW0ggWKp1WT0uW9EO19{^x!vrY9}x(%KQ~%vB;5wwc`#qH_p!UP7jOivRr8+`avmal zk0Ze~&}CE>Xdf-o&(=hXSu&k5X6aue4`35}`Sin}ZiSujne*)5J||*1!Y8am(GCtP6p(s z>}^8PO&_r}huXW=1oJ0KGA^zs0(@v=7OEo_+=p@f(+$#3IQw$2?cv!axE;8SY$?PH876@*NJN5K7F;DadLuIT-8jTRVqJ85cKL~!M6tEJy z5Zpt8y7h_<^;M3C-IxFoBSf0r@ds8MfI-UC3y!BV)bfDUw>-+2Zqqdl!CQmz`b9xI zbv4wy_3BGA%IQ&{zchyp&z)s7#NE_DD$8Y#M?aXT$PAvtrG!11!xxR^i>5q1;?2*`qS_B0Z52 ziQ?2&-N??XQ<$1=frpO0toOhw2XbWg?8Al6?Xc|LH8V}z^-G(@rC655)u8n)z{E-{ z_=L%D5tGB0Xet5Ygyq&+C7#1i_ zE%Z190HY^tiuWess_j>GgE*q4&FK$M;~HWw4;@cO^?29cdR2dC`h`KA$1l_g3sGy_ zXZ)*%a~F>by1W=flsQ$qPw@5=GxDtUhKH_4Jg?s_CuVZU^`nEF8fKVRo8e>6O`+77 
zSY3cIiGkYlsR1UNCRKVQ!9T;BN;$N2zH}^%0JHYn-N~-0jIG|A+*z)Ji*qBJ^u2^0 z<1xOw#L90+hH#T<8n;T%NFB&F(za3!vwXVJR6?jy?eXhDwD6(O7}x68u?(Y1*U6=Mo+~~*b7w_W|O1lEpwC6ek*li+0V@pI!Vptc6@7?XQcw8#f8bmLBA8oi# z#+bc#l`z9($cOu;gKDcf9hD&Qj~|INj(wK*u+zk7pL0!wUYJi+SkIA+K7TWikoNLM&35Q_vNnge)(uKj8>bq?aR)yioT0#K z5!A$`zV)fHZv1?X$z~qz@oBBMmduOZsvMRSf}$uM@sOn}xjYgVO*&2)qr9Xj%mi_Q zW9S#{Ur(c#$ z&R4JnkB^TIIzeze^m-OvC3^kJ#?q*d@a3N$q%jy!f(yJRt*pss4p%Y!Tx4Yo_;5Jc zJX#Y(F8=cZ%thbrXRE~*O!RENS3VKg-D;oO5Q_?ZI?wZX*Jg*({)hKngL4l>*8`I< z5i2V7=&Nk7qR9^hDO_2fwRYbK?a6X8AH?0mWa@6GZNBN>Whzo%vz;8*=nP&8}XQ$w6!Siw;ZP<`+ z*gE&&ty{?yC}tZu`mN3$qm7%FYI+21hEwdWQk1ue<41`n_GA5;$Eqfp;;c6NM7bk& z#w$mlFwg^s(EfUTSOj*k&9p9gVhX*)rcZ+vJL<x!Un#tu4m5t{jkG=h0UOt6(V=zo0CZPzLcJa8`e2Qjb(rs8m#xUU2q|WAJ z60PO=_BY*n%y?v+nO@{LsoX>iZiSMK6&5{GXV9UNQ?DBP`ifhyr5f+I)3We%3n)92{igaPb0467W&o`dy%5KLz}{`G!>m+ zpm!o}8jvf!p07LhuKr;CS|gu9|0z!r-CP%+J8XCJ6Rk;T0$*XE(0=--SF(hTT=Z*l z@jY?Xyol)Bo?v)=$GZ8RS0m^kCTY3b9sBW@ZZCGCp?&YhKC%Lt}pFRpOwu z-51QdCIu`gGhWSVqpvrWm6z@n`jMo{QW%|SAw0kA;XhJ*f|BMIwX_wGB1vNByZaj# z`pOeEuoCIFW7HEr^hqX{`tbDZr;I9PtlKD?Iq8}E7YP4=U*qQ(6UEPm_zZh6C&K(9 zNcN%((-@nBex1$d)!?PPNDMHTxUowL`7+%x+K^J_feg?m>RIsRexA|=4%<)dMPFaOdV_kiwr_QLEw^IynLR}Rp++2x?;(Z6%z!8sj! 
zf^$)S5qtAhK;y%9;yTKIc>qUAIOpZ~i0Hq7-FFxP;KM)?VI9t&wg{N(Gk-(8kY<6a3h)*2o6iSf3DLCE9s=5EDa3?tW2WD7!bMGnA_!VHxc@M(F zqCcaqZ4rhl*PPyEJ}9-R`B`SDP*QOEX801Ll8+cu#HG*Q|06n7iGenH62S7`00T19 zz0KM*{}CPRFp*i|oB!r9T;d&KNQxKg{EzvNzX8XbCrSE``3SlKb1hG5?*1Rqkp)xv zPO6FYzkvv3vMpyg!~P>W&ciY4@^1gbeDuO>Gbkm_{l|Q~V~~)HJcCvq4rxfc8T$>; zl?(ri-9J1?KsYQJT%-LC07UnS0RyV{Y;rV%xOZnkx6F!JK|#Sz94f9<}b z_L_fA)Q)`l(iCdEtd7tpq;2DMI~Wpi#0gGdy-iH=_mZ z_1QFY^KqR+;*b+bq zK*rMPw_j(^0KF;yVBs^@TxV=7zygY`p*&p{jcoM@QTOlizy8q{<8Gs!Euh*q z48W${3r`u-E-^#PYKvmuA~t$=AHYr|z4Yk|eD{OiDJSb^Yv!?o6D!)XFTEY8pm*R< z@;fBv5m5zNN?Dt70sI?tyvoC_-@hpcoB<$DgWxTRcQTAltHUa+if<*8OyNZ`iTFA_ zBcSWu;Z}VY1aw+Y)-cduBGiF1hTvPWT0W)5`f)OWsAujxxBk5~8d0}9q14wdIW!GS z@2-tnEuZdV0fae_F;I@!UE1bP-D;zA>nzc)eZ=Rweox@}_Wnu%hwkK~6)q5fUkK*} zt#TXG-P0+xpx2SO@$L?p)bD^rM@9dv?>2^aaItCUnPxw6V-y9QJ`tS|yGAw>u(5(- zaUG59)VV)?{j5J(lR4WbglpM*m~jpKJAIY;)T*^}Xcqn9_{hk&>AoPgaDu^!KW5*m zI{4OyniA7aGG;{~RT8uAuHPZ}7VNxRJe3EuSQPXPR*7iROSsM0C>#&P)-c+FL%Gk1 z!l|>`nVaOBx&{PNnqi}#D)_Fl4C?C; ztun94*Sif?adZjWp(@x+vQ(vHU_x*3=mltqSr6u_j*uktt zOe&1#`aAEFV?MC6I8!|nKgS%A(#3~FyD4u}B-NwH6Yqr?JNy1jl?T8dT@Ze7e{A3r z!TF|aD$a!gZr2ijCEu@8BG&^{D23Lnl1X;cB-;7(-k}Q%WFY|J#bC(E`!Rp~;i%-p z$m}>?lS_pT_X&YeH=N09@%%bE?gm=(BJbY-l_b%`d2qB|y@ARRq>xfhfhq_~ZpdvO zPo>y#R(8iL$}Qh~@iETB->d!vmnoP|A@=g?M`z!AYf-#FsggSEa4S&zoBxT_Sj^!( zeyGWrt;cNmOj98~89jt+W6xtN_3AuEbjfP|2n$3QnET-Jbt+w+yV8XEzTADo=e?th9G2+s|^GX1gdW8$?xColpkpTwm(f*a%J-OeaB#)8`2 zcuw8&TM$0XD2hPz7GQ8(`zkPd4`jdlz`6CND_s=!+%I)n?pHAToM?G3`KoPv6uX8Z z#Dq(bI${yx)VkVrBL-L(1>jxNJbr!WGik+lreWl$dN3E+lPY%;?jy~j=dG*=I4_vK zR@ieRu297n#IM01+YYo()MY&#voA@u!$n|mj5Ywbs`@d|iNyUM?j{*{?nL+J>E3-X z-^B)Q%a_kcS)PBGCbD;-eDWah&-!72iDmE;E`D+g*hx^q8C}PnV-f!%B9E&sUS@7S;{MS#8`;%pi)&EidcuO z50||#Qs-H{I##7tw>N_)Nu_K`_o?dP!aE4?y>Fy(%IXapv6hAkk`FgO5GsmF_(*)C zn}FE_nd1hSeO3j&K$2v;mh3izHYJ>4B;=|T9m!hx%=KqY+=o>z^Yme}Vx#{fNJUmr zr=HCC9s~2584(~6LcwbsV5t5UOg&cbrV%&Hgqg_wk3~KYtAhko@H*SBf@~z;KR3^A_D=xy^`i 
zf5PwOWXOWGw0a-k7M+A4D>oIO1J-Ls{#!2$OQ)gozWVuIa#vXTu1F(W7Iq9;E6Tj{T*mWVjC-@Ufm{ET$D(Xvh}Pe;HR_u@3Ue?$nlCO_8x zoXA;T>f8lSUG~zYhe>f>O&^~TXs)e-)D%o5tjbbxM7D-CT@Ju=>1$ce73;xUz;C6< zB(H^J_`dNj? zagBGIsSGoO^M2`wu1n+laRu}P`Ng!!xI9_!Evgyq%z#M}WCZmc@0eSD`s|w!%V2~c z#UY}R+B0OwCVUIEkpjT>=!eU|9Q0rLeE4fSIxO%r9wS|xj&J1_86@X7bW;v)YhoH0FX%P(u4lmK+rdp-Qj<-a&HTXY-CI1*IiJb(GiT(Q@0Dz$3bS zDgkCATZ}^3@Aam+`@(D2cfh@htp&H?I*>i3{RAZ?7dJXgjwY8Ba*8LCQ!=>x%2ABC zo1{RcFgI9H1OYk5j1nm;V=3MZSBS9+a{J(SQn61aQ zg0iXMJ`SOx(5I$ycnPW@&k;vl(xYWIjnkSFh;C)okE*+*wd6m=rW_nVr;N*e>l31y zeII-HB`a(sUFK_>6=QI`l%s#R1Jv_h2?PJn<1eZ`QPk3?lda#$BGmgujqKkuxW$ye(fBSx*=xX6z56JGRdS-_>u%$UcO`uDDKaGXbD6|u`+zE?@- zW=ZDd5b^t7SEXRx|30r8%2*>9338xE(IMRiPzki0dq@dsnP?Ioa;Dl-yPAin!Fs4D z{!$E;mJz?#N+z(a(2*d#bIRXH;sYm=`{)d@5{uCk)TEc#F}>48KtH$f$H}n-e?9{e zIqM@66BBJNj#}U%lWGAE6_2W5nqu*V?cMo(uCq2YoT_4^mPzh{M^o!bnXb zd?Zt3o{=EI7|azf97F%+szyMH=ulcU7FNt#-glyyEM`l-!h9LgLkc6y?YcsY3xiS| zC801Ffz%dky__tU(RZSnO-qVZVNit*2C)}8`Rd@C&37LcUxmRQc;CyzebLWQJY$D|gyL%KwmnJlavmQJWt)S{{JSfBxf3%W?2W2;#*qM1bDh5|OQP(<7)s4LijB-R8w**>n9vlgxW zXt@#qRVt(=c=zE0mESwS$w;!IW;QD~|B@7PLD!NMFcO?XW&<*tA#R{n7D)=dIR zr9dRuqi_EDK4;n*d_nfmd1X)mJkU(gs?nRP&A-(B#vkdE2`7|wy%S16anvY)idws@ z45pC(FJM<$*Z!&-M_YpD`kotm@`3_g4{#bxj|dHqOrA;rZXJj^ldkc@93*6;GFmJn zplGs1anY`rsYD=9#2y{|#FLQ$H{QS^zK0;srz^nt3(P%)CJCh137=X-9rhRtQjY=S zP}$kLGzP5q^?9_fzbKv7?L;B^ftH0TOXtV#=; zi4hsTIfGa*G&H*cQ8ow!Ci^78DBmE68B8`cTK~&`De6ujz)L(?txaD-bjND}>lnkR z7PW?R#IlOzcH`^-fx+APR31vOG^>H^-U@qzS5Pe?t_gPaOuCP>n1l-=&dY8cy+ZX? 
z1MFe$O?600SBV2V>&G}=6VwcSe$4$bKv{KV!8L*QJZeZ z%-b&}q$E);+Q5o!2pR;aKW??0yuZj2~{ z=k@2e%={}J)Qq_P`fjRGGe29vb=)fbnoaRAt;NlSrGf2Ybp_EHqb~(R_uCtIWt8Sk z0cb2o%tp-Q$gSBnj*|%R8f5kaLt<}-h-(UkY_N0c;z=JWwU2bAehQBihcqKlxN{ngWfFr)Q-rM^E)Dt4D z*L0`8Enjvk`MoM0-n}jKStTmKpM5*{iZ4@O5>tp%J2B_~M$)L8rYc+VKAvo)dCxV3 zwLa$t18N5>-8t~sDthv^ey&V7-Sjbhm(9o^r08^7RTD9Z3l<5TZvZ`p zcuc-tb*rtXuqsJ~QUcNWR62zqGc#rg#}XLa?jO`uMRB+kH}xrCAuuBgpC_&t0FqE-LNHq7`@2h~{*UApL6>4cO8cx>>ohtb0jiuz zG`}~J=5SB>Mn#>Qwuj)arQbMl6HI>&;ioHZjVQFregR^c~IHvSq#vE zggAfb<7|u>xa8!)LubMC`q=?HZE4UqkZs7zOvz=p73Hb8% zAM_^mg`NRL#M&??L&SWJDd`c`mwya>6j~yRQ>Rp3n|R}GfFD!G>yVbe|KuZxwMnah zQjwCJ$s^yS{F^w3eIyuyNY0R4bh;A%FD=&LHdnJw7aJH{_Zj+oO}aBE!~la-(GXRx zMeCQ_XlsMb0_h~7 z{tUWf0C)oh=u|$W!ItC-lGwnbfY%&K@UZ|oa;(~||0dsM60c`&-6@i)l@3PjV7-EL zcgJOQczQej^IiM#k>#vg@3g^127L`Tz^cOt{K89UmHG$)U*k2e4(hZBQkv*}D9RA6 z9Z3COtaDeQu$uYBpSouvEA5Rn!;zyEUS&tpz57eZI zPT!2-R3Y34kE1-|Dgw7$wvGMJ7%|W28DdP`@o4w|mA?(Gp*D#=yZ-%3GF~YSlBJDT z{$}a;lHe6jxMe$i&EKv~shcS<(~(@dS9c%}%c9Lb#WRDqn(xeVUq%=u+eoEjD&oLI zhk(nc^Dm^ovn778%HL?w`GUKSwes}VA5x$g(r}}7Zy^4KxA_`>83lMcH7^b~e>T}$ zAS^IKBa2EZIV;t!+j}#Lh2m7(8zV^_&AKWakkK=*VaduZ_h-#Usx5zzScwqHXcm%| zjHFoW()mrn$>SP!V5>1*b6r>WVx&lG==T#o2YfZP6csQ?9UcnQv?5OO;RI^p^LGI9 zw?}itA`AhLC+WM6!Cydv1SNt|wLt&+t81?Lxq8)_A)$uZ3*dlK9dWBRyWCb&V7sGv zDP8$F8JE@VOQs!Bjv`lS4Mk2j>L&RPTU}6Wi(o*ac#6BnxCsfSwSHJ`p8)w zd-HS!1;!s7?K_($R9j=8Imbh8x@xUUFC-d`P%9bnZyT@NhD^w;6o2ATQ18B-9{*md z^(*}VaK!k#>V(7DYkc@hKGcANBp6GcBb$%tEEa(B=-S6W*%vfL3wg6$E_43+=LPuw z`Pv393gvMXOqkqID#burXT!O>=Xbw7@&-Z($QmF<9{IpP@xyCClyd_ar+GW=Be~~OhouiP^F^on0jxExMvFhDxz-q{zM2Su~` z>ZVwv3>s|Ty}h|CzXNj|hO87%nP1?QfDf=3FN0Ajao>>g^bnZYGd&cNJiaUZn(`;? 
zzk;t>oNG|856$Up*mz`7V0Xgxvas9RobIi9Xg;U@+yoah(Vvucw^nak!`a@{xP^$y z%!osCx4}FeRxkU_pf}LTnK>Z6`3$J15Z)(qT*?OJGi}@ZKR39c>fJVdGQAhdOS`3rtJ>BA(}xI zyEC{7rH@ED|9Ra>cmVuRuvSX{!(~KVSocgxxxRD8A`1n`XHbuy(3c|ie4V7Oe>AT= z9R>ttObHW`{}5g`f&;mf=l|L2nScO46N!<+i~gHq!X+#q23aOLSRTAO7Vdn4zY~Wg*0a5mpSi-3{XSCPu_;?2F znVA(4zV;}#mumY!W8Hl4pg32*Rzzc@$l)#M-AF1gNC(UI5VB?q92uOqAo@H%0eQ)j zOc=x*$$G@12pTybfaRr`HSda3Ndljx`}7lX(bj2fk|2aX+UkwWmAggS@B!eK z@$ySZ4vqfuw0|YvXu&oJV2MZzAyl_Wt{zkg&hw(!T_4wlT|^JFsj&r{mI;XM7{Cok zhgt9*D4Q=q>yrmQ{vB*fgzdg?%_a9yrty88xkaBI+I!LxBkSLtgsAu|y6M=!_Vw*c zvH_^Ftk55R#}bHLM&OTQz%$Ioo4zezk92TAH5&tbb~|9vbE$$8Zqgv0b0Q%F8UWNL zJb&tC>6B%|4`4{>dmlM7{<>@6$qYC=8fN{BkW!+MieZ?>CE2G8SeOHKo{xV)Op3Zr zfqCKjqf)Sot3e)M2MkOelqr6+r2M_W`{*VT6@h@)&~_`vA*W{@g5DXVrM60F{iVbh zu{^n}fbkP~x(*cRRPSVky37o9-hg#VB%pWgc{VmqOE253&@h$XIfV>JYW!RIeTitr#kVOPt7@NDSJBy(4G!U+5E`I@^ zKo&7JE}3Wz4gpoB7?BvHdQx28AcCL^u0}9r>;|4^`zWD%RRzle{z=h6!e$K++x4!E zx&u4+{iTe7(la)XK)W!7xXA9cJ^u+1KWFy%p(&QiJlDNS2T7$io5?!8XVH&0YVRtI z8^vcCw>($ml(M0MWXt=&{0sjVQAQ9iqQRxf?ieJ5&qP#!1iAvQ&PO+%U^L^_EmfcX zAHYDW+=Z?cqb+Ysz9@|0?724r9Iq)y=$@FF@7s!$!YJ}Tn!5^@n z(v17PaZ&!Yh_k4vWuifuRdk!E(Ik-61O6_cbATOz!k1!;#qiwBsC|7j4vkv_n+VMm zt`6%oMXF#Sbt*j$A&vT53_lD8^iB;tdH)Zz(9#qkDfQ4OnQ4m0<`X6OT@$7A)WF zHWj~43U~sIbE3InP;SU_IK}^wByJ@eer=@ch+;DlbNbr$1LyzX2;y%0=5692lDV4x zI)NdJ&8fQ&8`Mdq;;)?i%74;BOYK0w$O>qCo)?G8l-bG`@67kYiQE+*v1hIdI{=y))L;HhedUl>9VKZ zC7TjT#VA&KXn=Q6e9FdA;?`&SxMY&epN`Jzp+G3E!xR7IB7;geql~uGNaCAtYFQ3PrL)jUeV`^xfSLZOE zviBWe#3dp|T;?1ag&jBqMOxIe9Kb9X&!PV>UEa^trSfzXnwCl^NarjCTD#gM??K!X z+(+QTpQ~C(QllNJso`}Dxwu#)zMGOn6bSp{3!>W>3x-+?TlKU0wjDrTn5~S-)nUSo zJBSqIjl?+7^h>MN!b}Ca0ew8rH)VsDq3tmvJXYx_z5agXvOpR3!d-9ws)GH5r(5!L z@yK>l+ULAxhIS)m>>?JM#)>g{&S@@JxlRb_ox0 zgB5;m`m^#=)^XY7{3FKgmnRGlyib05^O}@sNN+8>?X5MmJ@G}&yvDdY()<{&#EYJ# z5Twau_hk)AF~(LmRBj#8tdgq#0<0l7#-BP+`e?{$o|^C)EiqTlX{Sg=y+y1;R*TvW zxg9)vR~dTM_e1_~-+*j{>&t_ig{fqUqeub)IYJ);da zgx`7uz!V!zP4+R=rghCZ=IQjVL%^#f&!9|TD7(5ssfu-unZBjH3u+LOL>TmQ#n2QN 
zg2Bcu6Dy>sKJRXhg-YZ)uMYF7YM2WDsnJf5rp(m*LW>>-sKJA4 zcE4H|+l=rDyR60;jYHAVFZc^G;)R7D^3uQ#e*;X}3+*MZgde6mdP2TJ`ecyq3a5<; zJ+R+c-g^vPmUDoH5YGDIg2x)C-g|ZM&agrAR0RBV1_;S|s9i&u(`^W#9zuQ)NqqvO z$26$jA_c591^8wyzsbhZq6wgUiiOm5AsHx5dE^%Y)+#1rAOu;fRsJdDgNS^ch}i>M z_9P7fghCdgty;;i{uUue69L^i24HZT=4Woc2|odF9}ED!nJSFTMB$6jD-;DkkfM)p zI!I~p1dqJqrLtJocYle-%ri1AkkGSaPuHAB_4ro)Kh(W>IMjdpH_SAav5rwuqQTgg z$WmFyzGlr96{2s6P?jOvNTTdZDN9*eM4}``HKG*SktoKRJ(N_BV1fEka{0G!w;_A6|>^ePesiA=o}cnETh1WUiM zx!6w!fTSf7W}~v;&9meRze8{p*%`4pEM@bS{qHJ4AEwqqZC|MAy>r+JNzCX}f@w@n z!$DRZJS&QUu~*+5F+)XafL?HR&j;QgJ!-Sc^j`)q*3sDkBMBXjwbDco3vTm#d|+We zB0iukk$?i~Fsr1En%#B0u?GQ1G2Zw--}D&mAa|1~9JF45ELV)wYCo;E0$InWy|)L= zk~|xvaKD*ju}d)hyPMb_0-~6VK~7e)!Tr0Kc$*0l=7Q@zu=^WgS>_laK5d#jry3)2 zd~~6|eR>bN%9b0hca!!{(hD^}h!*zkjS0&;-CoRz}@JR($N#)Kly=wmjvo%PU`xNflI%Y#~nejihwyS zFzMIaN_`6VB1xTjWtx`wLVEH}vt0*x=1g^d4)HgzQ&yHE(%( ziKV>;A5+8W^+`|NhZ^l;A_Z+f)Q87n2)9pwM=#E5H5*!SOAk$Dok? z%IfQZX75_5XjZ=rt)D+?J!<8_SK*q-kyJj}`=H035G?Yt27KZwC`Q}sCx=Nh_TtHltiqKwBYOO>XOi(dbehjQ< zmxD1JkLVd&f6PX_KQ`iLN0V!$%VtzV*jZpOx3TdOAhzqu3d_2Irk? 
z*gBnOH|XW|zmEcVeUF48V3m1zUFn?B$n1G3s%0j#F@!myhbU7Kae?sh&V zN@)hrz%-PRw$B&Bi!7X6s|U*lzVK;f7Q zykO|LXR1bQ6>q2UKeS3_oY03D~KH z$S01x^$@&r?XB9u*JQXK5N?$XO8L@KQJA=c$dwR(sej$+rs5hlo?YI6&DX4u{5szaIM9C9O0fJ7o2I{SN)pOqP?SqwKSJe9VOy$Qw}Ko)UDXGRpk zH^7=(fZx_@?FVR>)a;NFxYW6-h_Dg$ThN+VBT2_VG<0#XZ_Wzri?9vg9ze-wJ|$k@ zwnI{PHCgIm_-C`BIc$MD8tV-JkzH~Qe8eW`h{21o_f~9pdOn00L+3wXn-5x}=MY<$ zElsT*e^Y#_#NwP4BKT00i;Tm;5R~V)Fp%^6=;VFg^@1BFV7i7%CZUSY;Y9CaHEN=J zhoyDL)SwcY!HTG$;UPd5)UqnZ_uJt zB7ghNDF(mMqtmn&o4JG7XUL29J*dK7irR*sobzA$bl+4}F2miWj_&N+tW{@rKwIGO zju>#{$=yRq*XzH&)HElfK_W33K$li%y0APIGb~76MN3~>jDoLh#!^JIuQLSph&MP3 zIDBaYpFr`Zoa+q_?;c*>N`2J&N_Ryb5GJG^eAc(J96G$O=-vK}9K2q_5yBIc+3J(2 z_$8{6`N@#|jH9v+^iEY47F}&TNpbW-HL#&dY!ZoH3JjaR_qmv3G!_o&yZT9Gb?rsn-b4g3f zwIC76q*z5TXUf$y2!|BPbqx;`T2JhvNW>8-4 zDfzRT8=nUQD+FdCQt(1B4%OvZ-9*2`-o_Nn@ssa=w*s}@k>C9_buYCof zLGaKsXCwgotj1P@RX{m9zRUY7!!Ni1LePOT_?wyJO@tf(DK_auI70%> znE1j;&&Ybe9`)JaJ>uuJkJqFh0W`)uts0P*l+C&o#Q_YZOcYXCaPo_%lOy>4m!iUr z|8GTwvJV^k(;y%cDZM44mzUnc!1)0np8FvX#R5%JSv>@^A;KGg$m3q4uM1;U+5gi3 zQF#d5U*j9!dVD(wvTM8B2X@NruD-Sky_8lQm*ovB(wUyz#WII{E2iuvoMwt`z%$Yu zu`qATLyHi63&-0FV})iu=P+~JGX}e11uDmn2O_^ux*#*gWZ$EGFbv2?|9W?^1k$vD zf!YYT;l81&W)XWa68JJuhbMt0im@UB13ClypL_2eHNstdk-M!GhCe6-={FH(*U8UJ;;`_7D*ij*2H#r?Bz=+MfYy} zL$W#o(ag!dMn&@A*h(jY&BxT{zy=cD0|T+z4&oew4zD*agvM!9`}gftdK}9eVQMoC zE;($md;tbH3Ol56_z#X^%YXDNyY`|8L~+zgrh_3rVZFFU9}0e&(x-nL@34SW?n_NZ zke-VaLP4Umfz8#c>R^o9gBt+^kPhCv^oFLbH{ej^LsMd_inkyC#xd-lpmh$zlT z%We6=5F##DuT%Z21=w@{+O>ddPp-xD;02%`#wX+ejW+hq=gc4gh#NlRolc?Q3Is>( z?WtGi>OYizP3&ARt{NZ~&x>D!YeHICXMeqJYm6<4X>%&{gS&4r4X`llY=SSJu)T3S z0F0SbY3d1uxt(Tae3q}s@{C3#pQw1{6lzaF?Ok5A(;uf5;SDs^FR}HV;5alXRh2Hiawp^cAWt zwZbbfq1Hh!X1z?#{LR?uRw z(K;l&Wy{;occlBj?JRkIyWWOidUW*8(ZaTl17U!cnbY^^LJDH8i}5kfWTr*CZ?0Ue zp`wPB_gP<7CvN=Ke(v6~px)b}xX2F6_{AzR6r-xw=6`&@OT*;zEVcsWS?R550==R~n zP;d;H;aVI+X2=aP!(aIdzc$@P6bsbqMF>Qc&})!G z0%(E*!@|NyXByp0k`I+`54t=?yuva7W#Hct1=`yOQIk%{9^6)RFt_6 z!HR!hVt@e=)Uve~o^lHooOVG{h&FX$ZjdgRdSUwU;=Oxo&OR&WzcG!v2w)4Xeq%yO 
z6EESsczeJF&7khbFbm9-D8jjh1U~U0xJ2Go?KimxlaJwXITka+z}tUQ4=cC))fZ+{ z)7OK(@cWcN_1`>0F)OAbFNhbu{?YU_fg>Yfn;Pd|#r<)9kTn`K8R1wGA1q>TtZF@| z*YF9|(uKrCbCugRe+n+4QkM$9t+;q6I{MN{^xEJLo(}`Z5!zFMR$1h?$iWPORMXRs zR-!)KIehNj9UEc5gin*cGI|~8qXeaFpiORAJP6=S{&h+1@Na2Am8*69w8;mF#yYc+ zztR?Et4nZ9#im}xTj}XYIlT=~;dJtoF@0=w_9sIqwuw+1K7So$OoxA;ltE0lsZY+oU5pOHp-`N} z_q{*-lx3Pi!k4v4n`)`cY=Q9*{?rm^{&_HR8SgzlrRGxOdwvtjJZo9y6wKX%Mqlee zLGyL{5yXut0rYhaC;-#{!e!*#6fH*CJmZ}=il=&VS}#L<=I-{|$B#609@ zLNN4a9Tk;*ZjR;7qU&UL*;v8|f4LTP6B9f=cE_l(B(+1g714E2xt-aK0yR_yaO|@t zB4G^P>L6b{2T>YDaovmfd5ey@d+43yoxUC&6J{~s)FFaK zL@@r;iJdu0dfrb?ZpI9PiZ#|l(z>P)brCr1#I2l7 zAc+leJ$*;4J2JU4OqKH!SI58Cyt*S?PB$d}jN5$#C~AZfoqTvhX_b{;#>Olc z#@uhn#QZTIT_2x}ig(mrgEq3a{}5U*kS$OMa1KFOSOFGb$DW5JMGa4Y@q2r{ z7fLnE{|+VaMiJg_!dHpo{T}ZW<8Lx1&a2eU0usnJV2*Z0_I^Lq)7Cd#dx_`U zkkh%@=m?8Z!@0{UD#p(Q(d>(KDW7oz+tcuOsr`J%Gy(_WUPX>9Ime%B-1GR!yE_qF z=Z0S7zTXqltlq_K%@esd31Q~82y28B(SCLFOfq)mIFVrpl5AZrlxD#g(%3WeYqRUe9Acs4r^IvB>3n! zFm&?V8Ur=w_}aPZR~_os6A4cmY}VJCxI?%)L`e%-|2**^KbF)L=c0qoT)6Tnf=Y&i z7w8*kTzqae{lT<+ZOk5JPS(9lZb|4ZDDW$Dul#%!jq*_gx=(IZicpUH6%-r@FZK>_ z$R?3_Tkn@^&9~dzf%I2x2uWXt&y%&-VvRC0@Lp#M3z0q6WefXq(9T}pMjNilWk`g| zme%*>rg#dnHtM0pPH^g&1NNAePigoB8KBsz?Kv`W^Puf3^UL`V=k34f=J&NabKmDzs*#<+-litkb{DXlBTSc~dnKFTKB!ome_B`dmkr(PxpWU9! zoZwc(nN8bp@#-be?AP$&58>=<_!eX7?)fm*l-Ii;Eo|@5H{rHpaUuk%cDL2i%zPY= zRug5LVBe+kE!Bac9VN%kQBKB^gQR7>^H)ng8B-_`!}h?#xf}|T&j?8r>95 zRsT~L(_&sA1b7MS{`IeCU=yO24Ys^qH7pV;cOXFfB9kxUONC#HLq5TPQI_sMjR^pZ zU^?;q${+J$RVPXfAkx^1PyT%lc76=S3CE9%V<4cbubbTflfJy`t&*WtrX_Z}*Z$ZB zqt)hzNPUegk=&u3ep#L7=AXf&-GqzhrWrLi4Oq;b}cGGQ=tM3p+I9W4=By7t2Bh3E&(ZWtM;_Ur<2F|!4p@5wDP@_ zL2eXbnWq)D)yx={NVyCQ4oZM7!44J)AqGP*UKA<`q?=y3wzaYV#%L5;THFRcyJv8l z2~W78GOY+-^Tc;y1zvj$%b;HKN<4P37)S{8_f)hf)E0Ava z31V$wiy9eVh;oi82pK|CAYNra0YC$5^K*&{&mdT0cPRuD&c)JH-ok<+mj~f@=0Ie1 z#~c=UDMH7U4JGwC46Y^68lbkx|H-k<@X;Ewqo+5uk3>9C#8? 
zllw56l=p;IYbCKCR0i4qywWY%dCLZy7AQwUty>=L8eqm?E+Tx2g7-g5rU7}?B!gCS zNdVa4<7fr?O>a;c`gB@>w6_GvZSy?Xo0AvYl(-T2=$KXkYaaNK|*^BU(S9bU|n@jA6^s(L_hAfocV=Hx((Q!D8VSx}&on zr>Dh!pw;~v8wMhbbjD9uZWno%*uOy!xZ1O)W%P6;;qrVYiWlCo)5F4)7>u(?T1x!^ zm+s7?b>}3wZj8p2)|sNr;UaziQ_s46 zHxHFd^L#n#Ax5U-8y*)e?dSf9MCXNHk3Oip1(#q7>LM>768377G#q1Tro>%X zlnVuFD;|e==xLTDX`(`p(r20K_-So?T3K?El250lyQ0wf#4ZSDUreEwUWgb20w!n% zZ|{L~3LgX?p`J+_hgdeS%O&6CdmxGi&jAk>UV5n^9uE&T1xW~j0{>6U`!tiIejyAoTVde9!uQY}G^z7|spJQnzq$mx!X9-EOH|D}x;j=VpGMu1GdOyaN|=$@)W` z`#C|Q1~=FpBi9e6j2B0O7e{{UA6-FKKDgB$qM$gnZvgs09(x?AYQD*lAEeM8V8 zAsM_2fbry#rsn2W$gOCsYd)BNg6S8mS9y4J^tC9uMfYn8{E^m`o6u@vNzkv`fs+); z$sr@j06gFdU~bAl$>ML$ATkR4lJ5n4W~Xx??XyUzrFa&8oii|9F^6vxTl8`<6Nqp(BF} zS{69y{Pi)2ZP3Zn;%WoVH;LMBQU1ZC4#()8;l%u#A<)`c27w^z%p=k8Fd?Ig zGEPa`(9IU2u)F%T(Y@cxkXz+Ni(x+UB?TPSKhrp z>S+(+K5uvmwD}X11*ZwDGvFN#X3RjK)y{+{{XN3muT0=bMVhu2{@CP|g=XD$9m>9M z8lH|K5i@$!ZCC??+RFYS4v!PNE}O3lzMG;+q3lbm{n4ry7d_Gfd^H&|~*52+U(d_|{lyG$}YiY4>#j!w+au)Voib0J2 z?5^_tnq44<=&L%CeG(=bWXZe?itV`$r+sE{8Qt*Im;%hKl9{>ZXs( z?%D>CQ5$rl>V3N^^>A2<(cH+3GM*EQb!g})zjq`ke^Ua?104XXlnAqO5$ zOOT)z4oEJ^u-WhT*y1)@%Nc+CAzDznUcx4LVSnnjklq&R4ZIhOf4in=uQ?5~A3b-< z1(V;6_)dIK65XY#CjuNFvM(`>Zs`TDqpCN276!>BwCSA8JsgB49(PCX1101nas++g zR2N?fiur|-edW72`?^JZF_rAlcE3S*9&{re^4vowgd(7>Ma|1;i6*$x%6Ygv==yI! 
z12R8*mDu)FZwF+z-y_&0JctRs3i{BN9zue~QdP5%rvGpZTfL>ABRP+|UJLKKEV<8( z@8Yq+r`*FE^dNY0%^Tcuh*<*51@LI@xzHK{fSz*Hx_fhmE=jcwB1+qM(47e`J4kIkFIBGZ3h*xHLs7Y&Z;@*feUdhxwhsRZ~c zy?ihXk3O-k^U!yMOh?veyJM>b&0eYw$82a|n5$`zp+3uh3{%zCV$XS!;2@$NALID- z-HFB%q#LL=QGw<{^M_7PfhWdst@uIJUbVO%tc({!u0_Mlxq?f=XT{welrgUx;M_z)k#NU)Hc#MYJ1)H|83c!DI=;sw;c{W3*T52s@SQ}VRH87o#Cv%s_IT- zvaZv=^&7-D$jpnN#PAcr&`0ZgvdJ;P2WxnHTIu~Egc*Gz(5ZXBpZ0oH@M&VXy%)zk z*M0b=8FZ-jbv~U8O}!aRV;LmNHbcyc_zVD9tzZ|c=lSsMOW#T~TL1&2H>F82KTTUO zY=RU(OCo?h>j7&5A+`6Wc$JM6=u0Bsg11Hz66UQ|p#t1xo7MFb>yEtf7?@Z|bW@j0 zd!Q5Qx_tEmjJecy$elrmnn@Gkl5}EMg?A%#19qTtG?QKPPjI>kDE{YuWhnr`U=e|(O_v6reEiqb>Y;ER>@|6Y0iPi-MXtx?S{@Szkr7RU z4(&Vyjzk)v&JCPdWi^Azk71ck;Gzr!iY|1g>O;dv4~hW2)@A4ofYHrZ93Nq26jzj5 z?9PASC;9-8=@_B_%B;a)a2bf}lzEU6oxb_IyX2M5Vq4%xs4&^YLlBLM3hqDs+t8<5 z!U0dPtD()@lvs&Hv3+3g+jS8+E`f#@o&dYTx_ygx)J7seVwD9=K`V5S-thbyX9gDm z`a{yZG0=%26*tPz%)SXN9RntJxSnNsLDg({8;PgcaD?1_XU#LX_YyME+zS2Z%d3y$`5X*l6bf#6%g^7(#bRqM~}e$G>PJqqN3D zXwwP&{KeQOkkLB~FgjXQL{$Jpz^>61qK#&p`#@T?>-_YjFYM~<36b;`?;a>k?j1*c zeD>6Z!M^3Q^^1?8>Y)%gil`xu96=RWC>k7>`|mNLdT;gIorM=i4%;E$5XyVr3`bTW{Ha6vPck z(REO{s5Nphd0V@fVjEmO*Il={WTlWJ1f9JA4TE2Nti9{IzZ24qdh&`Pf&IwI3gvHNC4i# za~SbA4U1jsj1Y~lC4Z`=2`KDtPv_-y)`iw}ztG(H+I8zBAhF@=?u*+rgLf=POcY9R zZ42|+Z#Uk)>33;QUMNhVXr^d`;W6SxeY^0*+jO&?1;rsCPeE^**n;Pk@>9iLxVWx*nF z*$)s&S?B4X#T9>h8QMizJ~hCBz4^CZEwQ?rEwNv`sbIatriW&%WD$ZRl&Z(qHNi&Z ze2OXF4W$WyRwN6hO~W%WoPQwh)w5ANKyw!s2=dJZU~G4%Ler!WS2Gd6egLaiKNIJk z!zK`wkOSbl1q6zqjjoSK+JT-wBqAy-&l5|a*#YeWOJOO?x#_>jT$>T*`7=9Mq{3GN}>P3G6$ z!Bkg#V5f#mW zQ!g(Jy|=VBoBJ^PYzCd{BEQLUaHi`UGVz{hOtUAEZcFP0DD&f-6Pf<`;9Pq5;9+O{ z!s;~S;c>&@?TJWUMO7g`-@R)i1*y0Lp3%(GGPy%YfPW!qL zzlN_CIps!lI8Mi3bi*Cgx%K0d-5dk~q8sBy%i-Cp1@&_8!cYnWOO^iDL&b?Sp!d+o zN)Ql`f*?bssj5yfmc%>1&U7-^X&W3}+rY9@%v~@ZL9!@A5u`eI%B4NH67Rg!+|3Y< z!8iPaFD4ZBvKC=Q_~j(wbNar~?{B9dfDx>yx&=~zz%iQ&^rQ%QalkpdWC3_NUtaua zwbIk$V;qo(Aot}EW8y;}$5v)IMBwQz23F@Y!W(Gcu~9#V1vY2|T?75r)3JA;BHIzY zTu<-eNQ`abcwi{B+Og2qP$4dtv)_=j_2BtcOcLU0tsf~ z=}^w&t~OM+*rD&YV~^@t^h5ds 
zHZcP@L%LZ%cN*!%fti%@fB5rCadJU->LAJ%gl2;QKtWpD5G2WFA=mhR?K^zmkKeMt z`{H`v8%MRgkUw}_+*zhLMy#E|4j|Ff0L^1q2N@Lg9N6B?z+Nx}*|wP|3yt#-fW;C3 z#K1~Jwb*!5ghZ2aA?Dc$~6n2 z?gLLB9`FHlXzT6*FZ~?$rIWw_BGMBGk?E zFr;d%+YZH|7z{X10mDcF=iv6KML_71Ae;G9j!xu#J^_Kx)o9U$F;EuC06qyRD=R;R z1~D1hr;QLX6+HB{=2iOGO%^BDOxMq73vhP?09PjKHb;I-w*?VpqwC^);6i69=xUy7 zeyJb4liWMmK3p5MIPUa*sc`q_wB+DwaRqcLP}BHnX`Jz<&kXvPt z?(6e=6*0LQ$*BGG?OeZe>;Y!Wem}U{O5EXsPJ*JUp`|Fe9%Wu3Z5gZqxE>#Vpe?8H zG=mrpbLqM8QNwn|Dn#K#PR{J?>DXe!;7jy@U9UA-KzA zbJov8CYK|3HaU+ZDS6!^39vu=4=#JRKkIS~AQct{~?k9`l zSdbhWxhF7gux4-p*{Wo|v;w=ZEA2C!Kv{Ib9Y;%uR%@yV(gI=q$+U0#<&XD#KI43J zqJ3D5eI8sh=7ypbSAgUhxbgef_RzlG?*126q24ZsgtSSH;|kpjx^TT1lQnz1vsy^RHBqNoxJ4wCs<($4uC=d>*r3I zS|7$rwaR?0Ptezu3Ff+KDt}KnI07yq(p`2D*m=eDN}_<>Q55cobz%*y=i*{QL{}RI zL)-0=W#8mZ`97YVEZn(z;Jd59k*Yosp#CR$2u{^>rv0XmtDFupfF9J11DO-o=*B!Q z6K;yZW<2>b!oq(^7op}t6Y(#wpUMP$01j`u{;&9plokQc{`{Y2$ZKR&4j91=;fyB} zT^LSv^ixoBe3J4rDmdfDX+dnGUmAy?eo8WhMFL*ECDzNL%8Jg z1elU7A|mP`)ZYQc?1EfoeWOyzDx2WI+W8eOe$WyGXSu+pk@IkX?$76hkGd1t`#(J_ z9`-P&Lr?7C<1+IC1PpoKCO_D72q?;@YRx4;tZnB%mzz_BGvng0Xgf=uZuDMAF%n1B z4!~p!5Mp`3k#{`_(sf!POVAt60-xXT8qr>tLoivs(m?Up#3i_v;-WXd(dUE!?6Iz@ z;*;_`d8J6h3obTO7;zj5(d-LJrJSC|^=S4r1B9OCCrprX$aS|DPchzo_lN>A5%*@m zC}su&2Ez7_j=hyY(msG!U7;hGzCoFWPP397KH4)uYhQ3hgE6MIcpvaa3qnFz&VJ#0 zAyXGXwqyp1uM()dErX~I-Mo_zVSbPjG@ivaF_VYV$BT-Ju%2uMgqa(TH!h$EvsENJ zP$Na|fB5ta$!F4`633@Il>WmJZwDv4C|YMr;Kkz?(CV=#Cn&eEm=TzltX#kxDG%?> z;r7wpHNG-H$2i^&K?DitgzW~r047*VvTCo@$8>_E)^lpPc;16dlQ*u|_eg|w{pE)^ zym(br6=v23-c9pWNO+ioSb-L3QMER90J@BpmeLxJf`14}Ls6s?0014zf;A0E`H1ZT ze=eeE<{*OK9J-Ve({wSB)?psF#}32sP~{l?rdvsztUR}(iLDY7EDGE?6NQXoF!T-I zZ>{37W*^W~a%DBp#uFLmtJmY%94oF@>~_Fb?6SnoA>`ALhdTP~5QnQirqm+Fb11q# zdZGW+%8goikAp^1mYN3)eJ^zuI2`gLDgMkP2^`Lv!$?qClDrs|Y#k1eiK8 zn%O%!bYSTRUF85beA?fU7Ubsgfx~OabSPs!HEh$PVOK?%F3B@iG#SPjsB3YX(yU=E zJMX#C(SuO#Qkun;>*5lz`esb84(kz>U}{3arw-w#L*myl*K56W9ca8?|EB+8JCW%P zz?zO?G?h;C2S%=qu{2=t%dcDs7rHeD44QM7x%i}oJ9sXRLh|k+#=TX+Bu1HZc4F(s 
zO+FaP(a|EKGbMoZz=zH6bIe&69c_kKIIt2%jn&2O;K0_HQbf8_cpEg;)C8oJe)FmI zDN81>!%AbfP$qaC`uJT_Z$Ds`Alnjb^5j8RO#8D+|*Ihce7U!q>-L zwxCz&jFz^0*QLatl7pP`)sf-==z;{F;5a>eV_w zU4;v6;IJ+Sa$LNd1+gI#E+9e0bXl$tv@_@r(G49CmH$Xu7sx{(h0{gIpf-mHhfNcJ zKp|9c>|*5i^Mkc;%*g2>zwVCU@vN7R{r?t-!%#6qCkr4A47mk6q<{&{0Hs`WI4re& z@;=J%{*HY>OnXDqh~&9wv**i!1)cw9dKiWsXo-M>?}2t#0#=>a{p4;;131nC;!8=8 zI>9jJ0L9gf0u+$C;;%6WC7vQ{LG1pt|Cm6iJ5iX)V!)<)lmBi&_+2n=aQEil@9ZbL_p(j}$j0{a!fiT|#KTtQBNTgRAt7&Z_$_`QL} z^J$<~eFoV7Fnq9R*$3Lyh0;}!`53B$lNd=uV>eh_%i@>}4Iwb4{U2bvuYUx8rCk4Q zk4Dyeps_})RB%ZkjWq-IxE@G#WbX+^yMod1fjj*lEHK6u!rw~b??OOaf7L^f{#@Jo z>B9#Q3*Ma?GXC?8OaOLdXh<{$Kd<1--s3C3e`K35?`8S@{{$DBCp-W#aRyTSfb|$T zZqUMx7L8*gNpm?!CItD>io26WkM@E5=?**fE9eQ(7WwR2X&^WVJEU*J_a=LURecQW zLG9YmWdlo_(7E{j7myJ{Z>;u6KKj$~lv~igix3jhp&|VgTF7m9ZsZR}D8+RQtKqR7 z+bB~@D~|BjH+;VbfrAtXWZe-qSF+Lg&Ejqp&8v$1i4DKK}k$9e>VKq`-%MWnAE2I|F8No!428W8F6 zR%rr_1|ZkU?&@+5mL9Mt1`P}hc+HP@+}}Q*+%LLM|2e|Ag03f^ss(?}lH7Ylg>Hj< zwG+fXqHRw%z=8}8d2LX_5;lP#VQo`9QD;q~Ou|Zx(;5SP=T4m7HHciciDj*lgS+n> zOhTpm@k(cM&I7#n99$DwId7WpEq-?dxbsu?6Td(I3Sc4@ytpUNR;=u}?v=x$XtdgB z!}fqnr{9saUVtQXm*sPCi6i7L_R?F_(*Q*6P|gu+m#(zP+NjJPb8`C|y?VI%GEsbl zp8?qyVRmc=TM7uVG1?uc_A|QeBY(t)l{{3{_=2m4gmZt_fT+JP_Pj7GiMC}fukCrN zlw9^t6RY&nfzM-1mVGDj4U-$O@46+w#dMsA&LV=ce|3|&*zgeG?WSY7%i4xB`unFdx!K!_1T!Qq#&7mKynUuvTvgP@2 z!P0)>m%FBBm6t2J)@Y39N$#}Y^Ma|Zl@bK#8wGrv(D0S$vT1&x?frQYGDm^e_RG zFcBlOOV*-i8FU>yE+gd*J1gDRO33IQa>g*835c%?F;iF^lU)|}m#XLAm56CuF*R#R zWMkWmTl}X?Ry9LpBCg9Zjj6~b3rmn?xOf=d_)Kcd4HlfuA=|e#ey%Ielo`YhdX;1v zJ85w^9AzDV#et$vk^xx#hK?Z;g|#}^lU^W2Bzuw64p=h@E7780`N*+3EkN`LBz^7l`R5N#PdX!R@X zii66%Q{%#m)wzrsfO?9T4-9%%Ybi|O2 z)Ai9Wz1mU*Xn_p0jL)FvbbJTUsB9kXX&>f%+5+S&O3!JKzTmxVL20pJ%MKG zMaw7>JOs_t=$l6%5bPX^&H$)^ku3sBCySt?cS@BvpRSH^6lx~m4&D1==HPL>S3@8VwVIX1+RjKH` zK+bC2`jLO({iEf}^|mvnexRj!xUziw9x!`^o6*ZZucAx^9uck4W%nZz%(^)8&_L0r zoBMRim8_SW%E6*gEwtRr3UX+f8lC|fHH-1n!FkyFw1~D)a;4LywbAPt*&RW-D zZ%_O6zP@nc+kH@EhTr^zbiE-en~13$9}b6`>)N0C9OZSJNSm>-Nb^URt-)@MJ&nsP 
z`;Mg6AY>cAC<~3<5x&o`-Bd}ELoHQQIQ03a+95=og1z}HkbW{wl5Q(#Km~Yz$2w}2 zTf%vm@r+P}h1$~#u*9qsmi5s-UAKpg9Ycvy1vX^~!3{Z+Xv=-*SIul3%FavK?3rhv z0rRm1piV_@u-m@rrHA$$Lz5y<`!FyCZ5u&BCh;PH6%E=cUF$zm$3YdO zTnahD5uiUtVmIV}(psRO*y`~Z8#Y>a3adLhl9hGrZQjLJ8F8jB>~;WsbZk@nZYt<$ zdc?fNpVe>*SbLU+>V*8pd#BI*gw=Z!RlB+5JZcGz3i#?@#?J?tkA$GS8$Sh5TjxGl z_IrHq%t|tpE150Zx|0icVF&Locixvf|M|YAta|qWl1d`YjIS0vK1XV`&~lca4v;>h zb3LUla4=p28odyo-*CD6Jb<`0pGeTES`A0R;sl@L0GSB)^i&@zuF(doOp2egGu%eq zy}++Ge}@d;Qa_N61^IHe>tFi7+EP)Hr2^{#_k(EC?D>TfEtry>@&LylQ;UYL(%~W4 zWv)lSyJI2uhn~v{r((kCb#PQ2Jk#Lqz zNa!$3#xJ)+SDU$~;*oCnXYk=w{386mnt8R4>r->^c)K!4Kd;{wLW>5xwZL_q;#a|m z)>5|EgEMFlD4>cu?tLq|P-C1{q#!py-CZT)aIV|?V2SDOE@)4@d0RSgey3AYoq9lH zRk}^6;!($i6F;N*&wu*8tifT{ps)1u;@X@>1MWt4Azs|HM!#coP&HY?Fn7Q7wSfM| zn7a>m^jEA4eU1G0z4)&>6puDsg*}ODSm~q`vvwIBpyU=Z*Yp0A8lY0FSQ}kq<3#yO zh6==IPUqNz=D1z9rqSUhZ2B2)7ps>v4LGv}T!o*sJ)b?zTLuS|to1AKjke>dVVhr> zsU*`C8jrQsuTsvlT+&b2S`M^C|&sF~Vo2C^aDi1dA^|r*2%Uhu~ zPUpH5f9*fsmZRJ~SNMsST^I?~56@s8l_u*dL}Tctn5GiJw}kTL!lmWKNG>j>uiVNsv`p%FSK5 zDgGZ(QHrWyDabpmp6DMmG&1@W+8AdEm_TR(?qkrsg3VSezk^l09~M4N-Uk>Sc0~&R zRPd$cp~r38miC<=Gz>bQQ*voV;G4R2Uxi=)z3IeVLw>wMda*QF_6#VZ>?7UB&1;WS zx3TP>en{|KlZIb|mMKu$c)b0s3YG)4Sl(Vp9X@)HeN^VruV${mjawdk%LqS^Tu_iK zC>Gi9KJxs>V@;U{zYq7n6WL^an;7J~cEF=8@pH*}msXDA}UD($H z>!kXRM4Q(fvYpr+uj4cy(j+eWkfm3h5*7L6Q-3YUlENe1Vih$d`hGPWnI0IUs|DTt zv?o&Q{U_V1<{CIq*?Zvw&J3SU*rpd7T@FLJb#K(79~wF9zh^jh*4807_kI1xLwhg_ z9`Pp3xA!O+#@4CkSZ#c-a>sAwZk0LVD{Q~GIw6_btMdGshZQzKAUULO*n+3bwQyU^?Tl%+|yzjMR``uVB_lhL=YYDM1-vofuQSS#;`km_r zGr!{XA8I{<&Jmw}9d7-6MBr|Ej2Iq9rNPT+-5FR=t{n`l<=?{})ikTcv~w4yzvpSMik@;EUO76(hx`uk(x^^oJudl%E-PU#*t+I-M zs$|YIQ5WXstL)$;6?#)8VCr!G`-}csmp9Iez))LhmHb&j;!bjHx}gwM>+pS<#F6T4 za}V=Bk}=VI?75d5xjxWw<$_X708YdU-|tdI&%pxdMvVA?aSB^THd+V22m!y*v+4zY?kG4)Q2#cv5613ZI!Z= zi5SjyYM0dJ-j?$@;ogx4T!qP@#l*8%YW&DIXsk<{DT7O)$wtbb2anQm#8JUVIKjDh zdXp7*=;xK$Psjf-IR0Vgp+X6yZ+%P`{(QbpbRcW_0R2#hwf210>BJM;n%{lBuqhsU zP_)#qDUB=sKHW$^?S@rCN8^kPKSXZe)kO$`r={+Q8Dm3b1I`aH>n`xo;FhOw+?6hn 
z-wq4g_e~w0Z2rXaF*m_@AE<|ZRZ)9HR+Ma(+3hS|=5tQ!>sL-9XZu%aiqiy5WQN79 zettnmi*v@ezk*ZmHJ(^Yc2{3Vcq#PphG@(>gKaquF|&RjKRmvXbn5K+&6suN$C~)D z6hC4ubk1S(Y3gmS8-6B-UZI<4OT4O^*(G)NxKs?yCeLZ6J$z9+hDd8{d2K>T*l@t1 ze+jwQx_Q!Lq73EJ%G^!I{8{76`V*I8M!CYJU_%o<6{0Tru6ao zfLZARwb|ea-H(=f(Q4sMAm}j=rZ}8*y~*D4GiV{WSF( zG{RCjBWFVBc}?36Xp*mG$-BIr$5*hOXTst#3RovfHGT)})hg%ujE`R;l4sbN>*p}` z_my`g^eJe!7q*&@`NyZ&Q)@d{I13)m>hfJ1vK9)Q_S{b3nu$K$=Ud;!+iJSJ6vEaL zb#}in=Fp7l*`b^>oPk?;cUlG4XgqE41vcvX0log@7nYsk?w6NuXWZ!2pXK=JT!BkJCkk$+sx8VZ! znu2Un)qi9wZaW`)nIJ)`6^0iy#JbC1C@r!gtubb76devNi3FJjOOph?ul#Nb9ZDN> zWno&iV^NVqHd*R3sTdFZ?;=x6D;oc4{g z&H6)sUHz-X=SjJ|7`swDN$h@l@?XyIFjA0&ti1WVJ0yV}25-JM$-ay%CaM|@eV^6yb}|Sc!~cVqyO6Q{^nwcAuFq4kT{?%e`fQ4u{3{2bv{Ja6$yP{|2sy5 zVwQ0D$L1S1sYYzMG~`JEodj=)nv5a<8mIZXA2OBY0ibcsi<3~t4iLnG@>%^G9My3rQ}2vgXj6;DwJ?hbi#iP!Kry(pDXOf4+|yv^0{9 zIQ>`kL}1qeNMe=TA>qGc(H9MZgI9>fNc|AjL3cfQ~JDl zEP(ab`+4e5nerZA`rhO3(}lLUr_8lAigk_Nh|vD13M00RV|mV{6d+lHlN4vbqA~~L z5=xI2gE{IPOc}0U=60)Z1$(R$04@&q7Ic9A7<7IwnhRMKjwlU6u(_#8S`+4-@n?^~ zomc8~vp|;fGv{`+L48UWA}j_#*SpyiELSDa&HB{c-5T5j`(g@zu?C00=m$zDap<^h zLoVr5@00TXeeLV0zyC+Tdn93WJ_q(yY2G&g+HFC*Do;r^E7oI^S2h3E82D6FP#>5A zmZbvdX?VmnG`efSZ`J+%|LW=5t#}G@+>|$LSDvMH*M402Y);0B6m&r6( zOGtyQXz7r0-PW|pamiM0p;;wLE-4c!!Z2j`*RGLDa?_=BZufNRuK9~3Rz2bW8 zC8z^O)}pW?*Kxh3KzMO?Z^%GZJ|F5WHdIPXu*Vsbg-86=etI#yr9ruu0U0`f-=7w2 zP`^wlo6<2Nmn}j7y9qe>UMOffC5TtdKql9{JERjBmbxpW-w)!G)fm8cI$#tUhPDkC zzRkPPanrn>Gvcwf|F#>Zx(&z`3|0{ zZqR#Qvb_>@RhpF|@jf$%XgG~-G=f3I6hKs!69b|aJ|fz74 z`NA{EWP%InF45)F=1ab5LSj zx{P#jvh=3LSNFFviTg1jV(}?stXeK&81e)*EHZ<20Jp}CMN~Dl^R@4OR9Ao|bIy5Y zNkFxr<%Z@#Qan#A>y7W8&?b7C4!x}+adaz$ERA9q8j#WQW%uDKeSKjjT7V5>4+8D~ z(thm1Yv7u*)74ERfLIu_Ej@`xdH_~TkvPE>+gKseg`^n?Uk*zyewgPdDeSTGw_clq zidYf_TYC%a0M8X`pK@Vz#bN||CvsOzG3ws#prLQD>r&CJwl<47{Oa-0RjukHVFG?t zEtq~k-W20mbutk=L-fEw2{37;x=lQpq>7s-GwFmh3Ax%SFVKIx>-V4c_Z~%@T12H(Ar-^1rWx0h24RG zuk%>N1VUWeAqNqf8i5RR(cVLQWJ-#FiO7(IDPec&W32xLYdE#~k0|LzoE%0w<4eBH z>*r@qwN)+Wh#yxR4!>USLZ`GO%Q9bxo2>#k70Y}-6j{bD=j&0|QkmwOL=b|oAh(8W 
zO-sKmA+&oFxpHaMrsz7TetoTE?%=e5B;mCRuWsI#saWwddqf<35JW`wB)bF*lR(kHd zr{0cl*g^LN2;55T)RwVGffVR7uf#H>u}~i$70ATCZR|+#k$UG|=!z4jFU`rAHG!KU zGraC@f4k2+i^7tKICDe;8DR}X?zI_XByEr9`N6c$nTIulE;th&8>Xr(YkE}AG6)Z? zoH%*qn#B||OTCwK2;Ogv(vVHN`nbX+LU8TjWTmFxt3%_TeM1=ve6c~kzqFLW-B*8# zcaAFwfkFgwXZGW1RGpju9QRbniV`ViW z0?9ptTbPE)Ya@x>JMn=10T_{?+Z$YzOu)Ap#6SCd6sG^1fo!<_ zB2Y%N?(aan&;GDAkVB>QAu3kpzc`no)wZ4u-yp~a*kLP|vkwTi+^?>=rngg%ACD3x zic~IdFfnn@*SwRp2m{T|7;#ZQB#Gi6jmC}x19J$y&;TPCm6ON`vz_hF5=B^DZHT%K z9O@66i*ixgCY?-$mL`V}L&ZTPrcaA(z=ROO7Q-|c+sM)0r8bgQ39r~*htA345(yYnR5`czI#;WUf zbz5I5a~FFHQKnr#|ErraZ()Ct Ol5(_nwX52}O8Ou06wbc@ diff --git a/fn/vendor/github.com/docker/docker/docs/extend/images/authz_allow.png b/fn/vendor/github.com/docker/docker/docs/extend/images/authz_allow.png deleted file mode 100644 index f42108040bbbf9facb9fff0c320428323f061e10..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 33505 zcmdSB1yEIg6hBBjN;;%dQfW}S1ZgBh8UaD1rBg{Mr4a;_loDxCN>UIMkWxZGLZm?& zY4%+8xBK7!?9S}Y&d%)4ym98ryZ65PjdRW?&V8hn=q!g+ci8JxciMc^7!KkZ>@vJuSY|M72eJ0*RBf^rxl2wv9?-oiI(lUN6 zFTW}e7?N}zG8p>)$&@ceomwRb_4h-s%3R0l@IfsB4KM!BhYN$k&kUDM4g>j2Tfh;H zC||MCpF=t1ZlOD;t;3&{XH}UwaPy)c z8F$#SHy>}Q*9Dx^o2<#69lQG;Z+3qFK~_~|J8;3M>4C4MCgG9zOY@PkTYBQ(s%(fI zd{4!$4aQdJC#H>uqm_$~U)H-unBT8V;e7}Hxr~4PB?0TMq(oj5kvgBv7d;u021+rs z>~>!YVoC%_c}jH)HAA%CJk(o_6v27QB#zNAf1z_slYPL`xp4U0`{6HzTHRxB9foKK z=|qwivI5PV%p3jmP7nG5JGzTZYVLJ3>{VKIPQ==Lex}Dze6uL=aYXm&+YvM2L8+tF zs^!|6gN^&Ye|^fn`Y=x=;le9-ojNz`nk5t~IjJ?3o zd3weAucUs$)B9aCYpBgreWGMPTv9aX`@GP!&MjX@w()N3Hl3Zv{`_<2uV1gVhG4OK zE`LpARVFIZEsX6+6ISNb={#}WxEtZ>MzTBPz}PQ-v^!!xH)LR*EFX66Vy4Aqs}2J9 z>VO)|qLe8DGO-6as|I>ZH39bS|!itj*|`F#IXAkVzVyWC6cZ?&Caw%Y;ATj zsSmx5OzTOK-oHM|o^BGi9-cRKuXCHbu|}X3VtVv)>6}}T5L?W@{#If z(FotI^;#RVUKO?NXEw`t=e1@}fA+iiY|Tqb{VC6~;fKfP^&WgFxE}MSp5Vdyw<=|Z z^`YVv*M=MUY8}<$Yp<=lC%SwN=2999+QjF&Q@n3A_zb8_y|z|Q7IB~NDbq+3qFL}< z8NQzHyE|kMBj)tw@p!!pg7rBp&2*6+%F(#dTz!igB{D^8XQvk30Ps<&+?rWfmv 
zT>t*A=CRgOp0pL18l{FB!i?dGGC#VJ?%z{^&gou9drk&1v{#~qJS9#DB|XLb58tdL zC^K$8tJcnuFN3AIlvidk&Yg;kS2-d68-EYK0Lh3TvBFCwkJ5rDLp_!T>&ncV%ZQTs zZjJ4?N_nlOiq~;d=S;oxE-$v}%j}4oJX&vFH?F!Hv(gbmcS-9+A%dxAXX(qkjyR_E z!@Y6mTY(4N=F#rYM4j&?$=VIFvC1gN(w7Au&oY&1$)0@GD>c5tI+<>EHp&v1r)=!u zDB?7hx3$Bew9uR3&TClFvSxLf5V$j-OsILqVff~pJoVkr&*sLO{NFjWN0Nr~*5@XX z;5d7=v{Q$?+sk~j_3KguQ-Hv#5(7!V!5kGU z(c0n87cIJ>i+Zn;B*Ir(&1?O3UvVowrnux3LBwFXSn=YanD5pdDha%gR1D}0lfDZK zxjhn|%e74qIff?d+k#_o+p@{>*Uwj!S#SKE>CpX@w+xo@yLjw_YNv618wXtJ zADDLK7Gvy!BQgZ|1^&^GUD^auTsyILU1xfCI%63+hz()=MPo8O*a#+#W8!wlRa$hN ze5sQg-LQdaM|Af#?Pc3|*Tu=k`+Oc!8<@DahpwK*&no+!ot{XH?ZU{@u^OhkPq(0Z z_8eRNhAl+5v>VPZIM}%vqa(i6&gn2*QkE5X`c3S3HcqzwZeQlw){ev?>8)?Ctz+Jt zitWE<9X)*J8*qBCp!AKHN#?T83#$}u+yb*E$KU6f95Z0$7L2YCO(b2^joy=cey1mO z!1L_XpGqRHKBFi1fwSk8!Z4pQlN!xi3M%jJ|Gq~hfiWM?Z!VK8Dy0w6Q<#X?=ym&0 z#chQVhw{b(DKsiRQx|rvOw)X)siwvRBD(rE{MHd))VNve8+FU_6N~ZA(!_~MY4qL! z$vfvdObHutiCyOv?J^|2MG~(+7a`1$vXV8Wd+RVF^19=NT;n;$np2i^ad#zn0Gqcl zZ&KZd(cNc#h=PbxJ9h}?kEP=d#-=K*B0Sb7Z!u_QO4apbN}H%X<*{Sq*DZ+lSRK`Q zb+hQg_CjAulmAf{)9D6|c))S3nmarqt5XG?YU#RFob+9aN4^f9$=yHWg}DrI3wpkn zAP$Y86FoQV>NxgBb?t03E|B8Qr_awt=iaS1J%Qj#Gh{#6P*=I;gz0`7Cv*5x;*IBu zzFmKIu!TQfQ{~gPp2gK~RUyO}^V`BUy^eRjy->tAg6$JcFMi|m_co%Q6V~aynr5HIj|<1)Ci`vSV_RZO*tFRQxMrAXbU@*JNX{{_qMePtrDJ z#Way@z&_~?K_){`9xh#g?Pqk$xa>eNe)A?r`i={neHO9r+~yuH+c60>z@{sABJ5+0sLQS*uS(xNu;6 z=9PivGHQTP(yL^m)>T2;MHOFq3_EJPqJo^^ozr;r#ch@!=ehO2>TMpz#Pqdf$j+jyrz9lD+@x}N`vdhSbjvXV*gS($mmt7n z?oKl_GOc>czettzzQ?LQadDbPGeg4q+HJ)*HZ5vPa+vK5vRi&^usS7uw?B)1*Hl)< zdrzDYp^VNwQAx|B5k<-SYLMe5XAOb0SE8}GyQJ@(HE+@MT{Vr2<>6B9G$HF7B3A5; zdr!X>8)z+$l$ZYybsS~(YftaDH8Wt$Oml8N(cfDeFZ?wsr!1lnPOvO+?JPgVjLh^4 z)II}+A7Ln>ogA4Nls3m!^48ibYiq^k@Dw_`wlTGWY*b!0CJz3a<32x^sY#UBiyH5v z4^@+^)n_Z_XRq#2ZE0M-^O5Arix0flsJ~4sYn@SF;8kkOS4+N8xc058liL6JwqR5S zW5)A1ocat-Eyj!bC8_k%em7&L1?mTWX?lniXlB^o!E)UoxGO0Y=lg4Rn5%Y;bi;yF zm^;G@M#XLGr|R3IF7EP=6}Q`VSUer*yn19O=eICt;=dl!cA)A=9UDVF$q0Iec>jb{ zq+fHh_t_Qo^AY3IYAJlYB=@COW>(6ZMara|FD%>5<|srY6*K8x?UVkU-mp7tviy*U 
zu>duFrDZLDQTx3@yr{F;?oDM=a-WXSUn+b?udiMIp6%pXoLT4ieY4*>m*A&Sjy%?G ze1S!lRzi}Op!xkVM&~Km0L}*jy-g5E_nhAOJT~%8cVa zEyPu_oM>uf2&i@Xf3JU-C+TKOCor-}tutG%!ZlIv zsqSkLPt+HmDrEgQiAzVGh)KrLV^SbFZ=5>!h*$#nHh9644EIOCy zD+!cKh)mT)TlvjNG=SE6z~8 zd6tLytLtMjcgjdZr9IO~Fj4CpgNn7pR0k$6_5yzo|9z)GjPzV@Ih*SSt*eg#BQ(aw z{*KXkY{nL$*Y1+ih8cvVp2~DROQzAnj5wE){&)2v4)>rS?fXSs_irwB&y+bg&+U?< z8;I#QS%pa|X&TaFQE_GJhYJaq$C3uHVJIcKeE(r|U=c#K*OCb}0NuR@CAcG63>pLx zwqHd@{jj{?rjOFinuWz6-GN+ubRqO6?xMbOYl zI&#%gzII%3JSlZ=U}0L_}3y`&iFiyC%708ZdKcj z24t-P_X)YrYwYqJESf0blPRFBhjku}E0Fp~hlW@>E|7Mu2FH{*s+^lyQ88K>qC94PBvBWm+m+x+UExs zNYH^wOnQ4Z02fzoagDYsnY*wI-;8e0@oGvpJ493bw9N)a`EVf);d8sY)K5qhL)~{O z$F@pD|6q7I4H&h~Yr<8Df2XZdEl8fJn77aG$sd5gP^=eSbGMmty`@$hu1fFg-g!s~hjUoFIQbmU^(?UwWHX^d8TrMD`~D?OyIfx%}$H zeWAB5AHTB~0?wV;j+mK`&wNi)S~h0d^zOfi+2!=z`kBb0fPGQtMd&f0fh8=yuPD7?G6Eh8&-H6VG!IryE(U%CR4zmYV3DA;Lq+x7=Vo zB$nBiJB51mV{V+xtI(59robeK)5aTd!BGv6>0Ud26I%LG7}r;SJyXi4H&dFs&V6Ao zje`QGzv}l6U+4w%`I(HCsnb2tIq6^=>+Si^Ti?B>J`EKc=oM}NoJ@g<=>mMynAKyb zOeTjzY&lwPF1y<-27s1rM3~}igema2(zW!5-<80zH+H$#nr;@|7|c~B7QO#@M6CA! 
z2!*5?``zWCLvk&rv!q8L+O}18J3ki-MG@%PTGiNZ(=mwZo@9pFMVcmpW@O@ zm8QdG?zs+xDConN0XUNgB}4H2l-6&k>7hqaQBO`!j=D=tYQ7TZR5|pFrQ!9Y3Mv3Z zF9>D)avzXS-4^QE7ehNbhOfU@!vh&d&ir)}MRxEBBm zOfM5QP$DoParT$c%k)jjH@I>d3bO)?`ycHR0JCT-hw=MxdHVf+?c_1Pszd!Y%4mbGN+^T)5@xJ#^lOgAgs=L>X zV6TR>_hrfQBc5a1romcZ zlJe31w)^vm*7BVn2lu;H2IQ=Y3A=F{DR1=LKf*VBfHlyYCEMJ5r$g{qSmEWs)t0!W z;_{|fdxABm?c`dxr|6&2-J4b7)}d}ZS3oX6HBn+D{0*oEHDJgLfH9033y*Kniy*>K z@W7ppIOlXQ?%Yh{6pE#nx*SZkSX7vKV~t3sV8bNZjb|;)7Tp!uT{gP z;nL?VjgKAj?tQO!JKlG_f)(aK$Y)r>&|<3*fv|?jm-Ox&?{D6CW2>|Dt*S7I@0Kx# zW;$2kej5`?_WeA`$18T4O!(u}pIJQ?`>*;dTqB*f*L1}Fc}B}n4`uy1N_5K;k}}T7 zpUtPc{8=9?{m^yQUEbvi7vkW_Xl=Mnw*+Hes+J4JFpI^e3R}S+SqJEvv_P@j@rH_$ z)bEthuRCodSM(BPoIATvq=hy^eLM^}!N4sbLkTZ^K_-2~e54Vw8*V&tlfhQ~GF=Bl&HIsho`9;QWU#;|J zNM=Ud!n-vKEC1$*8?4A%6*R3?nwdxSB+j0WL$8uw;~XppNeq?UGWj42b*jYJ+`;`< z;;CnW@uTC;c?qgyBJW*K*jnkwGwstzrIl1&%6~jf)W#}t{E*bb8YgwZW&IhZk`eVL z5L0q7lb<`MuL$7OE|5$Q+Ej3$H-;P>?yhvnX?(WuFnDXPC;CvZUO8xmo~3dGS7~!z zyV6qeZIn;#LrcK+yU}6s zL0U?9;}x?-qZ05$H}5YGoSyaKey7xLYi+L?Y#k88!KywCgGF%2QsH6i{-T z*!U_Q0816}1gAF~6>6eSCF2E%IRq9z0lPhItW-#%tEEaki0Bn2_LpArDf!fLi0cKGa@n&@aZYG!W=499G?qbYi&siSU9mD?Dz#Vx`HOS_4 z%C;#p`LiKBW5dy}cLAT4%{crHVWICle~PoFjwV|`jc&CoX*x$;Iqmn}_M|-Uq}Xlg zDZjbdcg5fjN`|3%3VG$Eqmk2f^=9L_k{KH=g6*6^MIt^+0*CD(N_|1W7?g6BW7zn; zBs(`B@BLVGmy*<#kfE5Wtzp971sXD&awq(PR-iA+SowQvyTvCHIpxddO%EoC1baJB z@LN+?XvJKz5f-adBdohic)FAHch`>>9sc=vShe2--pk`)@cH!>F5P}BrG%T>_n}m~ z6?GDKzDHbxju9i(r2Wk~ec{}gnZxjpRt(stb@OQJL?Mj9VcWEVDaAG1H(zF#nMaG= zez@QndcTx(qn|5$AN6LOv6(!zZI58%=d@<{!S+ITs-V@)1+6$47Ry?8O&sSq%{@or z43Si4sXFrQrBrj}LLsug9Y|c%*K%TEea9KFdnCSz;GVpyH2N&ansT<)Gmdlew&wZY z54;GtC%Ls^<1lKl@unK@6B*v z>wG)yB*9;FHsTYq05SdLFjni)^hz3YI19@dJetdAt1e74VjiNe=qt$rLm#F!FrLy3 zVDe{1M~M6EjLkf4Q()hSi?T7aYNa=I_EG!Fne~9o#UXovn2J`v2_bV&g6Aq{DA#hGQFQ3cqgGs0lqk#UzE0=&^SHJ51l(HR zOV(~whS0b_p`L5?d=&95l}U=M3^ok=jJ2HdUb2Ci4)J>HV}H{rA2Z2-$R}?6wdFz? 
zBym_yw)cI`j!*jJZ8u5iT-w{5TsTohCj~S;EEPXgdxwbXz%f5SNT#hN%dBeQCsmb+w_$8FC zvZ;Bh1P10Y|BUx}{;#bf@&e7}Ij^%0LQ~{~q|liK73HSMeC;hCM-{oR_IQ1aXIU=8 zZ3vEU4M53%56J253%FKlBlt_ZbtfXTpr7(b8K;lu<-LYtU5WLGoXk1iB}+w&N9bRDTo z5ZMMk-g|`}Hfgq|=%5PnH?u0L0((&_@bJkY+eOw)M>?Q)YOd=JX#e9Ha5K@uWPI*Q z{YPy$Dg?tPdFb9M+=)2xvr>yTpYQ+5B#Xv!DZYef+X)j1R+qUV!t2?0<=n;AuSV#fLT>!vFCk zB5-cC-6dMJf4Y1td3HZeU$o6%4L?4}372gtU&PhbwYrFlFoqtzfuVcOO|eYwDTNS* zLIhE=_L=#%AFk1MyQ$JlEu$6Di%2$i_AM$JLW>k2kCXtx#a_NQG0MlG4orlTRq?m} zwegzng6q$FbYjP5_)&SWFXA{%^PmV7RTH zg@peb61hbfLWRC0@qfb~WR0u6$oqxja_B!U3Vwk+lV3qk!oOo4WWLi1vRVF{Z$FsN zeqrm_|K^JZ=BxWM7U8DiG-#A=Gw-p?BWSe#{JiN1SPR7I+Du)~t`zf_)(V(a`xKi^ zHTmm3DAq4!1J2$|7D|wW&{)-mO|WjEAMb;aX?{tsMuz8HaQEAVPq&x#OawU<25q*scI(^@%wuvnwHPk zyMYMp(Eqzc^jTrF45NO2Oi(yS$@5YM)asEcxysRCE?9Y!m1?sPxBu9kd>{RyX*$RJ=;V=w~n zgQXz_7Kqx>-kSCP#!SeA=@>b2i58P!D_pK|lT05yPP6;zpyGW3j`eG~x%u9&@5Q}# z-|OF+Hu&gBc&)ya5SK6y_{&kDfahbPE{xA#{#x|Jt+cJNOcp>op3&)~P{>9sov3Cq zPVgS3fPHV$#V3di!Q2b5`^F@yC&sFNrt$ZrZrK@PfT-$|;l{)!0KikG0!YIe$dk#} z9#QYWqR}Tc;3~(sh?_3yt%=BWCx^Sb3;f1!R34oHe) zDf#A0?W=x!s}}(cCIQEz0*DjqfYsu%d9gmPxe@Iq2)x^AoU4-HJJ_P7m+(*l%)~)* zW*bVxAc6ZqhW4is2VT5fEp8rxK}XMsWbpC9wmJaz38K3n<6E6%DI~2rWA*r4Y6laV zWrIiN&I7;U#3&nJ+M6!UYAE%x4H)7ie5QLl7+B{PZFj*~;9BFqO*FBEd%9JEm@k0a z@MW0v|9#7P1=MUVL>IL=pRfjiNgdc4MQ@D@H@NUwAQA-&kWEL*M7!jsU73HUp2WpM zG9!15vHFE>5E=`=Np*>7quSDFdf?f3slCerE1TA7R`(K!%^j|d@xA7m zh7WK}K;6nN!2I#!2iQpXI(R+D_*lOPI6HoOt@ub16`3MeYsD&(ta=sBoWUHG3>2()v#Un<9W=R&y`o6|)3Y>ZMdL&KSpw0!@o(_7d~_ zeUN;M75yWKnVy1wpm+(hcDphr2sh4j$Su zWUlpCx`E{Jo2ABjh(-r{;%4!dV4#fmFVnXwMBAreE783YDi(OSR2X81S#oGzX&GkB z8~5;Zs~tm;XIX(}kt>^?=Y5-qJVeQ#;H`N=%BuA8MCCYVR5?Vb8`4@ND);1U^CtTt{3+)2o)1YD_hsmm*F_>#8%qpf)a9ROVz>Exf(6F#I-its%2y zghTU9B^`#XE>h#|t2-t-$z%|Jr4koo!86J@cL`naWvG|idB}pUG##g|f!fyEa zGl?GtkP>7({q92Mk+ASrhE7g2%WM|QiIC_@;)Ef{x&y9}n{nQ``LDlee(ZW;P! 
zT~Iv7{Y>RKlDk?sAu4NKmXgDG!8m3G8j8lB|Gfvn-pfI7j7|_^K0(TCtL=~d%!>_z zmR0eNd}PQ-;PFD`s;G}hvOkU&mfBu~t??dC#}l@EOwHONen9QuqMw4I z*94_t1N-i72y6cmBY=KS>;)gGr-=Y2+*5xpWV2OCkvA#a{F9}RXL13mzqmF5Ioeejwbzv!a zORP*Y^HI2jBvl-V+}9$ZijdD{y#+AoV6(&7k||=Pp4=hP;y^AC1$ux<9*evk_B~Q=FlRMv_e#D7 zQ<}K@e00JI1o^MHhd^>DnF2kIgzi5YI(W_yD?@=snk9XfnClA)UgMiWcYC;Qz4J;2 zXIPOH4iSAk1S+*^$8UMS$O?h4sLO#U$N*73S6I!w54+S1Ola{jkoG(;mF0c!jaOf{A1MQy$DITaNxFbn z{4nSCNLDgNzV0fLU`3!9sPV=06ApYIiidE)zCRO`5#ji$mdruB$!j0iMG9b8)xi*u z+kh_7nkjw%4v;}x0AJMsyz+r2kPIx7dahDTo>`-xy(ZIznXc_6^LxU*i!o1`v7U9dTl1FppX3(w9qN#^$q>dp1vFv?i9Dq_F;vd+ zYE|FYk0Y#hE$<#;6Ou*0fRs?I)B%3NsLh?)nyCO2u!*rlV2_6F{DBz{}?6T?*{=(?Y6CrE0i-r~1*|~45yD4)g zCy@O|h;h*>_gST`dh~pyPnpZid!_st#?Hlkq>v`HnGB*EHxJrhjt6m}F7T|09@5Zt zycWgsze&h^U18c2{Dfg3)@ZN$oj@#2Cu7@gg<_)U)@Ytjf=qhA*~JdjdA^&Il}DVGscHJNl<<5KRg`pJtkbrrQ`?!w|gZurWiIb%~-%S-a&jnC+e7}4@N{5~8a&XXnA{6N%0lmgbGa;brtR?`OY z7R~Ltc`MBcB~Efe4&fk8kBNhr8H_0*850G26+& zRQq)k6C#EZVn;N-De8adG1r?V?q2XrVZ^f|l7Y|5gZ4OV{`2!AzCwDnI<^1l-h}OoKVSC~A=lXSff{!~TLXcYh`#bNG`ELc+ z>GYU)gHD9d07WLHb0N%`k*OJgd% zHDn;x(M;jXcmUbWK9o2`YNF22x?BM=*;iX@xBuWaEIJpN;5%iZ9O4Wu7JxVH&(~(m>oEj< zPtbGedK-G>TSS$-3@W82u zHc?Ir!!SEyXrBNANiPHv?*O{0a8iyS#W>`D*euRN$S;0_T6006M?Qe`$e=@|9|Eb1 z2C_`At@7}D3>5R04>cj?pUV6ZTvo00s)?hzT+}g0Mvgi-J$+W0nZa6$y|(jg*_GKu%Ccw94X#Z9(Bi0B#JU;TKsF%Db0np~c00 zSoV|TZFuy0ZtwQkK7uTQ`W8dHzZbE@yku8dfe^2s7T(Gy|w;FUialCeF^!i^` zC9Ou^^T=Bq0#hKyf+nctcee+|Aqe$e1J1tN(leQcn#L|1m8^0y;wE#Ug<9Z?ankN1 zYV^nc?b=n$P+_t`uHr;z)M7_}EM1NyuVO&9CXHdXS#x;WtGrX!V0&9WgPdCMJS&;_ zJe>GaMTiMb)Q0axJmcDqGQu!kwW37h)`Y24g>hCF!H;>(jUIUd$!gVJW(`uJ3p3mq zlP+2BF&}!Q^xt_5gHgN0dsPytM-cQ*0CRUcsEg-6JbDR^_3iM+{^TE6C%2&zAjZ6T z_pgdBhoO()Q8o2`BKXb72Nx5n3F>Si^?%0|=ir`t#vxuXjmSri8t`WuS~4ZS)}kG!3I1kAZPs)4rW+fW;+P$TxR5b zglx*#rnTX@BSd(Lt(`Ey1elCa_`LFDSUq-YNE-13)%zi|qta}TE zVCc60`jQ0-ia3L6h6|Ywwp7102R1WkKJ z9gKzhEg^?!*pqjAS!8%R|_l@Cr7_i9vp0`Ld!rNuuZQab$e>A442M< z);b6U+R&$ggTuo;n}^MA$KMuW;>x!G*Z()cv8k;6;0>8Gj<|m-5UL$=eIk-c7~7&P 
z=Ke_xrO$|z3i{=__BycSbUhEMbJY-S_&s^QYd7$4$IchzR_m4^bcns@m&V=}^MDuG z}*0iT@*tzpJo`VH01tbStO5$b2YdQBmg02w+O$lRvDt@yG zr>O=^Z}E_3sgCze= zM_!>MU?U-fVX@V=Nn<0qijlg`r}N^PHv9hySI5=7ai!V=AaQM)?z$|ba2seQa-PLi zZ2(rae}?9}6y^a~&Qf8t9=ro}o-0quPQU@eWe^BPprMAq?DLy{SBUra{B^yCdGbi! zmpC6}NN#A5^I>>=kBH*gW=F>%u?5?%afU3EC&9J~9|dh>?Z0wekqv|z^U7fplt?^g z4Teyy723!2VilY#(8?;K{P{UuTo#vXLL0|Zy202eh61XP1BI4)IIbm;hGt^~io zu$J6Nx(n}@aEpLfq=7Dp|5NnRBqh}yHBC=|UCntT8MMM#Tc z?gbM;k;NEV@u@+HVUB+HY<9C$r5tJ0p*FU}IWC;2jf$B5QWH_%wYMX8pcJrY>Z5P@ zA?4hfMR?k^X_Hmsh7prHilg>;N&Y%>n!1*8n1l`foGAE95o0a^qtvLp{s4s}+4S3= z%|jLcQ0~4~#!l|kgYB5ri8@`gz?0oP8_IC{hPwTkjDGjMhRpY98ActfEu{WZY|#*1 zEAY`v$Pw$qnq}GRC#?5PuB5O==l4s#pfSQDi@c8Evrk+=u}b-7bk+z9mccz(1}@}x zPh(jHpjV(~p|ef8yDyO1BJ`w#&N;p_upBRtO17PGgbWie^f{VyIlk!a`E@Z2{^nz$ zk^5q0dTqKa&h9()Y&}B}%Lix@S^-ajL^0?G{&TwFvZ6OKmxF2JHcyz*la@e;iV%fN75i$znEaVs$zJcDnZb%vPyiRU616bBVK)*X0A z3Pg*HKr#<=5sGU|mHp%P6Nvs<6n;`ENH!p2mP^n=BO#8+fJC?&X@sN`9`gRUcQMx3 zCHFl5VB~gZ<-?7{Qqjz6x`%d&?pZv)8AIGtsj4ub0SW1d6gT4I(S6*w6I}7O9}uCt z%Wl1618d0B5)$Q|r;e1gib#%W!kf;|JvMxZK3TH?T>&(Dr4nnhv9$xWd>9EPk5$e)E*p)0&2r@57@=53QDeeO?!B0Z@7fV|NVx(r( zyc*SATnqHC{O2&FWVX<&EbKR#{*jG`j0qHI%od833u~Uewbh3Be^O~M7YWj#<2coH z3;3f6HGXhz@qowobR$CcBNza_$13_o$TQjf;EDT_IU(#L^o>vTRY0mghLGl`tzi@A z4^eR*s4$tqb|c2W`PT~fjs1|?#^(7F=AHE1-$N47f1iLE_As~*vq_Dx{-yE_MJVc{ z2xk8)4Tyv3MDKlsBzv4!(ZO=A@yb}R{-4=b!OerBoR_E#L-4_WmYoK^6qYwANUon= zmcJy z4nh|?cPcyrTZcA77<3k?R$6{QW#BTzn}fnQRBHV%r4}prRG9()&kllYBA7;s%$u85 zzrPc0wURJDS^KPSZv7)2Y+U$eG|#2)dm(N$1RRGGGnFk9tYcFz_Sd>vc@i8oZS5Gu zA(kj06!YQU2#cLj%HDkOe_LodENHGUhUqQ&#H80+R4Cs2?=0#BA0ZiZYr?(n6Zl~6 zoE{$tc`RzPn<1V6j_c33THC>CSCtQG|Bs;zaL|x1*uZqRzv5HzB|*m3x=t$RLd9d4 zjoJa=3kdrB(nxvMrSU02=PM%P=Ul@}C%dGebqVQs6S8PQ3oZq6Lr=t~`wM-_s2>lI z!Jd}DAh7Z`?j(wN{j&22L@K;_Vsdd`59LSMs34L&- zBNiczbTOS${1)A{sH(I%2Dvo;?D+Rtn>fU;8IND(r{eDPwJKRP?;V z(5txw-Q(}=!>j?C!!bGiCN)mBG{}--dyHnf?vlI?M8@ZCtDA&WRpN;IZB5<-8WQYf z^7gdS#aP6MbGNcJwg_oGt-H4f=*~$XP zVOPhLpu70IlT&fC(2Np45IqN%k-oq2CSRO;zNkQ7jt+fy8vSqcPc}>bt7+x0p+OtX 
zaD8)-xI^gsF#PU%&y|V9_xW)Hvey{jI}uS=${sG}s@(+cW2JhT-tq`)fw#CMbIzj` zR{>b&Uuy=a*|i23yP10&1g#9onmR5klksybzxC$9?D(T@d8U`&pe!9BoCVjwfcKRG zTWA=6>3HrDR!q|}y2;yH;W?Dwrz0m{5Gqvtsq)EC;Cf;Oa#%lVi#c=y0|Cj*J*ni) z(6<%MBy09-s=1tU{&tzP|KZMHgRlD>FE!B6(^8R_=KGYP9rL-UvwqY}JpZ8OCX?yj z=$H_GItZX~k z(I=wt>X~c)Hfphw{{IQF#>x9F>4wc1hJg^?$4M)?S9F|k zhI6oW7a7JXN+qiS#;-xC}ooJ8~Jcp8R_oHWF(nE4W0_qW&d{U>9I&BcAYW z|6yHWGPwYgD%YVKO8v<&Xn3JdAYZ3@r2X}2LmiL?$6bUiKmKJB=5%|2(%Gh7LSwoEv6^&%exs3-r3b*piU*zhlV0kaOvRKKzx{ z;^oAkHfU2#@%`^O38@t((0DRYt8&@UJQ`w<`)jna?ehfKfM4zd0*mw==Oy)qX?cOT z$n}nS(orQ^)Bh6@UD?$723WV27{JhkILNo9@DNY(V=8{W^@inN4e7IR#I4 z9?-#v_x+;L>%3?NN&OAJT`+=lL8uuLAMFFl1)}B1$6Vm;5WBrq79EVYDhQZbw6dAk zF*b1>Cu#-F(lb~A1~Nm!7?uZsbYc?Y3j7Wdi628m2h zzs&R^xOd}0?mFns?1bS8LC6FQ$|s3IvKS=3g)!FzpEdh#7nGd1Y5wO0px`q#ber$F z2uWU_bq7e?BLx;uy-&!Pk)57=u=e1^s$U(S^9f+kA{=etkg$eo(krgDRdG z5$zv6AvhH~wC!@BFbYCNsfjlYw#$`^qmA{ z?q6Wcvax#qp$zk0f&v$mlw=MJ#G8|^t)nJAj}RjC7;N)Orq3?hwX1$%RQgbR6@!ww zg|rciHke~HF6fKmWB{*x6ztmv&;fIFq0NVE<0=|=XJvgRt2R>*HEdXmivS8uUWwaA zGvKm$IM6HyExZ?wmy<^U6_C%!o31Uv$Qhi6JYN`NUyLdSShf?PVme|NSn*v+MYXHp zXdvvT8!V=vFTyN%Y~g6Pem~0Ta0fa}KV!BPA>88Zh)N*X1W5y~SR2rMGiZqdCQrGt z#_)MRJ;a1{h$oYE0#8w7Y;u)aB&5h%Hw=PZixDG7zN#K%={!}PJI6-CjRe%e!4jKM zT40H-Q7KateBM z^P;SMk$5=`v2T)-mXSHQoiu1v7|#EHMtLFE1gci>;tr7idgY`aim&PBsjvV;{{&g6 z(C%iD?3KfC#*S2>G%^or4H#LKqT@@9s?SnL-vLexzSs$-GuRpgE zSac4MAd+C&cR@{o`e6>1b?D#qDKW#ZE*2fD0Q1;?1@_wNpGRgR!NU;b-Vt~E86Sd$ z--Q@@5f>Q}0MDJj#Kz>mlhEpqL3~xP|B{xHsLp4C3`gjJt607 zko^V&G6#x7J?*_I*e5AS9|7{d39wJNNQN_*dzKkizR<{!(96fMc|$CLFY&d+NcV1U zdXe|RpJY_c89%sp1y=`a`n@xFI>;W}!Mhq#0hm}G2^4RMSu$h;n-G8nKZOmtWNvf@ z9&al9&ZpV10lL^4wTc5@h`~?j0}1JEfi~f!c(7jFf;__suhOWJg2HgCQ1(Q%?pXq{ z1Qi5igC)VRpx^1aEF>3kZJ`DJyM4?SY}OYjXP^RKLlZgDD!puI zk+)JFE$5^d99$3QnqmrsGAIuW{W%R5V`@tn^~ zJg#kK%236I&^3BNE=TmOH*k(gsVtZNDRW5Mk!f?lEj&J@S$OY3GQ3_y6^ObyxVgTK zRo*06?9)JerLAM#f)AJ8S;&?2-sF{;4ob|+`%Dc#5LWQu&r#V!!;BDe~7>jS%CV8^sHR$ylQpr)kEK3G-f#I3pAit z7Z$MeYywYu<6UqPX=JIyy4>KQRpIugKEKBcfecHc%m+Q451fvQg$pcOTtQZ{zjL-+ 
zTz*LlUkc&4Q(%!Db`3}3j00ytcjWc07PvS-<(%MYy9L7YBPnq5dkJQEL5<25EZ5UJ za9`+Q5Fe0rbq%hLP(*Bfv&s4dJ0^L|Gu#{Xh1J}D zH<@%9uY-p!gINtIB1`>6!39Fs z_C*@}8qlvfscBZij7VI1oGdG8l{QoPJ9tfDH=Xa+h5eL&m2=@J%#1AhC?~MUFUQ~CgolF`neL8y3jZ^(1R~Q&H zJ>uHw0V6ySwwnu3<-EK|$@8@Rg=~q<{_%F-kd#P)WICgE;8&Uv2GCZ5w(;6a`mOF! z7Oz&yGh86niFj#ok8uWk3`r1RU(FgyMOdX`c4A1Dyzj(y0dJK3kjP*wgM`a;OLiGG zBy;{~y-64ASsbVrCYQ-IlngWC9w2x~f+KZ2T=WHK$6b;nkA*@ik?}kT!fCkR!f(ZZ z*+90~j-n&`@pzZG#(7Gg)%R!2`Qyc&;J^}8$y>t;i>oC!Q+C;cQ#s;$!py}YBEUBB zGI6y<*#AMh?a?sKB>GO?RAHjM^0`kq0(>H9^X)b z9QKOJv@EFB3(0aAEX>)8GJ-Xj;n_ZS|L9FJaDD$5GoUI(2O0fb+ajUmX&06)6|8tB zbRI^S0sg!;YX9b&5NefcD=5O7bh;7ae8vLpTsOu10kruHli zsaisyYN4P=Q|=*E$&k46L1x}K<2(oYeZ*5!_1*R@78E$V{kWGwrewYfzX?Suh>o|< zAy@hPbGR~0O377F-D145&x^qS)ws?Mc#uV+!}q9d@8}C2qk1fvP6rJ!5PXM1Fsc}L z0ULTV>dk14{ZN;)1S1=Q=~a+tNEGRIllser(wl4PBXCIus;oUbZex3L43hiEz?1#I zDiZ#ynEr~}a5Ll+EC2;(1p1{al+M>Y> z9?jRFLAoc6K?_eM!g)E(#(0CEw0(%A9X1d8G$2HW*N)-+KdRu^yaST-X4hR5(ATSQ z-%d@Fssc(5clT?rSuupG%d-uj-f`zRk5`wbOW)V$oB=Q`7Y#*sr{Tk@ z`eC+sc%>9#S~-N*%IFN{samZre12{KXFFJH8oa)vN&`K+zy>N4(Tin2p`L=b1|@*u zQw`if?C_o?q1*3K$z`|O!V}@WTgY2{kg_b_yUuj5IWQ1$Yap&7cs~a}APYSxGbA}c z*NPXNdY}tgn>9{)FO`5=K+c8e{r>=SgRdimYo`li0|R2c2L44g7y~O0df(jEC#5|n z417P5?{3~Lywu;v^pz%)Twnk_GZ_jY&Uy`Jde6yY4s>o36+0|eTTDE$47ozhjGqS! zS>DAQs8V>d);-spiQrU_0X6G(AgirkS%H7mp~C$MRu(r9ri#oO>vxui)b&dYS!7T5 zbEUR>gw-ZM!o7%3$)k;d@A?Zk(Zdoj@y^{}8Rlir1WM~ER5dE4#&45cMC}HzAr%`^ z#Fm&hn`KNv>!d2Y?^jg=RAXz>ku#tIg z>L~|WKLxh(TbVeA)@k$R7ci%UmGcsX%6tiZ(sEYz(dFs;236(`JPVMMDh?hVY->jV zM}Q}Vv2PK}I0tW9(VITMab7?#DB%Q%6tBl{S_$!6gEG=i6zz2!p}Swd^=;Gz@x&qUQc2R&y! 
zg&7ik?GGNN{4dpgbx>9NyEot_50XlVgtQ0>qJVTtsDN}y3W79%g0}m%;Xd40h(E4rL?|o01jPQJ z9HBRTe7Bo#q1o+6|LJeBPyb-4NOlaVU~$0E({7obO4A)<`oc-~UKfm_Ec60}z7q|E zEF@Nc;NeA9R@r z??quvcY{KgTmzft8jEpcC14E!a$dqnYt10)mPWOCY7~;;2!__k8W5*RE!=CGGzOq7 zukc>$mC2(p5{&-V-F5bnCuL=*f&>HTn~OSwFta^A9BIOkgTC8^qYY-!WB zhIssd>D;?leSu-gXfX7|y&7Dhh(mq~=sH>65;BTHaqMu>B4J+`&~Hl(!JPUq8pKRW zyc>~Q_7xxbY9g--84Q0OzOFJ{ zQ)PD-^bnjeIu4704pR68%C{rhNFPi#-A8`Enc0nW;V%3C%MHj=?xo^gr?< z(24M>vo?-k0+Y!(qT91Ma+wY9Oc6DRvS@7pZ!6>!RLm+5Jm|3+UB~2;7={-MJ%k~5 ze@EnT?Sq_8>`P9A^GyAb01=8(x~HGi`;$4@i9vzQo|O2I6_ATUSWDc>&+BFXIynf7 zwrLv)k+)5mp!F4;NDjYXjLDC>=XKn_bY?#Qa>!n*xiQY(zDqA}3{rRsYU-H^54LRS zgt%a^T3p65jlw4w*Tqym!ZB;o2nAJ?il|5`Z6;A-M}RBE<~i(=53A;amE6ogt1+JL5%45iJoOCL8haJFyvZ2o}3h3rr2M96K!yBw0LD?W9 zC4WU7B%V>BOM|{J;9(;?weVqXEMC9@WCLAPY+M%*5+*h?yWsMwTko*mCR5Z}GBA@C zH4{jv`Tza;2#=U1thHsd0y4CD*IXV8)ou`|vy!3zB46yH3%K-e-B3vGrEP_X`O|Ba zQ>cYRe@TR4zE}+JN_W&{{rxUU8}dyE&A16GP%O9Z`EnHywS9vx7qH}j3m4+Ud85N~ zfotKf7{^X%SG2lL$iewPMUoHC<2@1|D8Fq_MkmF5l?iE#UcBvt-~zYFVPG>^YzQBe z{98?wWC#JWEJC1Ko;Xa_+JC()HX%yJ0(W6}E?2ki(L!fR;3o-M=PdX&f2jZaiq7;O zl=H#Yw#ofq%`U##!mJCj@Lb5qbnXVGD4u zA5RTQ-xr(*L6Hgq=oZN#o6tU>`A+rr*W_2Br&mG80!nKPDfB}d7-@q~bZ>oO~?0666%~7@3SLX=CmL6V5*#s<%58O*QkA0vcLNJWe z){42m16Uq3;Ir8PZHNcSq{2XfQ32*T)QebgZx3Xh0Sk|8=mg-_NZs6 z2W5$cytEGhMsaqDu7*==K;YhU>T=0>rg&s`d5XAOaksS>3 zfRBTNmF^`7A%`U<`ySo8j9BGI#*_po`g!wxLLrNwyAwU~gf`|IP&o zpkIrm0`cBJ2Ff__d2wfet;>gbt^yX>S&!v0|Cr8EK9(L|K~dXlHi-xO8#S6KD7)_1 zPl)Au2c6sGingyeL$wQ9BR>0P*Ka!N;ClM+3&?IP?q5<=Q z`BgK%4JpaE=zN>oza^BHy$a=_bg#?wzg|I-P%XK)`*HM%mi;q@7-O zN0Y4&VpUH`hw`Y;JZN-AqkIbGOZ{1h-Pn9lBi{A3yvAd*q)*>)JaRHbeCS@H1X}ZS z60j~WIuzCXY@)lJEo!g~Sg00bc$VW!;J8+N4>o*bA;j(vm8ywiMtAp?d>^Bv8(_P6 zpy7)K<)IREXj9;biiQq>h$`p!IYR?LJ6Mr-4xY*_(MM$L6(f}D>?0(>ej-@BJpPRV zWP2!Uw{hpWgu!yPwDkJ2`LlxE_q0!mDYwVDYl+;=MhMPu0L*2B>uebq0gnZ!GIhJx zfJP-7rP&-Lv9cZ)xI`eT^)<7^q>-%iZuZkL}jWucKdh0|hlxWp>dhQah} z->8*-?XW(aVu2)un1s|t=n-x{ra*R}-$UITj7Rzf*DT5Z6pecz5f;D6nfUj$P86VY 
z9=6gC{(LUeO>`WYmhivF@qpI@)|7g(e|X1Jz{Zi%Dp~w}J-|}CGyCcjd+Bd+#4ZJz z{ym@IE7pSU*FLAt7M(_+5Vk1e(>iy|`@wurKi@d_5)SVdNI3S&8WdW@kj{xO^S}yW z@#r41fFlZ6^Cn&SbHqBB5C6aT|4f$&5#9>?COaVS4lJ;GkstS+>7?hgf5$dK0>i2X z;#|X?2F3ZU6Ao2->!W4O{d@Lf7+CoR1QVzLaIAm}_HKL(-tEVMOiGH-L3~Sb9f<)= z07!doiN?7NgVpn2bq#bq@!%v1Yx@@a-(pgSsyqe%HbgH&}2pkwa}SrgwwGaV}p=VKaJcnV2?oP#?-c#Ie{uWE&vm9}2bB~=IqZABq5m<^D1yW#9_#1XuT@z62uT%bME zN2|be3dhX{i;DI9|5ZDPJi;Ow=O%F=krPXnKH5*?YJH5 z7XeFx?U37r;S(6nUjvr-dVuoA36|SFtTz@_Q--PmZ^mOh^R>7Ree}l|7D6~R>3kwMwI`tZQ-~s zkgS?OAkm!L0Q375WNjDKx;4pTIU@dF7~7F_7yb#3PrJJDO4{#;}kdGab4IY8~l)&Z$ zL}GmuoNIG+i~<21=dXG3He{J^?MqI9xa@U3mCJ{^ ztq=#U>ApkiJzU};R9B8U>V;VRO z@I!t9BacZ51PRU$%k_vo@b}X=D!h<^0)mL_+eEe6zcIVz-!rfRlem8Zl?TrXL_LP8 zG+pLPkOAqktSVI|*ypu9fA{5^zx#5@`65-aEjy4sj^zXsxPI@Id($05)rSEV6+zc8 z)cFpgNMr$YFdu&FA}q2VVHI#sm}$Rt_0}_esUc+42bzh6tR&yFBZV)|BqXl z@&~@2w{Vus9ya})u^82xzbH+R-bmFb7cLc}Z+ZUW{kvTJ>T&UjRJSdUab+{}a6%na z(`C-W9h5HCxDa2K|IKN7)w9XHYGd7K$@}2bf$!+(>ejOFW9LrK&W$G<-kt89&Ykq> zB#sih%Yj@g@B{oaJ;HZzw8R6Nmyhj{dDtiQ{Z|bvEOPE18>8z6W*a5%HTh>UdQl)x zE?o6K<$wt4AA-HB%-{Gm^eTV5$yt|uar{sAsJAfM@AFn6*cn)FWCHBG z@GVel-iE-{>{{A@j9*_KRsq-leewjkyL3^m=UOb^~EIZcg zM&+?ennm{z%Ck?$dnaAcuf~KObHVJB%r~RpTookNv0haH5H(a664D+Ii1aIFj2{?q*Il8i^H|V`2T@<^=FiF1g+<4r(rk^Fg089hWw+VD z>FR3K`qRtDj)ZK?hf~6wb=cII>C7Vs~?tr(I|v4 zQ*2(^PSX-?JJf5E5#wmA)#cuLbZF^gO2g#N9?|>BEpUj^@e-)^;Af`lDjoj5eiUgL zq?U~(pHY7S{Th>`9AmhL#p%Nd2}j8;cQ+vln`rR}P>Eb?x=dky5RfwIWL1^8tjBfl>ctfW^KkXw4r%E}7jDzU3Z zAazp=ItI_z4xvIwY5|+u2$C!EcbH*Nl=v%^{-QnC*3-`v0PD$uGUe2FO|S(jS<9I; zjPRY=w1o+%2pbWSxf{YPEHY0^0EEfyvI3h`pO$U%(e<4VEK!zv>0gB2Qv(ar8h@Gv znzh(ZhMqiMovTKxYy`bUTiTZLq6RJ07;tdKW;oW5Dea;Nbg(Arg!4V0?fKveK^afI ze)uY~3cx-i#-JzM@ zHFq6D6^~Nx9vk_b1{v&t@;EGe*+6g>LkrOz<9^WyrSi5G!g+3=1$BDu}9o zmhw96*N^>AAequ!K^UdJebq&MpJCgRj^bNWlx14p%XRo_6%Ho;xLBjFE8{D-Q>V=n z12$h~U0+lEfjr<`vyefd0I?G#ADcl=!vg@2Hvc|=LG=Qgz@^*(T9K1?No_?CTgPR4 zW9b~2Itq{wbNf{LD4WIiA&jB-y5^l8(*f_-#wnBD8LM^SWz0~PIsh%oA z8LhlegHm~}?xeH!Kk0?gg{f4}#Tb|e^WG4%k3l@_4QLBvcnI6;)8m$5rfI$S$slvp 
z4AFTI&Y@x4TjI30{;BQh!R{@wpmW&MeUKwzZNDlQMF{+G?Uq}{QC&1u?tWMCRdYUj zSymMfNT=@ajC4)=O?{WUeN;!aZ@4_n(S5FQ+6r1JWwzS>Fahib6sa4*61!3m%i9W< z-l&csAFyd*J((n$@x9H-sKJ1yFYG09M74xJ?xMbMXY{~uIWhngaLLkK?;TbL3kk-D z&o(=-fphk@&`%XZZnkZv>W1H65~2wK9jx`rJwbI&{7S^ZMIoE|R}_?GAu$GmW} zy$5t}$q`Kt)wdDLMUdc#=b&`e^^?wX(14Ox`q|YDoqIpIQPj*pd6mQXYz6S-NWgJ) z`*F>#oX6U1db27k@5l5&$xD|o%jwcL_Q61-OkbByq%Ky7Q)V};H%G68-6yHqo$$4c z!k$j*8J2C9WlzQmM8hh=YK%LrSsDW}uU2UPT!@HlkFx7*k{DmGfccFFf#pbjIQ7K2 zGY0&gS742pf*9p!SZ@f$-`j;|Uwd1RKTebB{({K4FrDCV zNN$p*i%BU0dwOBsjIYWsk1b)iC$S{(ULm>kkPhs5Ww7S=k>U|y4wc@ewIDS z7)tmRK$*VbNyBCSx>;2tb~mXd9a4E0o`T?$MCOh&M5k0x*uo`mYHM%rcLQB?&nlR= zzHjlA4`s~2GvEFQHPq`K0QjfOL1i1T1~sKq{kj*OcX;*kT z`IXUD7+VTF(#as3X{NZ4!wV_|&CdE@(kPm0&>WsXU*|+V9E6>Rr^5?okiiMKJc{*i z<~~OW^g@EfwT$8Wddl6lHmKh*(Y1tdO@J_mq%b`vx>eIQhs&B zc_&)cy#2$6?p>xEXE@b8Jl-1GLsm_2^6_%*A0*dR^z|$*EdcLtj$tURK^(ybO#M# zk`#*J{REqs*w8l3GT`)GVtra2pTu2%F=@*9U`PK7(mIssHzNNFwWtId*l! z?#;sMul?r{+wg++i+7amiqKIVvoS}~HsY6DWSiD^U|+AfzQ|uu&C5#!hD(J^y8d#c z6J_r|?-XjhucN8PfZJZYu5aXi{StWoJ}O~d!QMBy7eOwXbBqZ@&1CI~GnWek2uV5$ z{p#-64? 
zJF8o6YCYF_b(?hkQ}t(6qWCFRByzTK`S`~9me>Ihhf~0`3@D!%_-Yo)j+xd7F|x(1 zHjoKLyv0q?CeXr@#8r-<33g9gifBs&76t(91$78C4|TJKz+la6MRCsJFa<_(K)FH z<6J=2UIbG}y-8r^9SK8_0$4lz7G3+{*`p;cjR<8>$naf-f z`vfTR`A>8(oAZ#2i{`&tBHTSX^j=`g_rz>>6{ID31h}uL29OLxCi?T$?)1f(>7<2j z_rAh~7Q`jj8%d;Y2r?@6Oqy?ksF#IQY%N-Jb1aa&J2F%_Bke$5`SJzFWUTpW_s+ z)1Atzw*>nBq&r;_2VOViuy3&loziWY69^EMt{hnxka*ok_z(3rS)7F1fe%SCRfH~h z;2yyhLeaYHt)$T%D<3Q+kT&%N|=nd$J4bhu>3H1 zS1s4Ap<3r>gUROxvf3f$9L=$gaN+yk>693JXn4Q9#T}pzq4#Gij;MHQ9d)i^49B*H z#*vwH{Hi1*(sVeK8*q1?b_mV54doi(1w4^zn^KQYH zG+(;xgYdW5zj1|v+vxolS8m&S0-mJWN+Nvvmj&yQ`#+`jpi-VKs9aHMr_-)@dh1&0 zxJT1sp{O!3Hm9qXD%umrdEm1+ttowJ_oJZzb1Areo*EXn4O0wW9_o7CJX>pO1gX3A zTdh1`885bT3H!B_GUpon&kw$nCw`8YdNc`t@X$3j!VxksEw$qj+)Sp8hJi-7u7^tc z+RV3KRZS$RNE2Vfq{H}rfpFqx!?Qr0HiACU98u$SKj*aLn`ca(KZuneWbFuHa4X_x zm>%n~4%Q_}RvK$;WEgNr2>gEZovawxSouETdF=D1*R8I@W(~chnD%nZsaf25M-GlE zx2b<{IBI~rn)SFH`Q9}DQN`;IX>a^4F6&=c5fIG~eH z(Misw@eu_qZgd2fVG%J99jO`waxO1aFn;TthVY2>vR>nT&dsq}jgxln5rPa|&X=)9 zKKzREG}Z`C`M!L;$%3DTelpM;myydR{zx0C=;C;ne@&uahthI$%vf~r+^nQD#mCDw zK9x0_g=j~+fNys=@xQS8j6>LN?VFj@xrq19p+ehJ&Fg}}b9=meqmi+{j*m5U z>4nkOE#XkM===0oPAdeQ6(=uuv`pe0=Xf|sLVI3TTW;(enVMRZ-{|^`t4E&vg)OJ> z<6H^*g?+~af_2t0XYje1eRlnslI`q#27R1lmfVK2PV_K#QAHij_7qZBTK{mXG*wpW zkImt{nJ;b|dhGs$lz{c)NpJD77G?kyq9xTRYxD2wWDo~iyj*Jjz~KB#Ck4Z{G{Sc- z@1OVWjaOmrw9p{_9mATZf;H{1BOgDu8GeJV=m#N5+j3>sgKvI$Dtu*1l5$V=)*FWq z=pmU;P>Vl%pU$q^_l;6WjQV}7nOXIm=Ly#jF&B!Er!c+lw$uCt2Gs7u>sF29{D0$Z ze^RDpL_MW7L+6J+uaztE%CkA(IbG)<5~p{$}ke(8BA}>YDAGe;CgO6*z4l zi}1hcJ*Q>hwfXZf+P?w2n}n)8&0XPqe;7|DNQdotc2$!v^KklRC6j=OdNYylZ`yAb zvA@CIb<|QlyDF8~Q_1326KNja@(zDRv6>tu@i#1OBrVjH{~$Dw4z-N(59DOFVh=XD zse#4jh*^Zjpn)y5HXhh1ZpinzGDewjCxh^U?H3K?)oU~R%-~f=Gb_7m6D5V4XeP&~ zVa)y4Gnz#KOzAT8@RSLp7TsU{DZ$tumW!+MsrbXDM`^QC7TiB@X0*Sq3Ak!u819 zWIQa2Bqy!5U_B5N8lzVC(N0yV}`& z2v`K{v_KuEHu9D3xaTR{5pM3t777B%TJUNFHs!8{T+ zI{pOzq)5m`Z_EKSQr}=ne?-BiqyS)WHjJi_b601hSPjBZj{-=!LZN_zVDfZ6fM+z$ zx$}~I+R*Y4NM|l>WMo%abetG2u7JRf+rOGdsS2l;&>jOFqp$&vQ?GM!vm>9{_T4Wg 
z^Ph{4U~!oQq?n)5GiCW4&dATJ=lP^tkkkcx=-2@I~YXqb9;bZoeAbhb(2<>*t>(m zjSQpWb9i>Zk5Ov3+zy*tY9XsAaE5(eWA2pzbw41vs&88Hi>;d&vLi5zJF zt3)Kld5uR!siHfW$%Mzes)KPR#eOk+<(W9u^W!?d(|nD>P;EA3B;~;1n#7(kF?2pk(8!JTcvZ4^ z6s%0j!q{G)JB22Rok0Vj$)Fkr7|8`V?cPew0>fdH3H4lKmw1N9?d6u=gr6GXMxU^z z&Hr

n2L|QNjuH0gY?``98dz!+rP4sz-~>n0h2UJ|yt@5G3@|T*xn29kD-R9|CCI9twHC4;H|uCd8~Rv$ z1i0gpqA{5ZL|$$I!iAh?9>Ym}j=j0pSuQTv4^>>h*3HW|X$1PVY;dwU0R*Q6?qQ6D zdxA}U_R*+2SIvxDYn4!7UZqVJ%iiTHaQnecp?sd#B-4k9_pvqzT46?A8mgI2vR>8V00Sy@-%Q0`L7Ra6 z((r}bZ1IWwl|DS#t(R%aLo=YMzMEson@yh2VC%I*7y1?M$%`p{$|4Mf<<@lJMiO$xC-czcN^$OG@a?3J(6b$buiqzR`N zdk7_ik1UJ>v>A_pU-wg@ZXTCDi@xnES-|_0k!>|O__lavyp=AG0Z>V+%sG2jyXC%R z@%l%&ts_b=x1`;KCv|V{LgiIBfde;k9AJRC8NqQ>`w;jl3jtVX5vsrP-BpNV+zh`c z3&mvnKg4F71ySRygix%BRf7K3OL#IB_t9Y5Npx}p03HqiG_->usp~%Bdy{*SJ(i`V zd|`5KqFt>J5;vc&)A;6A7sJen%RyQh+4l88JB=Y;09dSm#au*cY#HxanWfdI7uqVT z)v!czU%d}wdKlabCsrkIgUd)hA7Gv-GR zoj8S{4z4PdYf3tc4&3Ut`Xi;L!mjMtZojd6Md>lvr{j7vw~)9lQfNXsRd43;>{xPe zn0UWi{7|Pi_*@-;PnNxXeI0PR?EuAE!5%p{aWOIJEuSxa1fuzQ;4pX|uK`xtA#t+e z$tf%>95+Q-DUE|Im8u$`d{bubv(QiX^NkWeyU@NWe}%3y3@Ud2EtAND4#Cm(>G0qk z2b_6GoZA+uRV3+*xXU1_w=J;b5K?j;0?E|D^e85`olqdJ`*Rnr2Tgw;Y65~fr-k={zlyfOERWx(D8w-jvNrt5Zvf@F-*aRyT&H&wM4sOv8lGzq$i7_aN|&5s zMhmxh^_PglegVeNKCSZ$kn_3blSB-R>#&UVv`A-_l3PNn^<$l6MqKDYUGgZ`m7xQ` zOkqiN>E9w9`Pn7DWAl0^rnlUk^x15Txac;BmD0j0prYX^J83azNASWTaKV`5Srt5` zb74o4UCT3q9}BnQ&Rh{ap^7rAz9z+=u$5>3_f$}T34aRks0DIq;19X9bq`AmyVmsx-HtJ33JiH40FuiB z1<_X4y*Fp@surD_qlyljW##kOZj)w-lW9_cS-^rh0SNkI@t)w2L%m|Bu9IkL&dyXW t{`B9d77A&U7VZb{!qJ#((6j~HnNeK-%)Zb~CNyMLr_wfZ6Ib^eZ5npA9(WRnkv0@}@nx)!0 zf;6vg_@mLt5TeTB#2`4yzqU$=OGYp4&il*s(vl-A8W)XAhMxBQR#^XS*Y~7vrzzd4 z(rM_O@zCBorxj;c-pDVr1Wbgqs26`<2xQjoykxnG9fk7u&zDY|u_rRh~JfX_t#|MQt`Kc<G$S()49>g@OyT0UJI2b91^yU{%mO~_LA@D+atkfKeEk+0*|Ngl3r)sM6y?)go{-yGX8YgKBFP2-_ z2zgq|Qs%AtB|4S&di={1H6IH!_J04!E_5skC-eLKima-1 zjFnr*=Weu9K9y%478= z{WzJCo5a_gWBKico}t?o2MZ}rb96qK$ua2^3I!E#iMefviyqDqy)&pOqm7``EYOJI zG8VP|QI`9Ayh@pjtK#v9mRQ5=oAZ;rm>T6bq8^E6WS762tUR_@`AGPrjxp!Ft!n*z z)%aZP+)B`CLF1ggT6_AuSvaZx*Oxqe!Og~u)tRx}U{+c`@6#i9Ny*d3lP&qC#ax-_ zdsKa~%#jYBFs}5$cj}ZQE5bXD3*E`>H4PrSYKyrF$ph*I8mtv-&;P(q2;1&U6`W;N z$q+B9=dp~S3NdyMi4-F&WV2K4K&CERhyIG?1J ztOi@cei30-PQTR~{#2im?6nq!fZgvZj-O<&`5(WHuZ*2a#uT;mic) 
z$D3bhC>IYuMyyXYl`M>!_?^KUyxu2nvnMZ!m?>5}j_1C{+E8vw7iyOzNWCIuWgqVC z_+>Ire(O=x$Y$Q(nwI=f_jw_toDj@C*~x5^`K}IxGEcihZ^>{?ulFW9Cz??RaH7YiU^o z)!ny81Jf-5maD_>ZM;d8u~Y9j+%x&QWKryNZ~W*JzLWDmqt6k2D z`e%P$ZyhMSz9sttkCKV2>7;Corl8StcSW=R^e8vcNUfgJpqlpkTSfIJw2^lAJ97Gh zRHE>PMRe#s2fwQomqZ)Vrr9@S$_X?1JG}=AX+oMWG1spbqrZXuR^WBKzn-XVN~N>>#O9-C*BSn z&bFJ|KU{o6c}yehHZ7aTnMT3$srA9pcApqt7yd9W$L%x|mq*FJOLGFEF)bH6qVLD! zb{!7n%IjV4YxFu6E~SfB=1y>K^gIl6DluxXnfv(U2d{Df2A=TKhTQ~H-Q;9EDxtwU z`;!gNF+TEIc3wLsvXRB?YJMWyw9$(D;I6omlaS9@J+u4P+=?5_X-}}Q$n!moW7CH0 zGi>X}C6^T`&D+CxEcD7OUM`1x$-XH+xErf1x+(M`TxEAYTRM`0w?%ET+97)}QtV7+ zetoQ>%W1xogiE*abWeZwL?Qr#{L8OT#AKf}jp@z5%XxNj@Ap-o<*6!o9(jL_VI;wH z+t?6$OEhY&e0^-I+3yne%6$jjftEAnQ5WNHyH49Ne&!Na`Zf-57UaB$9+XkzpFKU} zif7Z5+qmm>RL-GOD*kM3f;C0psTL`5Z|kE$V~?GF>F}SWg9I+&hYa1?H%zHWvDA8A z^P_L83Q1JLeMxD#box@aoe_VA}MGLJBnvje{i<_=DefQb{x@S5^~?|Wb?~VwpLO}Uku|b zQu=9LQH7rE-+n8rB^hmT1ls2>m8ZU(QwmG#Jv)h94y-0?3tDpbaHH7pAr3v3GH@wd zg*tW_+NZ9KE@?k3ab3*-@%4}L62 zyh&ix@FEZR2D@7>Pbn>otjNS9Xqx||`3l!noBpgI($DK~wO)K1+Qw5(rD;jLzgEOH zmybQfFDG+{Y$`5zC2|?>;n02ji2qrWxo!=&ao1_J(#Lj2i{kbmrBlD#G8*x%2UQ`% z?ZP+DtdS8`4l5_eVYW=}Vb`zDc=iuUpHtJ*SUF)oe>EpwTxTS|rivNcy}OmB{Uw|- zL4mgG&Q*D*ngtJ&s2m~;ZNIId#iWv<f~O%-RCQ5eeQM~V&2@q8bUU1+j+v=u<~EE-7+j3klMt zanB4J&yH4Cm`=O-`tqd}c0z=Um8fMHgwwOmRX07PCnQ~jJP%3Go z`thjIVLF<=ewi&P&?9-92mf}$P5p<$S>{)$ahQy~A{sZ%$A#@j8=hCqcn>c!J;PqL zZ}vr9QDvIrH*3Q(mqRJ}Flas79`2!elVJLLp*Gjy?~i?hJ1^2~ZK*pDTioJU+wP-R zER99oHZpJ`B*jVnt5weNC~vc9?XC=|D`e<&d&<IV7TaE1K9|P_pjO0PE>Y?Uw)x zs^Xn=@1Fl%N~0@j+y|*V<}W6RxywJ|laph9FdOl@5t3!#fD>Qd)AJ-<`Llk^j-tav zwL?~Xq;5@nm0LAtv`O;*Qvdy*gBcl{d)$Uvp4$rDho#S=RPrE%cP_s1;S8~e>{49T zf7^>mO1$tOGs-l`EF=57Q=dql_H0Gq=eOU*&NlJ+F>&UfM$`^}A*%CS$_ySX$4H-- z^1n-@MMp?Cv@5)WFLsjgR&ts#Nz|)WZ6Tj8%XKqMIK~hzKPSTrN@9BHi0i7BI;Yr{ z#_fe7GnV%(L>JU}xWX5sTBPtDdEzrMpZd*(aeWs*)cJ$&LJpUGt>Z=u?oJz#GS=I1 z?$S@^-H>jwB1~0cj`2lKTmEF&+Uk`h6S0PH@F6QU^-Z!ukaF&!N>ZK>* 
zx|+f~*S_69y1p)G{Tu&*IsUr92AZb96Q{f^4YJwHRvVC#Ke?fpeM?arq${t-esrr%Wu!y^47DBD{RMa(^f&T<-W0omiB8r*#G6)mA6XIev&zhOw{Kv z+HN1Ep@Nncy7t= zJ}5leU1etIk~9pcZNagbYHYCRN>HrX-PB^Qo^`&e!@aKEenN}(!o)9u(@?Kr7pK^3 z&>D($qo+NgCC!g8=fdl0aktRLBy}(;g^8zld2;=%;-XCyo6}w)3aRa$xk+q16l}Xm zJB_I&@|L@sVCGo`|5hMJshsL0eWdU&5{VMT_BQjN7kQsKJq0!OH8X-4wuyt2!|m{9p8Ds>FA*}$A_zC*(vtVq zvg2O{;sxL9e7LvK?=LR#*uJ5q0k`lq@(`-Ohoq@rZpRHwmZ9}Sa5uf|P!r~KIz4jP zJ-E6+$}<(}sxuozY#+cEje=brjIxfqSHXYN`dEjT`5(|+8jq4)XZ!hUw~Q|W`PVFp zrZKSh!9wXDkZi(^%4V(az{~gV@sTL;WP%%`fNubfapN@D!Qe1urQY)4Oc#HIb6 zKH36o6_~CCQJ-bPV!n$5qv5f7iw@pGlmN zYN)5H^I~o#dEsaoJCRPMze{m-2xdCF)?HWd;%FJ<(O=(7|B(H=@L8`>;+-`FxY(sH zj{XWq)7R{>+`YIA4di94ELhH8Y!kX(fOWEOL>piNrB(*t(SOfV zPQ=8gcH6WY%#S|eK25vpI(SBA?5S$|M&$YMYQ4%6&-2s$GWYET6^kC>)2r9`ZQ^Xk z;oIb0fLY{UTqe(P+x)dv)pV+etj7*&V(RFUn?Lr67LFa)ET(^aPjPnU3dLk`XN}vY zS?WBn1mjBq3ujApqRvYy@rg~1ubiOpV-$3pi$ZRTh2FGP&EoZjp4E(S%9(nrF_y~5 zWpc|oukWEYou3`^J{je132x5J>38=zKXv_4q+L5Zai9Hw+7Ck+}vL3lzY^u z$-9A~sKl^NODSF0y4>p?>gZ;Z{s%LuuIJy7n96m1GX9qJfZI#!wYobSkTg4?D4Ux) zKdU;T67|%y#W(@< zMhQ%L@|hGWrrcKSO%TLHpS_R5hHc#%Wgc7fat_dcVZ1{ z9HsEq&JJert;Z|hUl(#_SqpP}F^g^T4P9!u>Z$SW`E39fl3h=}oUbvz@ig0!*tx%5 zV%e1-Cy~gZtLCw@G{aa@v;ONf0BtRIpW*lV%dfX)hmvxhk%IqV%2zBOHFZvh3u1x5P&lD2D*T3e!(7 z2C}4ZYh2gUx2_{xq7|7p)2)d*u3FAsQ{0Xqj^inVBtLCsZTKjW`WniT8re-1OA>#{m*Q_Nym>#X+ip`za_ zg*>S3Wsr!SX`z>=3f5^`?1(#2Rwl#e{Hz_1@KiRh;rYH?-LpS&g~X(|b_ei%&F{=d z*Bio=eI$(UjU!s?1^=I^DLj{!ho%G?<4CLlR<$rF&`rnQ{XRTbk#eE zNJoJ!Yf!?g^zkqxZlQei(d&3m7SJT(JUIUxNWD|G58~TZ8>Q3dL?d+Q+fBIgbwEuN z{JgrZQmA$FN2%%kREEB^yUey@<*NH1zQ!`k|0uJZt#9{4TT_-<5;#*Ubr~F?7Ic&x zt9n|pW=+obkl5fHur7Oz&0v0}GSSrYx8q02sI8gou&R34X6YkrH|}8J>}Lj3mkIUU zdEukRqRZksae^dsD#B~#R)z|3&pvcxZ>QX$=^~(ifMu|ewC;I!xMY=1+(hBGy3+;iDDwEfE*VWP0wTs7oP_LsEJ)F0xFoQY2qc)gk?bI) zx;U2GcEjRf*AK6j7D{Bs%iZzP}j`B6;Yoyd96>1D6dV_ zh(KW2GpM$2VZS?d>??Y3yxznfVUnkwacR?Nq%Ld+ws2k)-xWz$OePdI`-w?Y$-aea*(?Dk$ zgP&RFR%tOQr$K(v!)+d&)dhqr8R5jVLN9X$y$yu$w}GDrF^Wn6!9K^vFQmX;dzcfr 
zN2b%appkhGz2fDmeZ|TSgy#ZHoe>%P$HjUTa=JqHt|{Lkn!C;uAi3JOS0kR?c&9{@ zc#_XPeMjX&C;W9q59+@$S^XamIiJ0!uce?SPn*7Ze{0ufX?6pRZkoht@fGRKP?G-l zeV-g#1F<`Wf`cMxIP`UT?LX0nQ{C0#mQGCy%rm`ef*VOV@lz;MX@dnLW#U_(zh!jX zBI6$?g}1L~btS4pukHj`W&e>$I3c$utBP-5A{24!7A+*gZ5ZFwNKp)6=iF`7=Q zs6|*w$l+IDritm*gUWF22R?W^Jpo?V7=d{v-ch|g$c+~}8kk4*Ew5+0S4bH#x8j|@ zkFIw#%t&#pn^Ak$y2(85mOoQQ4nDOoiyH=yO6wAZmcYWFUvf#tlC7y*m^7YOH=M(S zdstPNuMihKY)n{{RTdZX&V9<8wC}Z;<~q2&d-OrDjrkE{{y2Z;DdV>cUe1!J%AHG6 zD67@D-6bjN<@?H)%K(oQ6ok2(`d|kX{TZ(+bl3LzLiAA?zsz&{+o|Nb^b^#Dt{DQq zJkDY_3IlaHYyICvH7XZ2;u+h<HcHo!>o%*%p zDAG>OPn*t}12FLufQHlM9ikTtYqaW1w{_>le}IWP!ENeJ*GaKvsYAS^Um9cVwW~SL zc~iSMHFQE61rP19Gmfi|Uzkjz>VzJ(h+AOdS4C`fl?~sPmHR4bPg}*QCESu)#qEh> z(zU)B5qO2Kqe!n;nZLBEBGA|yR9Ak7qJDR)ypw)ZXhni1j2y1-6F|$uyKP=q7^bf= zhP%JHasNffQh&ClGlPM6BenpaF!RgaP%E5SIeYz<*M3{U`s4dIEkion^QoK?+$5Pq zWzRpgEmEXYdY~maeJV#}J?@{hC%Z2}kAZ5Z=HwrYE~_t-`I~FOan}QsDNW-SQds1k z$HeJ1QmtKH6LIB*l>2DiKaQB(9$dw4GHF307qCmEh_Wuk$H9tJT%KF!|InDaTeGSA z>wR#NSi2`LhR2K>*ZoqLz~}&>wb2jsTJhY?{nXhym^GiG5!Z?CV7LlQGQ4oVFWgl7 zeBX}C#7!7AvSRwjL*X;TW0qos00Wxh#*^HVp>WD+{ih9#d{(`ekAf~OY?ZU~eS0+I z$vJXg86}ilpa+n4fuLT!5veRDRi<$=5jBtd{!iB-&Qlo;mg847PZ}cpSSF>zYpfB( z$={*W%`_rX7N2`c1ByV3~cFgDbmB`5m!wn>I?3xoz5HESR+n>rQ(N#_f8C z%TB%5WIy!;C8AE#5U#IE*^?RQy7Be3y(9{F8`givKrN&Z*qZF4FTp89X=L>3hzte` z3%r`Bt5KC6yEeOFMdKXl8I&yxM48`qQW-f^-0qv)pk2Bu2XijDL!XJ?QYIdCZDKMc z|8pu;5qCBEW){JA7yZ2rD`z8@~u72eSVh?JQ^S-FNW6v9RkE|H$mzm1iE~6;%L-$T!@!atMJo z9-QAc4}M1~Q}LL5_QL0~^mhn2$CT_|ZHy4V^R|O_`kFjN^>lh5I%;n4Bk2j}OdV5f zbB*x*yYYD~i*=<}iy=^@pS#$5S=KIO~Y#n}dteXV79fSQl z3=iQ?`$Bfi=_py|yO`J}E;HfA)tkShXiMcB)CaHy=9of64Q9^G?KWKnJ1kIA2W5%XXmk^#M-$omte z5x4)|Ck^yCUh2ZO$3dHqDBs?(KmTTDmZl9?zFRMV@w}D>LoZ8gEdH0e^bVy$wlC_L zwEcS97m?Bk;y2Ip4Y&B-1QN)+V6Gx^WQ|it<5HAyD6VDF8@)m!Y9RG%w(y}sv^4GL z9`0U!2mRF=!dYxg1@cEDjm6mM8Gf#)=~#C~wX(JqyJJqjyiU*NuzsxS9(Ms6uc2*_ zPV;DX-%lYjyB)PMQu1`N#|4wfelWE>Kv4A-5*&tHLvz!=My)C3doz9Ng;cnpjqoxW zx~(^nZktADuD_aWUHOczEg`Mn5+e#(*_jG1n!{$oU15m;6g^3|;MwR8OSzPjuT^D( 
z6Xo^pm;dVp5RgY=UcV?Jaf2@QlKL#~8(d-NfpI8{Irl>olP?f%lz1y(TIN)$5@1V^ zs=i+o@CR2|jUHZrSaCFLW^u+e`)Bnr|L_g>fPWCmaZ48bhmT-L1svP@V^i+$*HH{-NIYg(~pIzBbm3@ zf3;GCUE{gue@bI@x2`l`RAc^Nhc6{GEdNQ2JE0pU`lyU7QV^jLdpek}niIkSB*Uv~ zx|gtP&A(hZ+;2LkTC#6jk8JZMF<#=g8I1DSUC}in{qyIE?jh}!+RQfyzm}~ylcQuy z3{_9=mIP-8R)Vnm#YPPjB%xd}M|5YK8?T1jymy;!QvUgqI(pXS9i!vF)|l=zux95o zoc?$6np@z~sV43ahhMz>2`_*CJS!5pU}gefCGJ)`uuxpQtb>?E_qKX`9RS6+XC1VGsk?&pCwQGy5^1kLWep_wKryJOVkhx`x4Qg^gm@l*H zqjph*vT+Wiw1G-neSUEs-US@wcqMt@eqgYfc=l(E)%BLaz)=+(A0>AwSJ1Q-p-CNA z*o1h@4^_M4TnB-lneAw4X4x+uvdd(rAm_%{oq)2x)r!m20gz`NfYCsWla+3@eP$C! z!=7Q?vzsRADn}icJVZ1x0TPpqO%aqKl0ox$-++gSMhE??A3 z3}9?fYxHzSIwb%#OCiacNa3vzS|0|Te(*S4iOc=q1GWLw?Lg1P-Zb^$qm}?n%ejtd zw9jc8%b+F`vaRG~^*O?R6sKk6VZieXruL8lx_e&J4fv<}%0Hvm#JuYrDLw{(M1eZu$ zh1jESw2Z4zTC+geC}2fWxr-FsVwn^r@i|(xxN+%_5v~dATTH}OvG3u}U!Py$5m>4XjB){C zyfcWJh=?B04=;7yfFpVG3sa>2E~olE>&m4kq!MThS6@MF1_RKKI@c1Rhw+Or*BU5e znfHSkypDD{=dY068IHax9Bl4pBxJA*?9V+ZD$X&EV4#u{;YQ{nu%kp=tq3=%1Sf}l zy0CkW<9w%DiIE7nkU~Pw+C4V`H*1QMEz}fhy)P&ZGE2K<9n(A%v9vW*s9l)<hCI^vOPT}HI76rue!V84U;u|7N8M*W!eAm{F1+B-1D_c3+L5&S-Sf2%?! 
z{l=(yB)0$8tTaC2UEtrc(`O>~YF`vraa+OinqXBwkcCaE)-c9B0I$*XLo_`J z`qsNrp~Q9%T52I&2KvE@UZedOvP5g5_026$@M3&OE;k>W?>wJY9G!}!r$a7Eyo~?| z+-j!m<{2h<&+ZpTBtk|Ug7z>&H3O;`B0u9)e1;Q-TMEZrp|l|-eu?NVK@FEYWaYz- z61PCozrXjgcWy9zt9N&r75RCC=_Bn+%VQOVVUz-#MllAp z&a6g@%ySk_s@f` z&hIRLe{n4spGGTT6)5j8uW$Sifg~6f23nK74y@~=bgiTfVR$XQp0ixt9*x`mMN>AW()?{AG zm@KJRod7#bJ3c(>r;9fRf<*HSLJrv=eX(%W(OT0NIF!Xs%N0`Z@UReMd;_ltBa+e5s&0G)v{c#S z5za#(!Ym-I^7@>4)_`;8){{|{bhlsF{K0tWLT(!-F4sp48$2{Qjp|t-Q^f)M*$J93 zViCc1X~dw>Q#ajfPuKcqZH3;OyROAMD?{?$N6XB)Dw%Pa2M&|zBF`-#o|ORr>=KTo z5i6d1^8Iai4CW#|z6cgJLtgRS#J?+Phg?Y}>h`US`YY7J`9-G0xQzQhtlqG`;>$F< zOeLgY(twl_b#t1|jtV!micv3N$P{S2%YjUZWC8t#-V=4U*Si5xo(llPap~I)ua5OC zz%``RO+0TRgn9bvK?b!{72ElCe+&4}F7VX6!@#y?g= zW}ia`@|0>&txZ?uJB7CCB#~5j;npm=byt)IdH_pVQ zkc8lB9Gw)7`^L22rp*dl?TS^3{b5cAXb34BdR6${uJ`o>2)pw%NPt z&lBWd)_ESXcbi#fq4Z3J%MAgveHQWWP!e{Ar<3*V@1SfkL3ip~KnV7XypEKb6~d4) zdDZe6=st7@Y_LUX-O`G8q4R?f#t0$oS{hEei)+uxsEsCVCf_%)I1 z4Gv0*pwnANKj1?3d&pNOOjtkN#0z>F|N547RuP|$g~|cY%W6(dkmf$rDp28SV4#g= zY=HKmZuy=*r0D5XqrbAWnGez@diVM;-$IY;@6j8e%T#oK1ev2ibc0)5sXH=~T0~FC zWra8|wcia5ZHt?$3F=Tcv2?Odye$K5_1RDT`Cqa;VHt!9^eYsD zah7-jw+%8v7KYzJR`2XQI+%^z|#6<0$XgI&=tks5CQEjG=UBSl`V57BztY+mu)Yjik{WuKM}|o9KPrYLokK!H3ixlkg&=#Qm|w zC1#~=C?N0UsB3U9zs&sdShg7-^?kO;Ro2Gq-C&dX}@@?+>zr7t7eEpEy*`yrOro|$B;D=RgK_pwNbSmVN? 
z-$vw{h1?3Xe*al`KZL>mU;ut?{~}J& zO>!Pg!VqrwZGvGFVu3Pji+?dEAu_FtdL8ATLJm#@_2lqW3hMMus7B@hupJSbyB}yw7OR=pgt z%*tAe{Exngmsxf(f$ulY`ukh*MdUpMv55J{N2^2t0`h<&Ea58rJ*Uyd1%+CWpp#|X zIGs9D8tLc(A{VJ4%L0M%SI>J5(7XdmryOtt+xTW}_KoflJ1@lbAcw_;{oky_-mm#$Eg+I1aSfGz%C& zA=Mn=GOE{MT?5~NELZ^>w!+q5XbNg=WnSl_xh&^AR`34NXg&t<1PCVEyJ`<*(1sBv znyqrT&Zk2w?^f6h$(g*I4HmSHyKCNl*^i%^rKRD@D-Xb=F+wgj5v#s}pa|_=ZzwyMNpRJ5kJk>#A6@B-%5ihbNY4 zn+_F|p!viO@cb+z0kIHaoIO&^p)1Ht&CG`xrR-|JBXa{OnbCdg+@%-H>dH04(TNuj z)!{EhbzB|3nOl0-WhG}~7W6!=?THqxHaQIB)z>-9zF;%j-i$}*RaZS$o2jINSXdcA zw@vHELjNsCmWTvh^x1D6zt$ka_UpSzJm!O*JJ41VKlJ?k%uBb{`Q4L=YE^>fW|XyE z&`%4dKfvzo0z>r>PoZ!Xlpb*t|ADL{xBi8!6xN?!;2~?fLYFBqt&7WlN`6*Zk@a?U zoQ7&m{=dS})z1j-#nAs}F!qu72PH7FPuoa4AA%66sd4*?-uz1wAw)*zWfu3i0AHRW zdY9z^sY&SQID}GQ=yp1*G;MN|1!DkmM;@|&nG%II2aRej9z=}$j)kI`qr*YPr(pa< zq&K0|^wV`t*3YjP(0*&5kLZP9e96{43gS3-Ww`Zt%KP~J?}-}qN%zIHZ^Tz44NhyC zXHdU64gO}{^!=)CPLRP!g^TTqEI`vx7Czr~UYge7u~xlXX-%iZ&9)r!m={L@jlq}f zV>90RTn-6;`dur!{}$8fctK zuh&|GWr;vH?Df!|E&4n21L;QW5l1>8Y zqV$!oH+sI|ArB4VH;AbG*qq25LyL;oa94cbz>IR83p*6W_#q)NQ~F;w+ZlG?Jqi5= z+RshI6O=Y+04-x_d`?u=@|37nvg)_;82k%XiL2p`abErl%#yG7vu$TB4x zK)44MOtB3J@SYHqb^I}5$c?%3g0K(PnHDve-~brfIb;$I{^Ii=mi7}v_!9?c%ZbY& zfpe>!aHRfo?te*IcCd#}%PT@4js`}y+vyt;0YtX zsi(n>@(ueCnLS0kHU0tZLV;Y)(9@y}+$b}ca$_Jtb^)CICH!`KNZzLmgNZY0g(c|h;JGatPYm!~PBSN`Y**b<^ZD(-|z!KKpFP)#1IDPT>$q*78{9q?#B04KY$yLA0=7fX*$@Shf*y^ zhu`IVdhr?^x!Zlc5CE|mfzH(cUiZ`MR6xmdJt-qBInt5TompqdHa5s_a))mri?V4I zu>iNIy6AJdJAzht(jzcMd_CYE(tuKy4=MD98~Or}z=I!PmgNETSpuPALw)_}FMms~ zjcEzsZgR>TRqxDUJ9l60wUX)zExQ!|vg*0WRtLR*EqzNU_!cXbl|Bd%=|zqT2KnH} zJ0B!}{L3%N+(NA`oH3dNs`GX53x3?HB0&-k9TnSqrlhQDZJsi=un`RGsU{W{c7XGf z9WK(H|A!NL>0Vhhc5Gwlg0wvQO+C+OW#mTg8+);9=!VT~`KWYYNW6*~ggD zWDfcEHkoD7W8KWaOYy+jjOnb|RyTv;i2ZBqc}b5u@X7Og((OV^U2MvTDf9wNiy=pH z{}=8-3C>A=AMYTe3YLH$1cOdUq#7vXao390!EA;b$F|^GVo*c(#{1|UlBz(DHnkIk zfVIw#e)bcByet?6bLq z_$Y5W|K`oo(vS659tJEKNJ4Qwwto zsw$Ij$1kW&SIQ0dMz03r>@Gtzc7 zU5#=glcvx;(#kw<>}6*A54F?7PjF4#&^AD_orz%!Dr;C;Q!3E1toau`VFH@EgK~kq 
zA+9r!pgwSACUH-Y{~ME|9-+A*I9?OoTz+I{ID8=k(q;jml6Kx!bwjQ=t=}Ugfa`hR zRr^oUx&>zpu@l9)i0W5+khrqOMOWbB=n&-5{XNn%7l|n=8JX0pI9DJ1{k^OGa5R1G zA&c*CvfA&gOjZ)C!B>{XfW#5>3FvlIfSfAtV?;1J&N^nRUn_=V2U;x zLbeZ$`7HKQ@svDIYr}&Dkc2Byx^?fNI9k8#Gyushpn&8cbX$VEqhD8cI23VR3Fcx3 zPjXX+_8k_Ig-4o`-QjeWP^yDqwVWaOqtUAlv@_{4i?4J@SP1X|w=vQHY->oqvh&h7 zl79kJd(C~gKvNzZv9?c~f5ZNBIUZupj3k8_XYsGguFN2dQ3{G z*R1@$MA9U}7L@~Q?;O+>NamKD$1D)3s{#o}2ev+K_aA)2#GBmIAKo02L3}WXjfsOkq}UyqJ;#D&>G* zkAYN$-K!kW7OzpHqpI6gngM5KqZF{y(AtG@L4!8uUjjFtT^kvdk*?jZ2{%(T_kGVU#UDUlG99bkY49KyQHyv1N#zx4UvS3@Q?`^rw@f#e> za?sg^xP+oJSX#ucUCe=Ol|@R;$d37v)oLhC#a#uCK_XTXXI}!J{Eu|kJG_CE9&lDz z20~!FH8x+pe5tzKrNt}$jz+{|a5WB8WElYPnzs%BlW@UVHTr)+g%A(MYcJ9wHW-y`mlhOVrj*1Q#-_GyeO>m@du7W7~d#cG;<%{?`;4hK9wjZN6 zgm-&(mUMf?`a zBkq<7zKQWhY%NE?jTS%?HwK5<>U1X=KZv_MF{jcA9KV)WYT)YTBQlOhIbUde40=>& zg@Jwa8%3zKPkt7GTWgc?>P;MqU4Uh-%`2a7z9*v2zQ<-%U#(W|Ax69oNo)7*2rt;G z9k&-0&5dE}FsS9BhsoIo!4?EQ5^IL{vFp_kCiFiMq~yi=L zV?czlD!-H)0OfQx2KJpmnDP4*{$`T+sB9}s*7C!Fh!~`pr&fj16)A_Nrz&kZ zufqyc`mankcNeEO-DqDfR2G}RPsaFvg;A@dt#Vn_vL48J z2_pJ^Xi;W>PU31X?+;Y}0)OBNUm&teb&v7cJhX7I^})tvxXo@2{U7jr2e$r0Nk@zW zw0^MrmK1OUh(|u`LEGio`s7Vyh8+6>VGd|y9LOvvH$T|V+_MJT0g^U@(w7MyY!L-f zPafo93+Ts3E&gSfN1*fZVYu-Agp(Xck@=9h6BN*S7 zM?YkLP%f$8Y^UV)*m>0OF2O!|ZLC5bq5$UZ*H);8o>T2t`9(HoUyiGAT{nXEfdCG@ z@{T(sodrO&Ii4Ii=+-@Zul81Duyk$n_aFGe&>@snT9&PflhA_UzTU7uh`BKHHW-su z1-$1mjx#jSTA-Wgw-sa-ppDwO`QOYU(SP8%B5{m3mLV-jY>EgV?Fbi!hni%sV6R`o zV1&*h?GcQECQWy(T>-?=V{`@Bbu7TmsRfX*EC6L$=yLhGk^4N!XR1pX;$J72(b6v9 z=2baJ?{xktJRFi~$j1mk&qE?Sx@6TbL0)G9NpnK4K6}?D_}nXmjcI-l6(*Kdnht3d zYAJ#&mV?hKPaR4*%(L(*S_{wznAj8k4^m<-@l0(H<2aObvY_=*m}o$YXh+|a9B4*y zQh1isEK^7!2rBTn&))wc6q%mGWkKT(q!yP9>4ieP`I4W|yIBK_;*#NFI&fMHEC`A17+sf zg6pn}RZp2py@57+9AcIXE^Ybh>Gx`I`wf{Edd{?`^FL0Y;AjrzgbtbRWWFyQ@aU3q z;Cfv%luD~JN4>hf++N9zZ93AAw_?oeRp+|#SV1AobL%T}==Zzp_1PGFCKy3fPwq`+ zWL-%h?Sm4nG{v_tzFg~@{g2n`@Z5mGlU306y6c9HsvTzIG_e@%;!R%Cn|kXpS0@E1 zmWCl8yy90;NZ-KYiuagm{0P%^7t--y{p|&ho@FR=7J776;kgCs$3IVAa)CBjksph$k0<6bXi>yuM*qSiTw2VTToZ#vm} 
zWfp|m3X>sR{um&f1GLFT%bK1%g(u_0wYD_k ziynxxD!&iB#7|u0fMD-Q6;$u}(Th|k{V;^4T*W5(u#5uAp2O`qWD2^eS$npm%9%=Q z(teP^P5nw@mKNAzZ1U9G<1Kn&T8C!E6w@vjkB;0V% zsy3T7JNkRSSqFB+-I;Hv&y}|p`Ll;^|AlOTz-vgtKGDN#94h@|_B4#Cx{z0rsUNO75_z!(AG#is< zAG8T|wq#tgyb1jK`_(5`ck$3eq|-7=fql%FYw;AqV77k@s*;As{@Q!*BOxF8i{ay5 z!jN>Mzp;e3y*uVK{oIawTw}0B<(DAT5zmNZ7B4(H z%3_*J+^uh5F(M?&kbEx>$=HAXMJkxIPpJOjg^VE+9f#3>7hHNvx9K}_F zXn85B&Nbpw_s8hrjctGV$V{ebHb%T8jls+{wf+<_66c-Dn~xu$DKSn9F^0d$$a-eu z>RfD~@vYaEQtkm-ze01o90i`76jjg9vB_FhiSe%38psL=W!T3QA^G_M>nwI6u{dMz zZ}xe_`to5_>&a$gmuEeD;G}|+NARr##pSg@9MaZBg)k_|>F-tt9dns!4H|%EIT!jVV4kjnLC5QLDX4mYZv0WS9jd-#fywVd`tvnG zM$%y(dnDy{3wAMNZYJoNA^m-hq$a;Bi@Cf$_&aa&069;ceWB;8v#_L*4f&KP2rcsO z)N*s|XIi($9+zNqd<3Q6&5+-+6PV$g5R2|4Riv;5rkhw`|Jh$1K(AbT%~)9wI)q>? zB*gkpU6RT{2YoOqmja;Gxf77W;El_q;3%R6K_(Uoq)zCS3zN@y(g}nQc<-X1*p{co zr74^5PFAmVUFQT^BmvT5p+rcNr>WuI|Cz(oNY2OX^nkX9T)UsOThQjz37R6_D`zlX zAt`W(x3A(@RPX(->b^Uk%f9cMpFNVj_g>keNcP@QRz}F?M~F~<_Ff@Mg~$$tl#!KP zl$}k=3`N7J=X2;hulu~N`?~Juy6@NPc|Ff_{&$|8bo`FraeTk;&wG74li4bh%tvNR z<*8jwZ{S44m=qf-_29=dgrBH+1^1=^^fw8p=0Z5c0ss(cf~JBXmnDDURr0Mm`2TNxCYbt*J{h9)m0ld}8J0*s`VKxFZpJ=hB0qy7L+ z!ltDC$@*)%rGhw)8`BLJPP&h?!+0p6ANe?p5@|Fmha9fJIeu4uX89LE(D3E=6MU_v z+31@pZyLYN1cf#J1*hyH5y<}nr{LmH@T-to1L)$VuTk`TKLNa}sX+jn7(Zh3dVbRI zu%h=#=?jiQ8zI~0-izwsYt3XVM?jhJGatuVAQ`=W9DEr_)DbIg!zu!Lv{RuxYd>NP z+Y@2n6|>-#`Q;}W6u;&=*JsmjccYvD-8*67J{ z3;q~*tX5Qd)1numK6e98{qDu3m&96kPuG&tiz%9i0uy`-_-MhxdYC{Rd+OZA^o<|t z3ef*|5DO9AXTVM)SSVJ)VTw^#u-T3TZvlEv&^biD_Od;u?AzyUdw`;_+)xU}Sj*ha z$ET;)Y&nOO`5{6lwv)*};uH&Dsn!N!-bt-7#%Y7r1A^~M4 zRkligw_{W9*i+c~@LkX0TYflNbQfh<=nf^=MBCUYJ*k`Rt7lBzUTgJYEPbX7% z<4Nx~P&AAi1H7fp6jNxZZIFr{Y*MS~53Qsh8uAV(#5aVFJAnPbOZZg3TzT)4w4oe= z(Hr2P<)?8gl_r`FzfP`- zeaN;GUo_(6zrk=8S1=dX<-}xn*v6CS zcbDAEACj&UPrdHzf_#C5QQx;p6KaQ-XK{5}DNr|>K1M*;ti1S*L^gcVlw;@U-#sR1 zN2n$|)FlfnTIuT;d?S*~2h*6Nb(c7K=-bF3hF1HmZ%8~}*rwSErm_d9ytzj#U`u{%XlG=Q 
zBWt8~jw^uV&HGmFvS9J9#2CNya3z9Xi^hOzi?KkiRVHHcb9O}BM;Kb9X@>0m@ReXK_3PqWa!6vd*w1QN??kBB+|;>5-Z86GYt7*kk-Isx$IynPt&?BVL%n97bLC0o9gKU&@JdM_M_us+YS_ich!udyRdQH? z)FoAn?!1^>PkQKdVv#dc0n^-ibiG;-OxP3s+{nOZO6TkOY@O;ijPm3FwlFjE@|oYr zg)<4*nGR_Nyx$rtngnQvr!`V*TEF>jZ-noD)Wd(|d`me(L6$W;u=N&d-Q-1TDtAwH zB1{>S0J>Ot?<^bb78U?&Nxx6o;Pd&tR>ns_IFxrXq+EwJK^e?g4(2=+j#%6Oj!ChA zE}?7)ls!xV|FhMZ4{=D-Zo`hAaHZCfXcLC%3uKUt7!SJ50QVIB=vSYBWa*)c*aM9Z5# zu!3Af>Px`qno7x|-3`3~;2nV(Hd?u2)hm=d6)IEtCxGb5$G3F81Gh&D5x!^Qo;VHk zBCQP~n0$R0{*kqK?C6CkIbvH-M{LWbf62B09Lxmvy9a5xQiw$K@XE!XCxLz}?Nu~K zT1Ub8)D8abgmZc25TFr6sCGbif{w&$$JST}Ht#_-#9gTUv^C|WHh>-OebcuYs`tla z&d6h7fYrE_UiJ-?r{3qJ>7vw3jl_mYhVC$7r$G*iabo7VpN<$gMxtX5&(nd_f?eh_ zVze&hYdlV$j&S!EnTnB3ab%W!xspa~8q%c0pbm(c{eZ^DLa;)*vfvPq`lPVo1Awq? z0U*Ip>E0jGwuU8hG$!f7)31grV{aC7`eHp=DUHuS1SpWv2H{&(^>Eij;Ofq2E>^zy z=^EIqFb*vnjvno$73V8%LkWo{n{lQ*wM_XU7+p&ak!F*B68$lb13t_s(;X?+Fpa@h zhr+YOgM$jh!)`uo_yO}-52_k-!2E2!4uv%7km41LAI`ad?@MIP^mD(Pa51IWxbo!1 zTn^B$laju z1I&!(Ri%oNw;w-;dt-lcHoZ&Hrt;P@lgaRHe8g2h&kM^XEX`JRA55SfL;BZg!0KN- zYZW}FWF2XgV=Z~64pPO=R!~{>xAK~GvW4uVw!9ljoyls50tx&uyn&Go(<0u#dwU2t zjQ%^-qc?eU9)2}n3R>tO5&)*3QP>uZdO22dj?W$o=fczf;9*{-5j=(t2boOf4=l+3 z>GB$94XLL|TfAj+-KbnlG4v&=C;CSFV;R03?E01vs;@^jv6Fz5>j~@6YY`X^VkNWZ zL&-&PD1ZG`iesdBF(GWhTNvXbIKywYh%N%<uqiP-4X~aO2EoK)6ET39WxEpyozPY$Y!n0uhfHDWudl@;+ftQ*Y z&XUrzGd&cmaZFaC$H#HD?2c*<#geaIJ>)s^di?>+N(w5dYocbrk+)k+b1yz>B+|*1 zLNOAHpE+JgDHWt`Ej6ijmS%`>LtT%D)vZ95E=4SDx-W%!(LmZ=q=-|0HS5^{Pm{hj#Tu6z46MR6W`o zu$B@uvT|DRAhO|XM!(h@??4f z?!Upoc)GZZ{*m2UK$JJDn?|-rd=B0(qfK8WMgjQAiJ(RbznBY5#TlwH%q502TFQ){WX5 ze+hsM7m0ryxWDN)HQNakuqgw-8@OH`+qL7HgPqJJd)&B;-WPtZZjWzH+g%;QZyo+I}{tPwfUE^ObRsB z8Kp+g{};?c<{LM5{YWXI0nsslm8WoD^13FwxM$Oy!hYR8ML33t%ovN%!QUQ2R8 zvfOkw;?4nFU6Vgom2MFjmAV1Og92~dfZ5gOce)E9#k}6($tqPlkJ~mj*Vf0y!Piye zlhJ-LiEV1LDjzD`x$uao2XB?}SZ$pz0a|pm%Cs46KkyfL0$|DBf#iY&K0h=Q{ZnxW zT49kZ-VN<)oNY{M{tteL5?z}%O8oBIhmA=r$a@=osr;Fbfqvkka%Fz+GF%;IqAx?I zTVFD}hdCa+(O@GgzsRzZ_{`00ZG7}>ec84VA#=M`e@mY~3H^tE>UoE@dlXW$UB;>|f6a 
z=6M+!SyC*B1BqowiLOW^JFkSJF!d5iQDYmSH<3Pc?Rnc-s2|ErFFN0bVI&!qMfH#u z^6~ zIN~x;rEu)P@L$xRfMG&J3{sW~6zc)(s4aO^_?{I*>%wI&Vt< zqMW$vRU)$P<6mmB23R21c-tm|XL;b|28x=)<5>MzUJj(}J{8ueSU3|(j(xgug<^|- z*-Q7%e#Q^xM>j*WvG!xL@y8Z*T1+#2m4%nTqn3WW(F>*Z^L;Anrh(Gu0^%Z(e?gJ9 zW{5HJv+SMF^TUL}zhiEOl$s2^{dd!4?u{f0{kY=Q>31x-iTsgyZ?F3x8-}VLis^nsWY(Y^RKGGZd) zz*y>1V%_w7Z_mtG+xAf&ckYWB>I)3+AL|LX>uE5*W@r2qHB5cy=LwW`3ZglibT+Y7 z_+!O=l`{xA)hJM%VVsN^BK)RG*Nl`m^E5Y`v^QC}+3#WzX>}ibY={p0=s2DEN@U&h z(;2?NO}%GcAMONRar)FG>E>}(va9{>qS^Gu>xw=K^-v153$B@FgYOsbc?RTP zZE>d_88F4!NEZX>^^F6U=3vp6F{=9{l4nzYU&wKK&~}|PM7AJ6fh1H?>OY0xy&RD zxCj%NPo=%1-Kh8pCb?g+kVTIHjwL2pVNWv$7HCaux>^`u6DMH^jNP7G5gK*g(c05fQ+!)}VPGd|0WHPWnc`>WG7lpLugtI>;pJ$A_mAWA^{#E7vC8G8!@oS#RL5m*WImI4FSwlk?FM)dr$$b z22^neDB3GHS9qDHn}i`dk0&n}x<(LB3XyMs?+jdEJg|XraAeGs!%@Mj zs+t;+mzUQ~mvsuvMBRMp3~O+9v?DI4=z5H#&f-Xb0otO3SrvjcZ#Do>i%&@*_Xf?Y zFF`hPh$~c!4%ZwukCmW1;`ZuseeTI#SHV$g!KQk4_*bpx^le;UP$oOTc$ikuxYryn z4}>23#sCbMkIH8Q`0=w*3ue4uWKkR_M3@!lY>fskxOPcmaeZf-C3F;Pi zf!$XSA9!6Ra%wxXq^|#1x>fVp=CMO;z+$3IxuC19ZLaf3X+}(PGT$qVY4&=0OqZ@4 z_6qw+eXbr(PEJ*2Wt86*s8mc9o{%5Jb(hQXP-gK=NSL_5dgdB8bojd&fk)Q(tkO}t zt2eN7+`_gu!$Mzxi0eG|h-D%U@wm-g-}k_#$2!L`6j@sHs&~uNI5H zIh#Aux<3>0?INb9nBx$*T$E5bI8olX63bVk8X(l3RxtW?fIHjjJGO^EpssCDp$LPa z+ZDe51Y+@XUoD-@wn8{XyKj5Z32lI{>|8_ZZ0p$A1&F}C;Y2rVFkB5!2d&If}! 
z?U!XL+hA1QEIpUoTJdM==e}CL%Y&FsDFXuo%mIvJ1wLR~)#0IvvcK%Q=EO*@e{E4p z*@8%EJn$7oPB=RnFuRvXHf?k=wK8oROZY4}Z`B8?c=o@4Ck!^b78XLa;(Z9kpm-)* zRae0ZWWs#PCn60A7UqMj<(rIQD-|WY`fw~63!kfI4NP3MU&C&a>+%eCeBa+c$JWwv zT#`6K$o*bi+N2Hp;2Rr>%l-V<_cvtTK)Vq3$nRR$4uBp#5#iSfc6r^G%uWaZ6?(fE zYB9baew$T$ldq-*Z<-o_pqG%nR0SRo58vX9`Ow+jm5V=E%bW*;Hk&jq5Gs-nKdDQT zo)zIW2S$+`Z5D1ox<`aN1(T$1Gp%4?mU(VzC$5g(g5G5@N^fASh(o@-l1c2~6(u~x zKlA}uSdgLGWQPiCzcZiZlyJp$)v0rJ)5PwrIwdOJeBTHvPOGIdy}iAAE-}KF{R7rV zejAuwR!#{D#vx^|`U5ZqYPZQqNTju?C@a@&!31V#27OXBVpHW7wQ+6m=$AxlsHz%{ z!nE1 z#Nq`@oNk&I5b^2yvol|pMX|B5JD$SSCm}9w6y#)PMjr|1zvz&-y{i$3gyw2}lJ-}i zHVRD5&f1U|=>Hm)D4gqj_w~uii#~B_DJkqgatk#zHWuvFEiNt=K7IOhXKO137F?~i z*Jx(LRInVdGh^;@o|}EtHU^KNx`syCdoTrVcW_BbnUF{}5*S8YHSVT%@f%o0g>&&osLo57T|8k?6LbF6o^1K( zF0LOPp(~pAqlRMOu)_6&EMk%&{PQ-*gKiv9UaDj$6YjntYIbG~RypmR7<91(4ZL1S zp4HFsl`5VAy=JkZI9*W$%SDPh;(jk!80jjE`bo5bFW->25}Jv^eGv}+RAadk(>hkV zpy~DoMn(p;!AuNkEu$c-g`#X@n}0L(nEwj!E%U8UVmo{9{719A#p6Pjx--` z`aS2|x|7`N6;aca{*1oPYT#Todg^P+CXDY5AC>*|>C;*bwa#zC?g3LR3V*Bq%@gZI zotX;cCK>EOyL96QN@)LKD&9QyfZT9fuShSv;e}2@Cmco7C_ zYedQf53zHyu?S9R3_>|(M#PB-glPUHnXQozc2zg%K}NOLX|*jcsoI>K=*mYl7E}l| zm2{S!vG({zs6xe4IiY*#Zqq}<$n58gqc2(ZvrC@KovR-rbdl9l)I_4$w#V}Y+SVD% zJjXYLDc0lJ$J4>?>nw5V^1W*kBe8m?%2vL7K3vmK^mt($PP*OH3c-rLLui^TY5K9U;WNJ3;NZ;?FJmEiQ{-=A_cEGymq_|W!2x)BTBR~ zIgXDQn8*f`*osE0j+;oaPgDutSZ^}N2@^f_$##?4+=&|NGYU|-rQA^pH?Y5S@P7zCu zref8lM<<~Ko`k)PE0&LnwVueTZND6Q zLZXLbx0Rdkc#)qxs_I#*F!rWJjm+>%^1&^o(Ay>kgH@r!#678pYQL=gY@n`wwa<(q zpEN3jILK2;c8X6r)!5HoIP}$MN-hJ2?*JsmmmuV5Q%TgWhGVtW38-}@#Y%UW7wWyH zDPnEAP8uY^d#!T83g2xlMvZK>9<;+>CPY-sh2nb#+yt^bDE&oQd_Ng|%J6CO{R>FU zk~`Ec2vt{W>m^xt)=t_XHa9Bb0nyjpRC7*9#0?dl6lAEMaHxL5V^{iN56LiSpoUgXnw`X$HmzkU$q!APcdZ4YYm zm$Nj3NuugJLQY?8h(5>Ctd}L2-u}>zrTRXz{tYu8Aw9y^KL}pkKWn3tbn@io585kF zYV#S2q; z2Na=mvb(G0w%IhHzdj-6p?jtoMwuy$29-m`kuHvIf4~-@5RrkG(~Et@4`@he?u#q%)v~hl=5INUNCu{ zCj2AtuXuG)VFt+8V%|A7wtZ$DqtuP{*BJT7G)|7(qhrp>1iXVP~B-vrs~X zhnTnRfuZZAmqLZJj~)lvKQ7wKD-qgz(8PbrPxS?c0grx;sdYS-?OV$k{!{kFO}zub 
z?keoZfV)qlY3u1MuWJ-BUGk=L2Yp-r-uLgOlDuOoEyzZCca3B2IZr1UK^AGctIY_m zoOABQP~JAyv*+`_KYZVN*d>2Wzc%fpjY*Q@?-{cSAKRIvN{`0h`An-5&Ty_0PKem7 zoH$C~@uvT!oOXP^EVu68zho{SxrP7pUvZKX4Ivkd${olx*m6Y=-}HclPCj(RnasnJNa+ct zIIS~Svc(VK_TdUYIeNs{YdKS3=%Zj|6#t4e&8i2)wLFQ1O)M*dAJUMF9;5&&7}USU(__WlvsO^ zdI0K8K3HWrL7&fullJn#T(JPj5G^wi%esUG`7>wKfZgnX`k9*V;Dm_j4Z5G{JsI;k9hIotR0*q zcp2k^1Z;I~K(5^g&KO#V{E1=)vO{GXj^^TI2HOn}BvE*km6iFYm<>$7L6paEa_%nD zt_N}od2@5K30dRu-3tznSWFrHq@yRnoS_QJsVdB!nm5U5X?ZW%+Y6MPSfZ3|xD$Ky zI0vy0LacrQ!ZDXJvaqmd2;S<&)D6+4Zph5^A{G+FwF0xW@jVRTL1Kcv$l=4F&J6~^t#O1&#W9+U<5-L%olx;i` z62{FCo%#${PMs_dxVT7Gr!n<{=wNFuL_TllKH)Anv3&t$hrDJJ8t^bdj!k=@JNX(Y z{Fd=HRkoNM8822Ml`Hk&LOeL@7(Hl8McR}CBw8l8V2Tssy4Eb^Pn`Ofb50u3T25$ zUgk?1Q0r9T2UOu@QiY5NuE~VqmD>x)2Aqn2=3Tz7Xw$ASlRf8w@xPf{L%d%3ujR)@KbCwuKV$pg>qTSl@tSNE`xph;OdV5zu%( zJdNq7kU~UNbv$PRiT-L3BX>2&Ad>7h0QV`P_&#S%?9Ykzb`?oC#Y4|h7Srp(DTla%x@h_oP1tQCucwC+7T{Bf{y z!~^w6K|#R*UU^aI>f{pM#wUx7jg32T5y+9sGxlq@I50(=ItQi`yYi_Q4o{1VNqi+M zsP3@LP`ZJ4)*jbV8dsK=WcfIdkaY0sa5ko}1~4w<_7;CE+B?=JylwKs#InTc+^+Ok z5_AR{Ppr6-$$aObZ&n$>3$sYAT~fa0&wQ?U8~N_n!xRk-Q^p@Vk`TGbQqRLW9dZd~ z_tms~>rbO~NFbdqu z#C>g!u6o?j0A!WPS8%ww1@1bIp(4rAw78jr<-rg-$OlI?w?oEOTO6YW2Ik-sOcC{` z9(N2`S=)Pmc=txE>C{veqsGM@k0>oGm{Lo;7(PE#sNmZ~c2!Ts{^x zB6mdPq|#qQ))dh7em}CiM5sOtIIIW<}7H~s&cTNzPdDF7ZhEo1b34M z4bDD=iA$yvJ119Q(@7Z`1*1`qof{OrNdOCNamuz#{R~uTsUG62P}qP(DOlvgECyyJNo~(Ay0)hN+>j&hZ6W3U#qb7q^u$ zCh3x=A@U3*Uom7yh7?KX6R-mzTw_7ezpb7RWmRccrJ3K%jpJ5&^DL9U0BX~`&fVO{ zB%JOmB-he9?kraJ9^1Kl9t#VbPfJzF#8X={{H%Nz69@syHHsJ%CE3L|_)WI~VQATs z5j+8deTq|qV1u=0I}_t>*aLRvCD@qXLuPXMZtoh~%u+WAskk>LoQ_wA-Cp?}rGdSl z3}3-E>y-`XarpH9^KE!-)b+5g=mgZZuQ563JbTOjz*NUmzvB-uW{6vL{2H_-IDGV9 zj?alXwcdyBBqk&r)E76T%SZYS5qap-Q^7RoWnwZ}Sf&%R zWWDp9;V3n#^iR8Vn5G7s8%$`TBUq(bOW3l;WI2z2oK;v^MMVvqsmsGpp^oOEh3*cG zliyR;W6gg9+c|{T=da2c*`tmAfMoR8XHdTgjE9x7LPN4EvEUyqHGS1b%C=$u3#2zz AiU0rr diff --git a/fn/vendor/github.com/docker/docker/docs/extend/images/authz_connection_hijack.png 
b/fn/vendor/github.com/docker/docker/docs/extend/images/authz_connection_hijack.png deleted file mode 100644 index f13a2987b28d70f81bda7096a54ec479f20b2690..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 38780 zcmdRWcQlrN{5QHQWZa>Wd6P|cLS$x-WJmTWTSjDrtVC8;86`53k(E6|W|FMz>~$kq z;dx*6UFY}5^T%_Z^PKaX<9p8c+}-zeeXh@Vzu&L%z8+mylP4ymBE-SLAy!n7y@7*+ zrw0GA;-7#|9MZp9Nu8EL$=dljUBrC8qOcZqwAp>oGp5>Y6`LFzYZtTam6~jq zl(O->Yv}U0x4S0Q8_JNEaN@t7u9aS2%^r1;MNd*kcE>vaFz z5`!y2a9wzpAnfBm*HrLpe6;=dC}A)=ihhkB)vv3_{WAtU3bpE!|9-z-9X{|4FysIC zNnSWjiBAaQ9qex>ynipe^-RF}iRHnK42757b01?{H@w+Mb#iW8>t~h;CXy?v@!A#| zE;Mf?No$QjeRCr6Kf|fhR%So)ZE^ao@hDl4(_ChP5&vrOXw|)at|5biD%SkSd0{MB3haB+ zu?K4qm?#;BPH3G{z`Q2VdZVK%dT%w4(xM~&;%j~3(skto?zsLpx&}HtpZ9wV4(NS% zOkdSl_2f85QNz5RfcZRj{IUXOfuO{3+z6X~jm2MKO~P^O!D_`Eiie6I%FhvxCU>(d zl$|lO9+v)9us$IL8z$#QX3X+XvGUuSr51<3b{*b+Dq^_vIU~C6=+MB(bLI@YUf~Oq zI`7?$jrCUgs?65z-dFN*bIWjPD!8EerNH~Oyl8BNWO6$H|*flr<}TKpI@tT-lk7%*fg_D^WJ#G!72ZQ zhC|$g_&mRCzG1~wyx1q9*nccoaHfEc7Ig9x8!xA zC@7Ur?a=r2^(cw(Q4WjYpvF~pgM|&Q@z0qiFnHWl1j6Bxb-o9NjOj+j z);(o;w>G|i?BD%X;X*xk+2Px~QXEH|^V@Ejx}Im3Iuu5Vt<~eWuZ2%B@`*f#Vnd303ey;0#-cUPNOQTi5y4&vQlKu2>sck;hC7b3B`MJy9J5J@w@I%jnI~@r;Yob1TCDB)r zc{-f$eU;xBbjsjsmPtcE{ss9bly2+gDG^kc-gT;PzbrqLky8BFayzs&*edw2^5}5? 
z;!^G18Kr3s$E~$*6~)dg#t#lGGf2M+%+VJLfZ~CI1p<%M^Shc4d z?RzOawF{Dwu@7ar-jJo$_o++Rch>d_tYCd%l|3GfdrT8ocy7Ml`wIKk@X`gFFWDM$ zqMSURS4PTb;3@FbyCSGBTm0Htm)j$~x>G+WHwF`;u^&pVUu+e@rTPhIXBvy>}WvM6J z>3f|sjr~43tQgCp*q^K2Uu5jNdo9)Hr+%sZ;k4uPAD0eahlDInG(229q!hdzWj9u> zFZYCoJ<-{72W#qo@7nXa74PlQdvPa~jkkY}ICfOJEoI0)BHN3a@yZ_5J9~I^@@Qw8 zG)dSw-Fttfe53Ic52a$3T8fR`$;#7h*6qeEscwE*d6pgV8w17)%-X%>A{|!P9bi&cq}6dT_zs3pLogx-*SUu0NOc zUmaGtuiTxaz5)kz`srmyqYEoRr_P0Y$MYDk=E5<31LuUh_Utd7+fq!0%3apN-`wuEdD{_mRDM(phX&uM^$Bh4wZ~^a{#+TcoNkF| z=k(XYTIdbcY>(N}1YbT?Jc{lK^dP1z<1u{yqRjS7Hbdx2Z*b)qZo@$~8G7~3Dwq%- z>z-8YYxVnY2CEHwK zULTSo3w*BCk@5o3z(h(HYNt=tjvw^qGRJ)~j~24hzORl}afs}lU$+{bun%}yH+lRR z>4TA+P5kfTU+k*;i4W3=jV}ApH@$jxeREsjOk2%&{c|27rErK99XC=?G|G^sif%Q9 z`Afdhd7EM0`efatK4neBFl6Eek%mc|oI(3j_TxN6Cz+J3#Q9f54yZ_J6VF=BzOY8A zc~J#Y1cwJ$)-Jm2ZdkrzBTDk5TFeev7wbvdE~RYdE8gMH>K(b6ZA$Mym}+&^ef672 zPv6uV)zou4N|AoP_V`vmiEe+LyY47(!Fc7%!0`<_8=r^ak@z~pRWug^nyCDPXQ zBv-@w4eHN!CPnnk^`u=c+iF?p%i6NKKIYTU3R_lAMCX)X-8PS;u*fSBnlC z`0H`oW9vPiJvnN0=5$_waqjLarSX}|O_Svbl1AHZ1q^USxl<*bC_D}0%$;31{24zs zur7I3^O0$I4=akCX#4A6k-3j{#GE1~4{DIcu(Y6)k8$wQx6oZ3Jau`8yH^i)rrY~o z){xiIwTvysz%e+3da}7gKq7X%1maK+KGi^L%pNT%`+=swxX*UV{lR&Dz94E&<9+kn z2_p2xIEJ;}6;!nk=T1s(plvBP0!-ORQt|Dvvt7v|nrlic!=(``8N=__YWKtQ1`3QF z?aYtic8Poae9Er*I(zb~R|gx;4eQED*#+NlS{*B4x`V!0UP zPB0RT?=V~~6<=>Aw;?w}1v7jWbz8&_R%TFh8ZD?h%gn*}o;-G-a=dZS zFa@fEoXH>e#ysU|TMFsF9)A_>XHFn2em`aFu{hqWzT08`bYPMrsTkIe?@E?JG-FF8 z?|mPZPRS~t{W3h8)QG?&8LPfxEH-~->ceC1PBrQI*xjJ>R`=z!3$6NhHR(Gu`j*BL zmmY3C#x@q^=@&09dim7d-^!)F>|!)XCw{-g`Jxp~ne9OL+k@F3aU9xrO?}wy{rO1* z59lBKnru%J$g=d|eT8kMfAB@y`S@owLu-d~WqcjzS$mzEH;VbFrix6hURz5nGU;p= zmG2pJ<(~Q>a0cIeGfz}p`3w=KR`!d*X@RN-N4}#JwXuG$@IOs|*Hi2WJ1&jW#7Sus za1y`iMCgGOhPRsJf?W7@eTM8k*o;I>1{W@x*$CMWGmTcdX%;UQ-rcznDVDBtIp3f> zKbl*M`u0ftMS~2P5R#ZnwmO%X9%<+Eg}*-bLPOp-81t>5c25awUk8c7X3M^L`GqP3 zkGlrfsAW&*x0#sxILYo3y?`z5d?S#8iBL!TZ_#pL(<~;cpl5f+t8GvYwI2 z7bJ6H^cQXuX4MIIW5gxHs^O%cXvCM%*?B}PHc@KoiE0)xedI5Bw&}~jrua{ML7OiM 
zSNE3%`klNNc-19C#i{K02nNX~?xG;q#iM8xO@7Sxwm5p0I*i_Y`!Z1P&GA!bDH|qd zjjQijT+quYB(LXKhYBmcN8-q^|G4tK!~{otujBf-WKI~|`^hwe?0IAaJB)=;uiW{X zmo!lCN|aM2=Iv5Cpzsj+@~?`*zB zZKIc^(4*AoL>s)qTeSeYAmjGk`>dibd=nWS318}b(G;%I$(RB4_aOO;){m*AJu&~F z3_mG9y5D8>-@rzpJ{2b@R-8VS`Cnx75I*q_K#K6c{!G*%G=7w9ApRGL6tYTA zTJ(fiEB5p*2*%^|Zmy~LKUYS-aN@u2^zdJ#wG1$6Xme8k-z)RtXYiIu zzt?yrN1UVmMh9Ox{B3$LG4)vkVs{VQb6k2Dm)XwfyEE+~)vxnXFXOo_eu+LoOyzNf z_LK7%Dek11*(tn+YWj=s<{0Qvjf}~)2R{w&4433Y=B~R+cQUJCdN=TbL;kZUvG6n2)2siv|2aUFM)g?Uf7i)_41UJc=<}cZOT!L0MM=l< z?;sH{nkxHV{onf=;a?Z_#BFN&&ma*<8;igFpZlMJr{yv6Nc?w@Vg#}&PJ4jHW_IV& z5}M!Th(41mp;EDz?7$K8q%p=t0KES4-Uy^C9*{hFP zQ#~3DV*DZv4(e>+AacxgCJ9{K^xByWiSW`MFVr>F1T!v-q5W1mQcO{dp`JmPI78C`ifRUv&~k7mC=;-Hzdt)7JD~JRE&`Am`y#!hUPBQ!l=*tKhco;27k;@Ct49EcIw0J? ztYzv9u&QT_j@{TtdxGm-OL}Wqa&PsP#1NqPq#Ab<{KLIp{7x_ zFFo(q1w`{~hF-+usreDJakJ5-;!YpSE>dg$+TC32Xaby+0oncCy|uBy0J7<~K_2t`ajnHaT@T~x02n@SBfK>B$%OO=R6 zRG$lVEuFlm9M2WQs!rEXcrpUT7>!e%gU}|v2YVlZh3J3COg7RW${Jn|@stYyoOUCD zDIstYFE{F0xym!@2m_Ajk9I;fqI@-Tj~TTez^RvMlcb?V-x+@Whdgvb~u9P0`Y+bKGqbN4x6mI(dnA zFPPuXfJ*=F-j6%wT)bnXy<2l>50st@*?$rC2H=z_7eR%mTVD2r_DVaT)VJ=Z9|t+_ zkNfVcrp;a@H?#+n<{mT;xV|K-X3i*>ik# z8aD46F(`-FA|(#rQ;9ZzB3yOzxT`T{9x2YU^KSYM;6E}CO2RW78Px=B-B&Pg zZ5PkFYY;8JaJ;uN%)GVOPyTBSH|y9*5oOA zgQixjt(@?G%uR-&a_4Kk;%jJgrghq!nHYW$~Rzs2a4C4 zvyUfbiI~k3iKp})B>lMW|KSmEb8lV&cT_G+J;?CQ`27O_D+O{~pB#u>E@Y2x9#w01 zSJS`EmR;x5rBp$wrA_DqWj85(ceL75;Drjr6KA(kh&*51E*g|+?YBnK>+U1f-2Phi z<`aWjh&j5gBc-<1G>(uVMSpe*^iZ}I7TNT3xK+zX(zUDuK7V80_gYop$#L5YgLV(K z3-r3J=Rdv7KRnn!N!$>6`ofn3*j|()Vn80Ie}1c1B%S#NjtM6>clTBksl@wR<+pno z@K@OaoyDrD`<3W7LPEA*Rr*iv+Q>YIl#%m90$YD;8Q@849Osa>lkKD8#~VGoh96 z;8flCu;5h)G>y#>WGbMA>e%=qw^9qWP7*b-7MhIihZg&r-VGkYGM@}idcfN}gIPt( z+}8!^-ca=l`{Ui>=fbZ}kg2?oDO^*r+s%yLHIJIjtr%TrQM#XL7W zj5(s`v6D@~^9|=Td#<{6%pcV`J*#t_>TrHSBZAuImlXu za@$18%m_(5hTa}yMCK}IP7xBNm z@Zp@XmW=RBM}ls##OYsQFZAqR)^2OaC(bR4+@e32L8V&W^v1(K?{|-8>@;f7i>mfI zBZj`I1m7=s;icw^<(C1EFijsZ$~8R$LjUQ=t9#kcgvrCjRyMHZ5jP8`l}qEK2Isn- 
z4=hqdeqck=Ii)7Om`7nao%qeJPkx*OeOUGLw$CP-0=Fc*&wuOLc}*l0%plU!-@->t zGX4X7Ie`7Ef8wF!$-BgTF_IV*`nf*?XQxhM&EuEcp)AdFrt>X!*xbgF5Dfd8k4we< zD(N7?MHhld{Kbjg-TIpnyj%{-kJTkf`zDX*&HTiOQ{_6f3`=fDBh1C4J1eyQZ1rS# z!+E~Bt;G_q!;)j`kBLL&rM<~y@3Vbv-z<;Vc^4t%Fj98WC>A4HdQxaikuFppPvbPr zlb51-_)i9j^AtBXdnBsd*j~RiyD9k9sPP8Q&g+#9-etdfrM!nz3n`5f8&`ey+JFrB zWmMQ@m`YcP{Yl)K96!Q@#>Wk17JDHVPWjTz7b_skOUoPH6FS5jv9~o~tQ+!U14q$w z?Rm%7k)8C1DgT#*TxxfW6nJx1eRxzgjCj2#J$Gjk z`puFPqeFTw`Z^MmmNn;H`~J4`w(t`i+aKc9tL{gSF@}X~%)(|xN}aB<^xG>Vy0jsQ zxVFu)o`lbv59_2uQDO$@wwoDV=EiStUOJ;Tx#Ep!J$C0pt=+@}@v}TF33lENyqX#h zSkTLA7jRWE9Aav>iGnX+S16=W0^C^O4*gs4$D6UwRTEDvUG&pF##aX-g2BNmZM8mH z#pC*v*VRSO@TSN%W z*~q}{6QnryJFSsHJd@5J!bO{RC(CJyb}+suYTPN|Cp=Vz8I{{^K4RnRFZL!w=usV{ zFKBo2CTRt-Fxg7+TnnBA+tv_2k ztrr=%Q*_Oaj2DDC@ZErg@@n;=<~o%T@xG2%i|E7-*QBREJ!Oj!SJa$b;bM<>vNDF` zjc!ooeKe6No6wgoB4tkt>a)vbK_t{dDqXWQqORHYqN%Pd{wCYTg_|xcrER%qtX`PC z*CbV595yc#s&>K-9-lrL>sI^7bM7FP@_s{wkd2sHLqmh%AEDDM0;k$W@AV01RV$f1 zOdiyevrXTgIOA!MPg6`Ew9Wnju1rxlItMK`E+q;FZe>n*N_Uc5Y0C&E=~AehoUY~y z10Z@$!}=ZjA0!HR37c@JaYu!>G52TU+lWU}iTdZlX2|ghE*0RP!zIjeingx!^VKI9 zzrjz|X+AnNG#U&#jlBq3pINx30W3dRrPF!xH|(uf1hAg`g*xxl->}>76)+znm0o%x|G;k6(=VLv-f%4B z{s&P%VF4cC|IUC+_oK(5YP&*3#3>K?$= zjQ8}f(3&wq?A>RAk7nn06p8@QwZoy#gai_O!*T(b@Rj|lDVEfu^f%Zc7O zPf#KNLJc>Rn-k;<#TE{~g{n`3^jfPw|p^VrtPp zOhWRjohI=LdlXFZs&hEe+kSqsM!7TH?}L){crtvp%vsd50$U|11xU z3M@}UN>lDX4glfDusp?k4@(*TrY@X#VR=s8BYE?W&_2Nh%hSC``Zf5UkoS4Z@SO4vv@EKi?58;M{{`bbdg}RZ_UrreEm^y+nSjZ09~83Hzn{AhLNL?#|6X zQCq5d?jq|!#Ys@MxnOIZ3+)Do(gqAbCNRpME8T4MAs}jj&4S}Uy{{93AAjBVz_&-=eC8@ zf9J=3tSW!At`msq%s0a z)@b{1<0KI!MkjCL8c-RzQ~p2DY|k1%K+ZI*xSPx6RO!9@PJ_o$Eb0Xe_jBEY2N&LY zHhnAC*{%lz!38yQ^z*#Xuf0vmj(y+frNf1sjDQ{kq0+;2%%` zMb&b$F-T6$N(U5iP3b#3*hw4VQvr@3PPe~#=#BYxxK;4rBk06WaUO1;v}!#zCm%t@ zoFeMR-%ey6Y8$IAgBdb4U_#d0lTi)5R-@-_r+l4L4x?8+2zg z-d=wh0Q!K;qcC8S43!eN-v)a-kr+Ki(5yjx%Q}z^_q)jPX1;%nU2Gs)>juosq|bj; za%K4E!S;9ua2O^}&$P8XcdX5ckr7O%2g-&O!BBLi09&(N)Wx@+ssNvL(V&Zvt`d|w 
zg-2w}PbmXF!LSmj$PPfwj$>7BHIiymNfy?aF>gI)R1N#3D(+kxiJ0p|$o(Dj!*84u zSMGdfDqpTY9zeED7=c7RvlKA&kd5N*Zp>)Pu+A>?Sy0$fOZZk(Z8Uy5zfn%EQn?&( z{8Vn&!;_@#Mdq!jCbi+!ZxyeCm$BEo(>3Y`##4A2(~-BO`!MfJ&~gw0<2L7d zsCkGcO?Bu{&A^M!$l*2J<6U8vE-VwG9kfqirEAOP41Ul1D`b$4NOOT1-?hdyb@tvZ zmgZQY&@(+AH29AY60T%I>L!u$0)JuT4s&wr(x;a)qF1(7*{NJpX1h~3HD9ZoCfc5) z3dQfDwwL3I8xrKaLO1(^U`WoJ!t4WY^}-z}3}YcCaF}NH?79^ArCGf#XsSxBux8fk zT6nLDq=8;Fzee_9uff+oSjKt-e)nZPfBeQKIzDytmT--ARmvyQonL^`?Jqe(D(!Dr z9J#^`fD%vHq(1!w)~jEnN{m62SFH5jnvr4xcM27EGa2DvFoPuBTw!W#Rs(s!Nl`KN zVP3&tN(PT2%nLNp0Wd$=49d-miNsP1l+T(vWl9K|Tf~ zpAKTHJTI+9Fg(QhHr=oR6T@SUb-dT3%uF52t*h$Vne3lf}QwUp>ZF#*b} zUQ>@)zjR(j1zdoo|H(&wdc_Y1ru4NX7b)uej)~5=`1ZF~va)MqHF5L)a|A|}U;&ck zx9-j}YesvJB9D##M81OH5|K+fv0ga=9g7N~B+k_*y8@_1eeNA;F@y6TV$YnRgQ79? z5}q?RZ8Dkd#qHvn#IF%aH#~$}E2W4`l5lH6t+PE%SZIRVup0vt{{AKb=af$g`dqBm z#n%a$`1EuBRU`)~+3VfbXANG#Geo>k!!n6W7ABQyNJ%GwZL@0@TNgOBRnP1NF8JmFX}y85bhh5YZ+AP+@e_EIR;e5eEwmFYNqobjSv zOc6s!a9!}4Y1TVMxDYPXfrOOjc`*Ki7vRfYKHXu$)au?iXG}<{4tC@OhNUXzweBP+*y*fD6E0uQ`J4kM~@%Jf}X?F7{aWmBw%v{bxbWZ^8Q+e;l0(nfFv+9lScPzSpYssM+P<`+XuNQr_ zQ_0~03t{!{?DKc73pYgEmlOR>#%q1rn_QrLo7RIe*y4Lz^k=Y$h=6Ud#CAZ4vQm)*K2U0l4rRv^V{Qa#(#jkn#ncxuN_t|rGqe;yLk6s%m8*O67hcgp39hjVnL~AE{H4rE zsVm+y72w{>fw*ANovaV2n|-=9@*K#&?puO{Gry(pW`G+I#eV78>_N)U^x9s%@!rYQ zbGfMXJHnE@*+uJ%xXeC<$>em5tDT2RA|C27joPC_Fa4VPrCfT2l2AvnL10e+yGwQ= zTj(=DT{)ZYSHT|%#pZcLU0nWFp;`NDLs^Q-(Q@LM%Uri2R79Ded)at_)$uc!IT1DP zX`BD`07{ukC{5cyLAU5lzgFf+C+e!uUksr=_o_mqSPAbB9raOnB>YB6a={i3n;DCY zE|a50xbTYo1nQVJ1*Yi)&m0ycG?fXs3#!Sf}Els7@DChpHnMYMC~~P$Ek$ zs}GLZ?>p<$@tzA0(YBWDPc>+wtRVwr0`TE?nN>-VO*xg33Oa=5O5>$ytWq>%5Cz+H zNsQQK$1%H-=hVpQo_H)tXiVUdiegN`saD0G<<`oM>ZKqGY4F+vU6ZP{lbW*Zoj~Y$ zd+-RxkCZ!hG(0@nkH!RlFBt`;lz3>t2cO0MV=QX|Y@Wqi9oyurYS|~NcfUiCR^YB7 z-)!HY`4kl93OFTNSA6;pU~z2{dy`p%X(u0jK6_gnv9+EDI;8g7`r7o#bwWrv|HPRFt}De250CQ@3?J_qdae5W={%y? 
z!k}6e7B8yfD>U;b5pM|^+g?2vF@k<;a;jk)D0cj*Z^#*pP_=vxC13!6XZAM9q?BTO zNkfyJ#gD9MTBJ&1T7vtreqo=^LWaeapJUq*)Uy=Lb~tQXpa}Z@mvzBApR-?zc_g7j zN`W;%jJEcvh$yw<&q}f-Tip*1;7kApSC`I55TIQng$sa3I8m*2O>n~m8SGy^ks*9m zoJgW6JuzaVbDh2q+Z3M*y;XT8sOb5E7Zox|-I?C^HBA%F*} zEhaGa-2lBO=T=o25uMl|f6eZTa(>?BkdvYTJsRa%X}&i-lQ^_)F!#R!#tRTAh zOpr7eh1Ji7>KTF_vW-B=<|xBh7S-SWZOeR3E_-u#hUzGEERwOv3NN7NHqM|;@3|2r zgC?E$jQEV=IyK*?i+1S`@nt)#QG7;v>9|EUCD_xU@%!xFDE+%Pz^}ueLm=>g4C5b) z@uR#%RPY%=xAjke!=R;2MT&*zz;>5n@WXij{!sQ9#GBjm z*UJ1+G%7ghh$2gp&+v!rID`Yek#b88$OxF>c-^B)9*kg~LQ0r^$#y{bx8h#uP9cz)oEArGIyvX@U}uLjB1C zp-v!t{0Qcq`Lv#uGG39A-xRx!SLzCj{~-9LD5wuQbz&qH2(F6-QCvO+H^*my;h`I? zG?>uJD7I9yobn+ZvQS!NO8N%u*a-rY{m!a0@FX5b%wCdYurrhx3p?T76Lly|s*v5F zBn=W3(Py9*NKdC8!Y6YQ!VLw_84UYBTL#%~NFl-mUy}|<3i^xv`v2}blAqCkmpXU& zt@Gj0*I}+d$TS{<|DD7{F)N?de`ldsolY%y8{@P8Q@i!~C0iC}XXp3=#%p1r1)FV5 zV7GA?$Wnc-1IB*_sDhs8x--#a8`V@U50`S1<+h8)NNRz6~C|b z?6M;_xJsTPF@4LnGM)jDHDG7qt0_CxQk!5x^7ogj(=6SNW>^9-;)@VhN)>4TN}Dfg|L zQ4&OqQ=qu~jL&rTo@tLW+LS#|OBKthz6nsODx8u}8gnpzn=bUNi9^vP!UqcBk+EVO+L=P>my@ z6-_Kw!j|Pfyd~nk)py+X>izLmTA4)r7Wr^0us}rsZmZXDOdjOSBZr7@ zP9W&;O|Pq#`igs=Bua{sVfGH3B#znz@J+Q}aRm#NewkfTo^hQ~>2Wh+nTJ4o-@u(h zJfBeB=eoFiIt-U6J_?8CHYKF4aAFAVFC>=n6YDk=XB|v1_GS0_xvVjg`e1p{1lb;9 z9oT!uk6B4Zc9p{yk`Y@YyLiKWBnYB#o3Ve02872}f9aC15Z~@J@_LBD#1FFvTmYrpb(3srZTVQ6lYrSZNV_!d9U?unK+7Ql~!; zmB(=FGhO_%<@{BmgK_4%5^wJQ5|>;5&cpuNh0I?#fo{V6##3FSBox0>jHf$_aU-aLRh}cQ!D@)9WOc35~Y8 zDQ5bX3tGgIpwfu-3URyOkI%qsKP*^2{-DW;$oR+-hx8L-SKHV*@@ia-5B6xRfx>_l zF-5Db(td^A6j%>CrAN3Pf8mXjZXZXB;s8OBTEopWrMZvy$nb@bVW~dd{iDmER3uM^ zp5xTvny~f61lzXt&IDW$3PPguWW$k{E_)MG~eCG-p)g=*2~zTQ4H z!mRlPG%)<#N^u>$)+rC>LQq2v4c zbJC3sA?5PL4>WPJx>7x#ym1yiO|Sh%m*#lJkV7)E?1^;E-WLiV|fR1s5; z8rc~gWOQQw5~dhKaGLu7_h|>cQWet>zSiy2_Utcrw`4O~(pyt7jFm7ewBoqF#_!35 zS>wX%WlIJ8zeHzY|Mxx8@nxTTOx;DDcH1v{_vd+-ChYJKs~!MBMd*qc<+zzHOMpZ? 
zc%W+_U(C33$GF2~a?15||GKo3us`I4Q(wQfL*G&LGxvqh*Ty`iBRpC!EWm;?ZJk|G z`aSECcqFU{(b3TH{92`xS$J%D>WuSW*MP~vLcQ}HH?ZojaUjqQB2(;TelfMbe$IqO z`@?Da2uKaDlI1g*rtr0!kwvwCet>&gA(kbAP)Dww-w5nOaZu8=E%v`T)*kZ6haA%N zec7JG%~9#ogsczP_=tbYd6Kf^J3yU%2kgHXl!Ha9LG+9P;4bf4m8`Q1)_u{pN>T-+ zL7JbU`DOkLH0yL5NMZ=ctU*vdID?~?1`0ZWg|-AzBIbfT$->vS&yluE7WGsvARXdh zPL!aX==cJd?0CQod}*m_tyZ&<6DZpod3t>QY< zz|IWqs5ipEIaJ>#j}k#0}m;Q`DY3!YE_JN5_(lG}x?_4h9W z*L$|h;C6i0ttt^yhZIl8;n^%UDj-`v+7)hnIeT1Bd!%QDB)l zSS6JxKuj>MbYHRQ=Fcr-PfpLhGDP`j1vv5E8kEOFRg`_sEP28WkzanJ+p!w2do!MU zV5>rssS-L9;U9GJ*pQ-b0ihcFw{yXF9N>c1Hz>2yNX-HPhS1l@qTUa8(eOXn{futV znbJxBJ;3*t&7r5;sRJ7|=pE<6+db{(E7R>9EzROqL7qJ|O!BaN0m=St?1v80Bc~;@ z=EbBZDN+vFb^gq4BECsBv5YxnYb|G0?4F6jM>{atXjP&45YaE~#(oG1Etz2mgGBKt zSQBHQ5*6{lzM#3{u3~RaUw|Zm=nL{0I&W_x9m*YW!s5yaWW1p$(jFR25lAo}`|4K8 zN#0}yjL~IdmcOci`Iib+V&pe;UC#FwcEvTldU#GiRHzzemIbk&+@RkXC#L-_?T$7?WVLUHI&+>S9qRGom&8izm-eTV(2Bnxt@B60EPduN&c zu0=y&W;(B(AF0eo%Z;@o{W;UbU8Xzm>D?Hvp$7*n;8VLH6W^wH(tTv+5ssJRisUV~ zn(#2W$rIvg8iP4@WPeUzBS8rW1wU7-Nrd11?FATp6VS}SHIWI*T#4j7=Jh<3cKps$ zvK8R-#9XH-Qi2T<3r2rVN2F`+_<&Rf=I}Pu3hzKKJd+&Z9euW8o-)0uY*fVfZ6PQ5 zx@u^zwAi`DO6XOSK}#!`&EJJw%^~Rxj@aB3wjOSEV>eZ>$D4(0C8zfTDk`-e;{H8n z{;1P_)&w#(XmYeLpnzxyB01-R$DJII8sO^!&@=JB+v6^G8p26W$pe?`-MQ|Iu(8hm znplB~EfY2$hcNX%noBs?#KvwUWwRfril0vg?LU{zX|=)d`VGKZ4&~4?#M?R)5IS@d zI)?dN?<8auR8Glz@Bdi10CoC?_ExAk{oLn=&rXj7X1o<{$%0N%3xxSw|AoO4PxO85B1>yKrzVQuF;PP5d7t0fPOU5gs_dL(lZj z&H$Fd>GVWi<-hh9CbSt|Xiqxx6C821N7Jou{>m3nt%gEGCvp>d{&z%YiV_Cs`Ty=a zhByMwzdU|}>?pru_!HYbAR!RK5MI9MPr-`;iv1d8{>>;R z*Pze~BaHlWLXjH554ik4ZB_q#O9!SWFB?nwdxyeTfWrrT_wN6r3;B`aK^+_qZ8E#7 zRcqgFKo77wE%f|XCYFzqIfnqQH$#XByMdx9cXJHNGel*g!{TV(%Qb=~S5?1iiozJpg3yce@)sc1vSEzBIvEu@`?h}BI zM4UEEU}AJ1+63}SG8BM(5We7J27f&!oL&n1R5Fj}+{U$BNZ5rg9R&#dPfvnL!n5p_ z_r`Zc_M0Wy22{&Ub^n2zZV2B|}%6By) zj(SV*OxVO5Ay5%%a$sD9f<4RW?yDABGJGCnRW_^_n9y;kbPd6muQ1O5?=hoAfQ#TY z0xmxadHcX82gk4PER~0IL~DigQm6s!;Xy3E-q1>g z2rKM2GD6GkJdlt3{j0D+NP>-@)=Vk1s}8g=mHU9 
zx%4q_#%p3_!g{5rY%iN5uafYUcy3zNEd5LsvcC!K0b$NukkH6fo08?4-~|w;)%@tP z@Yf`D(oo6LnBi-&n!?3&$k|{963*z5D+QGvfMXSG`jbYSNuxK#4h4~e7-+zu`bmT} zJi_0378YrUt#SZU>@^IOZHrlU-kH;$Wd9lq(brJJ<&He?+LPif;f42UY&JsAV^Ao_ zuPnfWDg-1aF%zId01*c?MOQ>gs*u;`@pFAi1h+@xY<&v|F|rb{IP+`fWl<<;=@-yH zO`*=f&?OBI-U$75?8!g$g$QKNlFSeEgW$59V9gJsHlKmie`h0NIv2dhwa9MP+NgUX0(bJX@cA96fgU& zOMeT6|G9YLA7&BV$(3{t373HgqcHfX*aV?k(CWX*;m{Khq20}|kblN#xntS}&CA+h zDj-b3FsNl|^)cpJFeyDZ5F&9fk;*{X&3t`pfY)&BwC7Cw8MmZc&!LqhJ-;XZ;&Y4y z{`IE=|IunOR^po_=96!YDv6OMXCyI5_)|<4NPizNLEe?oH2po^ZoKvu68V2SwrpzQ zEi>!D?Mb3l5_zSFC*ydG$Gny7)2`f8gLY0fggAtk0NnKbwJz@ojYV-gP)3Jr+&w`0 z9FXR7f&$hw?;DUTSdk7kL(n2`xzRL*qV*8&6sV+jP+b|ezqv$;?JpP0i5$L;&{gd7 z2MwO>tO=f}3nVszKy>?8s{xi!X8cT0^L~q!&6HwyvBMW{Rcc|hpnvq$aL7whdx-Pz zEaBPErjsQK@9UaAhFHQuYd#Ac2-(oMM>bHam!m6~4rZ_cOv>>pp3cA6iBr#cD8esizq>b{ESll$AN7h$ypgYhOk12F#g-awz=qTyd!C``BV|Ps3oH&rO(q&enJ(`yE}W4500e*pjU3Qc9K@JjV(7nBDc}|pjk6tt-QU)!-R;8$5Mg!XpndCrtrSN}ca^ zNKnE3g=*=8C;Q&8c2BnRf}fr008J!R9I>+$_LSZR&f5mh1%Jg7-ck~sEaK7*8*=(g zcp{@mKquaD={NA+n;XzuYtS3mi8ipLi)kzZE`cz34C!%1b|)yc*(>KF7}R!vlx~ zNkK$(k#v-Mc3YZUGedy^kbX40I_DO&ZZJGJ*h(N16b_zPMtW?CXP9w9Fjy+h9ad-+ zEO#6~i_)$`oRLp*z+7NMX$P-n;=rLUQUkb&$ zi6v*0g)n|}W2~VXsDTSO^uhJsL2k12rj^*h4i{xo9>V;Pl2 z3109!za-Sq6e#JaQrF6ttekBsGzdV`67Y?Z3I>OkE(z?U?CM0_Qq?Auxd2i=BWYOa zwr6d=jLKIq8h=?952Z#qpQJLeA9!t@>M6&eo1bSz%IHCxda3!Ispm;Oh}vLmy~=O% zg{?f-?7hGju5TRZK-#b@jHJa`NU*0>G2Jugf1U2_BJiEz)21&@fu`x)V0KD;4;D z^p>2<5`dfh)5kCIlRfp9Uo(@2yk9!|V&zj};2RZiApN5xo0uuo2~e#aj4=p7>Q2oG zPQoY@lwV}H+K;njEgF;+foq&k(|r`${w@X?M^qr{*WTMo@VlxYZZLKb$jEd)BLU@c$D>noQ&uk7Vw4s90q%+(^ zqp{nqvy{Gbwn?nw#S3GKYn_(zZIk%c6OF$m0?MA=?c|?}`Ezl|Rg_~(=~n=Ihhx5& zoZuP<9+Y%!AciF|24QsjA1fWI<)yO+x-7IlO8jAC+(|IWV-QN*9RXj<{KgDl;;fIKt>NOqS zGjt`~DxPDF|7kf0TMwsPK>)p=fvD7{+;?kmOWmok+25);&tEr7Aa0N}8?3`b5+`H) z9MwiAM6`n*H3hFHYuj;xjb9vxQ^!#z|_4I0|0bD}h+FH7Kk=ge6 zW?|vWL%Ls}C>X(;G2e+59eg%3xp`(d!p8vb3Gslus!ID@#%Rx=6Ik^XRydps8n^1^ zeSWi9?x`3y*F{tH37w+JK3TUKU|CHMhxWa^X87y-2jVQ?G!O@4zSA#{0bQLFBp`|K 
zM8#zi$qpw45Az3yw(<5~hR@8wwg*`UE)e-L$%o=)kvdI%+dXUUY}Hu+`M%b2*b4TD zYwi-Ud3yDsSBDnt7NXvxfwe0%SWJzjdg)tmqwde5UG`W$)%vf-1ust*3SQDpmH?fFY!pO|ABVyN%j%oJ}zeScC65P8OOFVZKp&JO%9On?F4Tm@X zm6qY3O$*~0TWL)=mN8f>Ugy0I>JX{@EZemkJ8Z|%rh9RPzA#))8iIIhxo*a?Rte>w zsMkYteiUfFZ-2B8oSYH7O6KMXLhk|w46cmM(-YhOu(EVGT_^Ue@Sp8ElYHXw$-$2U zPR-fzvPj*s0o6Z7+0!Wi8rC=P!4EgLLr)-6C+|@C2;~)`m+Tc;WW(8$W8a>fndn&} zI)Gh~-tbkm!y8p?<%H|uUeyVUU|{WOCly^rZ8_l#=!Rp#QeJyZIYsqSy|87wWOhEveI8W;W>VvZDC~zN-_K*Y-CdH37JJj&sa}s~ zG#h=~0z^^Aw#(prJOO7etyVzUH8BIQu%2K^$(xzUZ*o>X(PQAwXXGP0`c4RC&S=m- zC>)sliY|dnJGlJs=|5J_o|C-ciiGj`U8dEhN*&v?cdA$o;s_`p+r&*SaTYySp211< zqtm8Px_={LnBmedtUc(Uto^vQS2S*cv+P|w!8PE_7f@U#ur#|6Tr4~~XBV*iS~#*C zqF1Z8YKzM#L$eEJ-vH#2p_{zgGwA*0ia@D`a-kRDb~S1oNvq{BgT^Isk2KNZbLsG-11dZj^xd_db)r$_|3(ZlHzTE!HsUzwcZw!26ExZh9qT?jCxsSRijP!h=sjf5>0FyzR_}O_=?u-1=^R%z-|zB7<2Ij2{`t$hO9m^F$ED=-%jE-$+62ISly*D^&*!*i%(C? z)-FcL?s#=?Px16zmu9_}Ymkp-gMF9b0Y1`WN?q$#O{vq zT~GL#G>IH39B55E%Uz_X+1n+!@%iLY$c0ps^USgj{2#`V5 zksRMr68o`>nX)Hvo;Hnr);KwVeXB~n31HSIz&{Pv8qm3dP$9xQ`BVEmfdK#sot=hVV9w71n>8xz`Cr7gg|HbiO^ zEyxMW0>1@|(ru!F+S#5H$~bE}Frp>FVT}$;G-^Z3zPDMj<4?8AZA5GrPKPPW$ZCg0 zTo}xAPO`m5_ciN9_NvK;A@zq{PTFgy5Dy@Sa{sTV1wH(7)?rOVd}gNdLE%L!EsH5G zh~uupnx%q?;l<7_o=fv=JC(})M2Z;i?kQQA;JnJUY}dNWbD7~ek&bqY)(9>fSO=3q zn+VJcXLVP!8$bE|Aj1l!>o>Nz0gL>x=kRHcVis*RVU%oHMNOiKe15MP*xr^B7Z$p@ za-1toO)XV}LmjQada5C+iB4G{*?E*Sha1a&1Hj;zihF@>LeuCE{x$pDqgOwe5S9;w zX&`-V1{lXodQ}@}^7;ovm~uE93Q--Wn|y5T-E7x-|2EQ(O=25|60bQ8cpXPR1Z4TwGAP$W{J4ecD(duP> z91%gqXFx1my{Ssm z<88_61n_hVMo^tnNTof!5EQ=9S6PI+OOH#U(jMc85YIHw(g4K$9=Ak zt}cA|5^~kOcS;7#I2aTw89y>6e2&m84lu9F_md)2-r6NvnX&vqY9Bqntx8(CDvpCE zLr9Y_@7cwZy@^f9J=NZa_&QfdCkc({`BPoS%XGIIYxLfCZK#?sq057xwDjWBAD$^$ zC{I+KOY_$kHtXcMcT=@|H}qo0gwq5j%tgFGn%yo6t%KA8zYo?Lts%nZE5m{lhltWD z>r=p(L++)!O5OC@EQ^Bl%}kO77SohR3fKqE2{t%>1udBqi>HlaZb1|e>Nt=sM%#W8%qGW5*EDL~rg^TyYhz*I&g3QLVTZ*&?nrUC zrmXfQp0n-`D+Le%H-mvPdg&{#ZL@j6>X$h_y@66>e|w+L8Hbk&qRZz+93k#>w2YD& zZm}zF_(m**0%#vgg?*cMV&}oXg-VDD9Sd8;-&{&8}~13Mg&D+lamIKjO!yn3sg(Db~S`2Hzjb 
zhh{b7|3uDoJF5X<$EO0wRHQo_VJoKsi))k8Q)HQkt^-MEPmLuc{pkOZm|2kh`Gol9 z%h9O4s?l3_L;<6EZn)@3yE*|NPkei8jGmGhN%g;?oh}LTd<>q&8u$}gc|s^MJvkPI zu)hgF=WT@AeAu(QeFrLg2|1F-^jPz4gw$j37_Y_e=$nfl9R45!>@gat2y>qznr?1v z%_U2Tk#-vN!QEg6!mN5FX~KVb8WXJcdU=LtI-Sc*;>)3azjI3NGjPYd+M$r8+*gPr z<06q2Hk^(elk{T{uP^dGy*4_iQ*jy!m|d41EbOu?4P;C&L|wb%vKtJUi4vy2{n>6| z;lAdaoWfnO(lReQ4;_xVw)bwY;j=4k3vC!LlUfJ$U-$iH+chJFFg4{8MZUr1UQP78 zfbnCHk)^4D87qri{vz@N>6;XGfqvATeV+b7p8vQ2<0vd0fhZG^VfwLw+Bp)arvUG! zVm

#p8!y=e%VSEz5A1!{}AOuQ0US)*&9cEQaDFTK4dXv*Ss2JHEZmqF*js$nh!g z7(PsTXuHQ67S_@q_Z*)uCN6R;d{j;IJ4VvHGM16B>XM=jEF?5eHM4mdr{asBpRXTD z-^GxJxM7R1ouXq#q|^xLGIbCUq(55SXt?xM33H$-@wfl^{X<6I_pCysXP#LpuhxhR zo^%;aF?uNv;)&ky(2Ff<{K}6w{cymn$bDnuNKiS@#<&?6elVBqC{bbw(29dI_9F@R zU|=cxcNhVVYnu2gxNRKb52_ym(j7}$7px`TQ^QsHCkM9oxDa?B7J<#xOKcVI+W9hU ze8-_dm?jN-7W^L4IjFt;pNggR^8T%+@utFqo(7y{VaOFsuU<>q{|-IOTka!)rPCoI zkt^(-YD4E3coMQ|@J;GGd{0n*! zxwy=q?<3)U7VsR^;NT)nPdpFb`Wx2=(GRHE83by29O-%2^d?LAeEi;;ncvg=M=d!{ zQ(5koaBcsL5*Z9)d1= z(}}O&GCvN~-p!C4*%;RdGE-*0RmlKfW!zfl%Uq|4>6hEauJY)E+hV^aE;e2EV2xJi zJ{{5w3ps2rj>EQ8i$>dp!De@!{aNJxI{Z@OrL8uoO&``;SD2tcH+@vtZA#ZMWV6vq=&r;om~9#2cM?*N|YfX zDc%r#TYK`?;9&FJ8_QG7Mwy<^0bNwCaWH)7Kdw#@*kEch!2kAnD2~KGvQ=R}sTIDu z8uE?3d*5_1`i;P37kXVu$zN>W^bmIKfTkXP#u(fgYjfsn&%)d@7AueEIz9QYigh^f z^f{PvOBDAo#Jyjixs*|dfp+EhYQh}Nq;BQ5ue~nvHVvo5M-!0CmHn{=F7z(6_9qk% z`|AW#-FWN&_$(au@|r+XR|oMgh!>omy%MMBsa_9KU_{730g!3FWtoprh>XqV0Wjb z+_-do-4VSP;rg9C^1CYL9#N9YjB*%&H$b48wNjVLxzAdRz7IIW_%CbWp$$m^p*USt zKh*t}YiR0@u$aF#zkao%LWmIO@vfFZQPknl?14^%J_3Ei;*MWhrhCgbq&qH&PNs83EiZkbbD&k2@2Mq`!nf$? 
zJPl=p{Y%9enL`+)m<)Jy{JAJ%Wn8~~iVEINZ74IU>pwhr*tBkI-QD#cc3GvEJhj^3 zUC?J~oZaU15h7N|?|NOD<6ZrNqPVyFss2mC0y%rZQ}2*7b4~Bm5lYcqRr&4j{gbpo z+czkRo#V!zZ(?o%gy|IH=q`4;{Y||{W+0+jRcd& z)BjldMpEhEUV`3sXzDW<*wqrB2z7N^NHRoz{~EkPat0(sB&Sz^m%}@3y2hN{yNMXY zKx0oSMV>{Qw*bUAM|UDuC1xhn+7rHoNT1Tgf0CjP4X|{nZb5~;Av`jc28i;K*lCx+ zv0NDb=Ga(@#x8D)&p6bO+N>nDA%?_v2E_xBUU>BJrOs4eS+n1SU*%Xb=_$`qcfj}n zE8S6|Nk#T08FFB?AcHKOUH{sqX(QRAgwT(Xl8A23VyaoWt zqeTKIZ!VwdV^11FfP)K<^gBTLUDn{{T-ER?!dbuGZUfkV zkvQW>=vf;+k*wiW71WZJIkq1%bP?a2Av|b{1;KU89V7mnM+s*pYm|?F4wH9&yE4fp z&))VZKoctVwq53~CvU3(PIE3@>I@0+n@3PlO2%8oNxp7hR8%1G;03X#XqrH-d1J*# z?_ax;pd1dens!t7>Z?z)ko@#=UHiSvq?jAqFAVO_JvnBa&~kiKC7QSI?hOmKSN9L5 z{h4OuYfPaw1*PSI8ZQ0p4tUa=kzW=jF9GHo|0zWieCV&d!9Rd%F^}x2ty@Nfl54%) zK;KgF98FcNOy!!fcW`K8SbV1p3T+X@<0vG;8PY{)CdkTLAN_pnMb-A^^2?0d_>PuI zU1W>R)|t-iJ7SU%@g~_&l%%p2XZ-pO+)la7y*eQ-o%zb;TQ~kZP3I(N+E*Ce-bSDK zNLvb75G-2!`v!dd=cM<2ZC|`7`59bVaID0^cu7a${C%Z9>1N*6}@Z^bc^$>cIFmHJ}_FfpH0dc+h*```U|`HCJ$eu;mC(9Wn~(-F@dP9la#bewn5O|XFu6qM>@Ky>DS7_F5)u;qCsInqBDpB;5y_OEYH3mVX-0Sr zdZ~-w-fFjlme0t>c+sbq=R|I|@_MtiDd_0y2ajfcv5ej+IMa?1-HMFXj@15r%r#&_bpC;s2G- zhB&ELl*ligFQ2$DLxXY zcuzaMCQUV1k<#?%*2&$g<5ON8fjQFr-E-idE^FYhoHx@io}e61R(TelD)R4^e6?}g z#|*U1{xk0X!kyUJJ*9LjX#R59NN^Kj)mr;E*85W8Oc4`*w1o)F2Sw#T8dk~w`xnF} zHiTVkU}+hBsP$gW<|k;MW_%kS9=2Eixv88RFm$dsmjBT83!(Umk{A2QZq2SnWqcdN z1HxlUzAUg3y>ek*-1R2<-jz_%2p2NWdHiEfdmtMojE7^W3L<6hfkH~?rhx3Vp?;G^ zw~wQYnha=^xJ8IB_(C2V^tVbx@1jgsz)@VM!j2U(kHD5_bf~*zC~VIFguK$qEgsS^ zG7XI|PKbE25Jn|h-%i0CjTmI9jZ}-0=yDar&9G%E=IG~0z`N%PibffJbV9C>cm#w) zbru{@GAykDf{a8*DlO8z5qw$EQ-sf|qe#nVjDUTTH2@*I7BKv6FmlsPh-?uLH?!BZ z%u;Y*Zex^*|0vxU-~iFX7sh9?IzN;v~?`7_Jc z5T*B7vTh>vD}#t#dYHt+@Mybxq>oGVD>c{Gc)c&ULhVXy)F)tnGoX}5CM>%Bn*xXZ%S=;q5C z8+H`I=iRZpxV~ifi4*rmw&tfxv1FkR_7qovUmDL!8?&rZlz8z3amZ?LMyrrCNcp$G zzB;9|TQ^)%EV2Mw|MqI1%O-T$VjX%(jI8uY(t!HuM9I9C8)!^T*5F)UnR+U*9<9iC zAh+3-_n|GCkLDLYl!&8!O`9D{P6%4}lDS6&hXTSK15%#>Np*0lFo<1l#@S&Wi?5$& z;j1o#;sMHgC&il|I-UonskHmPPDZLl@JsI`wJ&_z{GV}ISM3R94{De@kGVuN9;g%> 
zaBoC55>v>FhHhx(r=yK?PlQSO zHLk$3cE9@WPA*qYKky@-1G8DBA9t@k;(Yqcbg>0~EiE=Gn3SeqfWjg-m?ovKztcck zjN>&WH=Ska+I>K{ZmHys;GFp`i4$hputQshr^{JbwtS=27HnO0LT{EOw+E zP-SQ>=lc~s{^?lJn?H+VO;;R?dMc4X~uHZS6alTo6a#>JCv{y4{Mh)u(!I3GgOTEkS?>DsTIB;n} z)_Vo#4^eX*nJGLaF9>h<&(C{J9IH#W%QL!FLvQ^eL1R{lktosTy6A%_QFL^5C#Gf5 zUZR{2js&UIv#0KFc`^Oqoy*xDbbrrv_g&(ULW+prZat9kwM6H5;oprV-ix5bCxlwg~>ze4X) zs-(+p)r+s>SVZO-IX>=ANC_rg6YmMX9G-`hKL=z9@s`M0P7q!$XcUGZlRPw6{8-xt^0G?LsobyzoIsguZ^Jj75n?Ocx%Vi^b z6c6KQCL=+l(eYQC>`fvzj=VytYSlX4Q(x1Vx;5q&-!E4pQYrKPsgMz3E}TSV+ag)w zMAQYIKl{JNurJ&QaI!G_od;dO!LIUiPE0wVxu4NZfW952g~TGZWs&f)BXhC-=ei<* zzVa1m%eCW*qu=AukzL>dPQWoRjL7^*g+O8!w!_r+Pa}Xq2s`sqi#7Ecwe+Sw^^(+TtNdrv zKii1uQBFRgpX3~0ZvwTP45kxoxdpUKj6^9gjG6Va=;)1D?Yj>GV4TbB(0aFQfS-v0 z&OS18WCXy@bExzh87v9G4M+L0D#4EdX{6a-yGWbG?K)4TC1u?(Sj-zjk3X0lT>qD{#j<5%lo3DcpsIK7gfHg_xF9If?6(ggMnofwW@HW}_-mn#lqW-i7N@Aoui|@=cvq%wlWC5>`yZQM zj?0w7FKbHknWS?R!|z1zzaXugSwtKZkJZoM;fuxwey{8|$fY+!4PQ7b7RI&b$%19y zX!d_c(jryHTx3O*J183RI+Mij(@(i*oQL$)ErY!dd-*C19<^snB5Vs{xjL~1vQ`k03UusA{>r^ z9Zi`jn>QK=sIO(;@&*TfNnd(wEMK0&wG|hQLl`X9@;~79EeUKoK~3iCm$WO=@4sSl z?xLRWv~wK4jX{M^cnTk>?m5w22>oPqg z+zhOA`#XSsZNH%Wx=NZ7Y1Fleu2r%kCdT15dBb87puBv?*H@g;0K_r8{o{8z!5q>b zocnon9j7%%+e19UiZ?cbut_#Hfrm(f&Q+hUc!LX9_P2N_K1()y?v{gU;v9TYf2#%G z^CWxHacFSgL{Z%LPwTJ*aUr@|FL^lH!1cwyrsEzXTrZb^=~7cej*`s3pQK5*M~D1+ zDRZR-%xk!AQ`i4X<29A>E_=0QOWf`MD+n}D5PU!4@D|;wKLMs#0+)J-&QN~-4gZU~ z|8BDd9}#6NEd8JH@iYl;&%(F;)ji`(y^j0HXB4aWvoQGoqXdMTUk~M-=W7o;ZhSaI z0)M5t_(#9E4X#JZ=;`ZAianNXIQ;(678KYr7i~^$RTP%J*pJFjY<*XV3^vBCYi*n{ zx(V9Fch#dg9QlGcS9S`XdM_d(#w-yuJb&q89R_rSiUvD0;nc$-;mYCsYxF>VTZL`S zCR6Xi2x+G%DuV{J@E%%)kBQLkU>I7SM=yGI%Gh`M+%A#Zj2-;XKzs)mz|ufBc(3vX z(ELRhk2|agy9f5a2^c(Gm3?_8Vv&343iU6AMba5jS`v(??!5+dC)iHyy=yk-5;V8o zl3WPx<<>V%<}dGp7y{_UmmBA{*4*;P8DM3(cbrw7-%`pb9wqgam}CAcVOL<;Ob+q( z&3P4)H^W_`hab~l+-X%U(_-q}>o(DFe@TQ(-`1x49v|D6(t`NRhE=y0oa zY)OB28F`ejeWR`}jF>N_4T}A7tIWz_^LHvty9T$qPe5(HOcthP#;vaGpw8c^FXaXH z-qnLc|J!V^m1OVvmP!5h@l@n|AW{6kyP!v}TwC)U&#PeyrFoH;2jPa2nh`xwIWYdB 
zklJ|MJkXRLgXGQ7%d4>9BOqS4GYDHbCu)uosWLJv<6|LrjIMCIgVbiFq1)c%bL#!k zV31sN5tv11r@m+qJ<{rw;y(Hn_3)`_l0!3vTguQs=lAa-;;|z01sJnd4|pkcZ??K6 zjQvuvFJ};czFSCt5(-5~2|b8WHH1FPa*laFt9!b_Ma{70OJKN@MRYi&p;b!OO1DJH z6ZNmSgoGYTnOP9`;cUoGs?p(eL_<0sGkOT(1I<)HlP%m=hz1kB2AM@P=Df?8*>}*9 zU;<=XYWnpoTpFdGO^uxQG>nCkloZzuH?@OvvxIm}m1zq{vvxicT5beS>9K#%}ZBI^SO|~JwB#zOx9tg5E z#>HzzO%Tj5(dT!&`OS^=j#2}d^-Ct1Fda5{zmv4#h|=eV$LFiQMrcnHscIC$_aY+) zeagprFa2lzk(>E1>UKcEZJl(P@9|Ekx2HhJCGgzXi0fFN6>)LsZ+j@0pQz*RvWHss zh!2F!$qQG}jlXoRW!QTRiI3-`%w)zy4x}Nq2hjeDBelNmv}$7@ugy641jCZ49pRS& ztIrK7Zw>IHlwPFKvq(xxnzJp($lNhI#wt&MC2WVO?pf^k@OS%EoH&7Q3M+wOv7NXHWVD zDql?Sa!-SoW&7cLft@MJ*Z1}&y5l1eK2zH9rAwH9{KQ4PevOQ52Lp?sYU2-T>C>BK zNN$oLz;c>gc%sx1o-fW#Zm_Xkmb? zo_2>#{=R?Fg^N4qv}7gEFB};MS(09q1uJfm#29&wGX#nmPjS{U+aI+&w2}wffUInW zV-Tf%L&u4s4y|WFy23ufVK!as89g{RP85}fSfYty^}Dn#8>r3*D2a>2&%mnO0}9#3 zQ@@~eA$C(*KVrE%*7MU&Ki|!8|AOiu*oedvs_(QKY7GA=;`HE2yc|@oj;l+y2yHm9 z;xoSLC?%tgb$fX`6q!YP0Y=^1^!Z%b6$Ta|zMbP-v(3+DZolfh4%Wb`lJFyJL-Jal zUZ7Q*Q@t4y$e8UK1C*uHW$l0y&_*?c9QXvr53Da_?&m6T+_n8PWR`I!PbO%1% zBpJp~0P zP?5Ko9x7dZ)92>w8K@qdD(S{571zbJdO!OzZMe^8RKKF4In0t_1>b0%9O^`NZG%cY z&|G3*Yh0FT2Rde1M6%-_uro^g0ptGkAINq+YIb2NK*2`{>gvYOVo!G_0`e~cY{Z2m z``5STy5rvHA@#K)C+>Z`ui5Xzp@9b3OFe(vH4jAi;5E^wfg<^CG*rz;V!q>kEWg5u z>@d!6e$t@bTJ-q%m-G{#i=6e_$yBsUZJ3Yn3180*1#sMwvwd)g-*863XwghsEOl#_ z5&e3fCyXK0nz@hVcqoW>7JvVFcgOPdlhDj1!PQF-oL`n{?~^ECMFC3bA$JW&W5Sbj zX!`q`i=e7(DJ-!G)@A#72_ML(tk%4dpx zZ`gRrHFDXsBx9e>UhA!4%AGDt-muxEJ8dmyVmG428t$YH>|t}?QF;A0A=vj`QejKl z{@HhrGsUP=K2mcNSG{aBT4Fh&6+CP7gdnf&yuDB_frDxl6xmV~u|?u|Do9DH6>dhe zy=I^qyM=6#q8QyLZZS{4I;6i^cEhtzdhM|X5bv!W*n8pA)1R}7)a@a>ZhR``sX}y9 zA5hw+w^~H#P5ZF@zL1;FDjKJ1Cni{OD%z~dA&7?AqvJe=2u!tEH+(%Y9*05p0u`kO zi)W~Gft(UXA}hj#G;BMLM@Rd{G}|Z#a&9+j`s%B?=YdFL@eH3#+K}m$f^Q=(UBTWP zsNKx_ZTdg#)%Rtu#71j04wqin?)x;MW4*tM>w8yKGC@=*-E{h_e=z3)#$+*~3Qg@g zA%yxYX%43CCbZH@wV$U;tpyok)wOan2FvOe(w<-Qf>ut2!Db?u=Gn4wLHkX0yNlHi z#qJAakC>QI5E4zu-Y@pixs_TvdPSv}qj*&EXI=pJ$zy>q3ToTDlI0dq&G$de&*?yr 
zGt9TaU4G0(TTaUd^U8Ls4j+qyPoLKApo`&YpOY76>gl$x`z?R)s?CAq-x)=Vd*3`< zeeu1}0{LUl-L~?XxN-${-h=sj$r%wpx{uXVRr>aGcC_lt8HToaB4jO)sA>yUp%m26 zsU%JI@>;b@ za?Wi3Tzq=KN6}6g_9SNU#dX!YefIL4HFz6H(t_ZoLD!1*{eh?n#y2{;!k4O309oLtUKF zl;C>itVJf9^oL(JMg0f+RI(z@sFYMIlnqci6ntMF4ZBcW@^M|qSuVDT*gIzzDEi+} zZAuDLjmSBgp3^pb?&<9g<&XCcTHJT7WV>puurVu1sb$!FK(07f4uIA(DckdAblKE6 zYKl5Icmz4#e$(&f&URlp$Nq!<*BO8NP?-77R9;tLQ822R_^x!J$<_T$Ks&=_8%eR= zrK%zxv6m@a8)=I8WAjU$#7B1-xs@F7_gQ}D@C(|cbOz5rVU^!Sk9T_S#P~?J-S#>Z z)$xx)E#DT=UT;%)Z=z}Xrt>$ux@HlrXt+(9GO$+YC$5l2q< z-ea67ms2VhVw^FgHdxr_6ql7U`gi_ zzYPb81~6w~MZVR_^OYYrw=mHhX)U3j}izPk*X~ww&N_5|rl+mNp5iE!nnf(RiM{({(?J z7vm%Qa>kg_SDiz9O52tMW#nBID54JSFm~;zj8r^%-FtTWmp4B}t!nAApv!r5djWa% zoo9R_@aXZ7(t_aU<%Qu|aunrwL|)5*(`668`o1gbG z-7u3$w|LrjowKsPLoJ(ZbVr-t76v0LyW^XF==s+){aRqD8}8qEc7J`MmeU%O%<3}l z^X)sz0#-?%=A@T-!`j?w7yh6s^-0qyPL?HyHZfjk$>fq1d|fG5IOJcz=CClc)AX}* zi`dw2rqx`0@BIBT!$&py2Z6@*m~cv|i!^ha>lYsCc(VNZl277Gf3aH2^qr=A^_@j5 z@z3p-oS=L(*%M+{LBD)C^(56kaGm_rKJLpU3y;|~X}(ARd}#LUbnxqyd}L->Rx;^P z_m#EhK2q^XbjbWztmiv^tBhw;qT>_4ksqpzzBJZLzY49dNL_I9d!;tpb$$C$%q$Ze zs)z`elkW=UbsPFoc0|Sght?E>(z5cSf^q^3Sb_WMMG5X^(?ezK{sW85*xx&km)A=3 zSt8Dly(~S>dEb#qv$to$FR@qfL!PvFEB~83YIJTE)0sO=ZCZ8$OWLn`hHieosqa_J z&m{kw*}t6=O-FY;*|@PUWaey|J+=60;1~Kcc6?O~j2?$uZfo=?U(w__nX~EOm>o5F zW1jS`mzL5Ihb-uyINr?5%X3=0-^eJ;QB_Yh%J;*-{)cM&&6(x0a_M;A#CvWxcaCUk zXcJD@Nqb)$Mk>`PsOx}?f) zg z00;hWT~M;|D#sz`Miw!QH~`0l-iM{rP*_#9)w<%H(4h+cqow`dBRCgP&+%SRU@0@W z#j@|eA?q;ME>0^5Ik0cuThXAH4XeK)tGtmer^C(e7%(Uoc%vpJx%aOJ9#)?zSuP1KySf2M+x#s|7q3>>oQ;{ z!=_xn5yUNv?+ltIh2yFwsqV(~m3-$3qQKHxUHvRohG)FkA!OBKq zWeMfWb+5BJA>Ee0YhoBWpF92iD9vNzHyCJh~NyFilk zk-H6vVj7%{zj&b^PPDao>*G^=pRsR@iReb56GJ32qz8&JD+-*j=Kvm_>YRZlMP@ZB zAYu;i7$&`vXVCLChpVz*p6@Y;62G5gCYJvn8gln&lkr;DK9>$M^?l85_nF;gd`eKw zy$B8H~V|x4-*e4kxoS4J|xx(%OS#a ztEu<`dX1WgcS;|bm4FV&IeR!i?DJa=sqIK|JLoHlC@gC>`p}XtM#eaZOR{XllL4>V z1X@v}o9xws?{Isxb_q}r^62IyO)Ww}`|3Ah<|CNdx+Udxg7te?I6AuU~cGUdl@ z{&cTym~~u!jjl5)8No`3Cc{pDe_X|#+Kn~i0YVlml!FHTnk^x(2J=; 
zRpSi%lxp6Ef6~e{B#qRSR?8U3L(wabw(0!R-T# zHTr56Xd5o2J{pHOK>#@LmSO9GUe2Lke!LG+s|t`R5QL6zH46ptKqh62TgoxqjEGwP z<&z{i%pXN+g6`Oj<;(Z}eG>Ckjx<`nLc5Y6e@>xnZ%k2FUILS{_1dc?j;WqRIb)oJ zio8ygPHqCm3ReiJkZ6pvzkOX3M<&)e$qa(k;8e?Kv1uHhOV{o}JBXTA6q5!yjLPXDP6yn3>>L9CbEg=cnZeS zCh^GL`=S4wVpGL+8MJ*C1>>h(+Z^{tWf6qNkm2zAOWwiAMMc@fZ%eN1GdChL{3sA z!eHRV2lM*Y;Dotx{7^v0Qelf=yOI0f_g$@KF5{pLn~L#!WS1X9li(BiMXvZ4qaH7d z@+b~CRXq4Rc;u27neJxWu76kU!x7h2xLebQG3xA=yE9I%1TOh1TRrPhN!`nbEW4KQ znW_e+FKKwAveV-4E{N6y;YWP+Y7;<+0+sEcA?<~w73rUtW(}Bp;hg%VB{ecw@NIR8 z6yI`pGK0m(^-!g`=Z~@}xM~lF%wmVe3@_3j0ZLPgVlF!17jfnMO7#zXeaS9gzSe(K z$}||{q&{uOmX2;5pEsQO?$?XNdd07nHC#>rDFGJp zQ^!v*=&F0=rCVn?A+EHyz$R-L4O;;af8>d@rTjobG=h?B@F3(%{Me3)wjMpayy&BQ z_DE8y4v&kK>|SKjFcifkvGmoCwN8y@jwSj|FsZP8)fp0O;fV(}`Wau`gU&-a(;@yf z%>`vzJA&ccQJiX%cT4rG*ZBII*-{#qTfYo(|O!GSu@%Cl^ zC&kl05B0=8h3jEY^ae1}M8P0lzM;!BJA;Hys9(sRKlz+ z?&1Irb}PZ5p{lj*cKZvkGt!%mepc9c(FX`>5=w&F1E82iZ11@fe|ju{E#qBBCQBLd z^R>P3is;K@^e_UD4;rG)t>qWYXeWxz z9Cz7??Y}gGxVs%HY(dG6*#r9|&IKlIsx~!x*Wm2MXU8TvLKtkkXSbia+ZkweAa%*& z1;zYDczPbR z!LxJZOWhH>wIbGSB4Tvj{eaTnGzPN) z-PS&R0`H5u`vw~lBP^01sC|Q<&RqCE!(bO954%^?mbAvk)=5*+m~(^y9Z}eSVb&{~79<=wxrS^Z#E_0V5ay diff --git a/fn/vendor/github.com/docker/docker/docs/extend/images/authz_deny.png b/fn/vendor/github.com/docker/docker/docs/extend/images/authz_deny.png deleted file mode 100644 index fa4a48584abb3db280b8226d18888cb0539de89d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 27099 zcmdSBbySsY*Dnf)5~8$(2q@hlA|NS9OM`Tygn}SQH&QM-Rir~Y1VN-Fln`m8B_*Xh z_Po{i+k1awpYz`y`w4>|{)(SmnBm_`vP$p%`-!N8Fx9lYaf;>N>#OjT;s1Lx zpD;Aij7t}k<^H~tPiU0qzn?H4W4u7EAvq-JEXGqk>Gi z80oWdry&CguGZU=9&|Q?1@>}#Eo4UJn`31bFJBtT>(^}eDrlWvy=|))!rnq0)XyxV z#^yfhxo7oI=l#R`) z?Q5OqQKswG{N}J(#MdY~ndle|sWyh7U|Z2E$(U+M4Nq8->25k+U*8!r>iF^gk--?F zax(is{sU)wh4z*t!Dr@;e&^*vC^rS(87+K!>m>T|*}`QjuL*%nbHTg1a*WjRn8Wo= zWjY>LLPL9)(!XCwHuYiDHb(!WgPf^pdgtp~3ew?JEU$PiaG%WQCb|V(zJ}?Y 
z%kfh!ynnvoSW?pGgpE&M@-1tQn&kaqg+nZS>R1}DeTg8;v8qzxgV~tF{&#|c2Qh#Feh32>)UpQAunIX$j7lRR*`Y)mte)jbLcG{N6trxoib-BB*ddA zZpyEYR5%GJ+i<eGv#=TI>R^!uR?-PY7+XV6U;q3cY^EtGN4WB4W4;CBQ9h_{XoH=eh ztFLn1%Jx`q#B%suyN5d{-It@x^#YfaD6`&m>*3y%!@;J7>)yEYd7IT=M#et;E$=1` z?yB0w1c`ZGG>Pf$zE3SMExNbhRo3>?6_-FIR)da!hNf6b>g@hoALX|;p#k}hj zsJlh>lG>gItLLgQ#3d$LzVKctZF3x6`SCvg@sBc{jcUim^v~N%104=GEp)4%rp-O% z3Ob=Oq3ww0P^aKEe~Kq&(N1#h8yS}J+;_q_HQ)ZB^_tZWqZ$XBb50+}Ix3ADF=EPR zHr9WB&VyS}+0UjB&(3N$Q8RLFLHaVa@H<;rmtK>6K5Eh@Ap6-XU;uLvjtx8giyf;%WV|V4p z3=s!GOO^eM^gyk<^YZnZmfvZ2PWC4ApDlb_SjK^*s)5XVXdyn+3}WE z(iIL}?Slv!u?dH6*imk9_XgT%ybJO)^B;a3{rm>&La&TVK+DSpWwDdYzU|D&a4Ny{ zy5miS1hFIk&2M*pmkSIO8R#v%3|fAa;+Z{Ippy}ZPh%U`Qlq^;J(WwvY@E7Nu@#U$ zQf?JfwD7AZqmWlZ?kL*(bSp(`e`z3}tp>X@Swt_6U8mQflC4#%P}j^VQdW>8+n}Lchp?sL2MlR508JOpRC}c z@J065#>wDF24P@VWQ#Mw>x2Ki54K z#z)_hdc1t&dqhX{>b+`}&aK+C_k41;OAsFCS~q>vboYm`W5_AS0hC-)k{knEQr67R z{=O_Xj~BY$eFza^WL*+!%xo(9al#*-E;20(R$dKc-^8SzQQko%gAt~(vC|rUn`EQZvXD#_9ETz zLZ$O>!)x=0JIh+b;>yu_lcA(+x;my|{p-z#uWmPvOuxGM$T3p4!g|q3pP1UlwsO+z z)5rcc*gV*S>$Jnu{uQiS36YVh- zG3~JM*4a8Cj_rXwlp#!;p8}@iKfcYtiT4x39;AE!sM|Jc`&ULpJG=@PoiysZO+;Fe zv)WO7{fY3$lMal}F_DC+n$};w3o&`@ENMP*wZ7ZH-5S%-Z~>n{Saz(%z=TnTaLUAj zj9Dhw^;fchebZ*3Z_uaVak#&WYpa_+^(}q==cuWS9p>6eS1*1}4WjWHW}x477lo6$ zxz#PEx5mBFK!XZNRg7W%T3CE^!Q*pZZP!@Gqua)QJt3bOroJ+J?KggD!#)iCBoV*y zeyFYLKwY}VQoQuZ=!g4PLz>&#^rX2-Sv3gAtz3y~NIKu%d5~({g~hmD_ChqQ9#DXK zwsHz*p`cn|j@o30Y$|~+ zDzAGaKRsWatr#+V77|W&ky)Pk9!lE;9oI&an?t88^YU`O5+_cz0exlN?sEr)k!1_0 zJnY`ujGr!k*c@ii`|xW*L2Z>!Im=#Q zs-gwDx1Wq;7A9;&Com{nxrXPt`|43L;kJ?cIF!A$XuKwqSj$5f=H<6Zb#r!N%C=_? zzMo%45=;g2&}-K+S*J*^_GAR{%Q;8zn13MUH6%^q5x;>ITlXnB_h5JPChuKwg< z*xJhCaGXEI$&Zs05iXTfpIBB+`RBhUI1*V~u2#GX_4iBWvx<(>I62%YeD=(gE6Dzt zkm)FTw(9j^swa;urjMchm3g0ec|uuNrVw&`w^1gox{FmW;_WQ}*FehzL%5Ouf-qmi zwL94|+Agw^8DvL=g=-mJ-tgj zLc@hV&u|f!qHo?(=J81h+oAE$o$qxP#qVoVevFTItLJ-vOzgbNJT?0UOifAH}tjQ$>^Zkpl^1>I*sRclu~zd%V`C)uJ8Iw zbh}p5=+?7ZQUe9o=-DVacZbk(g-!|Knk9k;8j$W~kJIhKGI? 
z^_O}R+k_5Zd|CSv%eNj6t1S2?@L&&$l|JZA(64b( zjhH9AY>^~jpB~GiF3o+&$Hkx6dV4jbXXeiDnv5gi!R7T{|L4th(sU8Ro52BP6T21n zy$|DA9;CMQFB$p9-+cHvXreAia2N+g&eK%Zzx?BU&UnY1$G0RwErG;UNhpt*dtMX@ zV$T9t#CEx+M^?Ytj45L-*UGl2?I#bdwB;~7H8h`K__XPH_b>}Pcw zMKJZ->L<;WH!kPet_o()u2s!kCbJQ1>lAS@KE#VpD?w{vK7WBIG1Kbhs>wLSjM{y93*9M|b*!-@mvc3Z3Q{hvqwMo5h68iSuK_GVR& zj~|#`!l#o$;cjGIE59l2n&=kLcyTFN-}ibf53QelOr_UyG8x|Ub&)B?`>$nxHob5X z+P~W)i7%3wqr)T5=XXnJ`*-Dpn}YF}KNmw$(P|~9kpr$~?YULG>=j>XI27k~ zziD4exo~(}i>=!&1`TJQ5$)cS!SFY?Y_j8Ge{17OU_>eS*r1|DpR)_%1fb9cZ<$a% zTN@LsEo?I1-tJR=qfRYHYmM`?nY`kDM`)CIFHXbv*1D9-ECcUUB>^7I^=Pv%3i2@G z1kj)t@dO&ZDUqU&T*55j3=>JyUhwy89tv9@P1g3$h!ig;h8i}am$*ashfiV$qZyrH zG_^GRWrICRKB52nqoiiyRpg0(m@DlFjtY;o`^EQOf1ivFL=+F3>>PX`AfQrr&{OtyoG#~qw_H8&yz8w;C*QI@f82?QG!({7#X!Mbj1EX z*$12cVCdF5+r<70CwWV3JPO`so2J>{q{>n*ht_6BamF<3_kvIBu<0xD59P(wPGXJ= zHIBbCrUhpIINDies`FxuIhOn^nx(dr6XqHB!n1+w`k^}=0ZQ~2$BFE&LcHnr&U&~x zcdpKJ;dPJF9VSKVL0?yQCIZy%MQO?j1-VH}#)i|Gv1)xY$A^+Mf7e7#4>KOf zPb)y2YWn}*2MOXsH-?P7;}(+LmD&@yxBx4~QwciAj#oQbOgD!(Y|Yxdl*X?2ft^Gy$L00sz)O8&4ZX)%fY{2+zuvHDkBtJ* zul3={X#Dh?X<)9*)XyNALC>F2GSoP_*M-M#a_D{NKcEux8ha$_wA5dEn6sFZ9PW>W zKWOE;G=!ZZ>gk5iSYasqyk@O<2_EAqES$*_HL_nHX%f7=Ml3geMX4g7_o&tBpbob!CPBl?IRJIVMv7!yhU{3EsY(v- zslB8@xz8uNuWh*fE(W?(WLV2ear2=gOLnc%=b6^9s}9r6I0uV4Dch4p2A^iTZcjRf ze11vJm>SJ|74D=Ke4qsmlV1enzQ*N7+`Ug(cS=k?J9?7~JWUn7Ov(3!<=y0((q5-ogh=+5 ztfJ!f9S&u5nF#8_K9&bXNkY%&QJ1F+jp!Pp85A@E@hCI^ZV#kM202pgJqpdK@%tIC z*3Ftz7Fc#BFh{|%W^yyG+VkkSE1+Zq)|ELfs^w}GzV29-2hKY?YF9tdmp9WEk?=VP z|I_}Pmq2agLAp8L=My_c(D17U`0wUYesO!tOR^S4_47HS-P)s!zkQiRJBURqo1xgg{nM_R4901+BFT2b5E$c zuT0k0?@USPQTzZ>>-g)tgmQ(&mq-aQ+2w)!oSfSG~dF3tTp@^oAbC+Lbt#!nhU4+_Z;DndM7@6X8@(w?mX}BjxNx7k2`HQUagss z|FG*dV`7z9{xB8?U#=Gse`pUgb-}1BF}bh!S5i*>VLk2+{M*bVS4mi@LqS>U9n!v9 zpqddlm0fmmu%CdsZEwO|dBSa(1f3!D!S~G}uS>E3YjFHMVSu0qircnH zW6qPFeprbH$5!s{`<5bYmV}!T-p7(OX^ynSjEdjeGLN?wk{cDe5%H7z(Dw=YAwg_A zkj!iEn{`c&c2{@shzwmiFXo$J;!^|N=qb7vO0qF^HIev`yrP^l;O1?dj zNAFwJ+gJRP-(~vgsV#W;i-^7DG7_y7eY&$W;1fDqNBtcHwYQfCDXtrQdOk3OhzSbS 
zTk~CgYpbJ`m1AAHZGFmOPd@(qbBIhnIsdl(c&9Ky8$H^78eh-52~!Qz$cBfp5JV!& zE0Z2uVb865GeZpBk3nixB=9_*e#zx1aHqq#{2uUD$LdRzd@8r?CdxNK`^tH4^wA+} zZm`Dn+3Id17LB&8z`_1ZM1?yPbB;ank%`b)RLtMnQQWk%JfsOHRui^ttWISc<+Ib{ zgB{(ZBcarzlSjRu4=LKQilmd)C_dN0wp$we_>8fN^5#R7QU>g~_%}y#pr|+{y8o_y zU{L4rV_ffGX$SN=~)Yvh^P2Eo<^ebEgHo zR4aRU818PT+9C8a^du)t6fHRU?@Vr*Hvd9XH{i~=9N!>xNxd9l_r{dpImr!-Gq>OZ zEcK*~<7cRSGM2~kV;R&YGh21uUC{}{Q12y|8-vAgT5`Ju7_$;b_kU<$?%~wT)96Wf z+D7sr3RHw8b<~aPiC6SWjD5Xq+BEVElD$t|MK(`1+Guu2(#E+4N2%#Nv@=c1Bcg;b zZ8|}f?R#P=9rOHQqfn40^Fs!dG5>{W(X!j8i55*L^ufE2x|2O$GUVX|%_Q=eZ8MY5 zsUDkP1`*!KcDW9BhN$3xLB?1_%=^@1yplW48q=u>6<1HR?D^eDDGe{7+MW9Gt;8@Qsq<<~&o!ReCg2UPVQO{sLClcnNEH=>o7EqvUA zeJiU^#AIr;ZQL{VxkaqVgn#icgw-~4JK(&~c&F0Hpu=3_xY#?sIUOo~wD-F*meSB` ze6))p?X0$c#OUz`Mlz_`5@#(hiL=jQ*sk(F{(drE7jD@bz>~I^(b~26%P>KE*3G;-&a;p@Zsv-B=+T7D7P+^fU1(Ir^m%&t)^rzDa#C`NL9!fd`O=#Yun8;l{5 zR9#rO#tjRciR0_DYt)yAO2@E7He@eVmZ+k%&5_gN0iviODV@+hv=1W+XsvkU+zo6u zx3XVUjzckazoei%iMdYOgne-^EwQ@Ul3+Ek$2fvoWZO|MOFpjr78l+nV;^JstWgY$ zfGWL|*PDq6*E=;-P9xCbNx47!^;|;5kDcc#;)W0Cv58`@tu)5`wf&!w zbzW|$-h0Va7YUossGjy!&n4=^VN;XlzJ+t+qYVLkA+GzB*U{>2GHveMz*nNX)Ta5E(u{SO zeNVY58>rd$dKjotT=mCgim!NEes?wO9(QUa_l>5BJkCs!q?k70bNr=JJ0IA+D}l1V zFw)7%E$F>kF;pJ&LzJ-nL&?IEZyTmB>^w{h2?NQV9{}OndDQT|du)hx^{43~m9#ooS9CdYF|yLV5$e*$uN`3H)IUxQZl?=>T7 z-|p+lD8`xMQf63_pQG^QI2V7FIZBfDu{26NkZaQT5}{pDVtb>s!5C^^i4M=4+rXbo z(__0w#*4Uhlq=i7xHca2Njo>it+bix`vk3+a;`D=KGJe8>3~r&@rdkF20k<4`)>hJ zSET!0Mqj9skX)w@F4=sM>bV||CwWydw`l}_nVnqKSQqEb@6|Wik^@(&xv5W0NM4c^ zfK{~=aj7Al^aOkIv{{l}@Mu%moUBKQ+gQTgY`%5$1g$0D{+@AA*M7-m=})_heKMaU zb$q!7pN^A0aQIEZo?&brnn1lZJ%f647@%U3aiJ@KVrGFf4}bzg#-FEfU|S;Zgm zjTNOr&@kl2g^BV+xQE|-c>42h?OCoyt{SUZD~c5WT%KXA8}u!?N{@2`o)J+;6SV#K z?!|^~iwv5e=+~0v%SPSE1EJCy8YAbx?W5qaJ!=yhvL#TkJrma9&D8`QW>tpI-Gcf_ zRzIu1|9LQl+c&mxW$$kFZhcjifM%VC>kE;jC8WBWuHd6L5fKr|E^T`8Y}cCy zzpu4Yi*aK;_~cUkC~26^5V0mk1bz<|zEPNzqDVSj*Y`>ET3x)_AmkI|z1B^FiIW+Q 
zs9rm*@6ji(c+E!1@I8pFu%Btw3SaAHWxA}?O~aN(pPVX8(v8{MAjoof)h!J@d*yG`u7_JWsR2g#+igB!%cNnB_pkl<*)ep&j+@MNcWK2E5GLZDv+}jb<#%R zJzT&eL>48AFnaLr1mL1-u(q1NH5(T31`jG5bs5zqP@!g^j^^89O~n?|CMn&I&a<^NR^rWCT-#XsPZr?+ z^@;!2AEeb0yE9O1B(@N{NbGHJsmNJJEm|4%%)XHF>Kyp}|D=Q+EM6*n)UHf3HwXO*Vu2b#|U7EB!jiC1oh za*F6kEjUJg_*dKy*}!Gr-;}ul{=P#94H+3zh4n!6`HNS^qMhjl`U`Z_`RyjCaPC=9 zAYqAy7Z9Ad7@ldMm?WUO`+KtZ3_vI&zVNz_Q|-ep%8fZYhZE=ug)SWfgWHmmXK z3VKFHd4#{xoryd`V$6KX zeyTcv`px&&#xX1gGv$aybX!KKv%NtId6#2~Pc6(`s9O<@7(^gHv%J0I91k71S3R23 z79g*aB5~*yD{*^jK?kWktwL7Mquqgxh0a$QadWUA=fG{Tmm?+v8M?kD?DC(g!-;8o zF~Kk3nIN~D8&E@o(EYJz-5>R_X3~KlMROVmz3qQ-kt|c(7lYTTm%i~`pB%T-%cvPB zwG5Jhc$=+P?S7?#O&7m5{;31}ZiHVuv|V$??hP_7-v^c>z+m;UJ4I}gr)#3ty`v{X zX6Icw$UwXR4dmVo*6$TWh#tzSh$;%I|G7Y8e=g7sc+2?BxsFc(ozV<0JDj*Vr9mml zq35Xx*{)@giZKF}V|x)^a1Ka1hS_yGP;9=d=OqrZt7Qh;54J(}<&*v>7x#VKjrfPq zuFRMEbKNxV7&o993_4G^$_PDQu~crKIT$R|%X(03h{z|bG7$m4WHIO?Ut=D-w^L7qh8nqr|NaGDv(c$dh2SJV?8HUeNQfd?$!SOS__pclUSw+4Jimi<-d{b8xx3 zHg5;soahZGC2Yr?3tar+b#f>Nno2LpQ3{6P3C$qmte8K{_}8TktQyKn4%i0 zTjmvCzIJxzEG-;0+R5vZfnPu2yeG3`m4_7>s_?^$c5u<*+Rz4Yu=LWzgHVQLp^*F*HI& z(R$uM1-J)lem1l1!+9ki`JT7f#ZU;<-oT$)SBm2;C})v>4(rZ~5hn+s8*ipHWH9}e z(P4=<2B+?FyBh!iDQb}Wzu3GF=CX$f(t#d|a5A(2e$C~Z_@j*+^e)$1%V3dC`Ct5# zi^G<|pyCNM%a?L}^^PQ?##cv#DEKK{IKl}ZcF{jcHu0yho(l;~>VJ)iKab{q_W|!V z#;h&;1>b_oEMX|0B(84=(yYXZ;CM2kzYf{#Rp7$$8hHpby!C6bS=Z}Z76286Rsi8T zQiA!j$5vGDyW&bE@Kk)k{&3sMJHs81ER;{=p_I&@ZGR6IAs67i*vs&W|H*6Nt$v_xy$24n zUyx`;92CKeUWV;@f`y8%cM&V*r<8b$4T?(Y1aoRs&5 z`ynsY50NzX%Iy1Z^5BCmEn}R=eFyc&Fa=!KNuiY(Wzk%vB!M)r0J7oQWq_QU2L@*b z?iKcT6JAG8oFE5LVb#Z|nJG)S11Fu8xENaZ63F(fJUpr(ENXWp^322wBaL`Vz-cM= z8{LBaY%6Tg3K^dS@b|Eq98gl zDICe^H8L08eSzyX3waptug3>l|LHQaAc$u{8lxBTRE@uSu{T?V9!=D^W&sf@1LbdK zZexGkc@igEt5h_Uuhh26VNPDefnC3<CpGp*zNbB(4BL+8pFasQY-s$t~g` z zo)>tnlX4l3g@!G*1K#)|c6OlL(dFf5bhDS z>y&Z{oNqx+?$dV@M*1|MmZfQD5H^-=VhNYC6Q{wDCaCV56~|1a(q`qOm$r* z>pbJtQ|F|lj1F`|g1+Kp=b31{IJE~0p#6c7XI4(u=S&y&3&0UlpvIqITKJiCG=V3? 
z2sf>Q`~^lm5;iN&m*BaZD_)|M$fC~ZX?t70k@*WvWA%0E$W}j6`w~%{IZLDZ?bvvA zYL(CuwTg?7-?$ zCws)-)IBG>J8Z@a$-G6_-{o3SJ2!maF#!5D?lYA-uW;YF@g2@!hKi}Zz5OPrkjsSh zv7I277w*V5FZX7>tbv#augCt=J#+n{()oS(Fdc||eIA!26470khKji$jPgN2K$%R* z)mZm=Px?Jxn?d$Mv=p}`77_UZ-HJS6=ukrVs4F~v#|H~3JG%7jX4jRhOwEJ&HZXix z`oI&@INd4sUKCTdcu86bRjM5h_x3MAt)))4XmLIQ*>j^AmhXZqg&N4G|I&@$9ONfW z!D!h}GSX~Bk5=BhS(6?au&^l5>K>}JtC`u-HW1Psq2mNZiUWc&2a))Cr>`7Tc1)xN zm4xt$K+d^A_cs8#$DBwC8rvBP_0NgB>?txB+}M6;#4iw=+3!B|wPC4OhF@^RS>`(u zlp!WewONyh4jp0z*bJ(cKjIgqB#Y_Qi*=o5XO`kp4?HdNW-Hz zm?<3>of{8t3{tQ_8H|#ImvtCn%~r5}5DM-eiu_86>er}*Uc_=$W(zFaxrqEp+ zNh%5Qr7?Qsd?w|?zC91y*A+Zz))aUER_tRnuoSHZ^5V9C^+=vI$qOXh9_eVc%L21L zsK^T%Z#aj#5~D8`jmP)p;8O71j=4Q}DQ*hkL^c#+T6#QVBd!j>@jcjDSf-J0wY#VG z2LNZm2hAq2nkVK)e>|1rfJc6n4$uXC4BQ`HbY3@092LR9crJCgnpJUVq%(zzzla`U zzN7kFAfZh~6zl*OSaqVIM@@va&C*J4&>yoY1^#lsl2Bn^4CDD6Hj@XM&!3*t$Ot?A zak?T`CML?E`8N->n3r&-D~~-9t?w{>gD0x=#~Pu54RhN$tjk{@fqJ8wnq3%fuL}_b z8H7h)WNk{IwQu-hQLht*PW(Yn0)R{!UhzNyLVyAZ02tJCtHl8Fp?okgkRXD<&DOtJ z{a{mMnPXK2lz*Q*1qJKeqgF8ck#498N&-*l)dbCdS&1b008^dh{(l1wp~y|MpDDBc z`($4PC?!#tA#VyehzAzIY^$#o_V>vM*73b*hWD3RGb10lC9ESaN<08l7e*BX}Y1ax5;18R%C?Phym9rczE4Amp*IkdUL*fU6>TFMcsl#8=pLQx@vS zjuC`<8KAQaI)OLTl2D(UDxwb+)M9p&t_1O+WXiZ!Ai8(P|u?x2vLDQu_Pvvf!uxt5rh>XJX!`5DsaU+aUSnOxy_Ig_jEJOAbHep7|OuRP-1HA z=a3F(dr(Aj{A=^7UCniH84^H-SAf=M0WcCtIDVW7zk|6bnMw~Vb0#)(P83wpHa^ul z@Zy$K7a^Ey{_A@h2E&!d8^|SkoM0t_$c3cLNkfQ>p?`ZTJw3!(3Dk5fp zEU{ch6Im`gW#;xDFSoWpQhO(sKAS(P?)D4IO;AY_MPX2&m`cb|)|lOV7UB9cBozLXq8}dbUvHw`b2L5q^uPo&mUopg-4#+aR?c4S=jTdatsG!v5m;3dBn@_ z(OGds$VCc!BMJiO!K&qJCNB3e&Va98-El6Wr>Fm=n1zPM_TyHIpAy!zb-69f_?l2HZwaYT0=v_TR3J^%F~5-6Pp`aqRI#R4jB&H z5Vi6plFjH$7fp$!T`Lg#Dn zJARpP!X8CJRaweODXE*Kk@S&5nCqo}vP7R8P?-+z3x3}7QP2H3K+WEDhP_`VPtQ!W z9)y-NbCTA~y@n|ex1WFm=%f9o{LH%D};%Mi^b3%nbpNiuFS_w-LRWA9DgTMW;DZb$x zzA!CIA^h(1hdU+4f4duPWN&*^Xw*sg-*&cwlbPv|X^MBW)_?)m0veu=xI9bSXfOy1 zf7)zQ92)zJ@hk}EY<{7O_%d8-mIdMZwTZ8F*`|v8cNzcmS9$_m3}otTwmoh+KJ(`X z_dQ#kIuAk884kx^|DI5R*&0FfTl;oaEVRSc9x*{`NRM6?Y5hAF7c7Ld@1e{Bg1_w> 
z!Dfs>)H8pVg!#4s0zca_)BedNDL88XxqhZ^#6h>x7v-e>pQ>;(e_6BBVh=a z%kyyC0Vcv^A!O*d|GVx;iTluFF)IPnOC=o|Yd>Av9|&yXH4}Eb+z1l$S?+slytZQk z63Gx|8KC(j&7|9Q$z%3y*A8=12(D$c2stfP zWjQWes4d&u$g}*u+mg81`3lrI@c^O*4d!f4WcXl+l*jsWY>ELyPO`bu8sy{It@e6{ zK&9`?5K;U(G6$KacmxCf6PFLEhW4+5?Sm*;FeAkT$0-gd>0-}~Y-6#bm78x-x5eB= zcb}Kt$kb$$@Ncq|A56D1NvYK>L>(f@f z*WAVqd06e%?G~hbFO&sL6-}l(p|;TSW0za?Nk3m3%ME*)D!|=N#bX+9`hjV1TPa19 z1LiN&^y|G0u1*^lazh*KO}}?hgc51VwA8T#B$J3zT`6I5?B_^$rByWXZi`^u)uq{m zmsOR*rFFr#)CzLa3qZfFAQ3er;jR$=uO4lnJiEYg-^c=Fr;H-H-J98=@5m$f_g%oI zt0#YFwXLCw+3HS0BrD`AeiT`}Ak~VZ7yv=&+Qo9vdpfV@!T6_+2>|dc>!G5;vPa!T zz!{4`tVxI1TkeVX!5o<;=oPC-Nw@{Xn{5K zavq3TtPpR@gGa5wNfx5+b=rJSbUGQ5QzkSQ!i-4mJD^|ZSofsA0ROz25(Yz*oaNKTc6O4#b=Mjhak7qjREIPRf;7w$1Nzr5=vpmp4F>=5A7l6fbUHK4zx>A%;Xhr4 z9o7HR;0Vt$Hb?!d`CWi^GCn%#-2VGyAB4c5P(ZDb-je?+eAXySm~}uF4vF9WWB;;wiu?D=;C}$F!dj zi6-;khoKw)I3iH`Q%%ij{t`u6^s8X+UByfO+giYJraSu6_n`F+L)RHGUXT6P`e42r z>+f1+|1ZsV)BWjS1t}LH$@~_Yrvl6ma{_aFxe{8s4Yz;=PzujI`kE~Vl+0Lvciu8m(^DtHCLyLc=CT(Rgt^INA8;>p6UnJ~uc03inj2r14XacGzp z{)$ALF7^OJ%7pNNo78_)>D|%yk0JmIsv}b~Na&Y@MNLY`d5s-HUf8_vA*1$6(hpFg-&`7 z-1B1CEM(_yg(b6wPW|}J4je@lPQ4ROB#)W&pX|wPeLPA&*>T80kDNfO$}?{CTLNuc z3uvAuoLDs=MXHltM}>zl*-N^)+Lsei4P(>tii#TGFls^HQ~^Gvg2cDMv=~|hm~r3Y zYjjI>9fSHC|4LzSesi}x=HebWdI7U>`gjVj?N|o{yk~B_#KfPo4|qtN`xr3%VFyhu z&k2vM`>WwXsa{C5bms_Kpa#^30Wd{kD|tB~5)ZSrYS+ZP^bMe#rWM?O&DUe)wso!> zMlIffyLau%5b$g@5EoP-;r9S^M{5xBm%weX2HwvKPtWr{J2mW>R+FG1t2>O0__ZBw zf-#XIs)`X9%Ckw?J1Tieo(GBdK$qeqeWr#&#{ZemqLCDhA2jKxC(1oiTAJqf$7`VH76Rls=!hfvcAX6VaTD5#&5~ z#~~^lZ^~zHfRTGvOpavfYa1@<4;AV?I^Pw?mT(9${2B*eF+*IW6o0TC&$At?GHKbH z4?0$%!6x|)cT_WuRnt)jR9p?vT`PMV<&aFB>2_-egI~l&JI^mN8g6X@zN2?VZde@B z6DVT9%_LR)^j5@OOIh$RX1CO=ZM*z>lWE}wfC9I}Z_Ty#Gl9nP1K8R8VGY3xAV42( z=H?_v!(ClssYj%F@TC>Gi)Fkwn;nY{I(NW-L0qNhT%NUdlcF760TonZO2z@0a&LCe zw3Ny$Iv&38xw^Dah=S7?ldGQ7jwDF?Z9sCFd+R(wsQA)tOCd$HHrX2HM$^B20_(@& z^w?G8lE+j7I*VaVmFDH7SZ39`M(%7c6)+7W#e<v=IB?C5RBx!ld$tX+}my+}I4AqcY(!B|tRvFmAC4TM4(_wAb}9Rn128(luO@ia(P 
z3BsaTl0laTvGLHo_J3mifN!B79rTJ$5(YJ%?vC>s3He|Hwj~U;@tQVc`5;Wt)6saSA&dhRyKVnUbK6@hg!7&=!>L^&4l2BoMb~Rh6OKwS z1_#}W)O8B{j_m!a)nsW|FnejEGX_ya5&vxUZ9eT-4VI;R-$e<>`*w6K@E2JwZ@j=( z+JrfKy-`v{Nn_ooFlyw+XEo;H$mru_SdcRDtXo8jR`I@Bz*vS1eg}sjQ#L*W)1}t1 z`xr^`VO`H7f_2^1Ls<;aYqU)aHo;#TaKAjh)z*fY?6E25{K)VQ0b&$Ry!mMI`NEUq z_n>y@)Vftn7%dUJ_(mUT3LAP2|shA~$n37lRY(jmrU-hqZyZHW;h0y!l$A#oi z46h)WGdV4d^$3HMO)K{2=O6{>sXLTUK{+}fU-x|3cwu<^y6<-)kc2D`cz5WDumiI! zh~7UeXTCvjj_samzwS?UQl+dwWxW!5TTE(?b%`ejzvdqsn1&JJi~i9gjGai5BgK6H z-<-p2onnzO1`E*bkZ2U+kL@3+1`nu*@@?THqVcb*3tm8lA)qH(4!Zd(GzyV&P}e0p z5^}*M8G{bLp~dKka}z@&++>iJ{W6Y}Afhd)60k*fJsv&qyjqy?uFW?|%+y0<1#0N}u5qNxNs)A!?a)q^VG`{yqm zyIuH?(L&d1Ol;xCa`&&=E*n*oh=cqAB3KvhK78``OJ_U)PHdwG{}JNFF-q^j0zQnq zsrU;U*Cjwb!J9WO2ae3^B%ver7#kQcR=;1JVg6 zil1YWLEmTH?+^Y%p`~mZQu6)d*$MSvge{Z*H+J2dqfC>q4CQ&r^Et5GIH(h{h&qkX z?8~RJn*X|kzu{nAMa(}3O7!gZ7GBg9HD&JtisxStv`Dd8!dQl9`rsN5&O3m_6@dIS=V-ZCqsHRzL zI4+OH1as28p*?y!`ZxCNe@;+)NtwW!ufv-C{K?P>f)8%Bf&(M>{$ZE!4gvIm)YL*SfxF@zLjk!YCmzwWO1hW|dJQqsGvBDJj0Br$J8<*sS<$3VYt z>nxhV+{gBx_5M8_O^M=)Y|?mD>4L3n{;%m4CVEI;rbR;;U8WIb2MpGGKm`svn<(lz9HAna&&)F5qF30fz$O#j zn|Fp7&sP`)<45yD?*^<5o$1!$qvT{zrjXuFtvdZErS5Qv4bjydLu%s~d{}vyk&2Va z>5G~uQq_i&Z7WPp}t)KRT@}nQ!04AMn-mWA4^tH}2zM-c`SSHWl(}(tC zkxzzL&AK$GN5U^E7M`H&K$gmRYclJ1TxmCHd%gpvRe3K+a&IGv%cg2Gs3jUOmq$8? 
z6E(D4TwiqQkxB~qH2HAHmicN9;CqP-L738=140r9p^qJ~YKH6Oi#99P_?wzy8m62@xQkEkty-vLwEWF)5mR(XhK(am6cY_tXT)% zs3~4<)(&-F>St-3=!D>mFzN)I6^zknTi*`f}h;%@*Y|}qi75q#Xme?XJx7reH zVUm*c$$%-6-!@GY?__QTYwimm(AseFkJ+T6&KO2&O*tIeL`1>2FnHd#27AJ6Ap3Vc zQq#gH9iu<00QwnDGs+Y*8jIS+r@iN_t}*hD^bmt-D$kStBA3dnEaiPK@~f%azKQB6 zTFJRz&S%ErfBK6c22U<|PrF7q&(M&L3G%D;m*SnQ(%;h@w(}#n|7gk0B)*ASC`+ph zDzm%+9i6}LDTBdNOfGSuolvYx>Tj>2^`-ZgO1fGiIqK@+`Pk=JvO|nPZEv8puDwBY z>LtO6C!wq@&32qu<{{H^$L)asQQddHQ~AgLm&|0oDcdm$;n<<z ztYh!&y;s>QTU!d*LK(^4gwONteO=$b;QPbp^MjwxIoEYN_v?PWo{xDLURdqz`QJXt zp2?&xJ{*U)b^d)@Ix&7J#r1sFx2rJh{1~rZ?@2SsV=W94gNDuZ_TzeHtdh>%myNm6 z!xbjH(9K19>kCiDJfxaM#j8YpLB9kz`%)PWksp8_6TqOI@6niS{)@_Eej{ zZcQnmwP{G(ZPBNw_4Y7(7V}TQ{|lc7uwsHN6#JxM^n_kJ4Ie;MDL@1xRP~c)ez)*8 zn*ny);j$D^wz!&b9ab^-mlDt>_YrsatJ|K)UVidtOY>N36E6ELh>mDL#5Rv;qYerv zVbJw~Gw-m+6(~Swz;mT$oVCOq4c4oT3oMm;C-7+gguhteACfG_j>Mz@+_G_Lq(vj8 zEvJAfR1pp!Nunjh9Perj_sc-PGv>hqqef>2#i3|wgf z*b)*XU95KC-3p~+;89oZbMh_lc*jfkglDJB`KlfFQIvK@ z%q+__?P5sCBq7J;=0d*!f;531Xl$vV;nh=XgYGry^6Nf-JS4sLtHNJ5BBUt|j5~s4 z$|U4xai5T~h|0FZ5~SGWgQ%{fmi5&cWejNNvAp`|rSba8wgvDLm03G=R+n*q1xN_| zi|GKz1bpTW(7dbq_fo+G2F*x`h}ul`(kv!`CZxXv<$pn5*gRJnzPBq#&r13YkZM>; zU)=cjNym$`EFY2|?QmwyuK-ueaeA`%IvoxAQ&Fp(qkjqu=_gPMUy=++dtUUg`u;I*$=w}!?d!YT5m+bu5?qx_K>ubudLPXe z=o5_$7Mh-^g7b9BMp^0YwJ=Uy{|NM&3Y@z;gXn^eb`}9GgFqj!+Y<=ruR8x85qb)` zPCjt=0WaA;A{-tVJdFbm$9K^&Ioo(EjZo=l3k8T-r-2`VOp>}vUbxc$sCfAZ&x!41 zLd3xG_gHP|6YVm4-~f#fw4E7`3`Lq!8_#@va6t2|HZ7wwoqnH%uv3=iQ)=I3o7vT= zRwEdfi!v_*;->AMhBF+k4`;f%pp&m&HN|}!tz*u|G`aVr4C=S82;%w2@`5v=9+SxW z(T`3nJ-B7>R0cWCTm zz@|tqjje&?>#al>>21Pco_UXGGYA-c+DUxgF4JR6{3Y->#bBuOf}IC3_ChqE8PSCh z6{!&vCwvOQ=KZg`yXynk$N}~rtf%|IG73v_h_)A{afE%tF!Q7k8tV;Rr9r5J& z+GW+~-mx{%KN$!L@`Pd=;9^WZW44Jqbgpm~+xy?k0nW2HM_?wB=P?KD%!7q=w-f~V z>%hr%JRDi9(<}uoZ`dCQf%}U`q1mjgURr6tO1Z~|mBWpAIC&fhn(@!LD=QxY+{jDs zez-%|@%Tcw_XCGNLhql^pDPRclp%d|QB~(3@;)KU-o<;|N;&&o#N-@`Y8whj_m;ty zYGgas2Uv(E2lJ~##J`@=^ZH&MKFWHXra+H&+mjl3^j!LuV3EHQ(Jq?sX4O2b{>$E7 
zm^*_eReMx+50g*(XtHCdFW*Vl_7)ZAWm&C8!}@bEM_OH%mcc9a4xCmMW-L|HR7Q5F zrly3%{ zgkR!PJLS>`6*}vv*_(MFkHk{p>+OituLG^<>S3bvhP*t_5pE?uznem<;Qy}Df2*>7 zEqvheaF`d#a1ClCSRA~#ecwI4z6#eSau?pO?u*!UDNB<3@G;!21)llk*8KT7QiJzm zVSR$CkBlT*IS*bK3Ub`%T2_3e6tSb#-0eI*mv6l~(a1nFd>}<)ibd3oZv#>XTzwx~ zwi^n~52Ky>t_ui0}-pv>G!{?qOzlmV;%-cz6rV7hO3hZnQ z!xQsdy6QK`({ljQaLZLC*LCFh5O(}7hn(#CR`cql4$ZI`?=d81w2^$5e@1n&?52=g z=m2VrEVqM28oRhu2NbcbohcndJ8+nH>IvQvJJrjG{S*?onOvL&ED;|s<~czwBOQMu zsp)<5AJ2+i=wLo~txKU7beb@eze8BQKoGD79VP(-~6OPt;WJ zPr~?y%B-~$iy9`{m`$KQN;od(uxh&72XyX)*5wwRWWzWksU=Ex{LbEeLha(@U+wp4 zciVv=aX?pLw8%ufe@l1kk>J$4ND5{!qJGi_tGTLDbdV2u2=iyybgKW#K}xYyVf{_d z5oOt6;qERQ@Z4cPl4Y(0(#>6{Nio4VpN!d6+H$?}W{p4HY-+iCsg23-h z&@6uS`(La0KkVaH#NRUt%&xnvu{4RU2+z5`&{{obsF|bb= z3nc_bG&Rs~q$|8m~-etE+C*k176>Zd!=5li@NKu;qB$Yq<&QJgRTC!G9E$gtMU`~4t^|k47 zarNL5ephp;KV94x1q`G;cL!M+{ciCdQnek21{*l<{(Sldos*7f57_Pf5K(`0HjE^7 zwSzo$!yFu-h*IVr{1VTsY^I5~;>VPTfl|jC8Ug9~p7$g`7Y*&ihR6A$R{aO51fo5r zzMo_Bi0Ny^Q#Es8ZBGuNK8qjQ^psP_6H_H_58Z9pQz!z?*8%-r7w9@&$m7Ae75C(T zu;hq^W3N@dh;`Sr6$&6q6ho=80(vp*w{(C49s*2aQsS^n)wnj)DT5wFIB@g~GIT(W zCcBF05$vC6pSO+c{|(21qiaE~E50C_m3SR&p7)fH_U70Dkb!7n2us&g4vb3{Aq7hr z+PEQW&vxdh1gX&oi=C(wJQ~(3GceB&jRQ~3vh$qHC9l&>Fy!moxOn}u1xrvL`cHFC zGAg8vv+NzBQscM&wzLR7?AyB)107xCxVXMK;2GR9ZyUIrf*XM5v5$v@@JAj$K0(b^WDx*A6(#xvTZ!}PbI zsvzr@!Nb4-pAKb?&Z;19C-d5fB^wd@$LBAEd*X70W;3Ap(n({8<0wkxq{`6J70v!P z&5aT!#!zt7#imRLi{-?8OZlt*w}U(3fAx$04~|(l+CFKhNaOv;#QyHOY`yF6 zXiJjF@d7&ctP68U@Rt9Y@_{;eex^{1jD+PsDneOK5qu3~|Nngr&uIN23q>kpTm=}a z>Z-oPMJq@)YeAK~Nev>mJ%Iw_+Y%SWa^5NS(B1pk(}@@`{H*a{vyFxEP8E_@(nMLM z)A+GFDiW_<%>Pn6DF0|`KMUXx9hJ9261pWVa0!W4(z%K4Fst(ezxeXkH&C3BIX(4r zN#Pl=;Ud2g;3jSTiHXOkt_=T1T0+OE#vgUo4hrZiMgShdF8~nFcbJ9)_#JH$C%`ki zl_W7cJp@qzlAqdSg&Tg>M6(UM{s17n?fL`2 zQ`z?KuL+LZiESx_$iskC!!8WK7|qb(0NSp)7^PRth=7Z>isHy2!r7XCVJD{Ao zHA%3@c#VWD%Oo`U9Aj-qsU*p1IK{gx9p6P9h?rScNaDPFu+(3Stb?O0WX97 z0NXtQ(|aQf(vdIXokMeA@hgh@1I=C1$Kt8>Z3Z^UtMIm8(6O(LSpS~%%bvDi0Zr;w zbv54H68G*dg>JA%Ukf6}ARGV;U_#T;YO^jdrEamIp`mvab%4tr1kHO8$WmRny*glQ 
zuSHT^y727}=G+P&got|i;oe#$yr}^3Yah4fd&5AA7z&0U_gf3tOU!;uH2vbN9OBp9 zQfg9qPR?BN-maoG-{GAptf};2*jU{r5rH!oVlXDiPaTv1e*~AMj))pR!I=WlON}E> z)$#b_w_KSzfQR&9f$BpWeAPRI88LC+2T4X_FA(OyQUs)z8@^Hk9YIAJ$xvm!PZB7F z);klo-y`>ch*DS2kTUG)=_y7!*^gLdybKD1uvuilV=arQYwrPuxGIPmDe*&O-3oOWl!pL4ZQqngo|(9JU_>ruvq9PBv5}L2A8(1Y`oSQzeUmoK z3C_aAQ?Oi=3tN3CT9?ZN3JW2<4*_-5$j%~E%+bLeMyHiKG$cqsHqm(_pUykqfu$2n z^!4v#AbO@-`$*S2V!vns91Z-ZaOaIbrb%k^q8yCDcjKu#n!SkAf+lxxS`;B%jY%tZ z=$$n@4<&GfA09%kRf*F+3l) z*5)R(pK7!?9YMpH4j{Q92tt(GaNle?{Wi7JqOcdV!SVWCuZ3_Nd)Q~3jYaI;N3muR ze7Kj@nA<03d3rh{Bh(%lQ1V>M;}*v8$c7Q}Dg{0^#AIJst?&POG*MwYabr!KaV@Q| zVIhOH-}}?T#GaH(#)8Pyj|U}3M$K7@^K=Jk=t^pTvoJY(`*IIY&wmyc7A$;yGZpNi zWrLlP_%1Q+Ovm9NHYPT9Gxs=WChS{eW25AGmxm7@7GGUwlco+0Nlj%m7fvb47)@*_ zE-TBsYbU`}5RFps*{>d7>HO&&IyGe)laYa@ymE_p<;EoDF?$(~60NGMOB-~wZ@Ts< zFVnvN{d?^pR9?uAKgug5E{@}a)vK|o(NUF$&uDiZ_l?+$Gm{f7=_DNU81P*jn4T7B zF|Cxiaf8D+GAb(Qqg5`8hklt}sh>UH(9n>Don5%Y(*3)4v!)oAencj-k`60+W+o;U zJpVv7cKn3-sg&7{BO^UMn{-M_%7h69y)RCAK1k8WM`p-waR2dnZD3+TyS24t;q0u%#Kc6*G1)$|wG}t?MNCXgw>4_#)JTCV zJjSQar1T%lnSHi)?qQM7R(Yv3ipK$M1_2WsX;+v;r-JM1t~oh7D|lqfuH$viMrOop?yq{o*Jp5Kep4{aAv%Brru zx9=t6j6*&AzOcBcTvk~bUQlqw%}RDsQ}<6HWz@;+tcAZe-?f^~PKCp>PWWbGpPvlt z>+6>shKG|nucChaURi;CjfDL(E-H$2yNiUfkeOpr;&QfZMj!PB!bastQgKa9%}2}{ z%zR3Rjnn_?r|qwIiT8{b6H46q{d<;; ztu0$$FZnsH+i5;#W@ba?j%iqnJZ*=^W!GBWaI2NUj2h|5;qD*TJ9s1|Q>MyoOq?c5 zc0fm# zGZtGCpOC<_va*u*+(m(mj7+iUsfWj1Ch}lmQBlPee>uNRSJWpISBMwAGWQ#8h9yGD z;+&j-j3B0;o12@LHp#~150@4fv%L6~g-{|+Ed9!|$BNjL^mJWeVPTyc(VpE> z&CShA$8x?kN%8T~>1u5xK?+i2J<8T)jo$_bxh*X%OUUEz5e*pKuCbfum!4pcAs!`D zljrPAi?JqBA=#{VH^*^KQ@U;@DSqGM@QWZ%y+V5J-@#k&HFsUEr+1@=WH?xk>+9>o z-v4ATd?1_>a{1j468QZMLIQgHNLvi%J88@VEcV4USqa6*0U{zIB_!CUfu+^eNP}$D zmLC&d9q)}xHEX|rbL*8Ryph2~X{f7v+kX4nU+23(m(7a%Ha~CWY%D1$d0ttF4?fAo zM9$mJ#L3(|sev=DGg+qVnC)(ntHT>DDlJttG&0Jy7B-?UGr$o%->*6odqE~k&O%a& zBfWaQVu5ah32wSi{kCIsTiX>i|M1brdhNXgMzov0zwdOmH$*1Bj8l|jM^Wt)QMK>U z=EV-7fbai#n_)Jes=qyv$qFivHO`FbB)MrEGI@!-8@;UeK 
z-^Abla@yPbeBDE;bH$yD@5)%Hs$R)OGQ&hzP@H%qerHBX_QyUfE*32=FaI+-I=cNW z7>mVHV|IJB - -# Docker Engine managed plugin system - -* [Installing and using a plugin](index.md#installing-and-using-a-plugin) -* [Developing a plugin](index.md#developing-a-plugin) -* [Debugging plugins](index.md#debugging-plugins) - -Docker Engine's plugin system allows you to install, start, stop, and remove -plugins using Docker Engine. - -For information about the legacy plugin system available in Docker Engine 1.12 -and earlier, see [Understand legacy Docker Engine plugins](legacy_plugins.md). - -> **Note**: Docker Engine managed plugins are currently not supported -on Windows daemons. - -## Installing and using a plugin - -Plugins are distributed as Docker images and can be hosted on Docker Hub or on -a private registry. - -To install a plugin, use the `docker plugin install` command, which pulls the -plugin from Docker Hub or your private registry, prompts you to grant -permissions or capabilities if necessary, and enables the plugin. - -To check the status of installed plugins, use the `docker plugin ls` command. -Plugins that start successfully are listed as enabled in the output. - -After a plugin is installed, you can use it as an option for another Docker -operation, such as creating a volume. - -In the following example, you install the `sshfs` plugin, verify that it is -enabled, and use it to create a volume. - -> **Note**: This example is intended for instructional purposes only. Once the volume is created, your SSH password to the remote host will be exposed as plaintext when inspecting the volume. You should delete the volume as soon as you are done with the example. - -1. Install the `sshfs` plugin. - - ```bash - $ docker plugin install vieux/sshfs - - Plugin "vieux/sshfs" is requesting the following privileges: - - network: [host] - - capabilities: [CAP_SYS_ADMIN] - Do you grant the above permissions? 
[y/N] y - - vieux/sshfs - ``` - - The plugin requests 2 privileges: - - - It needs access to the `host` network. - - It needs the `CAP_SYS_ADMIN` capability, which allows the plugin to run - the `mount` command. - -2. Check that the plugin is enabled in the output of `docker plugin ls`. - - ```bash - $ docker plugin ls - - ID NAME TAG DESCRIPTION ENABLED - 69553ca1d789 vieux/sshfs latest the `sshfs` plugin true - ``` - -3. Create a volume using the plugin. - This example mounts the `/remote` directory on host `1.2.3.4` into a - volume named `sshvolume`. - - This volume can now be mounted into containers. - - ```bash - $ docker volume create \ - -d vieux/sshfs \ - --name sshvolume \ - -o sshcmd=user@1.2.3.4:/remote \ - -o password=$(cat file_containing_password_for_remote_host) - - sshvolume - ``` -4. Verify that the volume was created successfully. - - ```bash - $ docker volume ls - - DRIVER NAME - vieux/sshfs sshvolume - ``` - -5. Start a container that uses the volume `sshvolume`. - - ```bash - $ docker run --rm -v sshvolume:/data busybox ls /data - - - ``` - -6. Remove the volume `sshvolume` - ```bash - docker volume rm sshvolume - - sshvolume - ``` -To disable a plugin, use the `docker plugin disable` command. To completely -remove it, use the `docker plugin remove` command. For other available -commands and options, see the -[command line reference](../reference/commandline/index.md). - - -## Developing a plugin - -#### The rootfs directory -The `rootfs` directory represents the root filesystem of the plugin. In this -example, it was created from a Dockerfile: - ->**Note:** The `/run/docker/plugins` directory is mandatory inside of the -plugin's filesystem for docker to communicate with the plugin. - -```bash -$ git clone https://github.com/vieux/docker-volume-sshfs -$ cd docker-volume-sshfs -$ docker build -t rootfsimage . 
-$ id=$(docker create rootfsimage true) # id was cd851ce43a403 when the image was created -$ sudo mkdir -p myplugin/rootfs -$ sudo docker export "$id" | sudo tar -x -C myplugin/rootfs -$ docker rm -vf "$id" -$ docker rmi rootfsimage -``` - -#### The config.json file - -The `config.json` file describes the plugin. See the [plugins config reference](config.md). - -Consider the following `config.json` file. - -```json -{ - "description": "sshFS plugin for Docker", - "documentation": "https://docs.docker.com/engine/extend/plugins/", - "entrypoint": ["/docker-volume-sshfs"], - "network": { - "type": "host" - }, - "interface" : { - "types": ["docker.volumedriver/1.0"], - "socket": "sshfs.sock" - }, - "linux": { - "capabilities": ["CAP_SYS_ADMIN"] - } -} -``` - -This plugin is a volume driver. It requires a `host` network and the -`CAP_SYS_ADMIN` capability. It depends upon the `/docker-volume-sshfs` -entrypoint and uses the `/run/docker/plugins/sshfs.sock` socket to communicate -with Docker Engine. This plugin has no runtime parameters. - -#### Creating the plugin - -A new plugin can be created by running -`docker plugin create ./path/to/plugin/data` where the plugin -data contains a plugin configuration file `config.json` and a root filesystem -in subdirectory `rootfs`. - -After that the plugin `` will show up in `docker plugin ls`. -Plugins can be pushed to remote registries with -`docker plugin push `. - - -## Debugging plugins - -Stdout of a plugin is redirected to dockerd logs. Such entries have a -`plugin=` suffix. Here are a few examples of commands for pluginID -`f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62` and their -corresponding log entries in the docker daemon logs. - -```bash -$ docker plugin install tiborvass/sample-volume-plugins - -INFO[0036] Starting... 
Found 0 volumes on startup plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 -``` - -```bash -$ docker volume create -d tiborvass/sample-volume-plugins samplevol - -INFO[0193] Create Called... Ensuring directory /data/samplevol exists on host... plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 -INFO[0193] open /var/lib/docker/plugin-data/local-persist.json: no such file or directory plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 -INFO[0193] Created volume samplevol with mountpoint /data/samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 -INFO[0193] Path Called... Returned path /data/samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 -``` - -```bash -$ docker run -v samplevol:/tmp busybox sh - -INFO[0421] Get Called... Found samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 -INFO[0421] Mount Called... Mounted samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 -INFO[0421] Path Called... Returned path /data/samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 -INFO[0421] Unmount Called... Unmounted samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 -``` - -#### Using docker-runc to obtain logfiles and shell into the plugin. - -`docker-runc`, the default docker container runtime can be used for debugging -plugins. This is specifically useful to collect plugin logs if they are -redirected to a file. 
- -```bash -$ docker-runc list -ID PID STATUS BUNDLE CREATED -f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 2679 running /run/docker/libcontainerd/f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 2017-02-06T21:53:03.031537592Z -r -``` - -```bash -$ docker-runc exec f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 cat /var/log/plugin.log -``` - -If the plugin has a built-in shell, then exec into the plugin can be done as -follows: -```bash -$ docker-runc exec -t f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 sh -``` - -#### Using curl to debug plugin socket issues. - -To verify if the plugin API socket that the docker daemon communicates with -is responsive, use curl. In this example, we will make API calls from the -docker host to volume and network plugins using curl 7.47.0 to ensure that -the plugin is listening on the said socket. For a well functioning plugin, -these basic requests should work. Note that plugin sockets are available on the host under `/var/run/docker/plugins/` - - -```bash -curl -H "Content-Type: application/json" -XPOST -d '{}' --unix-socket /var/run/docker/plugins/e8a37ba56fc879c991f7d7921901723c64df6b42b87e6a0b055771ecf8477a6d/plugin.sock http:/VolumeDriver.List - -{"Mountpoint":"","Err":"","Volumes":[{"Name":"myvol1","Mountpoint":"/data/myvol1"},{"Name":"myvol2","Mountpoint":"/data/myvol2"}],"Volume":null} -``` - -```bash -curl -H "Content-Type: application/json" -XPOST -d '{}' --unix-socket /var/run/docker/plugins/45e00a7ce6185d6e365904c8bcf62eb724b1fe307e0d4e7ecc9f6c1eb7bcdb70/plugin.sock http:/NetworkDriver.GetCapabilities - -{"Scope":"local"} -``` -When using curl 7.5 and above, the URL should be of the form -`http://hostname/APICall`, where `hostname` is the valid hostname where the -plugin is installed and `APICall` is the call to the plugin API. 
- -For example, `http://localhost/VolumeDriver.List` diff --git a/fn/vendor/github.com/docker/docker/docs/extend/legacy_plugins.md b/fn/vendor/github.com/docker/docker/docs/extend/legacy_plugins.md deleted file mode 100644 index 901a40ad5..000000000 --- a/fn/vendor/github.com/docker/docker/docs/extend/legacy_plugins.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -redirect_from: -- "/engine/extend/plugins/" -title: "Use Docker Engine plugins" -description: "How to add additional functionality to Docker with plugins extensions" -keywords: "Examples, Usage, plugins, docker, documentation, user guide" ---- - - - -This document describes the Docker Engine plugins generally available in Docker -Engine. To view information on plugins managed by Docker, -refer to [Docker Engine plugin system](index.md). - -You can extend the capabilities of the Docker Engine by loading third-party -plugins. This page explains the types of plugins and provides links to several -volume and network plugins for Docker. - -## Types of plugins - -Plugins extend Docker's functionality. They come in specific types. For -example, a [volume plugin](plugins_volume.md) might enable Docker -volumes to persist across multiple Docker hosts and a -[network plugin](plugins_network.md) might provide network plumbing. - -Currently Docker supports authorization, volume and network driver plugins. In the future it -will support additional plugin types. - -## Installing a plugin - -Follow the instructions in the plugin's documentation. - -## Finding a plugin - -The sections below provide an inexhaustive overview of available plugins. 
- - - -### Network plugins - -Plugin | Description ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -[Contiv Networking](https://github.com/contiv/netplugin) | An open source network plugin to provide infrastructure and security policies for a multi-tenant micro services deployment, while providing an integration to physical network for non-container workload. Contiv Networking implements the remote driver and IPAM APIs available in Docker 1.9 onwards. -[Kuryr Network Plugin](https://github.com/openstack/kuryr) | A network plugin is developed as part of the OpenStack Kuryr project and implements the Docker networking (libnetwork) remote driver API by utilizing Neutron, the OpenStack networking service. It includes an IPAM driver as well. -[Weave Network Plugin](https://www.weave.works/docs/net/latest/introducing-weave/) | A network plugin that creates a virtual network that connects your Docker containers - across multiple hosts or clouds and enables automatic discovery of applications. Weave networks are resilient, partition tolerant, secure and work in partially connected networks, and other adverse environments - all configured with delightful simplicity. 
- -### Volume plugins - -Plugin | Description ------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -[Azure File Storage plugin](https://github.com/Azure/azurefile-dockervolumedriver) | Lets you mount Microsoft [Azure File Storage](https://azure.microsoft.com/blog/azure-file-storage-now-generally-available/) shares to Docker containers as volumes using the SMB 3.0 protocol. [Learn more](https://azure.microsoft.com/blog/persistent-docker-volumes-with-azure-file-storage/). -[BeeGFS Volume Plugin](https://github.com/RedCoolBeans/docker-volume-beegfs) | An open source volume plugin to create persistent volumes in a BeeGFS parallel file system. -[Blockbridge plugin](https://github.com/blockbridge/blockbridge-docker-volume) | A volume plugin that provides access to an extensible set of container-based persistent storage options. It supports single and multi-host Docker environments with features that include tenant isolation, automated provisioning, encryption, secure deletion, snapshots and QoS. -[Contiv Volume Plugin](https://github.com/contiv/volplugin) | An open source volume plugin that provides multi-tenant, persistent, distributed storage with intent based consumption. It has support for Ceph and NFS. -[Convoy plugin](https://github.com/rancher/convoy) | A volume plugin for a variety of storage back-ends including device mapper and NFS. It's a simple standalone executable written in Go and provides the framework to support vendor-specific extensions such as snapshots, backups and restore. 
-[DigitalOcean Block Storage plugin](https://github.com/omallo/docker-volume-plugin-dostorage) | Integrates DigitalOcean's [block storage solution](https://www.digitalocean.com/products/storage/) into the Docker ecosystem by automatically attaching a given block storage volume to a DigitalOcean droplet and making the contents of the volume available to Docker containers running on that droplet. -[DRBD plugin](https://www.drbd.org/en/supported-projects/docker) | A volume plugin that provides highly available storage replicated by [DRBD](https://www.drbd.org). Data written to the docker volume is replicated in a cluster of DRBD nodes. -[Flocker plugin](https://clusterhq.com/docker-plugin/) | A volume plugin that provides multi-host portable volumes for Docker, enabling you to run databases and other stateful containers and move them around across a cluster of machines. -[Fuxi Volume Plugin](https://github.com/openstack/fuxi) | A volume plugin that is developed as part of the OpenStack Kuryr project and implements the Docker volume plugin API by utilizing Cinder, the OpenStack block storage service. -[gce-docker plugin](https://github.com/mcuadros/gce-docker) | A volume plugin able to attach, format and mount Google Compute [persistent-disks](https://cloud.google.com/compute/docs/disks/persistent-disks). -[GlusterFS plugin](https://github.com/calavera/docker-volume-glusterfs) | A volume plugin that provides multi-host volumes management for Docker using GlusterFS. -[Horcrux Volume Plugin](https://github.com/muthu-r/horcrux) | A volume plugin that allows on-demand, version controlled access to your data. Horcrux is an open-source plugin, written in Go, and supports SCP, [Minio](https://www.minio.io) and Amazon S3. -[HPE 3Par Volume Plugin](https://github.com/hpe-storage/python-hpedockerplugin/) | A volume plugin that supports HPE 3Par and StoreVirtual iSCSI storage arrays. 
-[IPFS Volume Plugin](http://github.com/vdemeester/docker-volume-ipfs) | An open source volume plugin that allows using an [ipfs](https://ipfs.io/) filesystem as a volume. -[Keywhiz plugin](https://github.com/calavera/docker-volume-keywhiz) | A plugin that provides credentials and secret management using Keywhiz as a central repository. -[Local Persist Plugin](https://github.com/CWSpear/local-persist) | A volume plugin that extends the default `local` driver's functionality by allowing you specify a mountpoint anywhere on the host, which enables the files to *always persist*, even if the volume is removed via `docker volume rm`. -[NetApp Plugin](https://github.com/NetApp/netappdvp) (nDVP) | A volume plugin that provides direct integration with the Docker ecosystem for the NetApp storage portfolio. The nDVP package supports the provisioning and management of storage resources from the storage platform to Docker hosts, with a robust framework for adding additional platforms in the future. -[Netshare plugin](https://github.com/ContainX/docker-volume-netshare) | A volume plugin that provides volume management for NFS 3/4, AWS EFS and CIFS file systems. -[Nimble Storage Volume Plugin](https://connect.nimblestorage.com/community/app-integration/docker)| A volume plug-in that integrates with Nimble Storage Unified Flash Fabric arrays. The plug-in abstracts array volume capabilities to the Docker administrator to allow self-provisioning of secure multi-tenant volumes and clones. -[OpenStorage Plugin](https://github.com/libopenstorage/openstorage) | A cluster-aware volume plugin that provides volume management for file and block storage solutions. It implements a vendor neutral specification for implementing extensions such as CoS, encryption, and snapshots. It has example drivers based on FUSE, NFS, NBD and EBS to name a few. 
-[Portworx Volume Plugin](https://github.com/portworx/px-dev) | A volume plugin that turns any server into a scale-out converged compute/storage node, providing container granular storage and highly available volumes across any node, using a shared-nothing storage backend that works with any docker scheduler. -[Quobyte Volume Plugin](https://github.com/quobyte/docker-volume) | A volume plugin that connects Docker to [Quobyte](http://www.quobyte.com/containers)'s data center file system, a general-purpose scalable and fault-tolerant storage platform. -[REX-Ray plugin](https://github.com/emccode/rexray) | A volume plugin which is written in Go and provides advanced storage functionality for many platforms including VirtualBox, EC2, Google Compute Engine, OpenStack, and EMC. -[Virtuozzo Storage and Ploop plugin](https://github.com/virtuozzo/docker-volume-ploop) | A volume plugin with support for Virtuozzo Storage distributed cloud file system as well as ploop devices. -[VMware vSphere Storage Plugin](https://github.com/vmware/docker-volume-vsphere) | Docker Volume Driver for vSphere enables customers to address persistent storage requirements for Docker containers in vSphere environments. - -### Authorization plugins - - Plugin | Description -------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ - [Twistlock AuthZ Broker](https://github.com/twistlock/authz) | A basic extendable authorization plugin that runs directly on the host or inside a container. This plugin allows you to define user policies that it evaluates during authorization. Basic authorization is provided if Docker daemon is started with the --tlsverify flag (username is extracted from the certificate common name). 
- -## Troubleshooting a plugin - -If you are having problems with Docker after loading a plugin, ask the authors -of the plugin for help. The Docker team may not be able to assist you. - -## Writing a plugin - -If you are interested in writing a plugin for Docker, or seeing how they work -under the hood, see the [docker plugins reference](plugin_api.md). diff --git a/fn/vendor/github.com/docker/docker/docs/extend/plugin_api.md b/fn/vendor/github.com/docker/docker/docs/extend/plugin_api.md deleted file mode 100644 index 693b77a2f..000000000 --- a/fn/vendor/github.com/docker/docker/docs/extend/plugin_api.md +++ /dev/null @@ -1,196 +0,0 @@ ---- -title: "Plugins API" -description: "How to write Docker plugins extensions " -keywords: "API, Usage, plugins, documentation, developer" ---- - - - -# Docker Plugin API - -Docker plugins are out-of-process extensions which add capabilities to the -Docker Engine. - -This document describes the Docker Engine plugin API. To view information on -plugins managed by Docker Engine, refer to [Docker Engine plugin system](index.md). - -This page is intended for people who want to develop their own Docker plugin. -If you just want to learn about or use Docker plugins, look -[here](legacy_plugins.md). - -## What plugins are - -A plugin is a process running on the same or a different host as the docker daemon, -which registers itself by placing a file on the same docker host in one of the plugin -directories described in [Plugin discovery](#plugin-discovery). - -Plugins have human-readable names, which are short, lowercase strings. For -example, `flocker` or `weave`. - -Plugins can run inside or outside containers. Currently running them outside -containers is recommended. - -## Plugin discovery - -Docker discovers plugins by looking for them in the plugin directory whenever a -user or container tries to use one by name. - -There are three types of files which can be put in the plugin directory. - -* `.sock` files are UNIX domain sockets. 
-* `.spec` files are text files containing a URL, such as `unix:///other.sock` or `tcp://localhost:8080`. -* `.json` files are text files containing a full json specification for the plugin. - -Plugins with UNIX domain socket files must run on the same docker host, whereas -plugins with spec or json files can run on a different host if a remote URL is specified. - -UNIX domain socket files must be located under `/run/docker/plugins`, whereas -spec files can be located either under `/etc/docker/plugins` or `/usr/lib/docker/plugins`. - -The name of the file (excluding the extension) determines the plugin name. - -For example, the `flocker` plugin might create a UNIX socket at -`/run/docker/plugins/flocker.sock`. - -You can define each plugin into a separated subdirectory if you want to isolate definitions from each other. -For example, you can create the `flocker` socket under `/run/docker/plugins/flocker/flocker.sock` and only -mount `/run/docker/plugins/flocker` inside the `flocker` container. - -Docker always searches for unix sockets in `/run/docker/plugins` first. It checks for spec or json files under -`/etc/docker/plugins` and `/usr/lib/docker/plugins` if the socket doesn't exist. The directory scan stops as -soon as it finds the first plugin definition with the given name. - -### JSON specification - -This is the JSON format for a plugin: - -```json -{ - "Name": "plugin-example", - "Addr": "https://example.com/docker/plugin", - "TLSConfig": { - "InsecureSkipVerify": false, - "CAFile": "/usr/shared/docker/certs/example-ca.pem", - "CertFile": "/usr/shared/docker/certs/example-cert.pem", - "KeyFile": "/usr/shared/docker/certs/example-key.pem" - } -} -``` - -The `TLSConfig` field is optional and TLS will only be verified if this configuration is present. - -## Plugin lifecycle - -Plugins should be started before Docker, and stopped after Docker. 
For -example, when packaging a plugin for a platform which supports `systemd`, you -might use [`systemd` dependencies]( -http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=) to -manage startup and shutdown order. - -When upgrading a plugin, you should first stop the Docker daemon, upgrade the -plugin, then start Docker again. - -## Plugin activation - -When a plugin is first referred to -- either by a user referring to it by name -(e.g. `docker run --volume-driver=foo`) or a container already configured to -use a plugin being started -- Docker looks for the named plugin in the plugin -directory and activates it with a handshake. See Handshake API below. - -Plugins are *not* activated automatically at Docker daemon startup. Rather, -they are activated only lazily, or on-demand, when they are needed. - -## Systemd socket activation - -Plugins may also be socket activated by `systemd`. The official [Plugins helpers](https://github.com/docker/go-plugins-helpers) -natively supports socket activation. In order for a plugin to be socket activated it needs -a `service` file and a `socket` file. - -The `service` file (for example `/lib/systemd/system/your-plugin.service`): - -``` -[Unit] -Description=Your plugin -Before=docker.service -After=network.target your-plugin.socket -Requires=your-plugin.socket docker.service - -[Service] -ExecStart=/usr/lib/docker/your-plugin - -[Install] -WantedBy=multi-user.target -``` -The `socket` file (for example `/lib/systemd/system/your-plugin.socket`): - -``` -[Unit] -Description=Your plugin - -[Socket] -ListenStream=/run/docker/plugins/your-plugin.sock - -[Install] -WantedBy=sockets.target -``` - -This will allow plugins to be actually started when the Docker daemon connects to -the sockets they're listening on (for instance the first time the daemon uses them -or if one of the plugin goes down accidentally). - -## API design - -The Plugin API is RPC-style JSON over HTTP, much like webhooks. 
- -Requests flow *from* the Docker daemon *to* the plugin. So the plugin needs to -implement an HTTP server and bind this to the UNIX socket mentioned in the -"plugin discovery" section. - -All requests are HTTP `POST` requests. - -The API is versioned via an Accept header, which currently is always set to -`application/vnd.docker.plugins.v1+json`. - -## Handshake API - -Plugins are activated via the following "handshake" API call. - -### /Plugin.Activate - -**Request:** empty body - -**Response:** -``` -{ - "Implements": ["VolumeDriver"] -} -``` - -Responds with a list of Docker subsystems which this plugin implements. -After activation, the plugin will then be sent events from this subsystem. - -Possible values are: - -* [`authz`](plugins_authorization.md) -* [`NetworkDriver`](plugins_network.md) -* [`VolumeDriver`](plugins_volume.md) - - -## Plugin retries - -Attempts to call a method on a plugin are retried with an exponential backoff -for up to 30 seconds. This may help when packaging plugins as containers, since -it gives plugin containers a chance to start up before failing any user -containers which depend on them. - -## Plugins helpers - -To ease plugins development, we're providing an `sdk` for each kind of plugins -currently supported by Docker at [docker/go-plugins-helpers](https://github.com/docker/go-plugins-helpers). diff --git a/fn/vendor/github.com/docker/docker/docs/extend/plugins_authorization.md b/fn/vendor/github.com/docker/docker/docs/extend/plugins_authorization.md deleted file mode 100644 index ac1837f75..000000000 --- a/fn/vendor/github.com/docker/docker/docs/extend/plugins_authorization.md +++ /dev/null @@ -1,260 +0,0 @@ ---- -title: "Access authorization plugin" -description: "How to create authorization plugins to manage access control to your Docker daemon." 
-keywords: "security, authorization, authentication, docker, documentation, plugin, extend" -redirect_from: -- "/engine/extend/authorization/" ---- - - - -# Create an authorization plugin - -This document describes the Docker Engine plugins generally available in Docker -Engine. To view information on plugins managed by Docker Engine, -refer to [Docker Engine plugin system](index.md). - -Docker's out-of-the-box authorization model is all or nothing. Any user with -permission to access the Docker daemon can run any Docker client command. The -same is true for callers using Docker's Engine API to contact the daemon. If you -require greater access control, you can create authorization plugins and add -them to your Docker daemon configuration. Using an authorization plugin, a -Docker administrator can configure granular access policies for managing access -to Docker daemon. - -Anyone with the appropriate skills can develop an authorization plugin. These -skills, at their most basic, are knowledge of Docker, understanding of REST, and -sound programming knowledge. This document describes the architecture, state, -and methods information available to an authorization plugin developer. - -## Basic principles - -Docker's [plugin infrastructure](plugin_api.md) enables -extending Docker by loading, removing and communicating with -third-party components using a generic API. The access authorization subsystem -was built using this mechanism. - -Using this subsystem, you don't need to rebuild the Docker daemon to add an -authorization plugin. You can add a plugin to an installed Docker daemon. You do -need to restart the Docker daemon to add a new plugin. - -An authorization plugin approves or denies requests to the Docker daemon based -on both the current authentication context and the command context. The -authentication context contains all user details and the authentication method. -The command context contains all the relevant request data. 
- -Authorization plugins must follow the rules described in [Docker Plugin API](plugin_api.md). -Each plugin must reside within directories described under the -[Plugin discovery](plugin_api.md#plugin-discovery) section. - -**Note**: the abbreviations `AuthZ` and `AuthN` mean authorization and authentication -respectively. - -## Default user authorization mechanism - -If TLS is enabled in the [Docker daemon](https://docs.docker.com/engine/security/https/), the default user authorization flow extracts the user details from the certificate subject name. -That is, the `User` field is set to the client certificate subject common name, and the `AuthenticationMethod` field is set to `TLS`. - -## Basic architecture - -You are responsible for registering your plugin as part of the Docker daemon -startup. You can install multiple plugins and chain them together. This chain -can be ordered. Each request to the daemon passes in order through the chain. -Only when all the plugins grant access to the resource, is the access granted. - -When an HTTP request is made to the Docker daemon through the CLI or via the -Engine API, the authentication subsystem passes the request to the installed -authentication plugin(s). The request contains the user (caller) and command -context. The plugin is responsible for deciding whether to allow or deny the -request. - -The sequence diagrams below depict an allow and deny authorization flow: - -![Authorization Allow flow](images/authz_allow.png) - -![Authorization Deny flow](images/authz_deny.png) - -Each request sent to the plugin includes the authenticated user, the HTTP -headers, and the request/response body. Only the user name and the -authentication method used are passed to the plugin. Most importantly, no user -credentials or tokens are passed. Finally, not all request/response bodies -are sent to the authorization plugin. Only those request/response bodies where -the `Content-Type` is either `text/*` or `application/json` are sent. 
- -For commands that can potentially hijack the HTTP connection (`HTTP -Upgrade`), such as `exec`, the authorization plugin is only called for the -initial HTTP requests. Once the plugin approves the command, authorization is -not applied to the rest of the flow. Specifically, the streaming data is not -passed to the authorization plugins. For commands that return chunked HTTP -response, such as `logs` and `events`, only the HTTP request is sent to the -authorization plugins. - -During request/response processing, some authorization flows might -need to do additional queries to the Docker daemon. To complete such flows, -plugins can call the daemon API similar to a regular user. To enable these -additional queries, the plugin must provide the means for an administrator to -configure proper authentication and security policies. - -## Docker client flows - -To enable and configure the authorization plugin, the plugin developer must -support the Docker client interactions detailed in this section. - -### Setting up Docker daemon - -Enable the authorization plugin with a dedicated command line flag in the -`--authorization-plugin=PLUGIN_ID` format. The flag supplies a `PLUGIN_ID` -value. This value can be the plugin’s socket or a path to a specification file. -Authorization plugins can be loaded without restarting the daemon. Refer -to the [`dockerd` documentation](../reference/commandline/dockerd.md#configuration-reloading) for more information. - -```bash -$ dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,... -``` - -Docker's authorization subsystem supports multiple `--authorization-plugin` parameters. - -### Calling authorized command (allow) - -```bash -$ docker pull centos -... -f1b10cd84249: Pull complete -... -``` - -### Calling unauthorized command (deny) - -```bash -$ docker pull centos -... -docker: Error response from daemon: authorization denied by plugin PLUGIN_NAME: volumes are not allowed. 
-``` - -### Error from plugins - -```bash -$ docker pull centos -... -docker: Error response from daemon: plugin PLUGIN_NAME failed with error: AuthZPlugin.AuthZReq: Cannot connect to the Docker daemon. Is the docker daemon running on this host?. -``` - -## API schema and implementation - -In addition to Docker's standard plugin registration method, each plugin -should implement the following two methods: - -* `/AuthZPlugin.AuthZReq` This authorize request method is called before the Docker daemon processes the client request. - -* `/AuthZPlugin.AuthZRes` This authorize response method is called before the response is returned from Docker daemon to the client. - -#### /AuthZPlugin.AuthZReq - -**Request**: - -```json -{ - "User": "The user identification", - "UserAuthNMethod": "The authentication method used", - "RequestMethod": "The HTTP method", - "RequestURI": "The HTTP request URI", - "RequestBody": "Byte array containing the raw HTTP request body", - "RequestHeader": "Byte array containing the raw HTTP request header as a map[string][]string " -} -``` - -**Response**: - -```json -{ - "Allow": "Determined whether the user is allowed or not", - "Msg": "The authorization message", - "Err": "The error message if things go wrong" -} -``` -#### /AuthZPlugin.AuthZRes - -**Request**: - -```json -{ - "User": "The user identification", - "UserAuthNMethod": "The authentication method used", - "RequestMethod": "The HTTP method", - "RequestURI": "The HTTP request URI", - "RequestBody": "Byte array containing the raw HTTP request body", - "RequestHeader": "Byte array containing the raw HTTP request header as a map[string][]string", - "ResponseBody": "Byte array containing the raw HTTP response body", - "ResponseHeader": "Byte array containing the raw HTTP response header as a map[string][]string", - "ResponseStatusCode":"Response status code" -} -``` - -**Response**: - -```json -{ - "Allow": "Determined whether the user is allowed or not", - "Msg": "The authorization 
message", - "Err": "The error message if things go wrong" -} -``` - -### Request authorization - -Each plugin must support two request authorization messages formats, one from the daemon to the plugin and then from the plugin to the daemon. The tables below detail the content expected in each message. - -#### Daemon -> Plugin - -Name | Type | Description ------------------------|-------------------|------------------------------------------------------- -User | string | The user identification -Authentication method | string | The authentication method used -Request method | enum | The HTTP method (GET/DELETE/POST) -Request URI | string | The HTTP request URI including API version (e.g., v.1.17/containers/json) -Request headers | map[string]string | Request headers as key value pairs (without the authorization header) -Request body | []byte | Raw request body - - -#### Plugin -> Daemon - -Name | Type | Description ---------|--------|---------------------------------------------------------------------------------- -Allow | bool | Boolean value indicating whether the request is allowed or denied -Msg | string | Authorization message (will be returned to the client in case the access is denied) -Err | string | Error message (will be returned to the client in case the plugin encounter an error. The string value supplied may appear in logs, so should not include confidential information) - -### Response authorization - -The plugin must support two authorization messages formats, one from the daemon to the plugin and then from the plugin to the daemon. The tables below detail the content expected in each message. 
- -#### Daemon -> Plugin - - -Name | Type | Description ------------------------ |------------------ |---------------------------------------------------- -User | string | The user identification -Authentication method | string | The authentication method used -Request method | string | The HTTP method (GET/DELETE/POST) -Request URI | string | The HTTP request URI including API version (e.g., v.1.17/containers/json) -Request headers | map[string]string | Request headers as key value pairs (without the authorization header) -Request body | []byte | Raw request body -Response status code | int | Status code from the docker daemon -Response headers | map[string]string | Response headers as key value pairs -Response body | []byte | Raw docker daemon response body - - -#### Plugin -> Daemon - -Name | Type | Description ---------|--------|---------------------------------------------------------------------------------- -Allow | bool | Boolean value indicating whether the response is allowed or denied -Msg | string | Authorization message (will be returned to the client in case the access is denied) -Err | string | Error message (will be returned to the client in case the plugin encounter an error. 
The string value supplied may appear in logs, so should not include confidential information) diff --git a/fn/vendor/github.com/docker/docker/docs/extend/plugins_graphdriver.md b/fn/vendor/github.com/docker/docker/docs/extend/plugins_graphdriver.md deleted file mode 100644 index c134b1ebc..000000000 --- a/fn/vendor/github.com/docker/docker/docs/extend/plugins_graphdriver.md +++ /dev/null @@ -1,403 +0,0 @@ ---- -title: "Graphdriver plugins" -description: "How to manage image and container filesystems with external plugins" -keywords: "Examples, Usage, storage, image, docker, data, graph, plugin, api" -advisory: experimental ---- - - - - -## Changelog - -### 1.13.0 - -- Support v2 plugins - -# Docker graph driver plugins - -Docker graph driver plugins enable admins to use an external/out-of-process -graph driver for use with Docker engine. This is an alternative to using the -built-in storage drivers, such as aufs/overlay/devicemapper/btrfs. - -You need to install and enable the plugin and then restart the Docker daemon -before using the plugin. See the following example for the correct ordering -of steps. - -``` -$ docker plugin install cpuguy83/docker-overlay2-graphdriver-plugin # this command also enables the driver - -$ pkill dockerd -$ dockerd --experimental -s cpuguy83/docker-overlay2-graphdriver-plugin -``` - -# Write a graph driver plugin - -See the [plugin documentation](https://docs.docker.com/engine/extend/) for detailed information -on the underlying plugin protocol. - - -## Graph Driver plugin protocol - -If a plugin registers itself as a `GraphDriver` when activated, then it is -expected to provide the rootfs for containers as well as image layer storage. - -### /GraphDriver.Init - -**Request**: -```json -{ - "Home": "/graph/home/path", - "Opts": [], - "UIDMaps": [], - "GIDMaps": [] -} -``` - -Initialize the graph driver plugin with a home directory and array of options. 
-These are passed through from the user, but the plugin is not required to parse -or honor them. - -The request also includes a list of UID and GID mappings, structed as follows: -```json -{ - "ContainerID": 0, - "HostID": 0, - "Size": 0 -} -``` - -**Response**: -```json -{ - "Err": "" -} -``` - -Respond with a non-empty string error if an error occurred. - - -### /GraphDriver.Capabilities - -**Request**: -```json -{} -``` - -Get behavioral characteristics of the graph driver. If a plugin does not handle -this request, the engine will use default values for all capabilities. - -**Response**: -```json -{ - "ReproducesExactDiffs": false, -} -``` - -Respond with values of capabilities: - -* **ReproducesExactDiffs** Defaults to false. Flags that this driver is capable -of reproducing exactly equivalent diffs for read-only filesystem layers. - - -### /GraphDriver.Create - -**Request**: -```json -{ - "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", - "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142", - "MountLabel": "", - "StorageOpt": {} -} -``` - -Create a new, empty, read-only filesystem layer with the specified -`ID`, `Parent` and `MountLabel`. If `Parent` is an empty string, there is no -parent layer. `StorageOpt` is map of strings which indicate storage options. - -**Response**: -```json -{ - "Err": "" -} -``` - -Respond with a non-empty string error if an error occurred. - -### /GraphDriver.CreateReadWrite - -**Request**: -```json -{ - "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", - "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142", - "MountLabel": "", - "StorageOpt": {} -} -``` - -Similar to `/GraphDriver.Create` but creates a read-write filesystem layer. - -### /GraphDriver.Remove - -**Request**: -```json -{ - "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187" -} -``` - -Remove the filesystem layer with this given `ID`. 
- -**Response**: -```json -{ - "Err": "" -} -``` - -Respond with a non-empty string error if an error occurred. - -### /GraphDriver.Get - -**Request**: -```json -{ - "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", - "MountLabel": "" -} -``` - -Get the mountpoint for the layered filesystem referred to by the given `ID`. - -**Response**: -```json -{ - "Dir": "/var/mygraph/46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", - "Err": "" -} -``` - -Respond with the absolute path to the mounted layered filesystem. -Respond with a non-empty string error if an error occurred. - -### /GraphDriver.Put - -**Request**: -```json -{ - "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187" -} -``` - -Release the system resources for the specified `ID`, such as unmounting the -filesystem layer. - -**Response**: -```json -{ - "Err": "" -} -``` - -Respond with a non-empty string error if an error occurred. - -### /GraphDriver.Exists - -**Request**: -```json -{ - "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187" -} -``` - -Determine if a filesystem layer with the specified `ID` exists. - -**Response**: -```json -{ - "Exists": true -} -``` - -Respond with a boolean for whether or not the filesystem layer with the specified -`ID` exists. - -### /GraphDriver.Status - -**Request**: -```json -{} -``` - -Get low-level diagnostic information about the graph driver. - -**Response**: -```json -{ - "Status": [[]] -} -``` - -Respond with a 2-D array with key/value pairs for the underlying status -information. 
- - -### /GraphDriver.GetMetadata - -**Request**: -```json -{ - "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187" -} -``` - -Get low-level diagnostic information about the layered filesystem with the -with the specified `ID` - -**Response**: -```json -{ - "Metadata": {}, - "Err": "" -} -``` - -Respond with a set of key/value pairs containing the low-level diagnostic -information about the layered filesystem. -Respond with a non-empty string error if an error occurred. - -### /GraphDriver.Cleanup - -**Request**: -```json -{} -``` - -Perform necessary tasks to release resources help by the plugin, such as -unmounting all the layered file systems. - -**Response**: -```json -{ - "Err": "" -} -``` - -Respond with a non-empty string error if an error occurred. - - -### /GraphDriver.Diff - -**Request**: -```json -{ - "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", - "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142" -} -``` - -Get an archive of the changes between the filesystem layers specified by the `ID` -and `Parent`. `Parent` may be an empty string, in which case there is no parent. - -**Response**: -``` -{% raw %} -{{ TAR STREAM }} -{% endraw %} -``` - -### /GraphDriver.Changes - -**Request**: -```json -{ - "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", - "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142" -} -``` - -Get a list of changes between the filesystem layers specified by the `ID` and -`Parent`. If `Parent` is an empty string, there is no parent. - -**Response**: -```json -{ - "Changes": [{}], - "Err": "" -} -``` - -Respond with a list of changes. 
The structure of a change is: -```json - "Path": "/some/path", - "Kind": 0, -``` - -Where the `Path` is the filesystem path within the layered filesystem that is -changed and `Kind` is an integer specifying the type of change that occurred: - -- 0 - Modified -- 1 - Added -- 2 - Deleted - -Respond with a non-empty string error if an error occurred. - -### /GraphDriver.ApplyDiff - -**Request**: -``` -{% raw %} -{{ TAR STREAM }} -{% endraw %} -``` - -Extract the changeset from the given diff into the layer with the specified `ID` -and `Parent` - -**Query Parameters**: - -- id (required)- the `ID` of the new filesystem layer to extract the diff to -- parent (required)- the `Parent` of the given `ID` - -**Response**: -```json -{ - "Size": 512366, - "Err": "" -} -``` - -Respond with the size of the new layer in bytes. -Respond with a non-empty string error if an error occurred. - -### /GraphDriver.DiffSize - -**Request**: -```json -{ - "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", - "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142" -} -``` - -Calculate the changes between the specified `ID` - -**Response**: -```json -{ - "Size": 512366, - "Err": "" -} -``` - -Respond with the size changes between the specified `ID` and `Parent` -Respond with a non-empty string error if an error occurred. diff --git a/fn/vendor/github.com/docker/docker/docs/extend/plugins_logging.md b/fn/vendor/github.com/docker/docker/docs/extend/plugins_logging.md deleted file mode 100644 index fd02a6a6f..000000000 --- a/fn/vendor/github.com/docker/docker/docs/extend/plugins_logging.md +++ /dev/null @@ -1,220 +0,0 @@ ---- -title: "Docker log driver plugins" -description: "Log driver plugins." -keywords: "Examples, Usage, plugins, docker, documentation, user guide, logging" ---- - - - -# Logging driver plugins - -This document describes logging driver plugins for Docker. 
- -Logging drivers enables users to forward container logs to another service for -processing. Docker includes several logging drivers as built-ins, however can -never hope to support all use-cases with built-in drivers. Plugins allow Docker -to support a wide range of logging services without requiring to embed client -libraries for these services in the main Docker codebase. See the -[plugin documentation](legacy_plugins.md) for more information. - -## Create a logging plugin - -The main interface for logging plugins uses the same JSON+HTTP RPC protocol used -by other plugin types. See the -[example](https://github.com/cpuguy83/docker-log-driver-test) plugin for a -reference implementation of a logging plugin. The example wraps the built-in -`jsonfilelog` log driver. - -## LogDriver protocol - -Logging plugins must register as a `LogDriver` during plugin activation. Once -activated users can specify the plugin as a log driver. - -There are two HTTP endpoints that logging plugins must implement: - -### `/LogDriver.StartLogging` - -Signals to the plugin that a container is starting that the plugin should start -receiving logs for. - -Logs will be streamed over the defined file in the request. On Linux this file -is a FIFO. Logging plugins are not currently supported on Windows. - -**Request**: -```json -{ - "File": "/path/to/file/stream", - "Info": { - "ContainerID": "123456" - } -} -``` - -`File` is the path to the log stream that needs to be consumed. Each call to -`StartLogging` should provide a different file path, even if it's a container -that the plugin has already received logs for prior. The file is created by -docker with a randomly generated name. - -`Info` is details about the container that's being logged. 
This is fairly -free-form, but is defined by the following struct definition: - -```go -type Info struct { - Config map[string]string - ContainerID string - ContainerName string - ContainerEntrypoint string - ContainerArgs []string - ContainerImageID string - ContainerImageName string - ContainerCreated time.Time - ContainerEnv []string - ContainerLabels map[string]string - LogPath string - DaemonName string -} -``` - - -`ContainerID` will always be supplied with this struct, but other fields may be -empty or missing. - -**Response** -```json -{ - "Err": "" -} -``` - -If an error occurred during this request, add an error message to the `Err` field -in the response. If no error then you can either send an empty response (`{}`) -or an empty value for the `Err` field. - -The driver should at this point be consuming log messages from the passed in file. -If messages are unconsumed, it may cause the contaier to block while trying to -write to its stdio streams. - -Log stream messages are encoded as protocol buffers. The protobuf definitions are -in the -[docker repository](https://github.com/docker/docker/blob/master/api/types/plugins/logdriver/entry.proto). - -Since protocol buffers are not self-delimited you must decode them from the stream -using the following stream format: - -``` -[size][message] -``` - -Where `size` is a 4-byte big endian binary encoded uint32. `size` in this case -defines the size of the next message. `message` is the actual log entry. - -A reference golang implementation of a stream encoder/decoder can be found -[here](https://github.com/docker/docker/blob/master/api/types/plugins/logdriver/io.go) - -### `/LogDriver.StopLogging` - -Signals to the plugin to stop collecting logs from the defined file. -Once a response is received, the file will be removed by Docker. You must make -sure to collect all logs on the stream before responding to this request or risk -losing log data. 
- -Requests on this endpoint does not mean that the container has been removed -only that it has stopped. - -**Request**: -```json -{ - "File": "/path/to/file/stream" -} -``` - -**Response**: -```json -{ - "Err": "" -} -``` - -If an error occurred during this request, add an error message to the `Err` field -in the response. If no error then you can either send an empty response (`{}`) -or an empty value for the `Err` field. - -## Optional endpoints - -Logging plugins can implement two extra logging endpoints: - -### `/LogDriver.Capabilities` - -Defines the capabilities of the log driver. You must implement this endpoint for -Docker to be able to take advantage of any of the defined capabilities. - -**Request**: -```json -{} -``` - -**Response**: -```json -{ - "ReadLogs": true -} -``` - -Supported capabilities: - -- `ReadLogs` - this tells Docker that the plugin is capable of reading back logs -to clients. Plugins that report that they support `ReadLogs` must implement the -`/LogDriver.ReadLogs` endpoint - -### `/LogDriver.ReadLogs` - -Reads back logs to the client. This is used when `docker logs ` is -called. - -In order for Docker to use this endpoint, the plugin must specify as much when -`/LogDriver.Capabilities` is called. - - -**Request**: -```json -{ - "ReadConfig": {}, - "Info": { - "ContainerID": "123456" - } -} -``` - -`ReadConfig` is the list of options for reading, it is defined with the following -golang struct: - -```go -type ReadConfig struct { - Since time.Time - Tail int - Follow bool -} -``` - -- `Since` defines the oldest log that should be sent. -- `Tail` defines the number of lines to read (e.g. like the command `tail -n 10`) -- `Follow` signals that the client wants to stay attached to receive new log messages -as they come in once the existing logs have been read. - -`Info` is the same type defined in `/LogDriver.StartLogging`. It should be used -to determine what set of logs to read. 
- -**Response**: -``` -{{ log stream }} -``` - -The response should be the encoded log message using the same format as the -messages that the plugin consumed from Docker. diff --git a/fn/vendor/github.com/docker/docker/docs/extend/plugins_network.md b/fn/vendor/github.com/docker/docker/docs/extend/plugins_network.md deleted file mode 100644 index a974862fa..000000000 --- a/fn/vendor/github.com/docker/docker/docs/extend/plugins_network.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: "Docker network driver plugins" -description: "Network driver plugins." -keywords: "Examples, Usage, plugins, docker, documentation, user guide" ---- - - - -# Engine network driver plugins - -This document describes Docker Engine network driver plugins generally -available in Docker Engine. To view information on plugins -managed by Docker Engine, refer to [Docker Engine plugin system](index.md). - -Docker Engine network plugins enable Engine deployments to be extended to -support a wide range of networking technologies, such as VXLAN, IPVLAN, MACVLAN -or something completely different. Network driver plugins are supported via the -LibNetwork project. Each plugin is implemented as a "remote driver" for -LibNetwork, which shares plugin infrastructure with Engine. Effectively, network -driver plugins are activated in the same way as other plugins, and use the same -kind of protocol. - -## Network driver plugins and swarm mode - -Docker 1.12 adds support for cluster management and orchestration called -[swarm mode](https://docs.docker.com/engine/swarm/). Docker Engine running in swarm mode currently -only supports the built-in overlay driver for networking. Therefore existing -networking plugins will not work in swarm mode. - -When you run Docker Engine outside of swarm mode, all networking plugins that -worked in Docker 1.11 will continue to function normally. They do not require -any modification. 
- -## Using network driver plugins - -The means of installing and running a network driver plugin depend on the -particular plugin. So, be sure to install your plugin according to the -instructions obtained from the plugin developer. - -Once running however, network driver plugins are used just like the built-in -network drivers: by being mentioned as a driver in network-oriented Docker -commands. For example, - - $ docker network create --driver weave mynet - -Some network driver plugins are listed in [plugins](legacy_plugins.md) - -The `mynet` network is now owned by `weave`, so subsequent commands -referring to that network will be sent to the plugin, - - $ docker run --network=mynet busybox top - - -## Write a network plugin - -Network plugins implement the [Docker plugin -API](plugin_api.md) and the network plugin protocol - -## Network plugin protocol - -The network driver protocol, in addition to the plugin activation call, is -documented as part of libnetwork: -[https://github.com/docker/libnetwork/blob/master/docs/remote.md](https://github.com/docker/libnetwork/blob/master/docs/remote.md). - -# Related Information - -To interact with the Docker maintainers and other interested users, see the IRC channel `#docker-network`. 
- -- [Docker networks feature overview](https://docs.docker.com/engine/userguide/networking/) -- The [LibNetwork](https://github.com/docker/libnetwork) project diff --git a/fn/vendor/github.com/docker/docker/docs/extend/plugins_services.md b/fn/vendor/github.com/docker/docker/docs/extend/plugins_services.md deleted file mode 100644 index 79e344f9c..000000000 --- a/fn/vendor/github.com/docker/docker/docs/extend/plugins_services.md +++ /dev/null @@ -1,186 +0,0 @@ ---- -description: Using services with plugins -keywords: "API, Usage, plugins, documentation, developer" -title: Plugins and Services ---- - - - -# Using Volume and Network plugins in Docker services - -In swarm mode, it is possible to create a service that allows for attaching -to networks or mounting volumes that are backed by plugins. Swarm schedules -services based on plugin availability on a node. - - -### Volume plugins - -In this example, a volume plugin is installed on a swarm worker and a volume -is created using the plugin. In the manager, a service is created with the -relevant mount options. It can be observed that the service is scheduled to -run on the worker node with the said volume plugin and volume. Note that, -node1 is the manager and node2 is the worker. - -1. Prepare manager. In node 1: - - ```bash - $ docker swarm init - Swarm initialized: current node (dxn1zf6l61qsb1josjja83ngz) is now a manager. - ``` - -2. Join swarm, install plugin and create volume on worker. 
In node 2: - - ```bash - $ docker swarm join \ - --token SWMTKN-1-49nj1cmql0jkz5s954yi3oex3nedyz0fb0xx14ie39trti4wxv-8vxv8rssmk743ojnwacrr2e7c \ - 192.168.99.100:2377 - ``` - - ```bash - $ docker plugin install tiborvass/sample-volume-plugin - latest: Pulling from tiborvass/sample-volume-plugin - eb9c16fbdc53: Download complete - Digest: sha256:00b42de88f3a3e0342e7b35fa62394b0a9ceb54d37f4c50be5d3167899994639 - Status: Downloaded newer image for tiborvass/sample-volume-plugin:latest - Installed plugin tiborvass/sample-volume-plugin - ``` - - ```bash - $ docker volume create -d tiborvass/sample-volume-plugin --name pluginVol - ``` - -3. Create a service using the plugin and volume. In node1: - - ```bash - $ docker service create --name my-service --mount type=volume,volume-driver=tiborvass/sample-volume-plugin,source=pluginVol,destination=/tmp busybox top - - $ docker service ls - z1sj8bb8jnfn my-service replicated 1/1 busybox:latest - ``` - docker service ls shows service 1 instance of service running. - -4. Observe the task getting scheduled in node 2: - - ```bash - {% raw %} - $ docker ps --format '{{.ID}}\t {{.Status}} {{.Names}} {{.Command}}' - 83fc1e842599 Up 2 days my-service.1.9jn59qzn7nbc3m0zt1hij12xs "top" - {% endraw %} - ``` - -### Network plugins - -In this example, a global scope network plugin is installed on both the -swarm manager and worker. A service is created with replicated instances -using the installed plugin. We will observe how the availability of the -plugin determines network creation and container scheduling. - -Note that node1 is the manager and node2 is the worker. - - -1. Install a global scoped network plugin on both manager and worker. On node1 - and node2: - - ```bash - $ docker plugin install bboreham/weave2 - Plugin "bboreham/weave2" is requesting the following privileges: - - network: [host] - - capabilities: [CAP_SYS_ADMIN CAP_NET_ADMIN] - Do you grant the above permissions? 
[y/N] y - latest: Pulling from bboreham/weave2 - 7718f575adf7: Download complete - Digest: sha256:2780330cc15644b60809637ee8bd68b4c85c893d973cb17f2981aabfadfb6d72 - Status: Downloaded newer image for bboreham/weave2:latest - Installed plugin bboreham/weave2 - ``` - -2. Create a network using plugin on manager. On node1: - - ```bash - $ docker network create --driver=bboreham/weave2:latest globalnet - - $ docker network ls - NETWORK ID NAME DRIVER SCOPE - qlj7ueteg6ly globalnet bboreham/weave2:latest swarm - ``` - -3. Create a service on the manager and have replicas set to 8. Observe that -containers get scheduled on both manager and worker. - - On node 1: - - ```bash - $ docker service create --network globalnet --name myservice --replicas=8 mrjana/simpleweb simpleweb -w90drnfzw85nygbie9kb89vpa - ``` - - ```bash - $ docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 87520965206a mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 5 seconds ago Up 4 seconds myservice.4.ytdzpktmwor82zjxkh118uf1v - 15e24de0f7aa mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 5 seconds ago Up 4 seconds myservice.2.kh7a9n3iauq759q9mtxyfs9hp - c8c8f0144cdc mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 5 seconds ago Up 4 seconds myservice.6.sjhpj5gr3xt33e3u2jycoj195 - 2e8e4b2c5c08 mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 5 seconds ago Up 4 seconds myservice.8.2z29zowsghx66u2velublwmrh - ``` - - On node 2: - - ```bash - $ docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 53c0ae7c1dae mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 2 seconds ago Up Less than a second myservice.7.x44tvvdm3iwkt9kif35f7ykz1 - 9b56c627fee0 
mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 2 seconds ago Up Less than a second myservice.1.x7n1rm6lltw5gja3ueikze57q - d4f5927ba52c mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 2 seconds ago Up 1 second myservice.5.i97bfo9uc6oe42lymafs9rz6k - 478c0d395bd7 mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 2 seconds ago Up Less than a second myservice.3.yr7nkffa48lff1vrl2r1m1ucs - ``` - -4. Scale down the number of instances. On node1: - - ```bash - $ docker service scale myservice=0 - myservice scaled to 0 - ``` - -5. Disable and uninstall the plugin on the worker. On node2: - - ```bash - $ docker plugin rm -f bboreham/weave2 - bboreham/weave2 - ``` - -6. Scale up the number of instances again. Observe that all containers are -scheduled on the master and not on the worker, because the plugin is not available on the worker anymore. 
- - On node 1: - - ```bash - $ docker service scale myservice=8 - myservice scaled to 8 - ``` - - ```bash - $ docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - cf4b0ec2415e mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 36 seconds myservice.3.r7p5o208jmlzpcbm2ytl3q6n1 - 57c64a6a2b88 mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 36 seconds myservice.4.dwoezsbb02ccstkhlqjy2xe7h - 3ac68cc4e7b8 mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 35 seconds myservice.5.zx4ezdrm2nwxzkrwnxthv0284 - 006c3cb318fc mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 36 seconds myservice.8.q0e3umt19y3h3gzo1ty336k5r - dd2ffebde435 mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 36 seconds myservice.7.a77y3u22prjipnrjg7vzpv3ba - a86c74d8b84b mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 36 seconds myservice.6.z9nbn14bagitwol1biveeygl7 - 2846a7850ba0 mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 37 seconds myservice.2.ypufz2eh9fyhppgb89g8wtj76 - e2ec01efcd8a mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 38 seconds myservice.1.8w7c4ttzr6zcb9sjsqyhwp3yl - ``` - - On node 2: - - ```bash - $ docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - ``` diff --git a/fn/vendor/github.com/docker/docker/docs/extend/plugins_volume.md b/fn/vendor/github.com/docker/docker/docs/extend/plugins_volume.md deleted file mode 100644 index 807ab5a48..000000000 --- 
a/fn/vendor/github.com/docker/docker/docs/extend/plugins_volume.md +++ /dev/null @@ -1,360 +0,0 @@ ---- -title: "Volume plugins" -description: "How to manage data with external volume plugins" -keywords: "Examples, Usage, volume, docker, data, volumes, plugin, api" ---- - - - -# Write a volume plugin - -Docker Engine volume plugins enable Engine deployments to be integrated with -external storage systems such as Amazon EBS, and enable data volumes to persist -beyond the lifetime of a single Docker host. See the -[plugin documentation](legacy_plugins.md) for more information. - -## Changelog - -### 1.13.0 - -- If used as part of the v2 plugin architecture, mountpoints that are part of - paths returned by the plugin must be mounted under the directory specified by - `PropagatedMount` in the plugin configuration - ([#26398](https://github.com/docker/docker/pull/26398)) - -### 1.12.0 - -- Add `Status` field to `VolumeDriver.Get` response - ([#21006](https://github.com/docker/docker/pull/21006#)) -- Add `VolumeDriver.Capabilities` to get capabilities of the volume driver - ([#22077](https://github.com/docker/docker/pull/22077)) - -### 1.10.0 - -- Add `VolumeDriver.Get` which gets the details about the volume - ([#16534](https://github.com/docker/docker/pull/16534)) -- Add `VolumeDriver.List` which lists all volumes owned by the driver - ([#16534](https://github.com/docker/docker/pull/16534)) - -### 1.8.0 - -- Initial support for volume driver plugins - ([#14659](https://github.com/docker/docker/pull/14659)) - -## Command-line changes - -To give a container access to a volume, use the `--volume` and `--volume-driver` -flags on the `docker container run` command. The `--volume` (or `-v`) flag -accepts a volume name and path on the host, and the `--volume-driver` flag -accepts a driver type. 
- -```bash -$ docker volume create --driver=flocker volumename - -$ docker container run -it --volume volumename:/data busybox sh -``` - -### `--volume` - -The `--volume` (or `-v`) flag takes a value that is in the format -`:`. The two parts of the value are -separated by a colon (`:`) character. - -- The volume name is a human-readable name for the volume, and cannot begin with - a `/` character. It is referred to as `volume_name` in the rest of this topic. -- The `Mountpoint` is the path on the host (v1) or in the plugin (v2) where the - volume has been made available. - -### `volumedriver` - -Specifying a `volumedriver` in conjunction with a `volumename` allows you to -use plugins such as [Flocker](https://github.com/ScatterHQ/flocker) to manage -volumes external to a single host, such as those on EBS. - -## Create a VolumeDriver - -The container creation endpoint (`/containers/create`) accepts a `VolumeDriver` -field of type `string` allowing to specify the name of the driver. If not -specified, it defaults to `"local"` (the default driver for local volumes). - -## Volume plugin protocol - -If a plugin registers itself as a `VolumeDriver` when activated, it must -provide the Docker Daemon with writeable paths on the host filesystem. The Docker -daemon provides these paths to containers to consume. The Docker daemon makes -the volumes available by bind-mounting the provided paths into the containers. - -> **Note**: Volume plugins should *not* write data to the `/var/lib/docker/` -> directory, including `/var/lib/docker/volumes`. The `/var/lib/docker/` -> directory is reserved for Docker. - -### `/VolumeDriver.Create` - -**Request**: -```json -{ - "Name": "volume_name", - "Opts": {} -} -``` - -Instruct the plugin that the user wants to create a volume, given a user -specified volume name. The plugin does not need to actually manifest the -volume on the filesystem yet (until `Mount` is called). 
-`Opts` is a map of driver specific options passed through from the user request. - -**Response**: -```json -{ - "Err": "" -} -``` - -Respond with a string error if an error occurred. - -### `/VolumeDriver.Remove` - -**Request**: -```json -{ - "Name": "volume_name" -} -``` - -Delete the specified volume from disk. This request is issued when a user -invokes `docker rm -v` to remove volumes associated with a container. - -**Response**: -```json -{ - "Err": "" -} -``` - -Respond with a string error if an error occurred. - -### `/VolumeDriver.Mount` - -**Request**: -```json -{ - "Name": "volume_name", - "ID": "b87d7442095999a92b65b3d9691e697b61713829cc0ffd1bb72e4ccd51aa4d6c" -} -``` - -Docker requires the plugin to provide a volume, given a user specified volume -name. `Mount` is called once per container start. If the same `volume_name` is requested -more than once, the plugin may need to keep track of each new mount request and provision -at the first mount request and deprovision at the last corresponding unmount request. - -`ID` is a unique ID for the caller that is requesting the mount. - -**Response**: - -- **v1**: - - ```json - { - "Mountpoint": "/path/to/directory/on/host", - "Err": "" - } - ``` - -- **v2**: - - ```json - { - "Mountpoint": "/path/under/PropagatedMount", - "Err": "" - } - ``` - -`Mountpoint` is the path on the host (v1) or in the plugin (v2) where the volume -has been made available. - -`Err` is either empty or contains an error string. - -### `/VolumeDriver.Path` - -**Request**: - -```json -{ - "Name": "volume_name" -} -``` - -Request the path to the volume with the given `volume_name`. 
- -**Response**: - -- **v1**: - - ```json - { - "Mountpoin": "/path/to/directory/on/host", - "Err": "" - } - ``` - -- **v2**: - - ```json - { - "Mountpoint": "/path/under/PropagatedMount", - "Err": "" - } - ``` - -Respond with the path on the host (v1) or inside the plugin (v2) where the -volume has been made available, and/or a string error if an error occurred. - -`Mountpoint` is optional. However, the plugin may be queried again later if one -is not provided. - -### `/VolumeDriver.Unmount` - -**Request**: -```json -{ - "Name": "volume_name", - "ID": "b87d7442095999a92b65b3d9691e697b61713829cc0ffd1bb72e4ccd51aa4d6c" -} -``` - -Docker is no longer using the named volume. `Unmount` is called once per -container stop. Plugin may deduce that it is safe to deprovision the volume at -this point. - -`ID` is a unique ID for the caller that is requesting the mount. - -**Response**: -```json -{ - "Err": "" -} -``` - -Respond with a string error if an error occurred. - - -### `/VolumeDriver.Get` - -**Request**: -```json -{ - "Name": "volume_name" -} -``` - -Get info about `volume_name`. - - -**Response**: - -- **v1**: - - ```json - { - "Volume": { - "Name": "volume_name", - "Mountpoint": "/path/to/directory/on/host", - "Status": {} - }, - "Err": "" - } - ``` - -- **v2**: - - ```json - { - "Volume": { - "Name": "volume_name", - "Mountpoint": "/path/under/PropagatedMount", - "Status": {} - }, - "Err": "" - } - ``` - -Respond with a string error if an error occurred. `Mountpoint` and `Status` are -optional. - - -### /VolumeDriver.List - -**Request**: -```json -{} -``` - -Get the list of volumes registered with the plugin. 
- -**Response**: - -- **v1**: - - ```json - { - "Volumes": [ - { - "Name": "volume_name", - "Mountpoint": "/path/to/directory/on/host" - } - ], - "Err": "" - } - ``` - -- **v2**: - - ```json - { - "Volumes": [ - { - "Name": "volume_name", - "Mountpoint": "/path/under/PropagatedMount" - } - ], - "Err": "" - } - ``` - - -Respond with a string error if an error occurred. `Mountpoint` is optional. - -### /VolumeDriver.Capabilities - -**Request**: -```json -{} -``` - -Get the list of capabilities the driver supports. - -The driver is not required to implement `Capabilities`. If it is not -implemented, the default values are used. - -**Response**: -```json -{ - "Capabilities": { - "Scope": "global" - } -} -``` - -Supported scopes are `global` and `local`. Any other value in `Scope` will be -ignored, and `local` is used. `Scope` allows cluster managers to handle the -volume in different ways. For instance, a scope of `global`, signals to the -cluster manager that it only needs to create the volume once instead of on each -Docker host. More capabilities may be added in the future. diff --git a/fn/vendor/github.com/docker/docker/docs/reference/builder.md b/fn/vendor/github.com/docker/docker/docs/reference/builder.md deleted file mode 100644 index 9c28e8b47..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/builder.md +++ /dev/null @@ -1,1838 +0,0 @@ ---- -title: "Dockerfile reference" -description: "Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image." -keywords: "builder, docker, Dockerfile, automation, image creation" -redirect_from: -- /reference/builder/ ---- - - - -# Dockerfile reference - -Docker can build images automatically by reading the instructions from a -`Dockerfile`. A `Dockerfile` is a text document that contains all the commands a -user could call on the command line to assemble an image. 
Using `docker build` -users can create an automated build that executes several command-line -instructions in succession. - -This page describes the commands you can use in a `Dockerfile`. When you are -done reading this page, refer to the [`Dockerfile` Best -Practices](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/) for a tip-oriented guide. - -## Usage - -The [`docker build`](commandline/build.md) command builds an image from -a `Dockerfile` and a *context*. The build's context is the files at a specified -location `PATH` or `URL`. The `PATH` is a directory on your local filesystem. -The `URL` is a Git repository location. - -A context is processed recursively. So, a `PATH` includes any subdirectories and -the `URL` includes the repository and its submodules. A simple build command -that uses the current directory as context: - - $ docker build . - Sending build context to Docker daemon 6.51 MB - ... - -The build is run by the Docker daemon, not by the CLI. The first thing a build -process does is send the entire context (recursively) to the daemon. In most -cases, it's best to start with an empty directory as context and keep your -Dockerfile in that directory. Add only the files needed for building the -Dockerfile. - ->**Warning**: Do not use your root directory, `/`, as the `PATH` as it causes ->the build to transfer the entire contents of your hard drive to the Docker ->daemon. - -To use a file in the build context, the `Dockerfile` refers to the file specified -in an instruction, for example, a `COPY` instruction. To increase the build's -performance, exclude files and directories by adding a `.dockerignore` file to -the context directory. For information about how to [create a `.dockerignore` -file](#dockerignore-file) see the documentation on this page. - -Traditionally, the `Dockerfile` is called `Dockerfile` and located in the root -of the context. 
You use the `-f` flag with `docker build` to point to a Dockerfile -anywhere in your file system. - - $ docker build -f /path/to/a/Dockerfile . - -You can specify a repository and tag at which to save the new image if -the build succeeds: - - $ docker build -t shykes/myapp . - -To tag the image into multiple repositories after the build, -add multiple `-t` parameters when you run the `build` command: - - $ docker build -t shykes/myapp:1.0.2 -t shykes/myapp:latest . - -Before the Docker daemon runs the instructions in the `Dockerfile`, it performs -a preliminary validation of the `Dockerfile` and returns an error if the syntax is incorrect: - - $ docker build -t test/myapp . - Sending build context to Docker daemon 2.048 kB - Error response from daemon: Unknown instruction: RUNCMD - -The Docker daemon runs the instructions in the `Dockerfile` one-by-one, -committing the result of each instruction -to a new image if necessary, before finally outputting the ID of your -new image. The Docker daemon will automatically clean up the context you -sent. - -Note that each instruction is run independently, and causes a new image -to be created - so `RUN cd /tmp` will not have any effect on the next -instructions. - -Whenever possible, Docker will re-use the intermediate images (cache), -to accelerate the `docker build` process significantly. This is indicated by -the `Using cache` message in the console output. -(For more information, see the [Build cache section](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#/build-cache)) in the -`Dockerfile` best practices guide: - - $ docker build -t svendowideit/ambassador . 
- Sending build context to Docker daemon 15.36 kB - Step 1/4 : FROM alpine:3.2 - ---> 31f630c65071 - Step 2/4 : MAINTAINER SvenDowideit@home.org.au - ---> Using cache - ---> 2a1c91448f5f - Step 3/4 : RUN apk update && apk add socat && rm -r /var/cache/ - ---> Using cache - ---> 21ed6e7fbb73 - Step 4/4 : CMD env | grep _TCP= | (sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat -t 100000000 TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' && echo wait) | sh - ---> Using cache - ---> 7ea8aef582cc - Successfully built 7ea8aef582cc - -Build cache is only used from images that have a local parent chain. This means -that these images were created by previous builds or the whole chain of images -was loaded with `docker load`. If you wish to use build cache of a specific -image you can specify it with `--cache-from` option. Images specified with -`--cache-from` do not need to have a parent chain and may be pulled from other -registries. - -When you're done with your build, you're ready to look into [*Pushing a -repository to its registry*](https://docs.docker.com/engine/tutorials/dockerrepos/#/contributing-to-docker-hub). - -## Format - -Here is the format of the `Dockerfile`: - -```Dockerfile -# Comment -INSTRUCTION arguments -``` - -The instruction is not case-sensitive. However, convention is for them to -be UPPERCASE to distinguish them from arguments more easily. - - -Docker runs instructions in a `Dockerfile` in order. A `Dockerfile` **must -start with a \`FROM\` instruction**. The `FROM` instruction specifies the [*Base -Image*](glossary.md#base-image) from which you are building. `FROM` may only be -proceeded by one or more `ARG` instructions, which declare arguments that are used -in `FROM` lines in the `Dockerfile`. - -Docker treats lines that *begin* with `#` as a comment, unless the line is -a valid [parser directive](#parser-directives). A `#` marker anywhere -else in a line is treated as an argument. 
This allows statements like: - -```Dockerfile -# Comment -RUN echo 'we are running some # of cool things' -``` - -Line continuation characters are not supported in comments. - -## Parser directives - -Parser directives are optional, and affect the way in which subsequent lines -in a `Dockerfile` are handled. Parser directives do not add layers to the build, -and will not be shown as a build step. Parser directives are written as a -special type of comment in the form `# directive=value`. A single directive -may only be used once. - -Once a comment, empty line or builder instruction has been processed, Docker -no longer looks for parser directives. Instead it treats anything formatted -as a parser directive as a comment and does not attempt to validate if it might -be a parser directive. Therefore, all parser directives must be at the very -top of a `Dockerfile`. - -Parser directives are not case-sensitive. However, convention is for them to -be lowercase. Convention is also to include a blank line following any -parser directives. Line continuation characters are not supported in parser -directives. - -Due to these rules, the following examples are all invalid: - -Invalid due to line continuation: - -```Dockerfile -# direc \ -tive=value -``` - -Invalid due to appearing twice: - -```Dockerfile -# directive=value1 -# directive=value2 - -FROM ImageName -``` - -Treated as a comment due to appearing after a builder instruction: - -```Dockerfile -FROM ImageName -# directive=value -``` - -Treated as a comment due to appearing after a comment which is not a parser -directive: - -```Dockerfile -# About my dockerfile -# directive=value -FROM ImageName -``` - -The unknown directive is treated as a comment due to not being recognized. In -addition, the known directive is treated as a comment due to appearing after -a comment which is not a parser directive. 
- -```Dockerfile -# unknowndirective=value -# knowndirective=value -``` - -Non line-breaking whitespace is permitted in a parser directive. Hence, the -following lines are all treated identically: - -```Dockerfile -#directive=value -# directive =value -# directive= value -# directive = value -# dIrEcTiVe=value -``` - -The following parser directive is supported: - -* `escape` - -## escape - - # escape=\ (backslash) - -Or - - # escape=` (backtick) - -The `escape` directive sets the character used to escape characters in a -`Dockerfile`. If not specified, the default escape character is `\`. - -The escape character is used both to escape characters in a line, and to -escape a newline. This allows a `Dockerfile` instruction to -span multiple lines. Note that regardless of whether the `escape` parser -directive is included in a `Dockerfile`, *escaping is not performed in -a `RUN` command, except at the end of a line.* - -Setting the escape character to `` ` `` is especially useful on -`Windows`, where `\` is the directory path separator. `` ` `` is consistent -with [Windows PowerShell](https://technet.microsoft.com/en-us/library/hh847755.aspx). - -Consider the following example which would fail in a non-obvious way on -`Windows`. The second `\` at the end of the second line would be interpreted as an -escape for the newline, instead of a target of the escape from the first `\`. -Similarly, the `\` at the end of the third line would, assuming it was actually -handled as an instruction, cause it be treated as a line continuation. The result -of this dockerfile is that second and third lines are considered a single -instruction: - -```Dockerfile -FROM microsoft/nanoserver -COPY testfile.txt c:\\ -RUN dir c:\ -``` - -Results in: - - PS C:\John> docker build -t cmd . 
- Sending build context to Docker daemon 3.072 kB - Step 1/2 : FROM microsoft/nanoserver - ---> 22738ff49c6d - Step 2/2 : COPY testfile.txt c:\RUN dir c: - GetFileAttributesEx c:RUN: The system cannot find the file specified. - PS C:\John> - -One solution to the above would be to use `/` as the target of both the `COPY` -instruction, and `dir`. However, this syntax is, at best, confusing as it is not -natural for paths on `Windows`, and at worst, error prone as not all commands on -`Windows` support `/` as the path separator. - -By adding the `escape` parser directive, the following `Dockerfile` succeeds as -expected with the use of natural platform semantics for file paths on `Windows`: - - # escape=` - - FROM microsoft/nanoserver - COPY testfile.txt c:\ - RUN dir c:\ - -Results in: - - PS C:\John> docker build -t succeeds --no-cache=true . - Sending build context to Docker daemon 3.072 kB - Step 1/3 : FROM microsoft/nanoserver - ---> 22738ff49c6d - Step 2/3 : COPY testfile.txt c:\ - ---> 96655de338de - Removing intermediate container 4db9acbb1682 - Step 3/3 : RUN dir c:\ - ---> Running in a2c157f842f5 - Volume in drive C has no label. - Volume Serial Number is 7E6D-E0F7 - - Directory of c:\ - - 10/05/2016 05:04 PM 1,894 License.txt - 10/05/2016 02:22 PM

Program Files - 10/05/2016 02:14 PM Program Files (x86) - 10/28/2016 11:18 AM 62 testfile.txt - 10/28/2016 11:20 AM Users - 10/28/2016 11:20 AM Windows - 2 File(s) 1,956 bytes - 4 Dir(s) 21,259,096,064 bytes free - ---> 01c7f3bef04f - Removing intermediate container a2c157f842f5 - Successfully built 01c7f3bef04f - PS C:\John> - -## Environment replacement - -Environment variables (declared with [the `ENV` statement](#env)) can also be -used in certain instructions as variables to be interpreted by the -`Dockerfile`. Escapes are also handled for including variable-like syntax -into a statement literally. - -Environment variables are notated in the `Dockerfile` either with -`$variable_name` or `${variable_name}`. They are treated equivalently and the -brace syntax is typically used to address issues with variable names with no -whitespace, like `${foo}_bar`. - -The `${variable_name}` syntax also supports a few of the standard `bash` -modifiers as specified below: - -* `${variable:-word}` indicates that if `variable` is set then the result - will be that value. If `variable` is not set then `word` will be the result. -* `${variable:+word}` indicates that if `variable` is set then `word` will be - the result, otherwise the result is the empty string. - -In all cases, `word` can be any string, including additional environment -variables. - -Escaping is possible by adding a `\` before the variable: `\$foo` or `\${foo}`, -for example, will translate to `$foo` and `${foo}` literals respectively. - -Example (parsed representation is displayed after the `#`): - - FROM busybox - ENV foo /bar - WORKDIR ${foo} # WORKDIR /bar - ADD . $foo # ADD . 
/bar - COPY \$foo /quux # COPY $foo /quux - -Environment variables are supported by the following list of instructions in -the `Dockerfile`: - -* `ADD` -* `COPY` -* `ENV` -* `EXPOSE` -* `FROM` -* `LABEL` -* `STOPSIGNAL` -* `USER` -* `VOLUME` -* `WORKDIR` - -as well as: - -* `ONBUILD` (when combined with one of the supported instructions above) - -> **Note**: -> prior to 1.4, `ONBUILD` instructions did **NOT** support environment -> variable, even when combined with any of the instructions listed above. - -Environment variable substitution will use the same value for each variable -throughout the entire instruction. In other words, in this example: - - ENV abc=hello - ENV abc=bye def=$abc - ENV ghi=$abc - -will result in `def` having a value of `hello`, not `bye`. However, -`ghi` will have a value of `bye` because it is not part of the same instruction -that set `abc` to `bye`. - -## .dockerignore file - -Before the docker CLI sends the context to the docker daemon, it looks -for a file named `.dockerignore` in the root directory of the context. -If this file exists, the CLI modifies the context to exclude files and -directories that match patterns in it. This helps to avoid -unnecessarily sending large or sensitive files and directories to the -daemon and potentially adding them to images using `ADD` or `COPY`. - -The CLI interprets the `.dockerignore` file as a newline-separated -list of patterns similar to the file globs of Unix shells. For the -purposes of matching, the root of the context is considered to be both -the working and the root directory. For example, the patterns -`/foo/bar` and `foo/bar` both exclude a file or directory named `bar` -in the `foo` subdirectory of `PATH` or in the root of the git -repository located at `URL`. Neither excludes anything else. - -If a line in `.dockerignore` file starts with `#` in column 1, then this line is -considered as a comment and is ignored before interpreted by the CLI. 
- -Here is an example `.dockerignore` file: - -``` -# comment -*/temp* -*/*/temp* -temp? -``` - -This file causes the following build behavior: - -| Rule | Behavior | -|----------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `# comment` | Ignored. | -| `*/temp*` | Exclude files and directories whose names start with `temp` in any immediate subdirectory of the root. For example, the plain file `/somedir/temporary.txt` is excluded, as is the directory `/somedir/temp`. | -| `*/*/temp*` | Exclude files and directories starting with `temp` from any subdirectory that is two levels below the root. For example, `/somedir/subdir/temporary.txt` is excluded. | -| `temp?` | Exclude files and directories in the root directory whose names are a one-character extension of `temp`. For example, `/tempa` and `/tempb` are excluded. - - -Matching is done using Go's -[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. A -preprocessing step removes leading and trailing whitespace and -eliminates `.` and `..` elements using Go's -[filepath.Clean](http://golang.org/pkg/path/filepath/#Clean). Lines -that are blank after preprocessing are ignored. - -Beyond Go's filepath.Match rules, Docker also supports a special -wildcard string `**` that matches any number of directories (including -zero). For example, `**/*.go` will exclude all files that end with `.go` -that are found in all directories, including the root of the build context. - -Lines starting with `!` (exclamation mark) can be used to make exceptions -to exclusions. The following is an example `.dockerignore` file that -uses this mechanism: - -``` - *.md - !README.md -``` - -All markdown files *except* `README.md` are excluded from the context. 
- -The placement of `!` exception rules influences the behavior: the last -line of the `.dockerignore` that matches a particular file determines -whether it is included or excluded. Consider the following example: - -``` - *.md - !README*.md - README-secret.md -``` - -No markdown files are included in the context except README files other than -`README-secret.md`. - -Now consider this example: - -``` - *.md - README-secret.md - !README*.md -``` - -All of the README files are included. The middle line has no effect because -`!README*.md` matches `README-secret.md` and comes last. - -You can even use the `.dockerignore` file to exclude the `Dockerfile` -and `.dockerignore` files. These files are still sent to the daemon -because it needs them to do its job. But the `ADD` and `COPY` instructions -do not copy them to the image. - -Finally, you may want to specify which files to include in the -context, rather than which to exclude. To achieve this, specify `*` as -the first pattern, followed by one or more `!` exception patterns. - -**Note**: For historical reasons, the pattern `.` is ignored. - -## FROM - - FROM [AS ] - -Or - - FROM [:] [AS ] - -Or - - FROM [@] [AS ] - -The `FROM` instruction initializes a new build stage and sets the -[*Base Image*](glossary.md#base-image) for subsequent instructions. As such, a -valid `Dockerfile` must start with a `FROM` instruction. The image can be -any valid image – it is especially easy to start by **pulling an image** from -the [*Public Repositories*](https://docs.docker.com/engine/tutorials/dockerrepos/). - -- `ARG` is the only instruction that may proceed `FROM` in the `Dockerfile`. - See [Understand how ARG and FROM interact](#understand-how-arg-and-from-interact). - -- `FROM` can appear multiple times within a single `Dockerfile` to - create multiple images or use one build stage as a dependency for another. - Simply make a note of the last image ID output by the commit before each new - `FROM` instruction. 
Each `FROM` instruction clears any state created by previous - instructions. - -- Optionally a name can be given to a new build stage by adding `AS name` to the - `FROM` instruction. The name can be used in subsequent `FROM` and - `COPY --from=` instructions to refer to the image built in this stage. - -- The `tag` or `digest` values are optional. If you omit either of them, the - builder assumes a `latest` tag by default. The builder returns an error if it - cannot find the `tag` value. - -### Understand how ARG and FROM interact - -`FROM` instructions support variables that are declared by any `ARG` -instructions that occur before the first `FROM`. - -```Dockerfile -ARG CODE_VERSION=latest -FROM base:${CODE_VERSION} -CMD /code/run-app - -FROM extras:${CODE_VERSION} -CMD /code/run-extras -``` - -## RUN - -RUN has 2 forms: - -- `RUN ` (*shell* form, the command is run in a shell, which by -default is `/bin/sh -c` on Linux or `cmd /S /C` on Windows) -- `RUN ["executable", "param1", "param2"]` (*exec* form) - -The `RUN` instruction will execute any commands in a new layer on top of the -current image and commit the results. The resulting committed image will be -used for the next step in the `Dockerfile`. - -Layering `RUN` instructions and generating commits conforms to the core -concepts of Docker where commits are cheap and containers can be created from -any point in an image's history, much like source control. - -The *exec* form makes it possible to avoid shell string munging, and to `RUN` -commands using a base image that does not contain the specified shell executable. - -The default shell for the *shell* form can be changed using the `SHELL` -command. - -In the *shell* form you can use a `\` (backslash) to continue a single -RUN instruction onto the next line. 
For example, consider these two lines: - -``` -RUN /bin/bash -c 'source $HOME/.bashrc; \ -echo $HOME' -``` -Together they are equivalent to this single line: - -``` -RUN /bin/bash -c 'source $HOME/.bashrc; echo $HOME' -``` - -> **Note**: -> To use a different shell, other than '/bin/sh', use the *exec* form -> passing in the desired shell. For example, -> `RUN ["/bin/bash", "-c", "echo hello"]` - -> **Note**: -> The *exec* form is parsed as a JSON array, which means that -> you must use double-quotes (") around words not single-quotes ('). - -> **Note**: -> Unlike the *shell* form, the *exec* form does not invoke a command shell. -> This means that normal shell processing does not happen. For example, -> `RUN [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. -> If you want shell processing then either use the *shell* form or execute -> a shell directly, for example: `RUN [ "sh", "-c", "echo $HOME" ]`. -> When using the exec form and executing a shell directly, as in the case for -> the shell form, it is the shell that is doing the environment variable -> expansion, not docker. -> -> **Note**: -> In the *JSON* form, it is necessary to escape backslashes. This is -> particularly relevant on Windows where the backslash is the path separator. -> The following line would otherwise be treated as *shell* form due to not -> being valid JSON, and fail in an unexpected way: -> `RUN ["c:\windows\system32\tasklist.exe"]` -> The correct syntax for this example is: -> `RUN ["c:\\windows\\system32\\tasklist.exe"]` - -The cache for `RUN` instructions isn't invalidated automatically during -the next build. The cache for an instruction like -`RUN apt-get dist-upgrade -y` will be reused during the next build. The -cache for `RUN` instructions can be invalidated by using the `--no-cache` -flag, for example `docker build --no-cache`. 
- -See the [`Dockerfile` Best Practices -guide](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#/build-cache) for more information. - -The cache for `RUN` instructions can be invalidated by `ADD` instructions. See -[below](#add) for details. - -### Known issues (RUN) - -- [Issue 783](https://github.com/docker/docker/issues/783) is about file - permissions problems that can occur when using the AUFS file system. You - might notice it during an attempt to `rm` a file, for example. - - For systems that have recent aufs version (i.e., `dirperm1` mount option can - be set), docker will attempt to fix the issue automatically by mounting - the layers with `dirperm1` option. More details on `dirperm1` option can be - found at [`aufs` man page](https://github.com/sfjro/aufs3-linux/tree/aufs3.18/Documentation/filesystems/aufs) - - If your system doesn't have support for `dirperm1`, the issue describes a workaround. - -## CMD - -The `CMD` instruction has three forms: - -- `CMD ["executable","param1","param2"]` (*exec* form, this is the preferred form) -- `CMD ["param1","param2"]` (as *default parameters to ENTRYPOINT*) -- `CMD command param1 param2` (*shell* form) - -There can only be one `CMD` instruction in a `Dockerfile`. If you list more than one `CMD` -then only the last `CMD` will take effect. - -**The main purpose of a `CMD` is to provide defaults for an executing -container.** These defaults can include an executable, or they can omit -the executable, in which case you must specify an `ENTRYPOINT` -instruction as well. - -> **Note**: -> If `CMD` is used to provide default arguments for the `ENTRYPOINT` -> instruction, both the `CMD` and `ENTRYPOINT` instructions should be specified -> with the JSON array format. - -> **Note**: -> The *exec* form is parsed as a JSON array, which means that -> you must use double-quotes (") around words not single-quotes ('). 

> **Note**:
> Unlike the *shell* form, the *exec* form does not invoke a command shell.
> This means that normal shell processing does not happen. For example,
> `CMD [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`.
> If you want shell processing then either use the *shell* form or execute
> a shell directly, for example: `CMD [ "sh", "-c", "echo $HOME" ]`.
> When using the exec form and executing a shell directly, as in the case for
> the shell form, it is the shell that is doing the environment variable
> expansion, not docker.

When used in the shell or exec formats, the `CMD` instruction sets the command
to be executed when running the image.

If you use the *shell* form of the `CMD`, then the `<command>` will execute in
`/bin/sh -c`:

    FROM ubuntu
    CMD echo "This is a test." | wc -

If you want to **run your** `<command>` **without a shell** then you must
express the command as a JSON array and give the full path to the executable.
**This array form is the preferred format of `CMD`.** Any additional parameters
must be individually expressed as strings in the array:

    FROM ubuntu
    CMD ["/usr/bin/wc","--help"]

If you would like your container to run the same executable every time, then
you should consider using `ENTRYPOINT` in combination with `CMD`. See
[*ENTRYPOINT*](#entrypoint).

If the user specifies arguments to `docker run` then they will override the
default specified in `CMD`.

> **Note**:
> Don't confuse `RUN` with `CMD`. `RUN` actually runs a command and commits
> the result; `CMD` does not execute anything at build time, but specifies
> the intended command for the image.

## LABEL

    LABEL <key>=<value> <key>=<value> <key>=<value> ...

The `LABEL` instruction adds metadata to an image. A `LABEL` is a
key-value pair. To include spaces within a `LABEL` value, use quotes and
backslashes as you would in command-line parsing.
A few usage examples:

    LABEL "com.example.vendor"="ACME Incorporated"
    LABEL com.example.label-with-value="foo"
    LABEL version="1.0"
    LABEL description="This text illustrates \
    that label-values can span multiple lines."

An image can have more than one label. To specify multiple labels,
Docker recommends combining labels into a single `LABEL` instruction where
possible. Each `LABEL` instruction produces a new layer which can result in an
inefficient image if you use many labels. This example results in a single image
layer.

    LABEL multi.label1="value1" multi.label2="value2" other="value3"

The above can also be written as:

    LABEL multi.label1="value1" \
          multi.label2="value2" \
          other="value3"

Labels are additive including `LABEL`s in `FROM` images. If Docker
encounters a label/key that already exists, the new value overrides any previous
labels with identical keys.

To view an image's labels, use the `docker inspect` command.

    "Labels": {
        "com.example.vendor": "ACME Incorporated",
        "com.example.label-with-value": "foo",
        "version": "1.0",
        "description": "This text illustrates that label-values can span multiple lines.",
        "multi.label1": "value1",
        "multi.label2": "value2",
        "other": "value3"
    },

## MAINTAINER (deprecated)

    MAINTAINER <name>

The `MAINTAINER` instruction sets the *Author* field of the generated images.
The `LABEL` instruction is a much more flexible version of this and you should use
it instead, as it enables setting any metadata you require, and can be viewed
easily, for example with `docker inspect`. To set a label corresponding to the
`MAINTAINER` field you could use:

    LABEL maintainer="SvenDowideit@home.org.au"

This will then be visible from `docker inspect` with the other labels.

## EXPOSE

    EXPOSE <port> [<port>...]

The `EXPOSE` instruction informs Docker that the container listens on the
specified network ports at runtime.
`EXPOSE` does not make the ports of the
container accessible to the host. To do that, you must use either the `-p` flag
to publish a range of ports or the `-P` flag to publish all of the exposed
ports. You can expose one port number and publish it externally under another
number.

To set up port redirection on the host system, see [using the -P
flag](run.md#expose-incoming-ports). The Docker network feature supports
creating networks without the need to expose ports within the network, for
detailed information see the [overview of this
feature](https://docs.docker.com/engine/userguide/networking/).

## ENV

    ENV <key> <value>
    ENV <key>=<value> ...

The `ENV` instruction sets the environment variable `<key>` to the value
`<value>`. This value will be in the environment of all "descendant"
`Dockerfile` commands and can be [replaced inline](#environment-replacement) in
many as well.

The `ENV` instruction has two forms. The first form, `ENV <key> <value>`,
will set a single variable to a value. The entire string after the first
space will be treated as the `<value>` - including characters such as
spaces and quotes.

The second form, `ENV <key>=<value> ...`, allows for multiple variables to
be set at one time. Notice that the second form uses the equals sign (=)
in the syntax, while the first form does not. Like command line parsing,
quotes and backslashes can be used to include spaces within values.

For example:

    ENV myName="John Doe" myDog=Rex\ The\ Dog \
        myCat=fluffy

and

    ENV myName John Doe
    ENV myDog Rex The Dog
    ENV myCat fluffy

will yield the same net results in the final image, but the first form
is preferred because it produces a single cache layer.

The environment variables set using `ENV` will persist when a container is run
from the resulting image. You can view the values using `docker inspect`, and
change them using `docker run --env <key>=<value>`.

> **Note**:
> Environment persistence can cause unexpected side effects.
For example,
> setting `ENV DEBIAN_FRONTEND noninteractive` may confuse apt-get
> users on a Debian-based image. To set a value for a single command, use
> `RUN <key>=<value> <command>`.

## ADD

ADD has two forms:

- `ADD <src>... <dest>`
- `ADD ["<src>",... "<dest>"]` (this form is required for paths containing
whitespace)

The `ADD` instruction copies new files, directories or remote file URLs from `<src>`
and adds them to the filesystem of the image at the path `<dest>`.

Multiple `<src>` resource may be specified but if they are files or
directories then they must be relative to the source directory that is
being built (the context of the build).

Each `<src>` may contain wildcards and matching will be done using Go's
[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. For example:

    ADD hom* /mydir/        # adds all files starting with "hom"
    ADD hom?.txt /mydir/    # ? is replaced with any single character, e.g., "home.txt"

The `<dest>` is an absolute path, or a path relative to `WORKDIR`, into which
the source will be copied inside the destination container.

    ADD test relativeDir/          # adds "test" to `WORKDIR`/relativeDir/
    ADD test /absoluteDir/         # adds "test" to /absoluteDir/

When adding files or directories that contain special characters (such as `[`
and `]`), you need to escape those paths following the Golang rules to prevent
them from being treated as a matching pattern. For example, to add a file
named `arr[0].txt`, use the following;

    ADD arr[[]0].txt /mydir/    # copy a file named "arr[0].txt" to /mydir/


All new files and directories are created with a UID and GID of 0.

In the case where `<src>` is a remote file URL, the destination will
have permissions of 600. If the remote file being retrieved has an HTTP
`Last-Modified` header, the timestamp from that header will be used
to set the `mtime` on the destination file.
However, like any other file
processed during an `ADD`, `mtime` will not be included in the determination
of whether or not the file has changed and the cache should be updated.

> **Note**:
> If you build by passing a `Dockerfile` through STDIN (`docker
> build - < somefile`), there is no build context, so the `Dockerfile`
> can only contain a URL based `ADD` instruction. You can also pass a
> compressed archive through STDIN: (`docker build - < archive.tar.gz`),
> the `Dockerfile` at the root of the archive and the rest of the
> archive will be used as the context of the build.

> **Note**:
> If your URL files are protected using authentication, you
> will need to use `RUN wget`, `RUN curl` or use another tool from
> within the container as the `ADD` instruction does not support
> authentication.

> **Note**:
> The first encountered `ADD` instruction will invalidate the cache for all
> following instructions from the Dockerfile if the contents of `<src>` have
> changed. This includes invalidating the cache for `RUN` instructions.
> See the [`Dockerfile` Best Practices
guide](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#/build-cache) for more information.


`ADD` obeys the following rules:

- The `<src>` path must be inside the *context* of the build;
  you cannot `ADD ../something /something`, because the first step of a
  `docker build` is to send the context directory (and subdirectories) to the
  docker daemon.

- If `<src>` is a URL and `<dest>` does not end with a trailing slash, then a
  file is downloaded from the URL and copied to `<dest>`.

- If `<src>` is a URL and `<dest>` does end with a trailing slash, then the
  filename is inferred from the URL and the file is downloaded to
  `<dest>/<filename>`. For instance, `ADD http://example.com/foobar /` would
  create the file `/foobar`. The URL must have a nontrivial path so that an
  appropriate filename can be discovered in this case (`http://example.com`
  will not work).

- If `<src>` is a directory, the entire contents of the directory are copied,
  including filesystem metadata.

> **Note**:
> The directory itself is not copied, just its contents.

- If `<src>` is a *local* tar archive in a recognized compression format
  (identity, gzip, bzip2 or xz) then it is unpacked as a directory. Resources
  from *remote* URLs are **not** decompressed. When a directory is copied or
  unpacked, it has the same behavior as `tar -x`, the result is the union of:

    1. Whatever existed at the destination path and
    2. The contents of the source tree, with conflicts resolved in favor
       of "2." on a file-by-file basis.

  > **Note**:
  > Whether a file is identified as a recognized compression format or not
  > is done solely based on the contents of the file, not the name of the file.
  > For example, if an empty file happens to end with `.tar.gz` this will not
  > be recognized as a compressed file and **will not** generate any kind of
  > decompression error message, rather the file will simply be copied to the
  > destination.

- If `<src>` is any other kind of file, it is copied individually along with
  its metadata. In this case, if `<dest>` ends with a trailing slash `/`, it
  will be considered a directory and the contents of `<src>` will be written
  at `<dest>/base(<src>)`.

- If multiple `<src>` resources are specified, either directly or due to the
  use of a wildcard, then `<dest>` must be a directory, and it must end with
  a slash `/`.

- If `<dest>` does not end with a trailing slash, it will be considered a
  regular file and the contents of `<src>` will be written at `<dest>`.

- If `<dest>` doesn't exist, it is created along with all missing directories
  in its path.

## COPY

COPY has two forms:

- `COPY <src>... <dest>`
- `COPY ["<src>",... "<dest>"]` (this form is required for paths containing
whitespace)

The `COPY` instruction copies new files or directories from `<src>`
and adds them to the filesystem of the container at the path `<dest>`.

Multiple `<src>` resource may be specified but they must be relative
to the source directory that is being built (the context of the build).

Each `<src>` may contain wildcards and matching will be done using Go's
[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. For example:

    COPY hom* /mydir/        # adds all files starting with "hom"
    COPY hom?.txt /mydir/    # ? is replaced with any single character, e.g., "home.txt"

The `<dest>` is an absolute path, or a path relative to `WORKDIR`, into which
the source will be copied inside the destination container.

    COPY test relativeDir/   # adds "test" to `WORKDIR`/relativeDir/
    COPY test /absoluteDir/  # adds "test" to /absoluteDir/


When copying files or directories that contain special characters (such as `[`
and `]`), you need to escape those paths following the Golang rules to prevent
them from being treated as a matching pattern. For example, to copy a file
named `arr[0].txt`, use the following;

    COPY arr[[]0].txt /mydir/    # copy a file named "arr[0].txt" to /mydir/

All new files and directories are created with a UID and GID of 0.

> **Note**:
> If you build using STDIN (`docker build - < somefile`), there is no
> build context, so `COPY` can't be used.

Optionally `COPY` accepts a flag `--from=<name|index>` that can be used to set
the source location to a previous build stage (created with `FROM .. AS <name>`)
that will be used instead of a build context sent by the user. The flag also
accepts a numeric index assigned for all previous build stages started with
`FROM` instruction. In case a build stage with a specified name can't be found an
image with the same name is attempted to be used instead.

`COPY` obeys the following rules:

- The `<src>` path must be inside the *context* of the build;
  you cannot `COPY ../something /something`, because the first step of a
  `docker build` is to send the context directory (and subdirectories) to the
  docker daemon.

- If `<src>` is a directory, the entire contents of the directory are copied,
  including filesystem metadata.

> **Note**:
> The directory itself is not copied, just its contents.

- If `<src>` is any other kind of file, it is copied individually along with
  its metadata. In this case, if `<dest>` ends with a trailing slash `/`, it
  will be considered a directory and the contents of `<src>` will be written
  at `<dest>/base(<src>)`.

- If multiple `<src>` resources are specified, either directly or due to the
  use of a wildcard, then `<dest>` must be a directory, and it must end with
  a slash `/`.

- If `<dest>` does not end with a trailing slash, it will be considered a
  regular file and the contents of `<src>` will be written at `<dest>`.

- If `<dest>` doesn't exist, it is created along with all missing directories
  in its path.

## ENTRYPOINT

ENTRYPOINT has two forms:

- `ENTRYPOINT ["executable", "param1", "param2"]`
  (*exec* form, preferred)
- `ENTRYPOINT command param1 param2`
  (*shell* form)

An `ENTRYPOINT` allows you to configure a container that will run as an executable.

For example, the following will start nginx with its default content, listening
on port 80:

    docker run -i -t --rm -p 80:80 nginx

Command line arguments to `docker run <image>` will be appended after all
elements in an *exec* form `ENTRYPOINT`, and will override all elements specified
using `CMD`.
This allows arguments to be passed to the entry point, i.e., `docker run <image> -d`
will pass the `-d` argument to the entry point.
You can override the `ENTRYPOINT` instruction using the `docker run --entrypoint`
flag.

The *shell* form prevents any `CMD` or `run` command line arguments from being
used, but has the disadvantage that your `ENTRYPOINT` will be started as a
subcommand of `/bin/sh -c`, which does not pass signals.
This means that the executable will not be the container's `PID 1` - and
will _not_ receive Unix signals - so your executable will not receive a
`SIGTERM` from `docker stop <container>`.
- -Only the last `ENTRYPOINT` instruction in the `Dockerfile` will have an effect. - -### Exec form ENTRYPOINT example - -You can use the *exec* form of `ENTRYPOINT` to set fairly stable default commands -and arguments and then use either form of `CMD` to set additional defaults that -are more likely to be changed. - - FROM ubuntu - ENTRYPOINT ["top", "-b"] - CMD ["-c"] - -When you run the container, you can see that `top` is the only process: - - $ docker run -it --rm --name test top -H - top - 08:25:00 up 7:27, 0 users, load average: 0.00, 0.01, 0.05 - Threads: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie - %Cpu(s): 0.1 us, 0.1 sy, 0.0 ni, 99.7 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st - KiB Mem: 2056668 total, 1616832 used, 439836 free, 99352 buffers - KiB Swap: 1441840 total, 0 used, 1441840 free. 1324440 cached Mem - - PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND - 1 root 20 0 19744 2336 2080 R 0.0 0.1 0:00.04 top - -To examine the result further, you can use `docker exec`: - - $ docker exec -it test ps aux - USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND - root 1 2.6 0.1 19752 2352 ? Ss+ 08:24 0:00 top -b -H - root 7 0.0 0.1 15572 2164 ? R+ 08:25 0:00 ps aux - -And you can gracefully request `top` to shut down using `docker stop test`. 
- -The following `Dockerfile` shows using the `ENTRYPOINT` to run Apache in the -foreground (i.e., as `PID 1`): - -``` -FROM debian:stable -RUN apt-get update && apt-get install -y --force-yes apache2 -EXPOSE 80 443 -VOLUME ["/var/www", "/var/log/apache2", "/etc/apache2"] -ENTRYPOINT ["/usr/sbin/apache2ctl", "-D", "FOREGROUND"] -``` - -If you need to write a starter script for a single executable, you can ensure that -the final executable receives the Unix signals by using `exec` and `gosu` -commands: - -```bash -#!/usr/bin/env bash -set -e - -if [ "$1" = 'postgres' ]; then - chown -R postgres "$PGDATA" - - if [ -z "$(ls -A "$PGDATA")" ]; then - gosu postgres initdb - fi - - exec gosu postgres "$@" -fi - -exec "$@" -``` - -Lastly, if you need to do some extra cleanup (or communicate with other containers) -on shutdown, or are co-ordinating more than one executable, you may need to ensure -that the `ENTRYPOINT` script receives the Unix signals, passes them on, and then -does some more work: - -``` -#!/bin/sh -# Note: I've written this using sh so it works in the busybox container too - -# USE the trap if you need to also do manual cleanup after the service is stopped, -# or need to start multiple services in the one container -trap "echo TRAPed signal" HUP INT QUIT TERM - -# start service in background here -/usr/sbin/apachectl start - -echo "[hit enter key to exit] or run 'docker stop '" -read - -# stop service and clean up here -echo "stopping apache" -/usr/sbin/apachectl stop - -echo "exited $0" -``` - -If you run this image with `docker run -it --rm -p 80:80 --name test apache`, -you can then examine the container's processes with `docker exec`, or `docker top`, -and then ask the script to stop Apache: - -```bash -$ docker exec -it test ps aux -USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND -root 1 0.1 0.0 4448 692 ? Ss+ 00:42 0:00 /bin/sh /run.sh 123 cmd cmd2 -root 19 0.0 0.2 71304 4440 ? 
Ss 00:42 0:00 /usr/sbin/apache2 -k start -www-data 20 0.2 0.2 360468 6004 ? Sl 00:42 0:00 /usr/sbin/apache2 -k start -www-data 21 0.2 0.2 360468 6000 ? Sl 00:42 0:00 /usr/sbin/apache2 -k start -root 81 0.0 0.1 15572 2140 ? R+ 00:44 0:00 ps aux -$ docker top test -PID USER COMMAND -10035 root {run.sh} /bin/sh /run.sh 123 cmd cmd2 -10054 root /usr/sbin/apache2 -k start -10055 33 /usr/sbin/apache2 -k start -10056 33 /usr/sbin/apache2 -k start -$ /usr/bin/time docker stop test -test -real 0m 0.27s -user 0m 0.03s -sys 0m 0.03s -``` - -> **Note:** you can override the `ENTRYPOINT` setting using `--entrypoint`, -> but this can only set the binary to *exec* (no `sh -c` will be used). - -> **Note**: -> The *exec* form is parsed as a JSON array, which means that -> you must use double-quotes (") around words not single-quotes ('). - -> **Note**: -> Unlike the *shell* form, the *exec* form does not invoke a command shell. -> This means that normal shell processing does not happen. For example, -> `ENTRYPOINT [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. -> If you want shell processing then either use the *shell* form or execute -> a shell directly, for example: `ENTRYPOINT [ "sh", "-c", "echo $HOME" ]`. -> When using the exec form and executing a shell directly, as in the case for -> the shell form, it is the shell that is doing the environment variable -> expansion, not docker. - -### Shell form ENTRYPOINT example - -You can specify a plain string for the `ENTRYPOINT` and it will execute in `/bin/sh -c`. -This form will use shell processing to substitute shell environment variables, -and will ignore any `CMD` or `docker run` command line arguments. 
-To ensure that `docker stop` will signal any long running `ENTRYPOINT` executable -correctly, you need to remember to start it with `exec`: - - FROM ubuntu - ENTRYPOINT exec top -b - -When you run this image, you'll see the single `PID 1` process: - - $ docker run -it --rm --name test top - Mem: 1704520K used, 352148K free, 0K shrd, 0K buff, 140368121167873K cached - CPU: 5% usr 0% sys 0% nic 94% idle 0% io 0% irq 0% sirq - Load average: 0.08 0.03 0.05 2/98 6 - PID PPID USER STAT VSZ %VSZ %CPU COMMAND - 1 0 root R 3164 0% 0% top -b - -Which will exit cleanly on `docker stop`: - - $ /usr/bin/time docker stop test - test - real 0m 0.20s - user 0m 0.02s - sys 0m 0.04s - -If you forget to add `exec` to the beginning of your `ENTRYPOINT`: - - FROM ubuntu - ENTRYPOINT top -b - CMD --ignored-param1 - -You can then run it (giving it a name for the next step): - - $ docker run -it --name test top --ignored-param2 - Mem: 1704184K used, 352484K free, 0K shrd, 0K buff, 140621524238337K cached - CPU: 9% usr 2% sys 0% nic 88% idle 0% io 0% irq 0% sirq - Load average: 0.01 0.02 0.05 2/101 7 - PID PPID USER STAT VSZ %VSZ %CPU COMMAND - 1 0 root S 3168 0% 0% /bin/sh -c top -b cmd cmd2 - 7 1 root R 3164 0% 0% top -b - -You can see from the output of `top` that the specified `ENTRYPOINT` is not `PID 1`. - -If you then run `docker stop test`, the container will not exit cleanly - the -`stop` command will be forced to send a `SIGKILL` after the timeout: - - $ docker exec -it test ps aux - PID USER COMMAND - 1 root /bin/sh -c top -b cmd cmd2 - 7 root top -b - 8 root ps aux - $ /usr/bin/time docker stop test - test - real 0m 10.19s - user 0m 0.04s - sys 0m 0.03s - -### Understand how CMD and ENTRYPOINT interact - -Both `CMD` and `ENTRYPOINT` instructions define what command gets executed when running a container. -There are few rules that describe their co-operation. - -1. Dockerfile should specify at least one of `CMD` or `ENTRYPOINT` commands. - -2. 
`ENTRYPOINT` should be defined when using the container as an executable. - -3. `CMD` should be used as a way of defining default arguments for an `ENTRYPOINT` command -or for executing an ad-hoc command in a container. - -4. `CMD` will be overridden when running the container with alternative arguments. - -The table below shows what command is executed for different `ENTRYPOINT` / `CMD` combinations: - -| | No ENTRYPOINT | ENTRYPOINT exec_entry p1_entry | ENTRYPOINT ["exec_entry", "p1_entry"] | -|--------------------------------|----------------------------|--------------------------------|------------------------------------------------| -| **No CMD** | *error, not allowed* | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry | -| **CMD ["exec_cmd", "p1_cmd"]** | exec_cmd p1_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry exec_cmd p1_cmd | -| **CMD ["p1_cmd", "p2_cmd"]** | p1_cmd p2_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry p1_cmd p2_cmd | -| **CMD exec_cmd p1_cmd** | /bin/sh -c exec_cmd p1_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry /bin/sh -c exec_cmd p1_cmd | - -## VOLUME - - VOLUME ["/data"] - -The `VOLUME` instruction creates a mount point with the specified name -and marks it as holding externally mounted volumes from native host or other -containers. The value can be a JSON array, `VOLUME ["/var/log/"]`, or a plain -string with multiple arguments, such as `VOLUME /var/log` or `VOLUME /var/log -/var/db`. For more information/examples and mounting instructions via the -Docker client, refer to -[*Share Directories via Volumes*](https://docs.docker.com/engine/tutorials/dockervolumes/#/mount-a-host-directory-as-a-data-volume) -documentation. - -The `docker run` command initializes the newly created volume with any data -that exists at the specified location within the base image. 
For example, -consider the following Dockerfile snippet: - - FROM ubuntu - RUN mkdir /myvol - RUN echo "hello world" > /myvol/greeting - VOLUME /myvol - -This Dockerfile results in an image that causes `docker run`, to -create a new mount point at `/myvol` and copy the `greeting` file -into the newly created volume. - -> **Note**: -> When using Windows-based containers, the destination of a volume inside the -> container must be one of: a non-existing or empty directory; or a drive other -> than C:. - -> **Note**: -> If any build steps change the data within the volume after it has been -> declared, those changes will be discarded. - -> **Note**: -> The list is parsed as a JSON array, which means that -> you must use double-quotes (") around words not single-quotes ('). - -## USER - - USER daemon - -The `USER` instruction sets the user name or UID to use when running the image -and for any `RUN`, `CMD` and `ENTRYPOINT` instructions that follow it in the -`Dockerfile`. - -## WORKDIR - - WORKDIR /path/to/workdir - -The `WORKDIR` instruction sets the working directory for any `RUN`, `CMD`, -`ENTRYPOINT`, `COPY` and `ADD` instructions that follow it in the `Dockerfile`. -If the `WORKDIR` doesn't exist, it will be created even if it's not used in any -subsequent `Dockerfile` instruction. - -It can be used multiple times in the one `Dockerfile`. If a relative path -is provided, it will be relative to the path of the previous `WORKDIR` -instruction. For example: - - WORKDIR /a - WORKDIR b - WORKDIR c - RUN pwd - -The output of the final `pwd` command in this `Dockerfile` would be -`/a/b/c`. - -The `WORKDIR` instruction can resolve environment variables previously set using -`ENV`. You can only use environment variables explicitly set in the `Dockerfile`. 

For example:

    ENV DIRPATH /path
    WORKDIR $DIRPATH/$DIRNAME
    RUN pwd

The output of the final `pwd` command in this `Dockerfile` would be
`/path/$DIRNAME`

## ARG

    ARG <name>[=<default value>]

The `ARG` instruction defines a variable that users can pass at build-time to
the builder with the `docker build` command using the `--build-arg <varname>=<value>`
flag. If a user specifies a build argument that was not
defined in the Dockerfile, the build outputs a warning.

```
[Warning] One or more build-args [foo] were not consumed.
```

The Dockerfile author can define a single variable by specifying `ARG` once or many
variables by specifying `ARG` more than once. For example, a valid Dockerfile:

```
FROM busybox
ARG user1
ARG buildno
...
```

A Dockerfile author may optionally specify a default value for an `ARG` instruction:

```
FROM busybox
ARG user1=someuser
ARG buildno=1
...
```

If an `ARG` value has a default and if there is no value passed at build-time, the
builder uses the default.

An `ARG` variable definition comes into effect from the line on which it is
defined in the `Dockerfile` not from the argument's use on the command-line or
elsewhere. For example, consider this Dockerfile:

```
1 FROM busybox
2 USER ${user:-some_user}
3 ARG user
4 USER $user
...
```
A user builds this file by calling:

```
$ docker build --build-arg user=what_user .
```

The `USER` at line 2 evaluates to `some_user` as the `user` variable is defined on the
subsequent line 3. The `USER` at line 4 evaluates to `what_user` as `user` is
defined and the `what_user` value was passed on the command line. Prior to its definition by an
`ARG` instruction, any use of a variable results in an empty string.

> **Warning:** It is not recommended to use build-time variables for
> passing secrets like github keys, user credentials etc. Build-time variable
> values are visible to any user of the image with the `docker history` command.
- -You can use an `ARG` or an `ENV` instruction to specify variables that are -available to the `RUN` instruction. Environment variables defined using the -`ENV` instruction always override an `ARG` instruction of the same name. Consider -this Dockerfile with an `ENV` and `ARG` instruction. - -``` -1 FROM ubuntu -2 ARG CONT_IMG_VER -3 ENV CONT_IMG_VER v1.0.0 -4 RUN echo $CONT_IMG_VER -``` -Then, assume this image is built with this command: - -``` -$ docker build --build-arg CONT_IMG_VER=v2.0.1 . -``` - -In this case, the `RUN` instruction uses `v1.0.0` instead of the `ARG` setting -passed by the user:`v2.0.1` This behavior is similar to a shell -script where a locally scoped variable overrides the variables passed as -arguments or inherited from environment, from its point of definition. - -Using the example above but a different `ENV` specification you can create more -useful interactions between `ARG` and `ENV` instructions: - -``` -1 FROM ubuntu -2 ARG CONT_IMG_VER -3 ENV CONT_IMG_VER ${CONT_IMG_VER:-v1.0.0} -4 RUN echo $CONT_IMG_VER -``` - -Unlike an `ARG` instruction, `ENV` values are always persisted in the built -image. Consider a docker build without the `--build-arg` flag: - -``` -$ docker build . -``` - -Using this Dockerfile example, `CONT_IMG_VER` is still persisted in the image but -its value would be `v1.0.0` as it is the default set in line 3 by the `ENV` instruction. - -The variable expansion technique in this example allows you to pass arguments -from the command line and persist them in the final image by leveraging the -`ENV` instruction. Variable expansion is only supported for [a limited set of -Dockerfile instructions.](#environment-replacement) - -Docker has a set of predefined `ARG` variables that you can use without a -corresponding `ARG` instruction in the Dockerfile. 

* `HTTP_PROXY`
* `http_proxy`
* `HTTPS_PROXY`
* `https_proxy`
* `FTP_PROXY`
* `ftp_proxy`
* `NO_PROXY`
* `no_proxy`

To use these, simply pass them on the command line using the flag:

```
--build-arg <varname>=<value>
```

By default, these pre-defined variables are excluded from the output of
`docker history`. Excluding them reduces the risk of accidentally leaking
sensitive authentication information in an `HTTP_PROXY` variable.

For example, consider building the following Dockerfile using
`--build-arg HTTP_PROXY=http://user:pass@proxy.lon.example.com`

``` Dockerfile
FROM ubuntu
RUN echo "Hello World"
```

In this case, the value of the `HTTP_PROXY` variable is not available in the
`docker history` and is not cached. If you were to change location, and your
proxy server changed to `http://user:pass@proxy.sfo.example.com`, a subsequent
build does not result in a cache miss.

If you need to override this behaviour then you may do so by adding an `ARG`
statement in the Dockerfile as follows:

``` Dockerfile
FROM ubuntu
ARG HTTP_PROXY
RUN echo "Hello World"
```

When building this Dockerfile, the `HTTP_PROXY` is preserved in the
`docker history`, and changing its value invalidates the build cache.

### Impact on build caching

`ARG` variables are not persisted into the built image as `ENV` variables are.
However, `ARG` variables do impact the build cache in similar ways. If a
Dockerfile defines an `ARG` variable whose value is different from a previous
build, then a "cache miss" occurs upon its first usage, not its definition. In
particular, all `RUN` instructions following an `ARG` instruction use the `ARG`
variable implicitly (as an environment variable), thus can cause a cache miss.
All predefined `ARG` variables are exempt from caching unless there is a
matching `ARG` statement in the `Dockerfile`.

For example, consider these two Dockerfile:

```
1 FROM ubuntu
2 ARG CONT_IMG_VER
3 RUN echo $CONT_IMG_VER
```

```
1 FROM ubuntu
2 ARG CONT_IMG_VER
3 RUN echo hello
```

If you specify `--build-arg CONT_IMG_VER=<value>` on the command line, in both
cases, the specification on line 2 does not cause a cache miss; line 3 does
cause a cache miss. `ARG CONT_IMG_VER` causes the RUN line to be identified
as the same as running `CONT_IMG_VER=<value>` echo hello, so if the `<value>`
changes, we get a cache miss.

Consider another example under the same command line:

```
1 FROM ubuntu
2 ARG CONT_IMG_VER
3 ENV CONT_IMG_VER $CONT_IMG_VER
4 RUN echo $CONT_IMG_VER
```
In this example, the cache miss occurs on line 3. The miss happens because
the variable's value in the `ENV` references the `ARG` variable and that
variable is changed through the command line. In this example, the `ENV`
command causes the image to include the value.

If an `ENV` instruction overrides an `ARG` instruction of the same name, like
this Dockerfile:

```
1 FROM ubuntu
2 ARG CONT_IMG_VER
3 ENV CONT_IMG_VER hello
4 RUN echo $CONT_IMG_VER
```

Line 3 does not cause a cache miss because the value of `CONT_IMG_VER` is a
constant (`hello`). As a result, the environment variables and values used on
the `RUN` (line 4) doesn't change between builds.

## ONBUILD

    ONBUILD [INSTRUCTION]

The `ONBUILD` instruction adds to the image a *trigger* instruction to
be executed at a later time, when the image is used as the base for
another build. The trigger will be executed in the context of the
downstream build, as if it had been inserted immediately after the
`FROM` instruction in the downstream `Dockerfile`.

Any build instruction can be registered as a trigger.

This is useful if you are building an image which will be used as a base
to build other images, for example an application build environment or a
daemon which may be customized with user-specific configuration.
- -For example, if your image is a reusable Python application builder, it -will require application source code to be added in a particular -directory, and it might require a build script to be called *after* -that. You can't just call `ADD` and `RUN` now, because you don't yet -have access to the application source code, and it will be different for -each application build. You could simply provide application developers -with a boilerplate `Dockerfile` to copy-paste into their application, but -that is inefficient, error-prone and difficult to update because it -mixes with application-specific code. - -The solution is to use `ONBUILD` to register advance instructions to -run later, during the next build stage. - -Here's how it works: - -1. When it encounters an `ONBUILD` instruction, the builder adds a - trigger to the metadata of the image being built. The instruction - does not otherwise affect the current build. -2. At the end of the build, a list of all triggers is stored in the - image manifest, under the key `OnBuild`. They can be inspected with - the `docker inspect` command. -3. Later the image may be used as a base for a new build, using the - `FROM` instruction. As part of processing the `FROM` instruction, - the downstream builder looks for `ONBUILD` triggers, and executes - them in the same order they were registered. If any of the triggers - fail, the `FROM` instruction is aborted which in turn causes the - build to fail. If all triggers succeed, the `FROM` instruction - completes and the build continues as usual. -4. Triggers are cleared from the final image after being executed. In - other words they are not inherited by "grand-children" builds. - -For example you might add something like this: - - [...] - ONBUILD ADD . /app/src - ONBUILD RUN /usr/local/bin/python-build --dir /app/src - [...] - -> **Warning**: Chaining `ONBUILD` instructions using `ONBUILD ONBUILD` isn't allowed. 
- -> **Warning**: The `ONBUILD` instruction may not trigger `FROM` or `MAINTAINER` instructions. - -## STOPSIGNAL - - STOPSIGNAL signal - -The `STOPSIGNAL` instruction sets the system call signal that will be sent to the container to exit. -This signal can be a valid unsigned number that matches a position in the kernel's syscall table, for instance 9, -or a signal name in the format SIGNAME, for instance SIGKILL. - -## HEALTHCHECK - -The `HEALTHCHECK` instruction has two forms: - -* `HEALTHCHECK [OPTIONS] CMD command` (check container health by running a command inside the container) -* `HEALTHCHECK NONE` (disable any healthcheck inherited from the base image) - -The `HEALTHCHECK` instruction tells Docker how to test a container to check that -it is still working. This can detect cases such as a web server that is stuck in -an infinite loop and unable to handle new connections, even though the server -process is still running. - -When a container has a healthcheck specified, it has a _health status_ in -addition to its normal status. This status is initially `starting`. Whenever a -health check passes, it becomes `healthy` (whatever state it was previously in). -After a certain number of consecutive failures, it becomes `unhealthy`. - -The options that can appear before `CMD` are: - -* `--interval=DURATION` (default: `30s`) -* `--timeout=DURATION` (default: `30s`) -* `--start-period=DURATION` (default: `0s`) -* `--retries=N` (default: `3`) - -The health check will first run **interval** seconds after the container is -started, and then again **interval** seconds after each previous check completes. - -If a single run of the check takes longer than **timeout** seconds then the check -is considered to have failed. - -It takes **retries** consecutive failures of the health check for the container -to be considered `unhealthy`. - -**start period** provides initialization time for containers that need time to bootstrap. 
-Probe failure during that period will not be counted towards the maximum number of retries. -However, if a health check succeeds during the start period, the container is considered -started and all consecutive failures will be counted towards the maximum number of retries. - -There can only be one `HEALTHCHECK` instruction in a Dockerfile. If you list -more than one then only the last `HEALTHCHECK` will take effect. - -The command after the `CMD` keyword can be either a shell command (e.g. `HEALTHCHECK -CMD /bin/check-running`) or an _exec_ array (as with other Dockerfile commands; -see e.g. `ENTRYPOINT` for details). - -The command's exit status indicates the health status of the container. -The possible values are: - -- 0: success - the container is healthy and ready for use -- 1: unhealthy - the container is not working correctly -- 2: reserved - do not use this exit code - -For example, to check every five minutes or so that a web-server is able to -serve the site's main page within three seconds: - - HEALTHCHECK --interval=5m --timeout=3s \ - CMD curl -f http://localhost/ || exit 1 - -To help debug failing probes, any output text (UTF-8 encoded) that the command writes -on stdout or stderr will be stored in the health status and can be queried with -`docker inspect`. Such output should be kept short (only the first 4096 bytes -are stored currently). - -When the health status of a container changes, a `health_status` event is -generated with the new status. - -The `HEALTHCHECK` feature was added in Docker 1.12. - - -## SHELL - - SHELL ["executable", "parameters"] - -The `SHELL` instruction allows the default shell used for the *shell* form of -commands to be overridden. The default shell on Linux is `["/bin/sh", "-c"]`, and on -Windows is `["cmd", "/S", "/C"]`. The `SHELL` instruction *must* be written in JSON -form in a Dockerfile. 
- -The `SHELL` instruction is particularly useful on Windows where there are -two commonly used and quite different native shells: `cmd` and `powershell`, as -well as alternate shells available including `sh`. - -The `SHELL` instruction can appear multiple times. Each `SHELL` instruction overrides -all previous `SHELL` instructions, and affects all subsequent instructions. For example: - -    FROM microsoft/windowsservercore - -    # Executed as cmd /S /C echo default -    RUN echo default - -    # Executed as cmd /S /C powershell -command Write-Host default -    RUN powershell -command Write-Host default - -    # Executed as powershell -command Write-Host hello -    SHELL ["powershell", "-command"] -    RUN Write-Host hello - -    # Executed as cmd /S /C echo hello -    SHELL ["cmd", "/S", "/C"] -    RUN echo hello - -The following instructions can be affected by the `SHELL` instruction when the -*shell* form of them is used in a Dockerfile: `RUN`, `CMD` and `ENTRYPOINT`. - -The following example is a common pattern found on Windows which can be -streamlined by using the `SHELL` instruction: - -    ... -    RUN powershell -command Execute-MyCmdlet -param1 "c:\foo.txt" -    ... - -The command invoked by docker will be: - -    cmd /S /C powershell -command Execute-MyCmdlet -param1 "c:\foo.txt" - -This is inefficient for two reasons. First, there is an un-necessary cmd.exe command -processor (aka shell) being invoked. Second, each `RUN` instruction in the *shell* -form requires an extra `powershell -command` prefixing the command. - -To make this more efficient, one of two mechanisms can be employed. One is to -use the JSON form of the RUN command such as: - -    ... -    RUN ["powershell", "-command", "Execute-MyCmdlet", "-param1 \"c:\\foo.txt\""] -    ... - -While the JSON form is unambiguous and does not use the un-necessary cmd.exe, -it does require more verbosity through double-quoting and escaping. 
The alternate -mechanism is to use the `SHELL` instruction and the *shell* form, -making a more natural syntax for Windows users, especially when combined with -the `escape` parser directive: - - # escape=` - - FROM microsoft/nanoserver - SHELL ["powershell","-command"] - RUN New-Item -ItemType Directory C:\Example - ADD Execute-MyCmdlet.ps1 c:\example\ - RUN c:\example\Execute-MyCmdlet -sample 'hello world' - -Resulting in: - - PS E:\docker\build\shell> docker build -t shell . - Sending build context to Docker daemon 4.096 kB - Step 1/5 : FROM microsoft/nanoserver - ---> 22738ff49c6d - Step 2/5 : SHELL powershell -command - ---> Running in 6fcdb6855ae2 - ---> 6331462d4300 - Removing intermediate container 6fcdb6855ae2 - Step 3/5 : RUN New-Item -ItemType Directory C:\Example - ---> Running in d0eef8386e97 - - - Directory: C:\ - - - Mode LastWriteTime Length Name - ---- ------------- ------ ---- - d----- 10/28/2016 11:26 AM Example - - - ---> 3f2fbf1395d9 - Removing intermediate container d0eef8386e97 - Step 4/5 : ADD Execute-MyCmdlet.ps1 c:\example\ - ---> a955b2621c31 - Removing intermediate container b825593d39fc - Step 5/5 : RUN c:\example\Execute-MyCmdlet 'hello world' - ---> Running in be6d8e63fe75 - hello world - ---> 8e559e9bf424 - Removing intermediate container be6d8e63fe75 - Successfully built 8e559e9bf424 - PS E:\docker\build\shell> - -The `SHELL` instruction could also be used to modify the way in which -a shell operates. For example, using `SHELL cmd /S /C /V:ON|OFF` on Windows, delayed -environment variable expansion semantics could be modified. - -The `SHELL` instruction can also be used on Linux should an alternate shell be -required such as `zsh`, `csh`, `tcsh` and others. - -The `SHELL` feature was added in Docker 1.12. - -## Dockerfile examples - -Below you can see some examples of Dockerfile syntax. If you're interested in -something more realistic, take a look at the list of [Dockerization examples](https://docs.docker.com/engine/examples/). 
- -``` -# Nginx -# -# VERSION 0.0.1 - -FROM ubuntu -LABEL Description="This image is used to start the foobar executable" Vendor="ACME Products" Version="1.0" -RUN apt-get update && apt-get install -y inotify-tools nginx apache2 openssh-server -``` - -``` -# Firefox over VNC -# -# VERSION 0.3 - -FROM ubuntu - -# Install vnc, xvfb in order to create a 'fake' display and firefox -RUN apt-get update && apt-get install -y x11vnc xvfb firefox -RUN mkdir ~/.vnc -# Setup a password -RUN x11vnc -storepasswd 1234 ~/.vnc/passwd -# Autostart firefox (might not be the best way, but it does the trick) -RUN bash -c 'echo "firefox" >> /.bashrc' - -EXPOSE 5900 -CMD ["x11vnc", "-forever", "-usepw", "-create"] -``` - -``` -# Multiple images example -# -# VERSION 0.1 - -FROM ubuntu -RUN echo foo > bar -# Will output something like ===> 907ad6c2736f - -FROM ubuntu -RUN echo moo > oink -# Will output something like ===> 695d7793cbe4 - -# You'll now have two images, 907ad6c2736f with /bar, and 695d7793cbe4 with -# /oink. 
-``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/attach.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/attach.md deleted file mode 100644 index 34ae39054..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/attach.md +++ /dev/null @@ -1,155 +0,0 @@ ---- -title: "attach" -description: "The attach command description and usage" -keywords: "attach, running, container" ---- - - - -# attach - -```markdown -Usage: docker attach [OPTIONS] CONTAINER - -Attach local standard input, output, and error streams to a running container - -Options: - --detach-keys string Override the key sequence for detaching a container - --help Print usage - --no-stdin Do not attach STDIN - --sig-proxy Proxy all received signals to the process (default true) -``` - -## Description - -Use `docker attach` to attach your terminal's standard input, output, and error -(or any combination of the three) to a running container using the container's -ID or name. This allows you to view its ongoing output or to control it -interactively, as though the commands were running directly in your terminal. - -You can attach to the same contained process multiple times simultaneously, -even as a different user with the appropriate permissions. - -To stop a container, use `CTRL-c`. This key sequence sends `SIGKILL` to the -container. If `--sig-proxy` is true (the default),`CTRL-c` sends a `SIGINT` to -the container. You can detach from a container and leave it running using the - `CTRL-p CTRL-q` key sequence. - -> **Note:** -> A process running as PID 1 inside a container is treated specially by -> Linux: it ignores any signal with the default action. So, the process -> will not terminate on `SIGINT` or `SIGTERM` unless it is coded to do -> so. - -It is forbidden to redirect the standard input of a `docker attach` command -while attaching to a tty-enabled container (i.e.: launched with `-t`). 
- -While a client is connected to container's stdio using `docker attach`, Docker -uses a ~1MB memory buffer to maximize the throughput of the application. If -this buffer is filled, the speed of the API connection will start to have an -effect on the process output writing speed. This is similar to other -applications like SSH. Because of this, it is not recommended to run -performance critical applications that generate a lot of output in the -foreground over a slow client connection. Instead, users should use the -`docker logs` command to get access to the logs. - -### Override the detach sequence - -If you want, you can configure an override of the Docker key sequence for detach. -This is useful if the Docker default sequence conflicts with key sequence you -use for other applications. There are two ways to define your own detach key -sequence, as a per-container override or as a configuration property on your -entire configuration. - -To override the sequence for an individual container, use the -`--detach-keys="<sequence>"` flag with the `docker attach` command. The format of -the `<sequence>` is either a letter [a-Z], or the `ctrl-` combined with any of -the following: - -* `a-z` (a single lowercase alpha character ) -* `@` (at sign) -* `[` (left bracket) -* `\\` (two backward slashes) -* `_` (underscore) -* `^` (caret) - -These `a`, `ctrl-a`, `X`, or `ctrl-\\` values are all examples of valid key -sequences. To configure a different configuration default key sequence for all -containers, see [**Configuration file** section](cli.md#configuration-files). 
- -## Examples - -### Attach to and detach from a running container - -```bash -$ docker run -d --name topdemo ubuntu /usr/bin/top -b - -$ docker attach topdemo - -top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 -Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie -Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st -Mem: 373572k total, 355560k used, 18012k free, 27872k buffers -Swap: 786428k total, 0k used, 786428k free, 221740k cached - -PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND - 1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top - - top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 - Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie - Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st - Mem: 373572k total, 355244k used, 18328k free, 27872k buffers - Swap: 786428k total, 0k used, 786428k free, 221776k cached - - PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND - 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top - - - top - 02:05:58 up 3:06, 0 users, load average: 0.01, 0.02, 0.05 - Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie - Cpu(s): 0.2%us, 0.3%sy, 0.0%ni, 99.5%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st - Mem: 373572k total, 355780k used, 17792k free, 27880k buffers - Swap: 786428k total, 0k used, 786428k free, 221776k cached - - PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND - 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top -^C$ - -$ echo $? 
-0 -$ docker ps -a | grep topdemo - -7998ac8581f9 ubuntu:14.04 "/usr/bin/top -b" 38 seconds ago Exited (0) 21 seconds ago topdemo -``` - -### Get the exit code of the container's command - -And in this second example, you can see the exit code returned by the `bash` -process is returned by the `docker attach` command to its caller too: - -```bash - $ docker run --name test -d -it debian - - 275c44472aebd77c926d4527885bb09f2f6db21d878c75f0a1c212c03d3bcfab - - $ docker attach test - - root@f38c87f2a42d:/# exit 13 - - exit - - $ echo $? - - 13 - - $ docker ps -a | grep test - - 275c44472aeb debian:7 "/bin/bash" 26 seconds ago Exited (13) 17 seconds ago test -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/build.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/build.md deleted file mode 100644 index 4f5d3c1a3..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/build.md +++ /dev/null @@ -1,550 +0,0 @@ ---- -title: "build" -description: "The build command description and usage" -keywords: "build, docker, image" ---- - - - -# build - -```markdown -Usage: docker build [OPTIONS] PATH | URL | - - -Build an image from a Dockerfile - -Options: - --add-host value Add a custom host-to-IP mapping (host:ip) (default []) - --build-arg value Set build-time variables (default []) - --cache-from value Images to consider as cache sources (default []) - --cgroup-parent string Optional parent cgroup for the container - --compress Compress the build context using gzip - --cpu-period int Limit the CPU CFS (Completely Fair Scheduler) period - --cpu-quota int Limit the CPU CFS (Completely Fair Scheduler) quota - -c, --cpu-shares int CPU shares (relative weight) - --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) - --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) - --disable-content-trust Skip image verification (default true) - -f, --file string Name of the Dockerfile (Default is 
'PATH/Dockerfile') - --force-rm Always remove intermediate containers - --help Print usage - --isolation string Container isolation technology - --label value Set metadata for an image (default []) - -m, --memory string Memory limit - --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap - --network string Set the networking mode for the RUN instructions during build - 'bridge': use default Docker bridge - 'none': no networking - 'container:': reuse another container's network stack - 'host': use the Docker host network stack - '|': connect to a user-defined network - --no-cache Do not use cache when building the image - --pull Always attempt to pull a newer version of the image - -q, --quiet Suppress the build output and print image ID on success - --rm Remove intermediate containers after a successful build (default true) - --security-opt value Security Options (default []) - --shm-size bytes Size of /dev/shm - The format is ``. `number` must be greater than `0`. - Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), - or `g` (gigabytes). If you omit the unit, the system uses bytes. - --squash Squash newly built layers into a single new layer (**Experimental Only**) - -t, --tag value Name and optionally a tag in the 'name:tag' format (default []) - --ulimit value Ulimit options (default []) -``` - -## Description - -Builds Docker images from a Dockerfile and a "context". A build's context is -the files located in the specified `PATH` or `URL`. The build process can refer -to any of the files in the context. For example, your build can use an -[*ADD*](../builder.md#add) instruction to reference a file in the -context. - -The `URL` parameter can refer to three kinds of resources: Git repositories, -pre-packaged tarball contexts and plain text files. - -### Git repositories - -When the `URL` parameter points to the location of a Git repository, the -repository acts as the build context. 
The system recursively clones the -repository and its submodules using a `git clone --depth 1 --recursive` -command. This command runs in a temporary directory on your local host. After -the command succeeds, the directory is sent to the Docker daemon as the -context. Local clones give you the ability to access private repositories using -local user credentials, VPN's, and so forth. - -Git URLs accept context configuration in their fragment section, separated by a -colon `:`. The first part represents the reference that Git will check out, -this can be either a branch, a tag, or a commit SHA. The second part represents -a subdirectory inside the repository that will be used as a build context. - -For example, run this command to use a directory called `docker` in the branch -`container`: - -```bash -$ docker build https://github.com/docker/rootfs.git#container:docker -``` - -The following table represents all the valid suffixes with their build -contexts: - -Build Syntax Suffix | Commit Used | Build Context Used ---------------------------------|-----------------------|------------------- -`myrepo.git` | `refs/heads/master` | `/` -`myrepo.git#mytag` | `refs/tags/mytag` | `/` -`myrepo.git#mybranch` | `refs/heads/mybranch` | `/` -`myrepo.git#abcdef` | `sha1 = abcdef` | `/` -`myrepo.git#:myfolder` | `refs/heads/master` | `/myfolder` -`myrepo.git#master:myfolder` | `refs/heads/master` | `/myfolder` -`myrepo.git#mytag:myfolder` | `refs/tags/mytag` | `/myfolder` -`myrepo.git#mybranch:myfolder` | `refs/heads/mybranch` | `/myfolder` -`myrepo.git#abcdef:myfolder` | `sha1 = abcdef` | `/myfolder` - - -### Tarball contexts - -If you pass an URL to a remote tarball, the URL itself is sent to the daemon: - -```bash -$ docker build http://server/context.tar.gz -``` - -The download operation will be performed on the host the Docker daemon is -running on, which is not necessarily the same host from which the build command -is being issued. 
The Docker daemon will fetch `context.tar.gz` and use it as the -build context. Tarball contexts must be tar archives conforming to the standard -`tar` UNIX format and can be compressed with any one of the 'xz', 'bzip2', -'gzip' or 'identity' (no compression) formats. - -### Text files - -Instead of specifying a context, you can pass a single `Dockerfile` in the -`URL` or pipe the file in via `STDIN`. To pipe a `Dockerfile` from `STDIN`: - -```bash -$ docker build - < Dockerfile -``` - -With Powershell on Windows, you can run: - -```powershell -Get-Content Dockerfile | docker build - -``` - -If you use `STDIN` or specify a `URL` pointing to a plain text file, the system -places the contents into a file called `Dockerfile`, and any `-f`, `--file` -option is ignored. In this scenario, there is no context. - -By default the `docker build` command will look for a `Dockerfile` at the root -of the build context. The `-f`, `--file`, option lets you specify the path to -an alternative file to use instead. This is useful in cases where the same set -of files are used for multiple builds. The path must be to a file within the -build context. If a relative path is specified then it is interpreted as -relative to the root of the context. - -In most cases, it's best to put each Dockerfile in an empty directory. Then, -add to that directory only the files needed for building the Dockerfile. To -increase the build's performance, you can exclude files and directories by -adding a `.dockerignore` file to that directory as well. For information on -creating one, see the [.dockerignore file](../builder.md#dockerignore-file). - -If the Docker client loses connection to the daemon, the build is canceled. -This happens if you interrupt the Docker client with `CTRL-c` or if the Docker -client is killed for any reason. If the build initiated a pull which is still -running at the time the build is cancelled, the pull is cancelled as well. 
- -## Return code - -On a successful build, a return code of success `0` will be returned. When the -build fails, a non-zero failure code will be returned. - -There should be informational output of the reason for failure output to -`STDERR`: - -```bash -$ docker build -t fail . - -Sending build context to Docker daemon 2.048 kB -Sending build context to Docker daemon -Step 1/3 : FROM busybox - ---> 4986bf8c1536 -Step 2/3 : RUN exit 13 - ---> Running in e26670ec7a0a -INFO[0000] The command [/bin/sh -c exit 13] returned a non-zero code: 13 -$ echo $? -1 -``` - -See also: - -[*Dockerfile Reference*](../builder.md). - -## Examples - -### Build with PATH - -```bash -$ docker build . - -Uploading context 10240 bytes -Step 1/3 : FROM busybox -Pulling repository busybox - ---> e9aa60c60128MB/2.284 MB (100%) endpoint: https://cdn-registry-1.docker.io/v1/ -Step 2/3 : RUN ls -lh / - ---> Running in 9c9e81692ae9 -total 24 -drwxr-xr-x 2 root root 4.0K Mar 12 2013 bin -drwxr-xr-x 5 root root 4.0K Oct 19 00:19 dev -drwxr-xr-x 2 root root 4.0K Oct 19 00:19 etc -drwxr-xr-x 2 root root 4.0K Nov 15 23:34 lib -lrwxrwxrwx 1 root root 3 Mar 12 2013 lib64 -> lib -dr-xr-xr-x 116 root root 0 Nov 15 23:34 proc -lrwxrwxrwx 1 root root 3 Mar 12 2013 sbin -> bin -dr-xr-xr-x 13 root root 0 Nov 15 23:34 sys -drwxr-xr-x 2 root root 4.0K Mar 12 2013 tmp -drwxr-xr-x 2 root root 4.0K Nov 15 23:34 usr - ---> b35f4035db3f -Step 3/3 : CMD echo Hello world - ---> Running in 02071fceb21b - ---> f52f38b7823e -Successfully built f52f38b7823e -Removing intermediate container 9c9e81692ae9 -Removing intermediate container 02071fceb21b -``` - -This example specifies that the `PATH` is `.`, and so all the files in the -local directory get `tar`d and sent to the Docker daemon. The `PATH` specifies -where to find the files for the "context" of the build on the Docker daemon. 
-Remember that the daemon could be running on a remote machine and that no -parsing of the Dockerfile happens at the client side (where you're running -`docker build`). That means that *all* the files at `PATH` get sent, not just -the ones listed to [*ADD*](../builder.md#add) in the Dockerfile. - -The transfer of context from the local machine to the Docker daemon is what the -`docker` client means when you see the "Sending build context" message. - -If you wish to keep the intermediate containers after the build is complete, -you must use `--rm=false`. This does not affect the build cache. - -### Build with URL - -```bash -$ docker build github.com/creack/docker-firefox -``` - -This will clone the GitHub repository and use the cloned repository as context. -The Dockerfile at the root of the repository is used as Dockerfile. You can -specify an arbitrary Git repository by using the `git://` or `git@` scheme. - -```bash -$ docker build -f ctx/Dockerfile http://server/ctx.tar.gz - -Downloading context: http://server/ctx.tar.gz [===================>] 240 B/240 B -Step 1/3 : FROM busybox - ---> 8c2e06607696 -Step 2/3 : ADD ctx/container.cfg / - ---> e7829950cee3 -Removing intermediate container b35224abf821 -Step 3/3 : CMD /bin/ls - ---> Running in fbc63d321d73 - ---> 3286931702ad -Removing intermediate container fbc63d321d73 -Successfully built 377c409b35e4 -``` - -This sends the URL `http://server/ctx.tar.gz` to the Docker daemon, which -downloads and extracts the referenced tarball. The `-f ctx/Dockerfile` -parameter specifies a path inside `ctx.tar.gz` to the `Dockerfile` that is used -to build the image. Any `ADD` commands in that `Dockerfile` that refers to local -paths must be relative to the root of the contents inside `ctx.tar.gz`. In the -example above, the tarball contains a directory `ctx/`, so the `ADD -ctx/container.cfg /` operation works as expected. 
- -### Build with - - -```bash -$ docker build - < Dockerfile -``` - -This will read a Dockerfile from `STDIN` without context. Due to the lack of a -context, no contents of any local directory will be sent to the Docker daemon. -Since there is no context, a Dockerfile `ADD` only works if it refers to a -remote URL. - -```bash -$ docker build - < context.tar.gz -``` - -This will build an image for a compressed context read from `STDIN`. Supported -formats are: bzip2, gzip and xz. - -### Use a .dockerignore file - -```bash -$ docker build . - -Uploading context 18.829 MB -Uploading context -Step 1/2 : FROM busybox - ---> 769b9341d937 -Step 2/2 : CMD echo Hello world - ---> Using cache - ---> 99cc1ad10469 -Successfully built 99cc1ad10469 -$ echo ".git" > .dockerignore -$ docker build . -Uploading context 6.76 MB -Uploading context -Step 1/2 : FROM busybox - ---> 769b9341d937 -Step 2/2 : CMD echo Hello world - ---> Using cache - ---> 99cc1ad10469 -Successfully built 99cc1ad10469 -``` - -This example shows the use of the `.dockerignore` file to exclude the `.git` -directory from the context. Its effect can be seen in the changed size of the -uploaded context. The builder reference contains detailed information on -[creating a .dockerignore file](../builder.md#dockerignore-file) - -### Tag an image (-t) - -```bash -$ docker build -t vieux/apache:2.0 . -``` - -This will build like the previous example, but it will then tag the resulting -image. The repository name will be `vieux/apache` and the tag will be `2.0`. -[Read more about valid tags](tag.md). - -You can apply multiple tags to an image. For example, you can apply the `latest` -tag to a newly built image and add another tag that references a specific -version. -For example, to tag an image both as `whenry/fedora-jboss:latest` and -`whenry/fedora-jboss:v2.1`, use the following: - -```bash -$ docker build -t whenry/fedora-jboss:latest -t whenry/fedora-jboss:v2.1 . 
-``` - -### Specify a Dockerfile (-f) - -```bash -$ docker build -f Dockerfile.debug . -``` - -This will use a file called `Dockerfile.debug` for the build instructions -instead of `Dockerfile`. - -```bash -$ docker build -f dockerfiles/Dockerfile.debug -t myapp_debug . -$ docker build -f dockerfiles/Dockerfile.prod -t myapp_prod . -``` - -The above commands will build the current build context (as specified by the -`.`) twice, once using a debug version of a `Dockerfile` and once using a -production version. - -```bash -$ cd /home/me/myapp/some/dir/really/deep -$ docker build -f /home/me/myapp/dockerfiles/debug /home/me/myapp -$ docker build -f ../../../../dockerfiles/debug /home/me/myapp -``` - -These two `docker build` commands do the exact same thing. They both use the -contents of the `debug` file instead of looking for a `Dockerfile` and will use -`/home/me/myapp` as the root of the build context. Note that `debug` is in the -directory structure of the build context, regardless of how you refer to it on -the command line. - -> **Note:** -> `docker build` will return a `no such file or directory` error if the -> file or directory does not exist in the uploaded context. This may -> happen if there is no context, or if you specify a file that is -> elsewhere on the Host system. The context is limited to the current -> directory (and its children) for security reasons, and to ensure -> repeatable builds on remote Docker hosts. This is also the reason why -> `ADD ../file` will not work. - -### Use a custom parent cgroup (--cgroup-parent) - -When `docker build` is run with the `--cgroup-parent` option the containers -used in the build will be run with the [corresponding `docker run` -flag](../run.md#specifying-custom-cgroups). - -### Set ulimits in container (--ulimit) - -Using the `--ulimit` option with `docker build` will cause each build step's -container to be started using those [`--ulimit` -flag values](./run.md#set-ulimits-in-container-ulimit). 
- -### Set build-time variables (--build-arg) - -You can use `ENV` instructions in a Dockerfile to define variable -values. These values persist in the built image. However, often -persistence is not what you want. Users want to specify variables differently -depending on which host they build an image on. - -A good example is `http_proxy` or source versions for pulling intermediate -files. The `ARG` instruction lets Dockerfile authors define values that users -can set at build-time using the `--build-arg` flag: - -```bash -$ docker build --build-arg HTTP_PROXY=http://10.20.30.2:1234 . -``` - -This flag allows you to pass the build-time variables that are -accessed like regular environment variables in the `RUN` instruction of the -Dockerfile. Also, these values don't persist in the intermediate or final images -like `ENV` values do. - -Using this flag will not alter the output you see when the `ARG` lines from the -Dockerfile are echoed during the build process. - -For detailed information on using `ARG` and `ENV` instructions, see the -[Dockerfile reference](../builder.md). - -### Optional security options (--security-opt) - -This flag is only supported on a daemon running on Windows, and only supports -the `credentialspec` option. The `credentialspec` must be in the format -`file://spec.txt` or `registry://keyname`. - -### Specify isolation technology for container (--isolation) - -This option is useful in situations where you are running Docker containers on -Windows. The `--isolation=` option sets a container's isolation -technology. On Linux, the only supported is the `default` option which uses -Linux namespaces. On Microsoft Windows, you can specify these values: - - -| Value | Description | -|-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `default` | Use the value specified by the Docker daemon's `--exec-opt` . 
If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. | -| `process` | Namespace isolation only. | -| `hyperv` | Hyper-V hypervisor partition-based isolation. | - -Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. - -### Add entries to container hosts file (--add-host) - -You can add other hosts into a container's `/etc/hosts` file by using one or -more `--add-host` flags. This example adds a static address for a host named -`docker`: - - $ docker build --add-host=docker:10.180.0.1 . - -### Squash an image's layers (--squash) **Experimental Only** - -#### Overview - -Once the image is built, squash the new layers into a new image with a single -new layer. Squashing does not destroy any existing image, rather it creates a new -image with the content of the squashed layers. This effectively makes it look -like all `Dockerfile` commands were created with a single layer. The build -cache is preserved with this method. - -**Note**: using this option means the new image will not be able to take -advantage of layer sharing with other images and may use significantly more -space. - -**Note**: using this option you may see significantly more space used due to -storing two copies of the image, one for the build cache with all the cache -layers intact, and one for the squashed version. - -#### Prerequisites - -The example on this page is using experimental mode in Docker 1.13. - -Experimental mode can be enabled by using the `--experimental` flag when starting the Docker daemon or setting `experimental: true` in the `daemon.json` configuration file. - -By default, experimental mode is disabled. To see the current configuration, use the `docker version` command. 
- -```none - -Server: - Version: 1.13.1 - API version: 1.26 (minimum version 1.12) - Go version: go1.7.5 - Git commit: 092cba3 - Built: Wed Feb 8 06:35:24 2017 - OS/Arch: linux/amd64 - Experimental: false - - [...] - -``` - -To enable experimental mode, users need to restart the docker daemon with the experimental flag enabled. - -#### Enable Docker experimental - -Experimental features are now included in the standard Docker binaries as of version 1.13.0. For enabling experimental features, you need to start the Docker daemon with `--experimental` flag. You can also enable the daemon flag via /etc/docker/daemon.json. e.g. - -``` - -{ - "experimental": true -} - -``` -Then make sure the experimental flag is enabled: - -```bash - -$ docker version -f '{{.Server.Experimental}}' -true - -``` - -#### Build an image with `--squash` argument - -The following is an example of docker build with `--squash` argument - -```Dockerfile - -FROM busybox -RUN echo hello > /hello -RUN echo world >> /hello -RUN touch remove_me /remove_me -ENV HELLO world -RUN rm /remove_me - -``` -An image named `test` is built with `--squash` argument. - -```bash - -$ docker build --squash -t test . - -[...] - -``` - -If everything is right, the history will look like this: - -```bash -$ docker history test - -IMAGE CREATED CREATED BY SIZE COMMENT -4e10cb5b4cac 3 seconds ago 12 B merge sha256:88a7b0112a41826885df0e7072698006ee8f621c6ab99fca7fe9151d7b599702 to sha256:47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb - 5 minutes ago /bin/sh -c rm /remove_me 0 B - 5 minutes ago /bin/sh -c #(nop) ENV HELLO=world 0 B - 5 minutes ago /bin/sh -c touch remove_me /remove_me 0 B - 5 minutes ago /bin/sh -c echo world >> /hello 0 B - 6 minutes ago /bin/sh -c echo hello > /hello 0 B - 7 weeks ago /bin/sh -c #(nop) CMD ["sh"] 0 B - 7 weeks ago /bin/sh -c #(nop) ADD file:47ca6e777c36a4cfff 1.113 MB - -``` -We could find that all layer's name is ``, and there is a new layer with COMMENT `merge`. 
- -Test the image, check for `/remove_me` being gone, make sure `hello\nworld` is in `/hello`, make sure the `HELLO` envvar's value is `world`. diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/cli.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/cli.md deleted file mode 100644 index a8529e728..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/cli.md +++ /dev/null @@ -1,308 +0,0 @@ ---- -title: "Use the Docker command line" -description: "Docker's CLI command description and usage" -keywords: "Docker, Docker documentation, CLI, command line" ---- - - - -# docker - -To list available commands, either run `docker` with no parameters -or execute `docker help`: - -```bash -$ docker -Usage: docker [OPTIONS] COMMAND [ARG...] - docker [ --help | -v | --version ] - -A self-sufficient runtime for containers. - -Options: - --config string Location of client config files (default "/root/.docker") - -D, --debug Enable debug mode - --help Print usage - -H, --host value Daemon socket(s) to connect to (default []) - -l, --log-level string Set the logging level ("debug"|"info"|"warn"|"error"|"fatal") (default "info") - --tls Use TLS; implied by --tlsverify - --tlscacert string Trust certs signed only by this CA (default "/root/.docker/ca.pem") - --tlscert string Path to TLS certificate file (default "/root/.docker/cert.pem") - --tlskey string Path to TLS key file (default "/root/.docker/key.pem") - --tlsverify Use TLS and verify the remote - -v, --version Print version information and quit - -Commands: - attach Attach to a running container - # […] -``` - -## Description - -Depending on your Docker system configuration, you may be required to preface -each `docker` command with `sudo`. To avoid having to use `sudo` with the -`docker` command, your system administrator can create a Unix group called -`docker` and add users to it. 
- -For more information about installing Docker or `sudo` configuration, refer to -the [installation](https://docs.docker.com/engine/installation/) instructions for your operating system. - -### Environment variables - -For easy reference, the following list of environment variables are supported -by the `docker` command line: - -* `DOCKER_API_VERSION` The API version to use (e.g. `1.19`) -* `DOCKER_CONFIG` The location of your client configuration files. -* `DOCKER_CERT_PATH` The location of your authentication keys. -* `DOCKER_DRIVER` The graph driver to use. -* `DOCKER_HOST` Daemon socket to connect to. -* `DOCKER_NOWARN_KERNEL_VERSION` Prevent warnings that your Linux kernel is - unsuitable for Docker. -* `DOCKER_RAMDISK` If set this will disable 'pivot_root'. -* `DOCKER_TLS_VERIFY` When set Docker uses TLS and verifies the remote. -* `DOCKER_CONTENT_TRUST` When set Docker uses notary to sign and verify images. - Equates to `--disable-content-trust=false` for build, create, pull, push, run. -* `DOCKER_CONTENT_TRUST_SERVER` The URL of the Notary server to use. This defaults - to the same URL as the registry. -* `DOCKER_HIDE_LEGACY_COMMANDS` When set, Docker hides "legacy" top-level commands (such as `docker rm`, and - `docker pull`) in `docker help` output, and only `Management commands` per object-type (e.g., `docker container`) are - printed. This may become the default in a future release, at which point this environment-variable is removed. -* `DOCKER_TMPDIR` Location for temporary Docker files. - -Because Docker is developed using Go, you can also use any environment -variables used by the Go runtime. In particular, you may find these useful: - -* `HTTP_PROXY` -* `HTTPS_PROXY` -* `NO_PROXY` - -These Go environment variables are case-insensitive. See the -[Go specification](http://golang.org/pkg/net/http/) for details on these -variables. 
- -### Configuration files - -By default, the Docker command line stores its configuration files in a -directory called `.docker` within your `$HOME` directory. However, you can -specify a different location via the `DOCKER_CONFIG` environment variable -or the `--config` command line option. If both are specified, then the -`--config` option overrides the `DOCKER_CONFIG` environment variable. -For example: - - docker --config ~/testconfigs/ ps - -Instructs Docker to use the configuration files in your `~/testconfigs/` -directory when running the `ps` command. - -Docker manages most of the files in the configuration directory -and you should not modify them. However, you *can modify* the -`config.json` file to control certain aspects of how the `docker` -command behaves. - -Currently, you can modify the `docker` command behavior using environment -variables or command-line options. You can also use options within -`config.json` to modify some of the same behavior. When using these -mechanisms, you must keep in mind the order of precedence among them. Command -line options override environment variables and environment variables override -properties you specify in a `config.json` file. - -The `config.json` file stores a JSON encoding of several properties: - -The property `HttpHeaders` specifies a set of headers to include in all messages -sent from the Docker client to the daemon. Docker does not try to interpret or -understand these headers; it simply puts them into the messages. Docker does -not allow these headers to change any headers it sets for itself. - -The property `psFormat` specifies the default format for `docker ps` output. -When the `--format` flag is not provided with the `docker ps` command, -Docker's client uses this property. If this property is not set, the client -falls back to the default table format. 
For a list of supported formatting -directives, see the -[**Formatting** section in the `docker ps` documentation](ps.md) - -The property `imagesFormat` specifies the default format for `docker images` output. -When the `--format` flag is not provided with the `docker images` command, -Docker's client uses this property. If this property is not set, the client -falls back to the default table format. For a list of supported formatting -directives, see the [**Formatting** section in the `docker images` documentation](images.md) - -The property `pluginsFormat` specifies the default format for `docker plugin ls` output. -When the `--format` flag is not provided with the `docker plugin ls` command, -Docker's client uses this property. If this property is not set, the client -falls back to the default table format. For a list of supported formatting -directives, see the [**Formatting** section in the `docker plugin ls` documentation](plugin_ls.md) - -The property `servicesFormat` specifies the default format for `docker -service ls` output. When the `--format` flag is not provided with the -`docker service ls` command, Docker's client uses this property. If this -property is not set, the client falls back to the default json format. For a -list of supported formatting directives, see the -[**Formatting** section in the `docker service ls` documentation](service_ls.md) - -The property `serviceInspectFormat` specifies the default format for `docker -service inspect` output. When the `--format` flag is not provided with the -`docker service inspect` command, Docker's client uses this property. If this -property is not set, the client falls back to the default json format. For a -list of supported formatting directives, see the -[**Formatting** section in the `docker service inspect` documentation](service_inspect.md) - -The property `statsFormat` specifies the default format for `docker -stats` output. 
When the `--format` flag is not provided with the -`docker stats` command, Docker's client uses this property. If this -property is not set, the client falls back to the default table -format. For a list of supported formatting directives, see -[**Formatting** section in the `docker stats` documentation](stats.md) - -The property `secretFormat` specifies the default format for `docker -secret ls` output. When the `--format` flag is not provided with the -`docker secret ls` command, Docker's client uses this property. If this -property is not set, the client falls back to the default table -format. For a list of supported formatting directives, see -[**Formatting** section in the `docker secret ls` documentation](secret_ls.md) - -The property `nodesFormat` specifies the default format for `docker node ls` output. -When the `--format` flag is not provided with the `docker node ls` command, -Docker's client uses the value of `nodesFormat`. If the value of `nodesFormat` is not set, -the client uses the default table format. For a list of supported formatting -directives, see the [**Formatting** section in the `docker node ls` documentation](node_ls.md) - -The property `credsStore` specifies an external binary to serve as the default -credential store. When this property is set, `docker login` will attempt to -store credentials in the binary specified by `docker-credential-` which -is visible on `$PATH`. If this property is not set, credentials will be stored -in the `auths` property of the config. For more information, see the -[**Credentials store** section in the `docker login` documentation](login.md#credentials-store) - -The property `credHelpers` specifies a set of credential helpers to use -preferentially over `credsStore` or `auths` when storing and retrieving -credentials for specific registries. If this property is set, the binary -`docker-credential-` will be used when storing or retrieving credentials -for a specific registry. 
For more information, see the -[**Credential helpers** section in the `docker login` documentation](login.md#credential-helpers) - -Once attached to a container, users detach from it and leave it running using -the `CTRL-p CTRL-q` key sequence. This detach key sequence is customizable -using the `detachKeys` property. Specify a `` value for the -property. The format of the `` is a comma-separated list of either -a letter [a-Z], or the `ctrl-` combined with any of the following: - -* `a-z` (a single lowercase alpha character ) -* `@` (at sign) -* `[` (left bracket) -* `\\` (two backward slashes) -* `_` (underscore) -* `^` (caret) - -Your customization applies to all containers started with your Docker client. -Users can override your custom or the default key sequence on a per-container -basis. To do this, the user specifies the `--detach-keys` flag with the `docker -attach`, `docker exec`, `docker run` or `docker start` command. - -Following is a sample `config.json` file: - -```json -{ - "HttpHeaders": { - "MyHeader": "MyValue" - }, - "psFormat": "table {{.ID}}\\t{{.Image}}\\t{{.Command}}\\t{{.Labels}}", - "imagesFormat": "table {{.ID}}\\t{{.Repository}}\\t{{.Tag}}\\t{{.CreatedAt}}", - "pluginsFormat": "table {{.ID}}\t{{.Name}}\t{{.Enabled}}", - "statsFormat": "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}", - "servicesFormat": "table {{.ID}}\t{{.Name}}\t{{.Mode}}", - "secretFormat": "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}\t{{.UpdatedAt}}", - "serviceInspectFormat": "pretty", - "nodesFormat": "table {{.ID}}\t{{.Hostname}}\t{{.Availability}}", - "detachKeys": "ctrl-e,e", - "credsStore": "secretservice", - "credHelpers": { - "awesomereg.example.org": "hip-star", - "unicorn.example.com": "vcbait" - } -} -``` - -### Notary - -If using your own notary server and a self-signed certificate or an internal -Certificate Authority, you need to place the certificate at -`tls//ca.crt` in your docker config directory. 
- -Alternatively you can trust the certificate globally by adding it to your system's -list of root Certificate Authorities. - -## Examples - -### Display help text - -To list the help on any command just execute the command, followed by the -`--help` option. - - $ docker run --help - - Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] - - Run a command in a new container - - Options: - --add-host value Add a custom host-to-IP mapping (host:ip) (default []) - -a, --attach value Attach to STDIN, STDOUT or STDERR (default []) - ... - -### Option types - -Single character command line options can be combined, so rather than -typing `docker run -i -t --name test busybox sh`, -you can write `docker run -it --name test busybox sh`. - -#### Boolean - -Boolean options take the form `-d=false`. The value you see in the help text is -the default value which is set if you do **not** specify that flag. If you -specify a Boolean flag without a value, this will set the flag to `true`, -irrespective of the default value. - -For example, running `docker run -d` will set the value to `true`, so your -container **will** run in "detached" mode, in the background. - -Options which default to `true` (e.g., `docker build --rm=true`) can only be -set to the non-default value by explicitly setting them to `false`: - -```bash -$ docker build --rm=false . -``` - -#### Multi - -You can specify options like `-a=[]` multiple times in a single command line, -for example in these commands: - -```bash -$ docker run -a stdin -a stdout -i -t ubuntu /bin/bash - -$ docker run -a stdin -a stdout -a stderr ubuntu /bin/ls -``` - -Sometimes, multiple options can call for a more complex value string as for -`-v`: - -```bash -$ docker run -v /host:/container example/mysql -``` - -> **Note**: Do not use the `-t` and `-a stderr` options together due to -> limitations in the `pty` implementation. All `stderr` in `pty` mode -> simply goes to `stdout`. 
- -#### Strings and Integers - -Options like `--name=""` expect a string, and they -can only be specified once. Options like `-c=0` -expect an integer, and they can only be specified once. diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/commit.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/commit.md deleted file mode 100644 index f713eeab9..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/commit.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -title: "commit" -description: "The commit command description and usage" -keywords: "commit, file, changes" ---- - - - -# commit - -```markdown -Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]] - -Create a new image from a container's changes - -Options: - -a, --author string Author (e.g., "John Hannibal Smith ") - -c, --change value Apply Dockerfile instruction to the created image (default []) - --help Print usage - -m, --message string Commit message - -p, --pause Pause container during commit (default true) -``` - -## Description - -It can be useful to commit a container's file changes or settings into a new -image. This allows you to debug a container by running an interactive shell, or to -export a working dataset to another server. Generally, it is better to use -Dockerfiles to manage your images in a documented and maintainable way. -[Read more about valid image names and tags](tag.md). - -The commit operation will not include any data contained in -volumes mounted inside the container. - -By default, the container being committed and its processes will be paused -while the image is committed. This reduces the likelihood of encountering data -corruption during the process of creating the commit. If this behavior is -undesired, set the `--pause` option to false. - -The `--change` option will apply `Dockerfile` instructions to the image that is -created. 
Supported `Dockerfile` instructions: -`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`LABEL`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` - -## Examples - -### Commit a container - -```bash -$ docker ps - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky -197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours focused_hamilton - -$ docker commit c3f279d17e0a svendowideit/testimage:version3 - -f5283438590d - -$ docker images - -REPOSITORY TAG ID CREATED SIZE -svendowideit/testimage version3 f5283438590d 16 seconds ago 335.7 MB -``` - -### Commit a container with new configurations - -```bash -$ docker ps - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky -197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours focused_hamilton - -$ docker inspect -f "{{ .Config.Env }}" c3f279d17e0a - -[HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin] - -$ docker commit --change "ENV DEBUG true" c3f279d17e0a svendowideit/testimage:version3 - -f5283438590d - -$ docker inspect -f "{{ .Config.Env }}" f5283438590d - -[HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin DEBUG=true] -``` - -### Commit a container with new `CMD` and `EXPOSE` instructions - -```bash -$ docker ps - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky -197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours focused_hamilton - -$ docker commit --change='CMD ["apachectl", "-DFOREGROUND"]' -c "EXPOSE 80" c3f279d17e0a svendowideit/testimage:version4 - -f5283438590d - -$ docker run -d svendowideit/testimage:version4 - -89373736e2e7f00bc149bd783073ac43d0507da250e999f3f1036e0db60817c0 - -$ docker ps - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -89373736e2e7 testimage:version4 "apachectl -DFOREGROU" 3 seconds ago Up 2 seconds 
80/tcp distracted_fermat -c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky -197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours focused_hamilton -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/container.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/container.md deleted file mode 100644 index 5eefbf2c3..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/container.md +++ /dev/null @@ -1,61 +0,0 @@ - ---- -title: "container" -description: "The container command description and usage" -keywords: "container" ---- - - - -# container - -```markdown -Usage: docker container COMMAND - -Manage containers - -Options: - --help Print usage - -Commands: - attach Attach to a running container - commit Create a new image from a container's changes - cp Copy files/folders between a container and the local filesystem - create Create a new container - diff Inspect changes to files or directories on a container's filesystem - exec Run a command in a running container - export Export a container's filesystem as a tar archive - inspect Display detailed information on one or more containers - kill Kill one or more running containers - logs Fetch the logs of a container - ls List containers - pause Pause all processes within one or more containers - port List port mappings or a specific mapping for the container - prune Remove all stopped containers - rename Rename a container - restart Restart one or more containers - rm Remove one or more containers - run Run a command in a new container - start Start one or more stopped containers - stats Display a live stream of container(s) resource usage statistics - stop Stop one or more running containers - top Display the running processes of a container - unpause Unpause all processes within one or more containers - update Update configuration of one or more containers - wait Block until one or more containers stop, then print 
their exit codes - -Run 'docker container COMMAND --help' for more information on a command. - -``` - -## Description - -Manage containers. - diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/container_prune.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/container_prune.md deleted file mode 100644 index 724057cf2..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/container_prune.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -title: "container prune" -description: "Remove all stopped containers" -keywords: container, prune, delete, remove ---- - - - -# container prune - -```markdown -Usage: docker container prune [OPTIONS] - -Remove all stopped containers - -Options: -Options: - --filter filter Provide filter values (e.g. 'until=') - -f, --force Do not prompt for confirmation - --help Print usage -``` - -## Description - -Removes all stopped containers. - -## Examples - -### Prune containers - -```bash -$ docker container prune -WARNING! This will remove all stopped containers. -Are you sure you want to continue? [y/N] y -Deleted Containers: -4a7f7eebae0f63178aff7eb0aa39cd3f0627a203ab2df258c1a00b456cf20063 -f98f9c2aa1eaf727e4ec9c0283bc7d4aa4762fbdba7f26191f26c97f64090360 - -Total reclaimed space: 212 B -``` - -### Filtering - -The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more -than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) - -The currently supported filters are: - -* until (``) - only remove containers created before given timestamp - -The `until` filter can be Unix timestamps, date formatted -timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed -relative to the daemon machine’s time. Supported formats for date -formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, -`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. 
The local -timezone on the daemon will be used if you do not provide either a `Z` or a -`+-00:00` timezone offset at the end of the timestamp. When providing Unix -timestamps enter seconds[.nanoseconds], where seconds is the number of seconds -that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap -seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a -fraction of a second no more than nine digits long. - -The following removes containers created more than 5 minutes ago: - -```bash -$ docker ps -a --format 'table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}' - -CONTAINER ID IMAGE COMMAND CREATED AT STATUS -61b9efa71024 busybox "sh" 2017-01-04 13:23:33 -0800 PST Exited (0) 41 seconds ago -53a9bc23a516 busybox "sh" 2017-01-04 13:11:59 -0800 PST Exited (0) 12 minutes ago - -$ docker container prune --force --filter "until=5m" - -Deleted Containers: -53a9bc23a5168b6caa2bfbefddf1b30f93c7ad57f3dec271fd32707497cb9369 - -Total reclaimed space: 25 B - -$ docker ps -a --format 'table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}' - -CONTAINER ID IMAGE COMMAND CREATED AT STATUS -61b9efa71024 busybox "sh" 2017-01-04 13:23:33 -0800 PST Exited (0) 44 seconds ago -``` - -The following removes containers created before `2017-01-04T13:10:00`: - -```bash -$ docker ps -a --format 'table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}' - -CONTAINER ID IMAGE COMMAND CREATED AT STATUS -53a9bc23a516 busybox "sh" 2017-01-04 13:11:59 -0800 PST Exited (0) 7 minutes ago -4a75091a6d61 busybox "sh" 2017-01-04 13:09:53 -0800 PST Exited (0) 9 minutes ago - -$ docker container prune --force --filter "until=2017-01-04T13:10:00" - -Deleted Containers: -4a75091a6d618526fcd8b33ccd6e5928ca2a64415466f768a6180004b0c72c6c - -Total reclaimed space: 27 B - -$ docker ps -a --format 'table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}' - -CONTAINER ID IMAGE COMMAND CREATED AT STATUS 
-53a9bc23a516 busybox "sh" 2017-01-04 13:11:59 -0800 PST Exited (0) 9 minutes ago -``` - -## Related commands - -* [system df](system_df.md) -* [volume prune](volume_prune.md) -* [image prune](image_prune.md) -* [network prune](network_prune.md) -* [system prune](system_prune.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/cp.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/cp.md deleted file mode 100644 index 5cbbee25a..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/cp.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -title: "cp" -description: "The cp command description and usage" -keywords: "copy, container, files, folders" ---- - - - -# cp - -```markdown -Usage: docker cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|- - docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH - -Copy files/folders between a container and the local filesystem - -Use '-' as the source to read a tar archive from stdin -and extract it to a directory destination in a container. -Use '-' as the destination to stream a tar archive of a -container source to stdout. - -Options: - -L, --follow-link Always follow symbol link in SRC_PATH - --help Print usage -``` - -## Description - -The `docker cp` utility copies the contents of `SRC_PATH` to the `DEST_PATH`. -You can copy from the container's file system to the local machine or the -reverse, from the local filesystem to the container. If `-` is specified for -either the `SRC_PATH` or `DEST_PATH`, you can also stream a tar archive from -`STDIN` or to `STDOUT`. The `CONTAINER` can be a running or stopped container. -The `SRC_PATH` or `DEST_PATH` can be a file or directory. - -The `docker cp` command assumes container paths are relative to the container's -`/` (root) directory. This means supplying the initial forward slash is optional; -The command sees `compassionate_darwin:/tmp/foo/myfile.txt` and -`compassionate_darwin:tmp/foo/myfile.txt` as identical. 
Local machine paths can -be an absolute or relative value. The command interprets a local machine's -relative paths as relative to the current working directory where `docker cp` is -run. - -The `cp` command behaves like the Unix `cp -a` command in that directories are -copied recursively with permissions preserved if possible. Ownership is set to -the user and primary group at the destination. For example, files copied to a -container are created with `UID:GID` of the root user. Files copied to the local -machine are created with the `UID:GID` of the user which invoked the `docker cp` -command. If you specify the `-L` option, `docker cp` follows any symbolic link -in the `SRC_PATH`. `docker cp` does *not* create parent directories for -`DEST_PATH` if they do not exist. - -Assuming a path separator of `/`, a first argument of `SRC_PATH` and second -argument of `DEST_PATH`, the behavior is as follows: - -- `SRC_PATH` specifies a file - - `DEST_PATH` does not exist - - the file is saved to a file created at `DEST_PATH` - - `DEST_PATH` does not exist and ends with `/` - - Error condition: the destination directory must exist. 
- - `DEST_PATH` exists and is a file - - the destination is overwritten with the source file's contents - - `DEST_PATH` exists and is a directory - - the file is copied into this directory using the basename from - `SRC_PATH` -- `SRC_PATH` specifies a directory - - `DEST_PATH` does not exist - - `DEST_PATH` is created as a directory and the *contents* of the source - directory are copied into this directory - - `DEST_PATH` exists and is a file - - Error condition: cannot copy a directory to a file - - `DEST_PATH` exists and is a directory - - `SRC_PATH` does not end with `/.` (that is: _slash_ followed by _dot_) - - the source directory is copied into this directory - - `SRC_PATH` does end with `/.` (that is: _slash_ followed by _dot_) - - the *content* of the source directory is copied into this - directory - -The command requires `SRC_PATH` and `DEST_PATH` to exist according to the above -rules. If `SRC_PATH` is local and is a symbolic link, the symbolic link, not -the target, is copied by default. To copy the link target and not the link, specify -the `-L` option. - -A colon (`:`) is used as a delimiter between `CONTAINER` and its path. You can -also use `:` when specifying paths to a `SRC_PATH` or `DEST_PATH` on a local -machine, for example `file:name.txt`. If you use a `:` in a local machine path, -you must be explicit with a relative or absolute path, for example: - - `/path/to/file:name.txt` or `./file:name.txt` - -It is not possible to copy certain system files such as resources under -`/proc`, `/sys`, `/dev`, [tmpfs](run.md#mount-tmpfs-tmpfs), and mounts created by -the user in the container. However, you can still copy such files by manually -running `tar` in `docker exec`. 
Both of the following examples do the same thing -in different ways (consider `SRC_PATH` and `DEST_PATH` are directories): - -```bash -$ docker exec foo tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | tar Cxf DEST_PATH - -``` - -```bash -$ tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | docker exec -i foo tar Cxf DEST_PATH - -``` - -Using `-` as the `SRC_PATH` streams the contents of `STDIN` as a tar archive. -The command extracts the content of the tar to the `DEST_PATH` in container's -filesystem. In this case, `DEST_PATH` must specify a directory. Using `-` as -the `DEST_PATH` streams the contents of the resource as a tar archive to `STDOUT`. diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/create.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/create.md deleted file mode 100644 index 8a57f2ffe..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/create.md +++ /dev/null @@ -1,260 +0,0 @@ ---- -title: "create" -description: "The create command description and usage" -keywords: "docker, create, container" ---- - - - -# create - -Creates a new container. - -```markdown -Usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...] - -Create a new container - -Options: - --add-host value Add a custom host-to-IP mapping (host:ip) (default []) - -a, --attach value Attach to STDIN, STDOUT or STDERR (default []) - --blkio-weight value Block IO (relative weight), between 10 and 1000 - --blkio-weight-device value Block IO weight (relative device weight) (default []) - --cap-add value Add Linux capabilities (default []) - --cap-drop value Drop Linux capabilities (default []) - --cgroup-parent string Optional parent cgroup for the container - --cidfile string Write the container ID to the file - --cpu-count int The number of CPUs available for execution by the container. - Windows daemon only. On Windows Server containers, this is - approximated as a percentage of total CPU usage. 
- --cpu-percent int CPU percent (Windows only) - --cpu-period int Limit CPU CFS (Completely Fair Scheduler) period - --cpu-quota int Limit CPU CFS (Completely Fair Scheduler) quota - -c, --cpu-shares int CPU shares (relative weight) - --cpus NanoCPUs Number of CPUs (default 0.000) - --cpu-rt-period int Limit the CPU real-time period in microseconds - --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds - --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) - --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) - --device value Add a host device to the container (default []) - --device-cgroup-rule value Add a rule to the cgroup allowed devices list - --device-read-bps value Limit read rate (bytes per second) from a device (default []) - --device-read-iops value Limit read rate (IO per second) from a device (default []) - --device-write-bps value Limit write rate (bytes per second) to a device (default []) - --device-write-iops value Limit write rate (IO per second) to a device (default []) - --disable-content-trust Skip image verification (default true) - --dns value Set custom DNS servers (default []) - --dns-option value Set DNS options (default []) - --dns-search value Set custom DNS search domains (default []) - --entrypoint string Overwrite the default ENTRYPOINT of the image - -e, --env value Set environment variables (default []) - --env-file value Read in a file of environment variables (default []) - --expose value Expose a port or a range of ports (default []) - --group-add value Add additional groups to join (default []) - --health-cmd string Command to run to check health - --health-interval duration Time between running the check (ns|us|ms|s|m|h) (default 0s) - --health-retries int Consecutive failures needed to report unhealthy - --health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) (default 0s) - --health-start-period duration Start period for the container to initialize before 
counting retries towards unstable (ns|us|ms|s|m|h) (default 0s) - --help Print usage - -h, --hostname string Container host name - --init Run an init inside the container that forwards signals and reaps processes - -i, --interactive Keep STDIN open even if not attached - --io-maxbandwidth string Maximum IO bandwidth limit for the system drive (Windows only) - --io-maxiops uint Maximum IOps limit for the system drive (Windows only) - --ip string IPv4 address (e.g., 172.30.100.104) - --ip6 string IPv6 address (e.g., 2001:db8::33) - --ipc string IPC namespace to use - --isolation string Container isolation technology - --kernel-memory string Kernel memory limit - -l, --label value Set meta data on a container (default []) - --label-file value Read in a line delimited file of labels (default []) - --link value Add link to another container (default []) - --link-local-ip value Container IPv4/IPv6 link-local addresses (default []) - --log-driver string Logging driver for the container - --log-opt value Log driver options (default []) - --mac-address string Container MAC address (e.g., 92:d0:c6:0a:29:33) - -m, --memory string Memory limit - --memory-reservation string Memory soft limit - --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap - --memory-swappiness int Tune container memory swappiness (0 to 100) (default -1) - --mount value Attach a filesytem mount to the container (default []) - --name string Assign a name to the container - --network-alias value Add network-scoped alias for the container (default []) - --network string Connect a container to a network (default "default") - 'bridge': create a network stack on the default Docker bridge - 'none': no networking - 'container:': reuse another container's network stack - 'host': use the Docker host network stack - '|': connect to a user-defined network - --no-healthcheck Disable any container-specified HEALTHCHECK - --oom-kill-disable Disable OOM Killer - --oom-score-adj int 
Tune host's OOM preferences (-1000 to 1000) - --pid string PID namespace to use - --pids-limit int Tune container pids limit (set -1 for unlimited), kernel >= 4.3 - --privileged Give extended privileges to this container - -p, --publish value Publish a container's port(s) to the host (default []) - -P, --publish-all Publish all exposed ports to random ports - --read-only Mount the container's root filesystem as read only - --restart string Restart policy to apply when a container exits (default "no") - Possible values are: no, on-failure[:max-retry], always, unless-stopped - --rm Automatically remove the container when it exits - --runtime string Runtime to use for this container - --security-opt value Security Options (default []) - --shm-size bytes Size of /dev/shm - The format is ``. `number` must be greater than `0`. - Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), - or `g` (gigabytes). If you omit the unit, the system uses bytes. - --stop-signal string Signal to stop a container (default "SIGTERM") - --stop-timeout=10 Timeout (in seconds) to stop a container - --storage-opt value Storage driver options for the container (default []) - --sysctl value Sysctl options (default map[]) - --tmpfs value Mount a tmpfs directory (default []) - -t, --tty Allocate a pseudo-TTY - --ulimit value Ulimit options (default []) - -u, --user string Username or UID (format: [:]) - --userns string User namespace to use - 'host': Use the Docker host user namespace - '': Use the Docker daemon user namespace specified by `--userns-remap` option. - --uts string UTS namespace to use - -v, --volume value Bind mount a volume (default []). The format - is `[host-src:]container-dest[:]`. - The comma-delimited `options` are [rw|ro], - [z|Z], [[r]shared|[r]slave|[r]private], - [delegated|cached|consistent], and - [nocopy]. The 'host-src' is an absolute path - or a name value. 
- --volume-driver string Optional volume driver for the container - --volumes-from value Mount volumes from the specified container(s) (default []) - -w, --workdir string Working directory inside the container -``` -## Description - -The `docker create` command creates a writeable container layer over the -specified image and prepares it for running the specified command. The -container ID is then printed to `STDOUT`. This is similar to `docker run -d` -except the container is never started. You can then use the -`docker start ` command to start the container at any point. - -This is useful when you want to set up a container configuration ahead of time -so that it is ready to start when you need it. The initial status of the -new container is `created`. - -Please see the [run command](run.md) section and the [Docker run reference](../run.md) for more details. - -## Examples - -### Create and start a container - -```bash -$ docker create -t -i fedora bash - -6d8af538ec541dd581ebc2a24153a28329acb5268abe5ef868c1f1a261221752 - -$ docker start -a -i 6d8af538ec5 - -bash-4.2# -``` - -### Initialize volumes - -As of v1.4.0 container volumes are initialized during the `docker create` phase -(i.e., `docker run` too). For example, this allows you to `create` the `data` -volume container, and then use it from another container: - -```bash -$ docker create -v /data --name data ubuntu - -240633dfbb98128fa77473d3d9018f6123b99c454b3251427ae190a7d951ad57 - -$ docker run --rm --volumes-from data ubuntu ls -la /data - -total 8 -drwxr-xr-x 2 root root 4096 Dec 5 04:10 . -drwxr-xr-x 48 root root 4096 Dec 5 04:11 .. 
-``` - -Similarly, `create` a host directory bind mounted volume container, which can -then be used from the subsequent container: - -```bash -$ docker create -v /home/docker:/docker --name docker ubuntu - -9aa88c08f319cd1e4515c3c46b0de7cc9aa75e878357b1e96f91e2c773029f03 - -$ docker run --rm --volumes-from docker ubuntu ls -la /docker - -total 20 -drwxr-sr-x 5 1000 staff 180 Dec 5 04:00 . -drwxr-xr-x 48 root root 4096 Dec 5 04:13 .. --rw-rw-r-- 1 1000 staff 3833 Dec 5 04:01 .ash_history --rw-r--r-- 1 1000 staff 446 Nov 28 11:51 .ashrc --rw-r--r-- 1 1000 staff 25 Dec 5 04:00 .gitconfig -drwxr-sr-x 3 1000 staff 60 Dec 1 03:28 .local --rw-r--r-- 1 1000 staff 920 Nov 28 11:51 .profile -drwx--S--- 2 1000 staff 460 Dec 5 00:51 .ssh -drwxr-xr-x 32 1000 staff 1140 Dec 5 04:01 docker -``` - - -Set storage driver options per container. - -```bash -$ docker create -it --storage-opt size=120G fedora /bin/bash -``` - -This (size) will allow to set the container rootfs size to 120G at creation time. -This option is only available for the `devicemapper`, `btrfs`, `overlay2`, -`windowsfilter` and `zfs` graph drivers. -For the `devicemapper`, `btrfs`, `windowsfilter` and `zfs` graph drivers, -user cannot pass a size less than the Default BaseFS Size. -For the `overlay2` storage driver, the size option is only available if the -backing fs is `xfs` and mounted with the `pquota` mount option. -Under these conditions, user can pass any size less then the backing fs size. - -### Specify isolation technology for container (--isolation) - -This option is useful in situations where you are running Docker containers on -Windows. The `--isolation=` option sets a container's isolation -technology. On Linux, the only supported is the `default` option which uses -Linux namespaces. 
On Microsoft Windows, you can specify these values: - - -| Value | Description | -|-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `default` | Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value if the -daemon is running on Windows server, or `hyperv` if running on Windows client. | -| `process` | Namespace isolation only. | -| `hyperv` | Hyper-V hypervisor partition-based isolation. | - -Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. - -### Dealing with dynamically created devices (--device-cgroup-rule) - -Devices available to a container are assigned at creation time. The -assigned devices will both be added to the cgroup.allow file and -created into the container once it is run. This poses a problem when -a new device needs to be added to running container. - -One of the solution is to add a more permissive rule to a container -allowing it access to a wider range of devices. For example, supposing -our container needs access to a character device with major `42` and -any number of minor number (added as new devices appear), the -following rule would be added: - -``` -docker create --device-cgroup-rule='c 42:* rmw' -name my-container my-image -``` - -Then, a user could ask `udev` to execute a script that would `docker exec my-container mknod newDevX c 42 ` -the required device when it is added. 
- -NOTE: initially present devices still need to be explicitely added to -the create/run command diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/deploy.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/deploy.md deleted file mode 100644 index a86b2b4b4..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/deploy.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: "deploy" -description: "The deploy command description and usage" -keywords: "stack, deploy" -advisory: "experimental" ---- - - - -# deploy (experimental) - -An alias for `stack deploy`. - -```markdown -Usage: docker deploy [OPTIONS] STACK - -Deploy a new stack or update an existing stack - -Aliases: - deploy, up - -Options: - --bundle-file string Path to a Distributed Application Bundle file - --compose-file string Path to a Compose file - --help Print usage - --prune Prune services that are no longer referenced - --with-registry-auth Send registry authentication details to Swarm agents -``` - -## Description - -Create and update a stack from a `compose` or a `dab` file on the swarm. This command -has to be run targeting a manager node. - -## Examples - -### Compose file - -The `deploy` command supports compose file version `3.0` and above. 
- -```bash -$ docker stack deploy --compose-file docker-compose.yml vossibility - -Ignoring unsupported options: links - -Creating network vossibility_vossibility -Creating network vossibility_default -Creating service vossibility_nsqd -Creating service vossibility_logstash -Creating service vossibility_elasticsearch -Creating service vossibility_kibana -Creating service vossibility_ghollector -Creating service vossibility_lookupd -``` - -You can verify that the services were correctly created - -```bash -$ docker service ls - -ID NAME MODE REPLICAS IMAGE -29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 -4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 -4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa -7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 -9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe -axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba -``` - -### DAB file - -```bash -$ docker stack deploy --bundle-file vossibility-stack.dab vossibility - -Loading bundle from vossibility-stack.dab -Creating service vossibility_elasticsearch -Creating service vossibility_kibana -Creating service vossibility_logstash -Creating service vossibility_lookupd -Creating service vossibility_nsqd -Creating service vossibility_vossibility-collector -``` - -You can verify that the services were correctly created: - -```bash -$ docker service ls - -ID NAME MODE REPLICAS IMAGE -29bv0vnlm903 vossibility_lookupd replicated 1/1 
nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 -4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 -4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa -7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 -9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe -axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba -``` - -## Related commands - -* [stack config](stack_config.md) -* [stack deploy](stack_deploy.md) -* [stack ls](stack_ls.md) -* [stack ps](stack_ps.md) -* [stack rm](stack_rm.md) -* [stack services](stack_services.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/diff.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/diff.md deleted file mode 100644 index e6e12cef8..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/diff.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: "diff" -description: "The diff command description and usage" -keywords: "list, changed, files, container" ---- - - - -# diff - -```markdown -Usage: docker diff CONTAINER - -Inspect changes to files or directories on a container's filesystem - -Options: - --help Print usage -``` - -## Description - -List the changed files and directories in a container᾿s filesystem since the -container was created. 
Three different types of change are tracked: - -| Symbol | Description | -|--------|---------------------------------| -| `A` | A file or directory was added | -| `D` | A file or directory was deleted | -| `C` | A file or directory was changed | - -You can use the full or shortened container ID or the container name set using -`docker run --name` option. - -## Examples - -Inspect the changes to an `nginx` container: - -```bash -$ docker diff 1fdfd1f54c1b - -C /dev -C /dev/console -C /dev/core -C /dev/stdout -C /dev/fd -C /dev/ptmx -C /dev/stderr -C /dev/stdin -C /run -A /run/nginx.pid -C /var/lib/nginx/tmp -A /var/lib/nginx/tmp/client_body -A /var/lib/nginx/tmp/fastcgi -A /var/lib/nginx/tmp/proxy -A /var/lib/nginx/tmp/scgi -A /var/lib/nginx/tmp/uwsgi -C /var/log/nginx -A /var/log/nginx/access.log -A /var/log/nginx/error.log -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/dockerd.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/dockerd.md deleted file mode 100644 index c9fceba7b..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/dockerd.md +++ /dev/null @@ -1,1371 +0,0 @@ ---- -title: "dockerd" -aliases: ["/engine/reference/commandline/daemon/"] -description: "The daemon command description and usage" -keywords: "container, daemon, runtime" ---- - - - -# daemon - -```markdown -Usage: dockerd COMMAND - -A self-sufficient runtime for containers. 
- -Options: - --add-runtime runtime Register an additional OCI compatible runtime (default []) - --api-cors-header string Set CORS headers in the Engine API - --authorization-plugin list Authorization plugins to load (default []) - --bip string Specify network bridge IP - -b, --bridge string Attach containers to a network bridge - --cgroup-parent string Set parent cgroup for all containers - --cluster-advertise string Address or interface name to advertise - --cluster-store string URL of the distributed storage backend - --cluster-store-opt map Set cluster store options (default map[]) - --config-file string Daemon configuration file (default "/etc/docker/daemon.json") - --containerd string Path to containerd socket - --cpu-rt-period int Limit the CPU real-time period in microseconds - --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds - --data-root string Root directory of persistent Docker state (default "/var/lib/docker") - -D, --debug Enable debug mode - --default-gateway ip Container default gateway IPv4 address - --default-gateway-v6 ip Container default gateway IPv6 address - --default-runtime string Default OCI runtime for containers (default "runc") - --default-ulimit ulimit Default ulimits for containers (default []) - --disable-legacy-registry Disable contacting legacy registries - --dns list DNS server to use (default []) - --dns-opt list DNS options to use (default []) - --dns-search list DNS search domains to use (default []) - --exec-opt list Runtime execution options (default []) - --exec-root string Root directory for execution state files (default "/var/run/docker") - --experimental Enable experimental features - --fixed-cidr string IPv4 subnet for fixed IPs - --fixed-cidr-v6 string IPv6 subnet for fixed IPs - -G, --group string Group for the unix socket (default "docker") - --help Print usage - -H, --host list Daemon socket(s) to connect to (default []) - --icc Enable inter-container communication (default true) - --init Run an 
init in the container to forward signals and reap processes - --init-path string Path to the docker-init binary - --insecure-registry list Enable insecure registry communication (default []) - --ip ip Default IP when binding container ports (default 0.0.0.0) - --ip-forward Enable net.ipv4.ip_forward (default true) - --ip-masq Enable IP masquerading (default true) - --iptables Enable addition of iptables rules (default true) - --ipv6 Enable IPv6 networking - --label list Set key=value labels to the daemon (default []) - --live-restore Enable live restore of docker when containers are still running - --log-driver string Default driver for container logs (default "json-file") - -l, --log-level string Set the logging level ("debug", "info", "warn", "error", "fatal") (default "info") - --log-opt map Default log driver options for containers (default map[]) - --max-concurrent-downloads int Set the max concurrent downloads for each pull (default 3) - --max-concurrent-uploads int Set the max concurrent uploads for each push (default 5) - --metrics-addr string Set default address and port to serve the metrics api on - --mtu int Set the containers network MTU - --oom-score-adjust int Set the oom_score_adj for the daemon (default -500) - -p, --pidfile string Path to use for daemon PID file (default "/var/run/docker.pid") - --raw-logs Full timestamps without ANSI coloring - --registry-mirror list Preferred Docker registry mirror (default []) - --seccomp-profile string Path to seccomp profile - --selinux-enabled Enable selinux support - --shutdown-timeout int Set the default shutdown timeout (default 15) - -s, --storage-driver string Storage driver to use - --storage-opt list Storage driver options (default []) - --swarm-default-advertise-addr string Set default address or interface for swarm advertised address - --tls Use TLS; implied by --tlsverify - --tlscacert string Trust certs signed only by this CA (default "~/.docker/ca.pem") - --tlscert string Path to TLS certificate 
file (default "~/.docker/cert.pem") - --tlskey string Path to TLS key file (default ~/.docker/key.pem") - --tlsverify Use TLS and verify the remote - --userland-proxy Use userland proxy for loopback traffic (default true) - --userland-proxy-path string Path to the userland proxy binary - --userns-remap string User/Group setting for user namespaces - -v, --version Print version information and quit -``` - -Options with [] may be specified multiple times. - -## Description - -`dockerd` is the persistent process that manages containers. Docker -uses different binaries for the daemon and client. To run the daemon you -type `dockerd`. - -To run the daemon with debug output, use `dockerd -D` or add `debug: true` to -the `daemon.json` file. - -> **Note**: In Docker 1.13 and higher, enable experimental features by starting -> `dockerd` with the `--experimental` flag or adding `experimental: true` to the -> `daemon.json` file. In earlier Docker versions, a different build was required -> to enable experimental features. - -## Examples - -### Daemon socket option - -The Docker daemon can listen for [Docker Engine API](../api/) -requests via three different types of Socket: `unix`, `tcp`, and `fd`. - -By default, a `unix` domain socket (or IPC socket) is created at -`/var/run/docker.sock`, requiring either `root` permission, or `docker` group -membership. - -If you need to access the Docker daemon remotely, you need to enable the `tcp` -Socket. Beware that the default setup provides un-encrypted and -un-authenticated direct access to the Docker daemon - and should be secured -either using the [built in HTTPS encrypted socket](https://docs.docker.com/engine/security/https/), or by -putting a secure web proxy in front of it. You can listen on port `2375` on all -network interfaces with `-H tcp://0.0.0.0:2375`, or on a particular network -interface using its IP address: `-H tcp://192.168.59.103:2375`. 
It is -conventional to use port `2375` for un-encrypted, and port `2376` for encrypted -communication with the daemon. - -> **Note**: If you're using an HTTPS encrypted socket, keep in mind that only -> TLS1.0 and greater are supported. Protocols SSLv3 and under are not -> supported anymore for security reasons. - -On Systemd based systems, you can communicate with the daemon via -[Systemd socket activation](http://0pointer.de/blog/projects/socket-activation.html), -use `dockerd -H fd://`. Using `fd://` will work perfectly for most setups but -you can also specify individual sockets: `dockerd -H fd://3`. If the -specified socket activated files aren't found, then Docker will exit. You can -find examples of using Systemd socket activation with Docker and Systemd in the -[Docker source tree](https://github.com/docker/docker/tree/master/contrib/init/systemd/). - -You can configure the Docker daemon to listen to multiple sockets at the same -time using multiple `-H` options: - -```bash -# listen using the default unix socket, and on 2 specific IP addresses on this host. - -$ sudo dockerd -H unix:///var/run/docker.sock -H tcp://192.168.59.106 -H tcp://10.10.10.2 -``` - -The Docker client will honor the `DOCKER_HOST` environment variable to set the -`-H` flag for the client. Use **one** of the following commands: - -```bash -$ docker -H tcp://0.0.0.0:2375 ps -``` - -```bash -$ export DOCKER_HOST="tcp://0.0.0.0:2375" - -$ docker ps -``` - -Setting the `DOCKER_TLS_VERIFY` environment variable to any value other than -the empty string is equivalent to setting the `--tlsverify` flag. The following -are equivalent: - -```bash -$ docker --tlsverify ps -# or -$ export DOCKER_TLS_VERIFY=1 -$ docker ps -``` - -The Docker client will honor the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` -environment variables (or the lowercase versions thereof). `HTTPS_PROXY` takes -precedence over `HTTP_PROXY`. 
- -#### Bind Docker to another host/port or a Unix socket - -> **Warning**: -> Changing the default `docker` daemon binding to a -> TCP port or Unix *docker* user group will increase your security risks -> by allowing non-root users to gain *root* access on the host. Make sure -> you control access to `docker`. If you are binding -> to a TCP port, anyone with access to that port has full Docker access; -> so it is not advisable on an open network. - -With `-H` it is possible to make the Docker daemon to listen on a -specific IP and port. By default, it will listen on -`unix:///var/run/docker.sock` to allow only local connections by the -*root* user. You *could* set it to `0.0.0.0:2375` or a specific host IP -to give access to everybody, but that is **not recommended** because -then it is trivial for someone to gain root access to the host where the -daemon is running. - -Similarly, the Docker client can use `-H` to connect to a custom port. -The Docker client will default to connecting to `unix:///var/run/docker.sock` -on Linux, and `tcp://127.0.0.1:2376` on Windows. - -`-H` accepts host and port assignment in the following format: - - tcp://[host]:[port][path] or unix://path - -For example: - -- `tcp://` -> TCP connection to `127.0.0.1` on either port `2376` when TLS encryption - is on, or port `2375` when communication is in plain text. -- `tcp://host:2375` -> TCP connection on - host:2375 -- `tcp://host:2375/path` -> TCP connection on - host:2375 and prepend path to all requests -- `unix://path/to/socket` -> Unix socket located - at `path/to/socket` - -`-H`, when empty, will default to the same value as -when no `-H` was passed in. 
- -`-H` also accepts short form for TCP bindings: `host:` or `host:port` or `:port` - -Run Docker in daemon mode: - -```bash -$ sudo /dockerd -H 0.0.0.0:5555 & -``` - -Download an `ubuntu` image: - -```bash -$ docker -H :5555 pull ubuntu -``` - -You can use multiple `-H`, for example, if you want to listen on both -TCP and a Unix socket - -```bash -# Run docker in daemon mode -$ sudo /dockerd -H tcp://127.0.0.1:2375 -H unix:///var/run/docker.sock & -# Download an ubuntu image, use default Unix socket -$ docker pull ubuntu -# OR use the TCP port -$ docker -H tcp://127.0.0.1:2375 pull ubuntu -``` - -### Daemon storage-driver - -The Docker daemon has support for several different image layer storage -drivers: `aufs`, `devicemapper`, `btrfs`, `zfs`, `overlay` and `overlay2`. - -The `aufs` driver is the oldest, but is based on a Linux kernel patch-set that -is unlikely to be merged into the main kernel. These are also known to cause -some serious kernel crashes. However `aufs` allows containers to share -executable and shared library memory, so is a useful choice when running -thousands of containers with the same program or libraries. - -The `devicemapper` driver uses thin provisioning and Copy on Write (CoW) -snapshots. For each devicemapper graph location – typically -`/var/lib/docker/devicemapper` – a thin pool is created based on two block -devices, one for data and one for metadata. By default, these block devices -are created automatically by using loopback mounts of automatically created -sparse files. Refer to [Storage driver options](#storage-driver-options) below -for a way how to customize this setup. -[~jpetazzo/Resizing Docker containers with the Device Mapper plugin](http://jpetazzo.github.io/2014/01/29/docker-device-mapper-resize/) -article explains how to tune your existing setup without the use of options. - -The `btrfs` driver is very fast for `docker build` - but like `devicemapper` -does not share executable memory between devices. 
Use -`dockerd -s btrfs -g /mnt/btrfs_partition`. - -The `zfs` driver is probably not as fast as `btrfs` but has a longer track record -on stability. Thanks to `Single Copy ARC` shared blocks between clones will be -cached only once. Use `dockerd -s zfs`. To select a different zfs filesystem -set `zfs.fsname` option as described in [Storage driver options](#storage-driver-options). - -The `overlay` is a very fast union filesystem. It is now merged in the main -Linux kernel as of [3.18.0](https://lkml.org/lkml/2014/10/26/137). `overlay` -also supports page cache sharing, this means multiple containers accessing -the same file can share a single page cache entry (or entries), it makes -`overlay` as efficient with memory as `aufs` driver. Call -`dockerd -s overlay` to use it. - -> **Note**: As promising as `overlay` is, the feature is still quite young and -> should not be used in production. Most notably, using `overlay` can cause -> excessive inode consumption (especially as the number of images grows), as -> well as > being incompatible with the use of RPMs. - -The `overlay2` uses the same fast union filesystem but takes advantage of -[additional features](https://lkml.org/lkml/2015/2/11/106) added in Linux -kernel 4.0 to avoid excessive inode consumption. Call `dockerd -s overlay2` -to use it. - -> **Note**: Both `overlay` and `overlay2` are currently unsupported on `btrfs` -> or any Copy on Write filesystem and should only be used over `ext4` partitions. - -### Options per storage driver - -Particular storage-driver can be configured with options specified with -`--storage-opt` flags. Options for `devicemapper` are prefixed with `dm`, -options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. - -#### Devicemapper options - -##### `dm.thinpooldev` - -Specifies a custom block storage device to use for the thin pool. - -If using a block device for device mapper storage, it is best to use `lvm` -to create and manage the thin-pool volume. 
This volume is then handed to Docker -to exclusively create snapshot volumes needed for images and containers. - -Managing the thin-pool outside of Engine makes for the most feature-rich -method of having Docker utilize device mapper thin provisioning as the -backing storage for Docker containers. The highlights of the lvm-based -thin-pool management feature include: automatic or interactive thin-pool -resize support, dynamically changing thin-pool features, automatic thinp -metadata checking when lvm activates the thin-pool, etc. - -As a fallback if no thin pool is provided, loopback files are -created. Loopback is very slow, but can be used without any -pre-configuration of storage. It is strongly recommended that you do -not use loopback in production. Ensure your Engine daemon has a -`--storage-opt dm.thinpooldev` argument provided. - -###### Example: - -```bash -$ sudo dockerd --storage-opt dm.thinpooldev=/dev/mapper/thin-pool -``` - -##### `dm.basesize` - -Specifies the size to use when creating the base device, which limits the -size of images and containers. The default value is 10G. Note, thin devices -are inherently "sparse", so a 10G device which is mostly empty doesn't use -10 GB of space on the pool. However, the filesystem will use more space for -the empty case the larger the device is. - -The base device size can be increased at daemon restart which will allow -all future images and containers (based on those new images) to be of the -new base device size. - -###### Examples - -```bash -$ sudo dockerd --storage-opt dm.basesize=50G -``` - -This will increase the base device size to 50G. The Docker daemon will throw an -error if existing base device size is larger than 50G. A user can use -this option to expand the base device size however shrinking is not permitted. - -This value affects the system-wide "base" empty filesystem -that may already be initialized and inherited by pulled images. 
Typically, -a change to this value requires additional steps to take effect: - - ```bash -$ sudo service docker stop - -$ sudo rm -rf /var/lib/docker - -$ sudo service docker start -``` - - -##### `dm.loopdatasize` - -> **Note**: This option configures devicemapper loopback, which should not -> be used in production. - -Specifies the size to use when creating the loopback file for the -"data" device which is used for the thin pool. The default size is -100G. The file is sparse, so it will not initially take up this -much space. - -###### Example - -```bash -$ sudo dockerd --storage-opt dm.loopdatasize=200G -``` - -##### `dm.loopmetadatasize` - -> **Note**: This option configures devicemapper loopback, which should not -> be used in production. - -Specifies the size to use when creating the loopback file for the -"metadata" device which is used for the thin pool. The default size -is 2G. The file is sparse, so it will not initially take up -this much space. - -###### Example - -```bash -$ sudo dockerd --storage-opt dm.loopmetadatasize=4G -``` - -##### `dm.fs` - -Specifies the filesystem type to use for the base device. The supported -options are "ext4" and "xfs". The default is "xfs" - -###### Example - -```bash -$ sudo dockerd --storage-opt dm.fs=ext4 -``` - -##### `dm.mkfsarg` - -Specifies extra mkfs arguments to be used when creating the base device. - -###### Example - -```bash -$ sudo dockerd --storage-opt "dm.mkfsarg=-O ^has_journal" -``` - -##### `dm.mountopt` - -Specifies extra mount options used when mounting the thin devices. - -###### Example - -```bash -$ sudo dockerd --storage-opt dm.mountopt=nodiscard -``` - -##### `dm.datadev` - -(Deprecated, use `dm.thinpooldev`) - -Specifies a custom blockdevice to use for data for the thin pool. - -If using a block device for device mapper storage, ideally both `datadev` and -`metadatadev` should be specified to completely avoid using the loopback -device. 
- -###### Example - -```bash -$ sudo dockerd \ - --storage-opt dm.datadev=/dev/sdb1 \ - --storage-opt dm.metadatadev=/dev/sdc1 -``` - -##### `dm.metadatadev` - -(Deprecated, use `dm.thinpooldev`) - -Specifies a custom blockdevice to use for metadata for the thin pool. - -For best performance the metadata should be on a different spindle than the -data, or even better on an SSD. - -If setting up a new metadata pool it is required to be valid. This can be -achieved by zeroing the first 4k to indicate empty metadata, like this: - -```bash -$ dd if=/dev/zero of=$metadata_dev bs=4096 count=1 -``` - -###### Example - -```bash -$ sudo dockerd \ - --storage-opt dm.datadev=/dev/sdb1 \ - --storage-opt dm.metadatadev=/dev/sdc1 -``` - -##### `dm.blocksize` - -Specifies a custom blocksize to use for the thin pool. The default -blocksize is 64K. - -###### Example - -```bash -$ sudo dockerd --storage-opt dm.blocksize=512K -``` - -##### `dm.blkdiscard` - -Enables or disables the use of `blkdiscard` when removing devicemapper -devices. This is enabled by default (only) if using loopback devices and is -required to resparsify the loopback file on image/container removal. - -Disabling this on loopback can lead to *much* faster container removal -times, but will make the space used in `/var/lib/docker` directory not be -returned to the system for other use when containers are removed. - -###### Examples - -```bash -$ sudo dockerd --storage-opt dm.blkdiscard=false -``` - -##### `dm.override_udev_sync_check` - -Overrides the `udev` synchronization checks between `devicemapper` and `udev`. -`udev` is the device manager for the Linux kernel. - -To view the `udev` sync support of a Docker daemon that is using the -`devicemapper` driver, run: - -```bash -$ docker info -[...] -Udev Sync Supported: true -[...] -``` - -When `udev` sync support is `true`, then `devicemapper` and udev can -coordinate the activation and deactivation of devices for containers. 
 - -When `udev` sync support is `false`, a race condition occurs between -the `devicemapper` and `udev` during create and cleanup. The race condition -results in errors and failures. (For information on these failures, see -[docker#4036](https://github.com/docker/docker/issues/4036)) - -To allow the `docker` daemon to start, regardless of `udev` sync not being -supported, set `dm.override_udev_sync_check` to true: - -```bash -$ sudo dockerd --storage-opt dm.override_udev_sync_check=true -``` - -When this value is `true`, the `devicemapper` continues and simply warns -you the errors are happening. - -> **Note**: The ideal is to pursue a `docker` daemon and environment that does -> support synchronizing with `udev`. For further discussion on this -> topic, see [docker#4036](https://github.com/docker/docker/issues/4036). -> Otherwise, set this flag for migrating existing Docker daemons to -> a daemon with a supported environment. - -##### `dm.use_deferred_removal` - -Enables use of deferred device removal if `libdm` and the kernel driver -support the mechanism. - -Deferred device removal means that if device is busy when devices are -being removed/deactivated, then a deferred removal is scheduled on -device. And devices automatically go away when last user of the device -exits. - -For example, when a container exits, its associated thin device is removed. -If that device has leaked into some other mount namespace and can't be -removed, the container exit still succeeds and this option causes the -system to schedule the device for deferred removal. It does not wait in a -loop trying to remove a busy device. - -###### Example - -```bash -$ sudo dockerd --storage-opt dm.use_deferred_removal=true -``` - -##### `dm.use_deferred_deletion` - -Enables use of deferred device deletion for thin pool devices. By default, -thin pool device deletion is synchronous. Before a container is deleted, -the Docker daemon removes any associated devices. 
If the storage driver -can not remove a device, the container deletion fails and daemon returns. - -```none -Error deleting container: Error response from daemon: Cannot destroy container -``` - -To avoid this failure, enable both deferred device deletion and deferred -device removal on the daemon. - -```bash -$ sudo dockerd \ - --storage-opt dm.use_deferred_deletion=true \ - --storage-opt dm.use_deferred_removal=true -``` - -With these two options enabled, if a device is busy when the driver is -deleting a container, the driver marks the device as deleted. Later, when -the device isn't in use, the driver deletes it. - -In general it should be safe to enable this option by default. It will help -when unintentional leaking of mount point happens across multiple mount -namespaces. - -##### `dm.min_free_space` - -Specifies the min free space percent in a thin pool required for new device -creation to succeed. This check applies to both free data space as well -as free metadata space. Valid values are from 0% - 99%. Value 0% disables -free space checking logic. If user does not specify a value for this option, -the Engine uses a default value of 10%. - -Whenever a new thin pool device is created (during `docker pull` or during -container creation), the Engine checks if the minimum free space is -available. If sufficient space is unavailable, then device creation fails -and any relevant `docker` operation fails. - -To recover from this error, you must create more free space in the thin pool -to recover from the error. You can create free space by deleting some images -and containers from the thin pool. You can also add more storage to the thin -pool. - -To add more space to a LVM (logical volume management) thin pool, just add -more storage to the volume group container thin pool; this should automatically -resolve any errors. If your configuration uses loop devices, then stop the -Engine daemon, grow the size of loop files and restart the daemon to resolve -the issue. 
- -###### Example - -```bash -$ sudo dockerd --storage-opt dm.min_free_space=10% -``` - -##### `dm.xfs_nospace_max_retries` - -Specifies the maximum number of retries XFS should attempt to complete -IO when ENOSPC (no space) error is returned by underlying storage device. - -By default XFS retries infinitely for IO to finish and this can result -in unkillable process. To change this behavior one can set -xfs_nospace_max_retries to say 0 and XFS will not retry IO after getting -ENOSPC and will shutdown filesystem. - -###### Example - -```bash -$ sudo dockerd --storage-opt dm.xfs_nospace_max_retries=0 -``` - -#### ZFS options - -##### `zfs.fsname` - -Set zfs filesystem under which docker will create its own datasets. -By default docker will pick up the zfs filesystem where docker graph -(`/var/lib/docker`) is located. - -###### Example - -```bash -$ sudo dockerd -s zfs --storage-opt zfs.fsname=zroot/docker -``` - -#### Btrfs options - -##### `btrfs.min_space` - -Specifies the minimum size to use when creating the subvolume which is used -for containers. If user uses disk quota for btrfs when creating or running -a container with **--storage-opt size** option, docker should ensure the -**size** cannot be smaller than **btrfs.min_space**. - -###### Example - -```bash -$ sudo dockerd -s btrfs --storage-opt btrfs.min_space=10G -``` - -#### Overlay2 options - -##### `overlay2.override_kernel_check` - -Overrides the Linux kernel version check allowing overlay2. Support for -specifying multiple lower directories needed by overlay2 was added to the -Linux kernel in 4.0.0. However, some older kernel versions may be patched -to add multiple lower directory support for OverlayFS. This option should -only be used after verifying this support exists in the kernel. Applying -this option on a kernel without this support will cause failures on mount. 
 - -### Docker runtime execution options - -The Docker daemon relies on an -[OCI](https://github.com/opencontainers/runtime-spec) compliant runtime -(invoked via the `containerd` daemon) as its interface to the Linux -kernel `namespaces`, `cgroups`, and `SELinux`. - -By default, the Docker daemon automatically starts `containerd`. If you want to -control `containerd` startup, manually start `containerd` and pass the path to -the `containerd` socket using the `--containerd` flag. For example: - -```bash -$ sudo dockerd --containerd /var/run/dev/docker-containerd.sock -``` - -Runtimes can be registered with the daemon either via the -configuration file or using the `--add-runtime` command line argument. - -The following is an example adding 2 runtimes via the configuration: - -```json -{ - "default-runtime": "runc", - "runtimes": { - "runc": { - "path": "runc" - }, - "custom": { - "path": "/usr/local/bin/my-runc-replacement", - "runtimeArgs": [ - "--debug" - ] - } - } -} -``` - -This is the same example via the command line: - -```bash -$ sudo dockerd --add-runtime runc=runc --add-runtime custom=/usr/local/bin/my-runc-replacement -``` - -> **Note**: Defining runtime arguments via the command line is not supported. - -#### Options for the runtime - -You can configure the runtime using options specified -with the `--exec-opt` flag. All the flag's options have the `native` prefix. A -single `native.cgroupdriver` option is available. - -The `native.cgroupdriver` option specifies the management of the container's -cgroups. You can only specify `cgroupfs` or `systemd`. If you specify -`systemd` and it is not available, the system errors out. If you omit the -`native.cgroupdriver` option, `cgroupfs` is used. - -This example sets the `cgroupdriver` to `systemd`: - -```bash -$ sudo dockerd --exec-opt native.cgroupdriver=systemd -``` - -Setting this option applies to all containers the daemon launches. - -Also Windows Container makes use of `--exec-opt` for special purpose. 
Docker user -can specify default container isolation technology with this, for example: - -```console -> dockerd --exec-opt isolation=hyperv -``` - -Will make `hyperv` the default isolation technology on Windows. If no isolation -value is specified on daemon start, on Windows client, the default is -`hyperv`, and on Windows server, the default is `process`. - -#### Daemon DNS options - -To set the DNS server for all Docker containers, use: - -```bash -$ sudo dockerd --dns 8.8.8.8 -``` - -To set the DNS search domain for all Docker containers, use: - -```bash -$ sudo dockerd --dns-search example.com -``` - -#### Insecure registries - -Docker considers a private registry either secure or insecure. In the rest of -this section, *registry* is used for *private registry*, and `myregistry:5000` -is a placeholder example for a private registry. - -A secure registry uses TLS and a copy of its CA certificate is placed on the -Docker host at `/etc/docker/certs.d/myregistry:5000/ca.crt`. An insecure -registry is either not using TLS (i.e., listening on plain text HTTP), or is -using TLS with a CA certificate not known by the Docker daemon. The latter can -happen when the certificate was not found under -`/etc/docker/certs.d/myregistry:5000/`, or if the certificate verification -failed (i.e., wrong CA). - -By default, Docker assumes all, but local (see local registries below), -registries are secure. Communicating with an insecure registry is not possible -if Docker assumes that registry is secure. In order to communicate with an -insecure registry, the Docker daemon requires `--insecure-registry` in one of -the following two forms: - -* `--insecure-registry myregistry:5000` tells the Docker daemon that - myregistry:5000 should be considered insecure. -* `--insecure-registry 10.1.0.0/16` tells the Docker daemon that all registries - whose domain resolve to an IP address is part of the subnet described by the - CIDR syntax, should be considered insecure. 
- -The flag can be used multiple times to allow multiple registries to be marked -as insecure. - -If an insecure registry is not marked as insecure, `docker pull`, -`docker push`, and `docker search` will result in an error message prompting -the user to either secure or pass the `--insecure-registry` flag to the Docker -daemon as described above. - -Local registries, whose IP address falls in the 127.0.0.0/8 range, are -automatically marked as insecure as of Docker 1.3.2. It is not recommended to -rely on this, as it may change in the future. - -Enabling `--insecure-registry`, i.e., allowing un-encrypted and/or untrusted -communication, can be useful when running a local registry. However, -because its use creates security vulnerabilities it should ONLY be enabled for -testing purposes. For increased security, users should add their CA to their -system's list of trusted CAs instead of enabling `--insecure-registry`. - -##### Legacy Registries - -Enabling `--disable-legacy-registry` forces a docker daemon to only interact with registries which support the V2 protocol. Specifically, the daemon will not attempt `push`, `pull` and `login` to v1 registries. The exception to this is `search` which can still be performed on v1 registries. - -#### Running a Docker daemon behind an HTTPS_PROXY - -When running inside a LAN that uses an `HTTPS` proxy, the Docker Hub -certificates will be replaced by the proxy's certificates. These certificates -need to be added to your Docker host's configuration: - -1. Install the `ca-certificates` package for your distribution -2. Ask your network admin for the proxy's CA certificate and append them to - `/etc/pki/tls/certs/ca-bundle.crt` -3. Then start your Docker daemon with `HTTPS_PROXY=http://username:password@proxy:port/ dockerd`. - The `username:` and `password@` are optional - and are only needed if your - proxy is set up to require authentication. 
 - -This will only add the proxy and authentication to the Docker daemon's requests - -your `docker build`s and running containers will need extra configuration to -use the proxy - -#### Default `ulimit` settings - -`--default-ulimit` allows you to set the default `ulimit` options to use for -all containers. It takes the same options as `--ulimit` for `docker run`. If -these defaults are not set, `ulimit` settings will be inherited, if not set on -`docker run`, from the Docker daemon. Any `--ulimit` options passed to -`docker run` will overwrite these defaults. - -Be careful setting `nproc` with the `ulimit` flag as `nproc` is designed by Linux to -set the maximum number of processes available to a user, not to a container. For details -please check the [run](run.md) reference. - -#### Node discovery - -The `--cluster-advertise` option specifies the `host:port` or `interface:port` -combination that this particular daemon instance should use when advertising -itself to the cluster. The daemon is reached by remote hosts through this value. -If you specify an interface, make sure it includes the IP address of the actual -Docker host. For Engine installation created through `docker-machine`, the -interface is typically `eth1`. - -The daemon uses [libkv](https://github.com/docker/libkv/) to advertise -the node within the cluster. Some key-value backends support mutual -TLS. The client TLS settings used by the daemon can be configured -using the `--cluster-store-opt` flag, specifying the paths to PEM encoded -files. 
For example: - -```bash -$ sudo dockerd \ - --cluster-advertise 192.168.1.2:2376 \ - --cluster-store etcd://192.168.1.2:2379 \ - --cluster-store-opt kv.cacertfile=/path/to/ca.pem \ - --cluster-store-opt kv.certfile=/path/to/cert.pem \ - --cluster-store-opt kv.keyfile=/path/to/key.pem -``` - -The currently supported cluster store options are: - -| Option | Description | -|-----------------------|-------------| -| `discovery.heartbeat` | Specifies the heartbeat timer in seconds which is used by the daemon as a `keepalive` mechanism to make sure discovery module treats the node as alive in the cluster. If not configured, the default value is 20 seconds. | -| `discovery.ttl` | Specifies the TTL (time-to-live) in seconds which is used by the discovery module to timeout a node if a valid heartbeat is not received within the configured ttl value. If not configured, the default value is 60 seconds. | -| `kv.cacertfile` | Specifies the path to a local file with PEM encoded CA certificates to trust. | -| `kv.certfile` | Specifies the path to a local file with a PEM encoded certificate. This certificate is used as the client cert for communication with the Key/Value store. | -| `kv.keyfile` | Specifies the path to a local file with a PEM encoded private key. This private key is used as the client key for communication with the Key/Value store. | -| `kv.path` | Specifies the path in the Key/Value store. If not configured, the default value is 'docker/nodes'. | - -#### Access authorization - -Docker's access authorization can be extended by authorization plugins that your -organization can purchase or build themselves. You can install one or more -authorization plugins when you start the Docker `daemon` using the -`--authorization-plugin=PLUGIN_ID` option. - -```bash -$ sudo dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,... -``` - -The `PLUGIN_ID` value is either the plugin's name or a path to its specification -file. 
The plugin's implementation determines whether you can specify a name or -path. Consult with your Docker administrator to get information about the -plugins available to you. - -Once a plugin is installed, requests made to the `daemon` through the command -line or Docker's Engine API are allowed or denied by the plugin. If you have -multiple plugins installed, at least one must allow the request for it to -complete. - -For information about how to create an authorization plugin, see [authorization -plugin](../../extend/plugins_authorization.md) section in the Docker extend section of this documentation. - - -#### Daemon user namespace options - -The Linux kernel [user namespace support](http://man7.org/linux/man-pages/man7/user_namespaces.7.html) provides additional security by enabling -a process, and therefore a container, to have a unique range of user and -group IDs which are outside the traditional user and group range utilized by -the host system. Potentially the most important security improvement is that, -by default, container processes running as the `root` user will have expected -administrative privilege (with some restrictions) inside the container but will -effectively be mapped to an unprivileged `uid` on the host. - -When user namespace support is enabled, Docker creates a single daemon-wide mapping -for all containers running on the same engine instance. The mappings will -utilize the existing subordinate user and group ID feature available on all modern -Linux distributions. -The [`/etc/subuid`](http://man7.org/linux/man-pages/man5/subuid.5.html) and -[`/etc/subgid`](http://man7.org/linux/man-pages/man5/subgid.5.html) files will be -read for the user, and optional group, specified to the `--userns-remap` -parameter. If you do not wish to specify your own user and/or group, you can -provide `default` as the value to this flag, and a user will be created on your behalf -and provided subordinate uid and gid ranges. 
This default user will be named -`dockremap`, and entries will be created for it in `/etc/passwd` and -`/etc/group` using your distro's standard user and group creation tools. - -> **Note**: The single mapping per-daemon restriction is in place for now -> because Docker shares image layers from its local cache across all -> containers running on the engine instance. Since file ownership must be -> the same for all containers sharing the same layer content, the decision -> was made to map the file ownership on `docker pull` to the daemon's user and -> group mappings so that there is no delay for running containers once the -> content is downloaded. This design preserves the same performance for `docker -> pull`, `docker push`, and container startup as users expect with -> user namespaces disabled. - -##### Start the daemon with user namespaces enabled - -To enable user namespace support, start the daemon with the -`--userns-remap` flag, which accepts values in the following formats: - - - uid - - uid:gid - - username - - username:groupname - -If numeric IDs are provided, translation back to valid user or group names -will occur so that the subordinate uid and gid information can be read, given -these resources are name-based, not id-based. If the numeric ID information -provided does not exist as entries in `/etc/passwd` or `/etc/group`, daemon -startup will fail with an error message. - -**Example: starting with default Docker user management:** - -```bash -$ sudo dockerd --userns-remap=default -``` - -When `default` is provided, Docker will create - or find the existing - user and group -named `dockremap`. If the user is created, and the Linux distribution has -appropriate support, the `/etc/subuid` and `/etc/subgid` files will be populated -with a contiguous 65536 length range of subordinate user and group IDs, starting -at an offset based on prior entries in those files. 
For example, Ubuntu will -create the following range, based on an existing user named `user1` already owning -the first 65536 range: - -```bash -$ cat /etc/subuid -user1:100000:65536 -dockremap:165536:65536 -``` - -If you have a preferred/self-managed user with subordinate ID mappings already -configured, you can provide that username or uid to the `--userns-remap` flag. -If you have a group that doesn't match the username, you may provide the `gid` -or group name as well; otherwise the username will be used as the group name -when querying the system for the subordinate group ID range. - -The output of `docker info` can be used to determine if the daemon is running -with user namespaces enabled or not. If the daemon is configured with user -namespaces, the Security Options entry in the response will list "userns" as -one of the enabled security features. - -##### Behavior differences when user namespaces are enabled - -When you start the Docker daemon with `--userns-remap`, Docker segregates the graph directory -where the images are stored by adding an extra directory with a name corresponding to the -remapped UID and GID. For example, if the remapped UID and GID begin with `165536`, all -images and containers running with that remap setting are located in `/var/lib/docker/165536.165536` -instead of `/var/lib/docker/`. - -In addition, the files and directories within the new directory, which correspond to -images and container layers, are also owned by the new UID and GID. To set the ownership -correctly, you need to re-pull the images and restart the containers after starting the -daemon with `--userns-remap`. - -##### Detailed information on `subuid`/`subgid` ranges - -Given potential advanced use of the subordinate ID ranges by power users, the -following paragraphs define how the Docker daemon currently uses the range entries -found within the subordinate range files. 
- -The simplest case is that only one contiguous range is defined for the -provided user or group. In this case, Docker will use that entire contiguous -range for the mapping of host uids and gids to the container process. This -means that the first ID in the range will be the remapped root user, and the -IDs above that initial ID will map host ID 1 through the end of the range. - -From the example `/etc/subuid` content shown above, the remapped root -user would be uid 165536. - -If the system administrator has set up multiple ranges for a single user or -group, the Docker daemon will read all the available ranges and use the -following algorithm to create the mapping ranges: - -1. The range segments found for the particular user will be sorted by *start ID* ascending. -2. Map segments will be created from each range in increasing value with a length matching the length of each segment. Therefore the range segment with the lowest numeric starting value will be equal to the remapped root, and continue up through host uid/gid equal to the range segment length. As an example, if the lowest segment starts at ID 1000 and has a length of 100, then a map of 1000 -> 0 (the remapped root) up through 1100 -> 100 will be created from this segment. If the next segment starts at ID 10000, then the next map will start with mapping 10000 -> 101 up to the length of this second segment. This will continue until no more segments are found in the subordinate files for this user. -3. If more than five range segments exist for a single user, only the first five will be utilized, matching the kernel's limitation of only five entries in `/proc/self/uid_map` and `proc/self/gid_map`. - -##### Disable user namespace for a container - -If you enable user namespaces on the daemon, all containers are started -with user namespaces enabled. 
In some situations you might want to disable -this feature for a container, for example, to start a privileged container (see -[user namespace known restrictions](#user-namespace-known-restrictions)). -To enable those advanced features for a specific container use `--userns=host` -in the `run/exec/create` command. -This option will completely disable user namespace mapping for the container's user. - -##### User namespace known restrictions - -The following standard Docker features are currently incompatible when -running a Docker daemon with user namespaces enabled: - - - sharing PID or NET namespaces with the host (`--pid=host` or `--net=host`) - - Using `--privileged` mode flag on `docker run` (unless also specifying `--userns=host`) - -In general, user namespaces are an advanced feature and will require -coordination with other capabilities. For example, if volumes are mounted from -the host, file ownership will have to be pre-arranged if the user or -administrator wishes the containers to have expected access to the volume -contents. Note that when using external volume or graph driver plugins, those -external software programs must be made aware of user and group mapping ranges -if they are to work seamlessly with user namespace support. - -Finally, while the `root` user inside a user namespaced container process has -many of the expected admin privileges that go along with being the superuser, the -Linux kernel has restrictions based on internal knowledge that this is a user namespaced -process. The most notable restriction that we are aware of at this time is the -inability to use `mknod`. Permission will be denied for device creation even as -container `root` inside a user namespace. - -### Miscellaneous options - -IP masquerading uses address translation to allow containers without a public -IP to talk to other machines on the Internet. This may interfere with some -network topologies and can be disabled with `--ip-masq=false`. 
- -Docker supports softlinks for the Docker data directory (`/var/lib/docker`) and -for `/var/lib/docker/tmp`. The `DOCKER_TMPDIR` and the data directory can be -set like this: - - DOCKER_TMPDIR=/mnt/disk2/tmp /usr/local/bin/dockerd -D -g /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1 - # or - export DOCKER_TMPDIR=/mnt/disk2/tmp - /usr/local/bin/dockerd -D -g /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1 - -#### Default cgroup parent - -The `--cgroup-parent` option allows you to set the default cgroup parent -to use for containers. If this option is not set, it defaults to `/docker` for -fs cgroup driver and `system.slice` for systemd cgroup driver. - -If the cgroup has a leading forward slash (`/`), the cgroup is created -under the root cgroup, otherwise the cgroup is created under the daemon -cgroup. - -Assuming the daemon is running in cgroup `daemoncgroup`, -`--cgroup-parent=/foobar` creates a cgroup in -`/sys/fs/cgroup/memory/foobar`, whereas using `--cgroup-parent=foobar` -creates the cgroup in `/sys/fs/cgroup/memory/daemoncgroup/foobar` - -The systemd cgroup driver has different rules for `--cgroup-parent`. Systemd -represents hierarchy by slice and the name of the slice encodes the location in -the tree. So `--cgroup-parent` for systemd cgroups should be a slice name. A -name can consist of a dash-separated series of names, which describes the path -to the slice from the root slice. For example, `--cgroup-parent=user-a-b.slice` -means the memory cgroup for the container is created in -`/sys/fs/cgroup/memory/user.slice/user-a.slice/user-a-b.slice/docker-.scope`. - -This setting can also be set per container, using the `--cgroup-parent` -option on `docker create` and `docker run`, and takes precedence over -the `--cgroup-parent` option on the daemon. - -#### Daemon metrics - -The `--metrics-addr` option takes a tcp address to serve the metrics API. 
-This feature is still experimental, therefore, the daemon must be running in experimental -mode for this feature to work. - -To serve the metrics API on localhost:1337 you would specify `--metrics-addr 127.0.0.1:1337` -allowing you to make requests on the API at `127.0.0.1:1337/metrics` to receive metrics in the -[prometheus](https://prometheus.io/docs/instrumenting/exposition_formats/) format. - -If you are running a prometheus server you can add this address to your scrape configs -to have prometheus collect metrics on Docker. For more information -on prometheus you can view the website [here](https://prometheus.io/). - -```none -scrape_configs: - - job_name: 'docker' - static_configs: - - targets: ['127.0.0.1:1337'] -``` - -Please note that this feature is still marked as experimental as metrics and metric -names could change while this feature is still in experimental. Please provide -feedback on what you would like to see collected in the API. - -#### Daemon configuration file - -The `--config-file` option allows you to set any configuration option -for the daemon in a JSON format. This file uses the same flag names as keys, -except for flags that allow several entries, where it uses the plural -of the flag name, e.g., `labels` for the `label` flag. - -The options set in the configuration file must not conflict with options set -via flags. The docker daemon fails to start if an option is duplicated between -the file and the flags, regardless their value. We do this to avoid -silently ignore changes introduced in configuration reloads. -For example, the daemon fails to start if you set daemon labels -in the configuration file and also set daemon labels via the `--label` flag. -Options that are not present in the file are ignored when the daemon starts. - -##### On Linux - -The default location of the configuration file on Linux is -`/etc/docker/daemon.json`. The `--config-file` flag can be used to specify a - non-default location. 
- -This is a full example of the allowed configuration options on Linux: - -```json -{ - "authorization-plugins": [], - "data-root": "", - "dns": [], - "dns-opts": [], - "dns-search": [], - "exec-opts": [], - "exec-root": "", - "experimental": false, - "storage-driver": "", - "storage-opts": [], - "labels": [], - "live-restore": true, - "log-driver": "", - "log-opts": {}, - "mtu": 0, - "pidfile": "", - "cluster-store": "", - "cluster-store-opts": {}, - "cluster-advertise": "", - "max-concurrent-downloads": 3, - "max-concurrent-uploads": 5, - "default-shm-size": "64M", - "shutdown-timeout": 15, - "debug": true, - "hosts": [], - "log-level": "", - "tls": true, - "tlsverify": true, - "tlscacert": "", - "tlscert": "", - "tlskey": "", - "swarm-default-advertise-addr": "", - "api-cors-header": "", - "selinux-enabled": false, - "userns-remap": "", - "group": "", - "cgroup-parent": "", - "default-ulimits": {}, - "init": false, - "init-path": "/usr/libexec/docker-init", - "ipv6": false, - "iptables": false, - "ip-forward": false, - "ip-masq": false, - "userland-proxy": false, - "userland-proxy-path": "/usr/libexec/docker-proxy", - "ip": "0.0.0.0", - "bridge": "", - "bip": "", - "fixed-cidr": "", - "fixed-cidr-v6": "", - "default-gateway": "", - "default-gateway-v6": "", - "icc": false, - "raw-logs": false, - "registry-mirrors": [], - "seccomp-profile": "", - "insecure-registries": [], - "disable-legacy-registry": false, - "no-new-privileges": false, - "default-runtime": "runc", - "oom-score-adjust": -500, - "runtimes": { - "runc": { - "path": "runc" - }, - "custom": { - "path": "/usr/local/bin/my-runc-replacement", - "runtimeArgs": [ - "--debug" - ] - } - } -} -``` - -> **Note:** You cannot set options in `daemon.json` that have already been set on -> daemon startup as a flag. -> On systems that use `systemd` to start the Docker daemon, `-H` is already set, so -> you cannot use the `hosts` key in `daemon.json` to add listening addresses. 
-> See https://docs.docker.com/engine/admin/systemd/#custom-docker-daemon-options for how -> to accomplish this task with a systemd drop-in file. - -##### On Windows - -The default location of the configuration file on Windows is - `%programdata%\docker\config\daemon.json`. The `--config-file` flag can be - used to specify a non-default location. - -This is a full example of the allowed configuration options on Windows: - -```json -{ - "authorization-plugins": [], - "data-root": "", - "dns": [], - "dns-opts": [], - "dns-search": [], - "exec-opts": [], - "experimental": false, - "storage-driver": "", - "storage-opts": [], - "labels": [], - "log-driver": "", - "mtu": 0, - "pidfile": "", - "cluster-store": "", - "cluster-advertise": "", - "max-concurrent-downloads": 3, - "max-concurrent-uploads": 5, - "shutdown-timeout": 15, - "debug": true, - "hosts": [], - "log-level": "", - "tlsverify": true, - "tlscacert": "", - "tlscert": "", - "tlskey": "", - "swarm-default-advertise-addr": "", - "group": "", - "default-ulimits": {}, - "bridge": "", - "fixed-cidr": "", - "raw-logs": false, - "registry-mirrors": [], - "insecure-registries": [], - "disable-legacy-registry": false -} -``` - -#### Configuration reload behavior - -Some options can be reconfigured when the daemon is running without requiring -to restart the process. We use the `SIGHUP` signal in Linux to reload, and a global event -in Windows with the key `Global\docker-daemon-config-$PID`. The options can -be modified in the configuration file but still will check for conflicts with -the provided flags. The daemon fails to reconfigure itself -if there are conflicts, but it won't stop execution. - -The list of currently supported options that can be reconfigured is this: - -- `debug`: it changes the daemon to debug mode when set to true. -- `cluster-store`: it reloads the discovery store with the new address. -- `cluster-store-opts`: it uses the new options to reload the discovery store. 
-- `cluster-advertise`: it modifies the address advertised after reloading. -- `labels`: it replaces the daemon labels with a new set of labels. -- `live-restore`: Enables [keeping containers alive during daemon downtime](https://docs.docker.com/engine/admin/live-restore/). -- `max-concurrent-downloads`: it updates the max concurrent downloads for each pull. -- `max-concurrent-uploads`: it updates the max concurrent uploads for each push. -- `default-runtime`: it updates the runtime to be used if one is not - specified at container creation. It defaults to "default" which is - the runtime shipped with the official docker packages. -- `runtimes`: it updates the list of available OCI runtimes that can - be used to run containers -- `authorization-plugin`: specifies the authorization plugins to use. -- `insecure-registries`: it replaces the daemon insecure registries with a new set of insecure registries. If some existing insecure registries in daemon's configuration are not in newly reloaded insecure registries, these existing ones will be removed from daemon's config. -- `registry-mirrors`: it replaces the daemon registry mirrors with a new set of registry mirrors. If some existing registry mirrors in daemon's configuration are not in newly reloaded registry mirrors, these existing ones will be removed from daemon's config. - -Updating and reloading the cluster configurations such as `--cluster-store`, -`--cluster-advertise` and `--cluster-store-opts` will take effect only if -these configurations were not previously configured. If `--cluster-store` -has been provided in flags and `cluster-advertise` not, `cluster-advertise` -can be added in the configuration file without being accompanied by `--cluster-store`. -Configuration reload will log a warning message if it detects a change in -previously configured cluster configurations. - - -### Run multiple daemons - -> **Note:** Running multiple daemons on a single host is considered as "experimental". 
The user should be aware of -> unsolved problems. This solution may not work properly in some cases. Solutions are currently under development -> and will be delivered in the near future. - -This section describes how to run multiple Docker daemons on a single host. To -run multiple daemons, you must configure each daemon so that it does not -conflict with other daemons on the same host. You can set these options either -by providing them as flags, or by using a [daemon configuration file](#daemon-configuration-file). - -The following daemon options must be configured for each daemon: - -```none --b, --bridge= Attach containers to a network bridge ---exec-root=/var/run/docker Root of the Docker execdriver ---data-root=/var/lib/docker Root of persisted Docker data --p, --pidfile=/var/run/docker.pid Path to use for daemon PID file --H, --host=[] Daemon socket(s) to connect to ---iptables=true Enable addition of iptables rules ---config-file=/etc/docker/daemon.json Daemon configuration file ---tlscacert="~/.docker/ca.pem" Trust certs signed only by this CA ---tlscert="~/.docker/cert.pem" Path to TLS certificate file ---tlskey="~/.docker/key.pem" Path to TLS key file -``` - -When your daemons use different values for these flags, you can run them on the same host without any problems. -It is very important to properly understand the meaning of those options and to use them correctly. - -- The `-b, --bridge=` flag is set to `docker0` as default bridge network. It is created automatically when you install Docker. -If you are not using the default, you must create and configure the bridge manually or just set it to 'none': `--bridge=none` -- `--exec-root` is the path where the container state is stored. The default value is `/var/run/docker`. Specify the path for -your running daemon here. -- `--data-root` is the path where persisted data such as images, volumes, and -cluster state are stored. The default value is `/var/lib/docker`. 
To avoid any -conflict with other daemons, set this parameter separately for each daemon. -- `-p, --pidfile=/var/run/docker.pid` is the path where the process ID of the daemon is stored. Specify the path for your -pid file here. -- `--host=[]` specifies where the Docker daemon will listen for client connections. If unspecified, it defaults to `/var/run/docker.sock`. -- `--iptables=false` prevents the Docker daemon from adding iptables rules. If -multiple daemons manage iptables rules, they may overwrite rules set by another -daemon. Be aware that disabling this option requires you to manually add -iptables rules to expose container ports. If you prevent Docker from adding -iptables rules, Docker will also not add IP masquerading rules, even if you set -`--ip-masq` to `true`. Without IP masquerading rules, Docker containers will not be -able to connect to external hosts or the internet when using network other than -default bridge. -- `--config-file=/etc/docker/daemon.json` is the path where configuration file is stored. You can use it instead of -daemon flags. Specify the path for each daemon. -- `--tls*` Docker daemon supports `--tlsverify` mode that enforces encrypted and authenticated remote connections. -The `--tls*` options enable use of specific certificates for individual daemons. 
- -Example script for a separate “bootstrap” instance of the Docker daemon without network: - -```bash -$ sudo dockerd \ - -H unix:///var/run/docker-bootstrap.sock \ - -p /var/run/docker-bootstrap.pid \ - --iptables=false \ - --ip-masq=false \ - --bridge=none \ - --data-root=/var/lib/docker-bootstrap \ - --exec-root=/var/run/docker-bootstrap -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/events.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/events.md deleted file mode 100644 index 9bf051363..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/events.md +++ /dev/null @@ -1,349 +0,0 @@ ---- -title: "events" -description: "The events command description and usage" -keywords: "events, container, report" ---- - - - -# events - -```markdown -Usage: docker events [OPTIONS] - -Get real time events from the server - -Options: - -f, --filter value Filter output based on conditions provided (default []) - --format string Format the output using the given Go template - --help Print usage - --since string Show all events created since timestamp - --until string Stream events until this timestamp -``` - -## Description - -Use `docker events` to get real-time events from the server. These events differ -per Docker object type. 
- -### Object types - -#### Containers - -Docker containers report the following events: - -- `attach` -- `commit` -- `copy` -- `create` -- `destroy` -- `detach` -- `die` -- `exec_create` -- `exec_detach` -- `exec_start` -- `export` -- `health_status` -- `kill` -- `oom` -- `pause` -- `rename` -- `resize` -- `restart` -- `start` -- `stop` -- `top` -- `unpause` -- `update` - -#### Images - -Docker images report the following events: - -- `delete` -- `import` -- `load` -- `pull` -- `push` -- `save` -- `tag` -- `untag` - -#### Plugins - -Docker plugins report the following events: - -- `install` -- `enable` -- `disable` -- `remove` - -#### Volumes - -Docker volumes report the following events: - -- `create` -- `mount` -- `unmount` -- `destroy` - -#### Networks - -Docker networks report the following events: - -- `create` -- `connect` -- `disconnect` -- `destroy` - -#### Daemons - -Docker daemons report the following events: - -- `reload` - -### Limiting, filtering, and formatting the output - -#### Limit events by time - -The `--since` and `--until` parameters can be Unix timestamps, date formatted -timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed -relative to the client machine’s time. If you do not provide the `--since` option, -the command returns only new and/or live events. Supported formats for date -formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, -`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local -timezone on the client will be used if you do not provide either a `Z` or a -`+-00:00` timezone offset at the end of the timestamp. When providing Unix -timestamps enter seconds[.nanoseconds], where seconds is the number of seconds -that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap -seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a -fraction of a second no more than nine digits long. 
- -#### Filtering - -The filtering flag (`-f` or `--filter`) format is of "key=value". If you would -like to use multiple filters, pass multiple flags (e.g., -`--filter "foo=bar" --filter "bif=baz"`) - -Using the same filter multiple times will be handled as an *OR*; for example -`--filter container=588a23dac085 --filter container=a8f7720b8c22` will display -events for container 588a23dac085 *OR* container a8f7720b8c22 - -Using multiple filters will be handled as an *AND*; for example -`--filter container=588a23dac085 --filter event=start` will display events for -container 588a23dac085 *AND* the event type is *start* - -The currently supported filters are: - -* container (`container=`) -* daemon (`daemon=`) -* event (`event=`) -* image (`image=`) -* label (`label=` or `label==`) -* network (`network=`) -* plugin (`plugin=`) -* type (`type=`) -* volume (`volume=`) - -#### Format - -If a format (`--format`) is specified, the given template will be executed -instead of the default -format. Go's [text/template](http://golang.org/pkg/text/template/) package -describes all the details of the format. - -If a format is set to `{{json .}}`, the events are streamed as valid JSON -Lines. For information about JSON Lines, please refer to http://jsonlines.org/ . - -## Examples - -### Basic example - -You'll need two shells for this example. - -**Shell 1: Listening for events:** - -```bash -$ docker events -``` - -**Shell 2: Start and Stop containers:** - -```bash -$ docker create --name test alpine:latest top -$ docker start test -$ docker stop test -``` - -**Shell 1: (Again .. 
now showing events):** - -```none -2017-01-05T00:35:58.859401177+08:00 container create 0fdb48addc82871eb34eb23a847cfd033dedd1a0a37bef2e6d9eb3870fc7ff37 (image=alpine:latest, name=test) -2017-01-05T00:36:04.703631903+08:00 network connect e2e1f5ceda09d4300f3a846f0acfaa9a8bb0d89e775eb744c5acecd60e0529e2 (container=0fdb...ff37, name=bridge, type=bridge) -2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) -2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) -2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) -2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) -2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) -``` - -To exit the `docker events` command, use `CTRL+C`. - -### Filter events by time - -You can filter the output by an absolute timestamp or relative time on the host -machine, using the following different time syntaxes: - -```bash -$ docker events --since 1483283804 -2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) -2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) -2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) -2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) -2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) -2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) -2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) -2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) - -$ docker events 
--since '2017-01-05' -2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) -2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) -2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) -2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) -2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) -2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) -2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) -2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) - -$ docker events --since '2013-09-03T15:49:29' -2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) -2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) -2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) -2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) -2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) -2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) -2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) -2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) - -$ docker events --since '10m' -2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) -2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) -2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 
(container=0fdb...ff37, name=bridge, type=bridge) -2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) -2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) -2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) -2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) -2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) -``` - -### Filter events by criteria - -The following commands show several different ways to filter the `docker event` -output. - -```bash -$ docker events --filter 'event=stop' - -2017-01-05T00:40:22.880175420+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) -2017-01-05T00:41:17.888104182+08:00 container stop 2a8f...4e78 (image=alpine, name=kickass_brattain) - -$ docker events --filter 'image=alpine' - -2017-01-05T00:41:55.784240236+08:00 container create d9cd...4d70 (image=alpine, name=happy_meitner) -2017-01-05T00:41:55.913156783+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner) -2017-01-05T00:42:01.106875249+08:00 container kill d9cd...4d70 (image=alpine, name=happy_meitner, signal=15) -2017-01-05T00:42:11.111934041+08:00 container kill d9cd...4d70 (image=alpine, name=happy_meitner, signal=9) -2017-01-05T00:42:11.119578204+08:00 container die d9cd...4d70 (exitCode=137, image=alpine, name=happy_meitner) -2017-01-05T00:42:11.173276611+08:00 container stop d9cd...4d70 (image=alpine, name=happy_meitner) - -$ docker events --filter 'container=test' - -2017-01-05T00:43:00.139719934+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) -2017-01-05T00:43:09.259951086+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) -2017-01-05T00:43:09.270102715+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) 
-2017-01-05T00:43:09.312556440+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) - -$ docker events --filter 'container=test' --filter 'container=d9cdb1525ea8' - -2017-01-05T00:44:11.517071981+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) -2017-01-05T00:44:17.685870901+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner) -2017-01-05T00:44:29.757658470+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=9) -2017-01-05T00:44:29.767718510+08:00 container die 0fdb...ff37 (exitCode=137, image=alpine:latest, name=test) -2017-01-05T00:44:29.815798344+08:00 container destroy 0fdb...ff37 (image=alpine:latest, name=test) - -$ docker events --filter 'container=test' --filter 'event=stop' - -2017-01-05T00:46:13.664099505+08:00 container stop a9d1...e130 (image=alpine, name=test) - -$ docker events --filter 'type=volume' - -2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local) -2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562f...5025, destination=/foo, driver=local, propagation=rprivate) -2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562f...5025, driver=local) -2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local) - -$ docker events --filter 'type=network' - -2015-12-23T21:38:24.705709133Z network create 8b11...2c5b (name=test-event-network-local, type=bridge) -2015-12-23T21:38:25.119625123Z network connect 8b11...2c5b (name=test-event-network-local, container=b4be...c54e, type=bridge) - -$ docker events --filter 'container=container_1' --filter 'container=container_2' - -2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) -2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) -2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (imager=redis:2.8) -2014-09-03T15:49:29.999999999Z07:00 
container stop 7805c1d35632 (image=redis:2.8) - -$ docker events --filter 'type=volume' - -2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local) -2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, destination=/foo, driver=local, propagation=rprivate) -2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, driver=local) -2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local) - -$ docker events --filter 'type=network' - -2015-12-23T21:38:24.705709133Z network create 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, type=bridge) -2015-12-23T21:38:25.119625123Z network connect 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, container=b4be644031a3d90b400f88ab3d4bdf4dc23adb250e696b6328b85441abe2c54e, type=bridge) -``` - -The `type=plugin` filter is experimental. 
- -```bash -$ docker events --filter 'type=plugin' - -2016-07-25T17:30:14.825557616Z plugin pull ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) -2016-07-25T17:30:14.888127370Z plugin enable ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) -``` - -### Format the output - -```bash -$ docker events --filter 'type=container' --format 'Type={{.Type}} Status={{.Status}} ID={{.ID}}' - -Type=container Status=create ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 -Type=container Status=attach ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 -Type=container Status=start ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 -Type=container Status=resize ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 -Type=container Status=die ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 -Type=container Status=destroy ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 -``` - -#### Format as JSON - -```none - $ docker events --format '{{json .}}' - - {"status":"create","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. - {"status":"attach","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. - {"Type":"network","Action":"connect","Actor":{"ID":"1b50a5bf755f6021dfa78e.. - {"status":"start","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f42.. - {"status":"resize","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. 
-``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/exec.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/exec.md deleted file mode 100644 index 1ae46cf19..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/exec.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: "exec" -description: "The exec command description and usage" -keywords: "command, container, run, execute" ---- - - - -# exec - -```markdown -Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...] - -Run a command in a running container - -Options: - -d, --detach Detached mode: run command in the background - --detach-keys Override the key sequence for detaching a container - -e, --env=[] Set environment variables - --help Print usage - -i, --interactive Keep STDIN open even if not attached - --privileged Give extended privileges to the command - -t, --tty Allocate a pseudo-TTY - -u, --user Username or UID (format: [:]) -``` - -## Description - -The `docker exec` command runs a new command in a running container. - -The command started using `docker exec` only runs while the container's primary -process (`PID 1`) is running, and it is not restarted if the container is -restarted. - -## Examples - -### Run `docker exec` on a running container - -First, start a container. - -```bash -$ docker run --name ubuntu_bash --rm -i -t ubuntu bash -``` - -This will create a container named `ubuntu_bash` and start a Bash session. - -Next, execute a command on the container. - -```bash -$ docker exec -d ubuntu_bash touch /tmp/execWorks -``` - -This will create a new file `/tmp/execWorks` inside the running container -`ubuntu_bash`, in the background. - -Next, execute an interactive `bash` shell on the container. - -```bash -$ docker exec -it ubuntu_bash bash -``` - -This will create a new Bash session in the container `ubuntu_bash`. 
- -### Try to run `docker exec` on a paused container - -If the container is paused, then the `docker exec` command will fail with an error: - -```bash -$ docker pause test - -test - -$ docker ps - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -1ae3b36715d2 ubuntu:latest "bash" 17 seconds ago Up 16 seconds (Paused) test - -$ docker exec test ls - -FATA[0000] Error response from daemon: Container test is paused, unpause the container before exec - -$ echo $? -1 -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/export.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/export.md deleted file mode 100644 index 9de509714..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/export.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: "export" -description: "The export command description and usage" -keywords: "export, file, system, container" ---- - - - -# export - -```markdown -Usage: docker export [OPTIONS] CONTAINER - -Export a container's filesystem as a tar archive - -Options: - --help Print usage - -o, --output string Write to a file, instead of STDOUT -``` - -## Description - -The `docker export` command does not export the contents of volumes associated -with the container. If a volume is mounted on top of an existing directory in -the container, `docker export` will export the contents of the *underlying* -directory, not the contents of the volume. - -Refer to [Backup, restore, or migrate data volumes](https://docs.docker.com/engine/tutorials/dockervolumes/#backup-restore-or-migrate-data-volumes) -in the user guide for examples on exporting data in a volume. - -## Examples - -Each of these commands has the same result. 
- -```bash -$ docker export red_panda > latest.tar -``` - -```bash -$ docker export --output="latest.tar" red_panda -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/history.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/history.md deleted file mode 100644 index b68cc8695..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/history.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: "history" -description: "The history command description and usage" -keywords: "docker, image, history" ---- - - - -# history - -```markdown -Usage: docker history [OPTIONS] IMAGE - -Show the history of an image - -Options: - --help Print usage - -H, --human Print sizes and dates in human readable format (default true) - --no-trunc Don't truncate output - -q, --quiet Only show numeric IDs -``` - - -## Examples - -To see how the `docker:latest` image was built: - -```bash -$ docker history docker - -IMAGE CREATED CREATED BY SIZE COMMENT -3e23a5875458 8 days ago /bin/sh -c #(nop) ENV LC_ALL=C.UTF-8 0 B -8578938dd170 8 days ago /bin/sh -c dpkg-reconfigure locales && loc 1.245 MB -be51b77efb42 8 days ago /bin/sh -c apt-get update && apt-get install 338.3 MB -4b137612be55 6 weeks ago /bin/sh -c #(nop) ADD jessie.tar.xz in / 121 MB -750d58736b4b 6 weeks ago /bin/sh -c #(nop) MAINTAINER Tianon Gravi - -# image - -```markdown -Usage: docker image COMMAND - -Manage images - -Options: - --help Print usage - -Commands: - build Build an image from a Dockerfile - history Show the history of an image - import Import the contents from a tarball to create a filesystem image - inspect Display detailed information on one or more images - load Load an image from a tar archive or STDIN - ls List images - prune Remove unused images - pull Pull an image or a repository from a registry - push Push an image or a repository to a registry - rm Remove one or more images - save Save one or more images to a tar archive (streamed to STDOUT by 
default) - tag Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE - -Run 'docker image COMMAND --help' for more information on a command. - -``` - -## Description - -Manage images. diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/image_prune.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/image_prune.md deleted file mode 100644 index e9cae8c6a..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/image_prune.md +++ /dev/null @@ -1,165 +0,0 @@ ---- -title: "image prune" -description: "Remove all stopped images" -keywords: "image, prune, delete, remove" ---- - - - -# image prune - -```markdown -Usage: docker image prune [OPTIONS] - -Remove unused images - -Options: - -a, --all Remove all unused images, not just dangling ones - --filter filter Provide filter values (e.g. 'until=') - -f, --force Do not prompt for confirmation - --help Print usage -``` - -## Description - -Remove all dangling images. If `-a` is specified, will also remove all images not referenced by any container. - -## Examples - -Example output: - -```bash -$ docker image prune -a - -WARNING! This will remove all images without at least one container associated to them. -Are you sure you want to continue? 
[y/N] y -Deleted Images: -untagged: alpine:latest -untagged: alpine@sha256:3dcdb92d7432d56604d4545cbd324b14e647b313626d99b889d0626de158f73a -deleted: sha256:4e38e38c8ce0b8d9041a9c4fefe786631d1416225e13b0bfe8cfa2321aec4bba -deleted: sha256:4fe15f8d0ae69e169824f25f1d4da3015a48feeeeebb265cd2e328e15c6a869f -untagged: alpine:3.3 -untagged: alpine@sha256:4fa633f4feff6a8f02acfc7424efd5cb3e76686ed3218abf4ca0fa4a2a358423 -untagged: my-jq:latest -deleted: sha256:ae67841be6d008a374eff7c2a974cde3934ffe9536a7dc7ce589585eddd83aff -deleted: sha256:34f6f1261650bc341eb122313372adc4512b4fceddc2a7ecbb84f0958ce5ad65 -deleted: sha256:cf4194e8d8db1cb2d117df33f2c75c0369c3a26d96725efb978cc69e046b87e7 -untagged: my-curl:latest -deleted: sha256:b2789dd875bf427de7f9f6ae001940073b3201409b14aba7e5db71f408b8569e -deleted: sha256:96daac0cb203226438989926fc34dd024f365a9a8616b93e168d303cfe4cb5e9 -deleted: sha256:5cbd97a14241c9cd83250d6b6fc0649833c4a3e84099b968dd4ba403e609945e -deleted: sha256:a0971c4015c1e898c60bf95781c6730a05b5d8a2ae6827f53837e6c9d38efdec -deleted: sha256:d8359ca3b681cc5396a4e790088441673ed3ce90ebc04de388bfcd31a0716b06 -deleted: sha256:83fc9ba8fb70e1da31dfcc3c88d093831dbd4be38b34af998df37e8ac538260c -deleted: sha256:ae7041a4cc625a9c8e6955452f7afe602b401f662671cea3613f08f3d9343b35 -deleted: sha256:35e0f43a37755b832f0bbea91a2360b025ee351d7309dae0d9737bc96b6d0809 -deleted: sha256:0af941dd29f00e4510195dd00b19671bc591e29d1495630e7e0f7c44c1e6a8c0 -deleted: sha256:9fc896fc2013da84f84e45b3096053eb084417b42e6b35ea0cce5a3529705eac -deleted: sha256:47cf20d8c26c46fff71be614d9f54997edacfe8d46d51769706e5aba94b16f2b -deleted: sha256:2c675ee9ed53425e31a13e3390bf3f539bf8637000e4bcfbb85ee03ef4d910a1 - -Total reclaimed space: 16.43 MB -``` - -### Filtering - -The filtering flag (`-f` or `--filter`) format is of "key=value". 
If there is more -than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) - -The currently supported filters are: - -* until (``) - only remove images created before given timestamp - -The `until` filter can be Unix timestamps, date formatted -timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed -relative to the daemon machine’s time. Supported formats for date -formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, -`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local -timezone on the daemon will be used if you do not provide either a `Z` or a -`+-00:00` timezone offset at the end of the timestamp. When providing Unix -timestamps enter seconds[.nanoseconds], where seconds is the number of seconds -that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap -seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a -fraction of a second no more than nine digits long. 
- -The following removes images created before `2017-01-04T00:00:00`: - -```bash -$ docker images --format 'table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedAt}}\t{{.Size}}' -REPOSITORY TAG IMAGE ID CREATED AT SIZE -foo latest 2f287ac753da 2017-01-04 13:42:23 -0800 PST 3.98 MB -alpine latest 88e169ea8f46 2016-12-27 10:17:25 -0800 PST 3.98 MB -busybox latest e02e811dd08f 2016-10-07 14:03:58 -0700 PDT 1.09 MB - -$ docker image prune -a --force --filter "until=2017-01-04T00:00:00" - -Deleted Images: -untagged: alpine:latest -untagged: alpine@sha256:dfbd4a3a8ebca874ebd2474f044a0b33600d4523d03b0df76e5c5986cb02d7e8 -untagged: busybox:latest -untagged: busybox@sha256:29f5d56d12684887bdfa50dcd29fc31eea4aaf4ad3bec43daf19026a7ce69912 -deleted: sha256:e02e811dd08fd49e7f6032625495118e63f597eb150403d02e3238af1df240ba -deleted: sha256:e88b3f82283bc59d5e0df427c824e9f95557e661fcb0ea15fb0fb6f97760f9d9 - -Total reclaimed space: 1.093 MB - -$ docker images --format 'table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedAt}}\t{{.Size}}' - -REPOSITORY TAG IMAGE ID CREATED AT SIZE -foo latest 2f287ac753da 2017-01-04 13:42:23 -0800 PST 3.98 MB -``` - -The following removes images created more than 10 days (`240h`) ago: - -```bash -$ docker images - -REPOSITORY TAG IMAGE ID CREATED SIZE -foo latest 2f287ac753da 14 seconds ago 3.98 MB -alpine latest 88e169ea8f46 8 days ago 3.98 MB -debian jessie 7b0a06c805e8 2 months ago 123 MB -busybox latest e02e811dd08f 2 months ago 1.09 MB -golang 1.7.0 138c2e655421 4 months ago 670 MB - -$ docker image prune -a --force --filter "until=240h" - -Deleted Images: -untagged: golang:1.7.0 -untagged: golang@sha256:6765038c2b8f407fd6e3ecea043b44580c229ccfa2a13f6d85866cf2b4a9628e -deleted: sha256:138c2e6554219de65614d88c15521bfb2da674cbb0bf840de161f89ff4264b96 -deleted: sha256:ec353c2e1a673f456c4b78906d0d77f9d9456cfb5229b78c6a960bfb7496b76a -deleted: sha256:fe22765feaf3907526b4921c73ea6643ff9e334497c9b7e177972cf22f68ee93 -deleted: 
sha256:ff845959c80148421a5c3ae11cc0e6c115f950c89bc949646be55ed18d6a2912 -deleted: sha256:a4320831346648c03db64149eafc83092e2b34ab50ca6e8c13112388f25899a7 -deleted: sha256:4c76020202ee1d9709e703b7c6de367b325139e74eebd6b55b30a63c196abaf3 -deleted: sha256:d7afd92fb07236c8a2045715a86b7d5f0066cef025018cd3ca9a45498c51d1d6 -deleted: sha256:9e63c5bce4585dd7038d830a1f1f4e44cb1a1515b00e620ac718e934b484c938 -untagged: debian:jessie -untagged: debian@sha256:c1af755d300d0c65bb1194d24bce561d70c98a54fb5ce5b1693beb4f7988272f -deleted: sha256:7b0a06c805e8f23807fb8856621c60851727e85c7bcb751012c813f122734c8d -deleted: sha256:f96222d75c5563900bc4dd852179b720a0885de8f7a0619ba0ac76e92542bbc8 - -Total reclaimed space: 792.6 MB - -$ docker images - -REPOSITORY TAG IMAGE ID CREATED SIZE -foo latest 2f287ac753da About a minute ago 3.98 MB -alpine latest 88e169ea8f46 8 days ago 3.98 MB -busybox latest e02e811dd08f 2 months ago 1.09 MB -``` - -## Related commands - -* [system df](system_df.md) -* [container prune](container_prune.md) -* [volume prune](volume_prune.md) -* [network prune](network_prune.md) -* [system prune](system_prune.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/images.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/images.md deleted file mode 100644 index 9f7f555ed..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/images.md +++ /dev/null @@ -1,342 +0,0 @@ ---- -title: "images" -description: "The images command description and usage" -keywords: "list, docker, images" ---- - - - -# images - -```markdown -Usage: docker images [OPTIONS] [REPOSITORY[:TAG]] - -List images - -Options: - -a, --all Show all images (default hides intermediate images) - --digests Show digests - -f, --filter value Filter output based on conditions provided (default []) - - dangling=(true|false) - - label= or label== - - before=([:tag]||) - - since=([:tag]||) - - reference=(pattern of an image reference) - --format string 
Pretty-print images using a Go template - --help Print usage - --no-trunc Don't truncate output - -q, --quiet Only show numeric IDs -``` - -## Description - -The default `docker images` will show all top level -images, their repository and tags, and their size. - -Docker images have intermediate layers that increase reusability, -decrease disk usage, and speed up `docker build` by -allowing each step to be cached. These intermediate layers are not shown -by default. - -The `SIZE` is the cumulative space taken up by the image and all -its parent images. This is also the disk space used by the contents of the -Tar file created when you `docker save` an image. - -An image will be listed more than once if it has multiple repository names -or tags. This single image (identifiable by its matching `IMAGE ID`) -uses up the `SIZE` listed only once. - -## Examples - -### List the most recently created images - -```bash -$ docker images - -REPOSITORY TAG IMAGE ID CREATED SIZE - 77af4d6b9913 19 hours ago 1.089 GB -committ latest b6fa739cedf5 19 hours ago 1.089 GB - 78a85c484f71 19 hours ago 1.089 GB -docker latest 30557a29d5ab 20 hours ago 1.089 GB - 5ed6274db6ce 24 hours ago 1.089 GB -postgres 9 746b819f315e 4 days ago 213.4 MB -postgres 9.3 746b819f315e 4 days ago 213.4 MB -postgres 9.3.5 746b819f315e 4 days ago 213.4 MB -postgres latest 746b819f315e 4 days ago 213.4 MB -``` - -### List images by name and tag - -The `docker images` command takes an optional `[REPOSITORY[:TAG]]` argument -that restricts the list to images that match the argument. If you specify -`REPOSITORY`but no `TAG`, the `docker images` command lists all images in the -given repository. 
- -For example, to list all images in the "java" repository, run this command : - -```bash -$ docker images java - -REPOSITORY TAG IMAGE ID CREATED SIZE -java 8 308e519aac60 6 days ago 824.5 MB -java 7 493d82594c15 3 months ago 656.3 MB -java latest 2711b1d6f3aa 5 months ago 603.9 MB -``` - -The `[REPOSITORY[:TAG]]` value must be an "exact match". This means that, for example, -`docker images jav` does not match the image `java`. - -If both `REPOSITORY` and `TAG` are provided, only images matching that -repository and tag are listed. To find all local images in the "java" -repository with tag "8" you can use: - -```bash -$ docker images java:8 - -REPOSITORY TAG IMAGE ID CREATED SIZE -java 8 308e519aac60 6 days ago 824.5 MB -``` - -If nothing matches `REPOSITORY[:TAG]`, the list is empty. - -```bash -$ docker images java:0 - -REPOSITORY TAG IMAGE ID CREATED SIZE -``` - -### List the full length image IDs - -```bash -$ docker images --no-trunc - -REPOSITORY TAG IMAGE ID CREATED SIZE - sha256:77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182 19 hours ago 1.089 GB -committest latest sha256:b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f 19 hours ago 1.089 GB - sha256:78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921 19 hours ago 1.089 GB -docker latest sha256:30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4 20 hours ago 1.089 GB - sha256:0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5 20 hours ago 1.089 GB - sha256:18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b 22 hours ago 1.082 GB - sha256:f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a 23 hours ago 1.089 GB -tryout latest sha256:2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074 23 hours ago 131.5 MB - sha256:5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df 24 hours ago 1.089 GB -``` - -### List image digests - -Images that use the v2 or later format have a 
content-addressable identifier -called a `digest`. As long as the input used to generate the image is -unchanged, the digest value is predictable. To list image digest values, use -the `--digests` flag: - -```bash -$ docker images --digests -REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE -localhost:5000/test/busybox sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 4986bf8c1536 9 weeks ago 2.43 MB -``` - -When pushing or pulling to a 2.0 registry, the `push` or `pull` command -output includes the image digest. You can `pull` using a digest value. You can -also reference by digest in `create`, `run`, and `rmi` commands, as well as the -`FROM` image reference in a Dockerfile. - -### Filtering - -The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more -than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) - -The currently supported filters are: - -* dangling (boolean - true or false) -* label (`label=` or `label==`) -* before (`[:]`, `` or ``) - filter images created before given id or references -* since (`[:]`, `` or ``) - filter images created since given id or references -* reference (pattern of an image reference) - filter images whose reference matches the specified pattern - -#### Show untagged images (dangling) - -```bash -$ docker images --filter "dangling=true" - -REPOSITORY TAG IMAGE ID CREATED SIZE - 8abc22fbb042 4 weeks ago 0 B - 48e5f45168b9 4 weeks ago 2.489 MB - bf747efa0e2f 4 weeks ago 0 B - 980fe10e5736 12 weeks ago 101.4 MB - dea752e4e117 12 weeks ago 101.4 MB - 511136ea3c5a 8 months ago 0 B -``` - -This will display untagged images that are the leaves of the images tree (not -intermediary layers). These images occur when a new build of an image takes the -`repo:tag` away from the image ID, leaving it as `:` or untagged. -A warning will be issued if trying to remove an image when a container is presently -using it. By having this flag it allows for batch cleanup. 
- -You can use this in conjunction with `docker rmi ...`: - -```bash -$ docker rmi $(docker images -f "dangling=true" -q) - -8abc22fbb042 -48e5f45168b9 -bf747efa0e2f -980fe10e5736 -dea752e4e117 -511136ea3c5a -``` - -> **Note**: Docker warns you if any containers exist that are using these -> untagged images. - - -#### Show images with a given label - -The `label` filter matches images based on the presence of a `label` alone or a `label` and a -value. - -The following filter matches images with the `com.example.version` label regardless of its value. - -```bash -$ docker images --filter "label=com.example.version" - -REPOSITORY TAG IMAGE ID CREATED SIZE -match-me-1 latest eeae25ada2aa About a minute ago 188.3 MB -match-me-2 latest dea752e4e117 About a minute ago 188.3 MB -``` - -The following filter matches images with the `com.example.version` label with the `1.0` value. - -```bash -$ docker images --filter "label=com.example.version=1.0" - -REPOSITORY TAG IMAGE ID CREATED SIZE -match-me latest 511136ea3c5a About a minute ago 188.3 MB -``` - -In this example, with the `0.1` value, it returns an empty set because no matches were found. - -```bash -$ docker images --filter "label=com.example.version=0.1" -REPOSITORY TAG IMAGE ID CREATED SIZE -``` - -#### Filter images by time - -The `before` filter shows only images created before the image with -given id or reference. 
For example, having these images: - -```bash -$ docker images - -REPOSITORY TAG IMAGE ID CREATED SIZE -image1 latest eeae25ada2aa 4 minutes ago 188.3 MB -image2 latest dea752e4e117 9 minutes ago 188.3 MB -image3 latest 511136ea3c5a 25 minutes ago 188.3 MB -``` - -Filtering with `before` would give: - -```bash -$ docker images --filter "before=image1" - -REPOSITORY TAG IMAGE ID CREATED SIZE -image2 latest dea752e4e117 9 minutes ago 188.3 MB -image3 latest 511136ea3c5a 25 minutes ago 188.3 MB -``` - -Filtering with `since` would give: - -```bash -$ docker images --filter "since=image3" -REPOSITORY TAG IMAGE ID CREATED SIZE -image1 latest eeae25ada2aa 4 minutes ago 188.3 MB -image2 latest dea752e4e117 9 minutes ago 188.3 MB -``` - -#### Filter images by reference - -The `reference` filter shows only images whose reference matches -the specified pattern. - -```bash - $ docker images - - REPOSITORY TAG IMAGE ID CREATED SIZE - busybox latest e02e811dd08f 5 weeks ago 1.09 MB - busybox uclibc e02e811dd08f 5 weeks ago 1.09 MB - busybox musl 733eb3059dce 5 weeks ago 1.21 MB - busybox glibc 21c16b6787c6 5 weeks ago 4.19 MB -``` - -Filtering with `reference` would give: - -```bash - $ docker images --filter=reference='busy*:*libc' - REPOSITORY TAG IMAGE ID CREATED SIZE - busybox uclibc e02e811dd08f 5 weeks ago 1.09 MB - busybox glibc 21c16b6787c6 5 weeks ago 4.19 MB -``` - -### Format the output - -The formatting option (`--format`) will pretty print container output -using a Go template. 
- -Valid placeholders for the Go template are listed below: - -| Placeholder | Description| -| ---- | ---- | -| `.ID` | Image ID | -| `.Repository` | Image repository | -| `.Tag` | Image tag | -| `.Digest` | Image digest | -| `.CreatedSince` | Elapsed time since the image was created | -| `.CreatedAt` | Time when the image was created | -| `.Size` | Image disk size | - -When using the `--format` option, the `image` command will either -output the data exactly as the template declares or, when using the -`table` directive, will include column headers as well. - -The following example uses a template without headers and outputs the -`ID` and `Repository` entries separated by a colon for all images: - -```bash -$ docker images --format "{{.ID}}: {{.Repository}}" - -77af4d6b9913: -b6fa739cedf5: committ -78a85c484f71: -30557a29d5ab: docker -5ed6274db6ce: -746b819f315e: postgres -746b819f315e: postgres -746b819f315e: postgres -746b819f315e: postgres -``` - -To list all images with their repository and tag in a table format you -can use: - -```bash -$ docker images --format "table {{.ID}}\t{{.Repository}}\t{{.Tag}}" - -IMAGE ID REPOSITORY TAG -77af4d6b9913 -b6fa739cedf5 committ latest -78a85c484f71 -30557a29d5ab docker latest -5ed6274db6ce -746b819f315e postgres 9 -746b819f315e postgres 9.3 -746b819f315e postgres 9.3.5 -746b819f315e postgres latest -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/import.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/import.md deleted file mode 100644 index 57edf650c..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/import.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: "import" -description: "The import command description and usage" -keywords: "import, file, system, container" ---- - - - -# import - -```markdown -Usage: docker import [OPTIONS] file|URL|- [REPOSITORY[:TAG]] - -Import the contents from a tarball to create a filesystem image - -Options: - -c, 
--change value Apply Dockerfile instruction to the created image (default []) - --help Print usage - -m, --message string Set commit message for imported image -``` - -## Description - -You can specify a `URL` or `-` (dash) to take data directly from `STDIN`. The -`URL` can point to an archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, or .txz) -containing a filesystem or to an individual file on the Docker host. If you -specify an archive, Docker untars it in the container relative to the `/` -(root). If you specify an individual file, you must specify the full path within -the host. To import from a remote location, specify a `URI` that begins with the -`http://` or `https://` protocol. - -The `--change` option will apply `Dockerfile` instructions to the image -that is created. -Supported `Dockerfile` instructions: -`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` - -## Examples - -### Import from a remote location - -This will create a new untagged image. - -```bash -$ docker import http://example.com/exampleimage.tgz -``` - -### Import from a local file - -- Import to docker via pipe and `STDIN`. - - ```bash - $ cat exampleimage.tgz | docker import - exampleimagelocal:new - ``` - -- Import with a commit message. - - ```bash - $ cat exampleimage.tgz | docker import --message "New image imported from tarball" - exampleimagelocal:new - ``` - -- Import to docker from a local archive. - - ```bash - $ docker import /path/to/exampleimage.tgz - ``` - -### Import from a local directory - -```bash -$ sudo tar -c . | docker import - exampleimagedir -``` - -### Import from a local directory with new configurations - -```bash -$ sudo tar -c . | docker import --change "ENV DEBUG true" - exampleimagedir -``` - -Note the `sudo` in this example – you must preserve -the ownership of the files (especially root ownership) during the -archiving with tar. If you are not root (or the sudo command) when you -tar, then the ownerships might not get preserved. 
diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/index.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/index.md deleted file mode 100644 index f38fc52a8..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/index.md +++ /dev/null @@ -1,184 +0,0 @@ ---- -title: "Docker commands" -description: "Docker's CLI command description and usage" -keywords: "Docker, Docker documentation, CLI, command line" -identifier: "smn_cli_guide" ---- - - - -# The Docker commands - -This section contains reference information on using Docker's command line -client. Each command has a reference page along with samples. If you are -unfamiliar with the command line, you should start by reading about how to [Use -the Docker command line](cli.md). - -You start the Docker daemon with the command line. How you start the daemon -affects your Docker containers. For that reason you should also make sure to -read the [`dockerd`](dockerd.md) reference page. 
- -## Commands by object - -### Docker management commands - -| Command | Description | -|:--------|:-------------------------------------------------------------------| -| [dockerd](dockerd.md) | Launch the Docker daemon | -| [info](info.md) | Display system-wide information | -| [inspect](inspect.md)| Return low-level information on a container or image | -| [version](version.md) | Show the Docker version information | - - -### Image commands - -| Command | Description | -|:--------|:-------------------------------------------------------------------| -| [build](build.md) | Build an image from a Dockerfile | -| [commit](commit.md) | Create a new image from a container's changes | -| [history](history.md) | Show the history of an image | -| [images](images.md) | List images | -| [import](import.md) | Import the contents from a tarball to create a filesystem image | -| [load](load.md) | Load an image from a tar archive or STDIN | -| [image prune](image_prune.md) | Remove unused images | -| [rmi](rmi.md) | Remove one or more images | -| [save](save.md) | Save images to a tar archive | -| [tag](tag.md) | Tag an image into a repository | - -### Container commands - -| Command | Description | -|:--------|:-------------------------------------------------------------------| -| [attach](attach.md) | Attach to a running container | -| [container prune](container_prune.md) | Remove all stopped containers | -| [cp](cp.md) | Copy files/folders from a container to a HOSTDIR or to STDOUT | -| [create](create.md) | Create a new container | -| [diff](diff.md) | Inspect changes on a container's filesystem | -| [events](events.md) | Get real time events from the server | -| [exec](exec.md) | Run a command in a running container | -| [export](export.md) | Export a container's filesystem as a tar archive | -| [kill](kill.md) | Kill a running container | -| [logs](logs.md) | Fetch the logs of a container | -| [pause](pause.md) | Pause all processes within a container | -| 
[port](port.md) | List port mappings or a specific mapping for the container | -| [ps](ps.md) | List containers | -| [rename](rename.md) | Rename a container | -| [restart](restart.md) | Restart a running container | -| [rm](rm.md) | Remove one or more containers | -| [run](run.md) | Run a command in a new container | -| [start](start.md) | Start one or more stopped containers | -| [stats](stats.md) | Display a live stream of container(s) resource usage statistics | -| [stop](stop.md) | Stop a running container | -| [top](top.md) | Display the running processes of a container | -| [unpause](unpause.md) | Unpause all processes within a container | -| [update](update.md) | Update configuration of one or more containers | -| [wait](wait.md) | Block until a container stops, then print its exit code | - -### Hub and registry commands - -| Command | Description | -|:--------|:-------------------------------------------------------------------| -| [login](login.md) | Register or log in to a Docker registry | -| [logout](logout.md) | Log out from a Docker registry | -| [pull](pull.md) | Pull an image or a repository from a Docker registry | -| [push](push.md) | Push an image or a repository to a Docker registry | -| [search](search.md) | Search the Docker Hub for images | - -### Network and connectivity commands - -| Command | Description | -|:--------|:-------------------------------------------------------------------| -| [network connect](network_connect.md) | Connect a container to a network | -| [network create](network_create.md) | Create a new network | -| [network disconnect](network_disconnect.md) | Disconnect a container from a network | -| [network inspect](network_inspect.md) | Display information about a network | -| [network ls](network_ls.md) | Lists all the networks the Engine `daemon` knows about | -| [network prune](network_prune.md) | Remove all unused networks | -| [network rm](network_rm.md) | Removes one or more networks | - -### Shared data volume 
commands - -| Command | Description | -|:--------|:-------------------------------------------------------------------| -| [volume create](volume_create.md) | Creates a new volume where containers can consume and store data | -| [volume inspect](volume_inspect.md) | Display information about a volume | -| [volume ls](volume_ls.md) | Lists all the volumes Docker knows about | -| [volume prune](volume_prune.md) | Remove all unused volumes | -| [volume rm](volume_rm.md) | Remove one or more volumes | - -### Swarm node commands - -| Command | Description | -|:--------|:-------------------------------------------------------------------| -| [node demote](node_demote.md) | Demotes an existing manager so that it is no longer a manager | -| [node inspect](node_inspect.md) | Inspect a node in the swarm | -| [node ls](node_ls.md) | List nodes in the swarm | -| [node promote](node_promote.md) | Promote a node that is pending a promotion to manager | -| [node ps](node_ps.md) | List tasks running on one or more nodes | -| [node rm](node_rm.md) | Remove one or more nodes from the swarm | -| [node update](node_update.md) | Update attributes for a node | - -### Swarm management commands - -| Command | Description | -|:--------|:-------------------------------------------------------------------| -| [swarm init](swarm_init.md) | Initialize a swarm | -| [swarm join](swarm_join.md) | Join a swarm as a manager node or worker node | -| [swarm leave](swarm_leave.md) | Remove the current node from the swarm | -| [swarm join-token](swarm_join_token.md) | Display or rotate join tokens | -| [swarm unlock](swarm_unlock.md) | Unlock swarm | -| [swarm unlock-key](swarm_unlock_key.md) | Manage the unlock key | -| [swarm update](swarm_update.md) | Update attributes of a swarm | - -### Swarm service commands - -| Command | Description | -|:--------|:-------------------------------------------------------------------| -| [service create](service_create.md) | Create a new service | -| [service 
inspect](service_inspect.md) | Inspect a service | -| [service logs](service_logs.md) | Fetch the logs of a service | -| [service ls](service_ls.md) | List services in the swarm | -| [service ps](service_ps.md) | List the tasks of a service | -| [service rm](service_rm.md) | Remove a service from the swarm | -| [service scale](service_scale.md) | Set the number of replicas for the desired state of the service | -| [service update](service_update.md) | Update the attributes of a service | - -### Swarm secret commands - -| Command | Description | -|:--------|:-------------------------------------------------------------------| -| [secret create](secret_create.md) | Create a secret from a file or STDIN as content | -| [secret inspect](secret_inspect.md) | Inspect the specified secret | -| [secret ls](secret_ls.md) | List secrets in the swarm | -| [secret rm](secret_rm.md) | Remove the specified secrets from the swarm | - -### Swarm stack commands - -| Command | Description | -|:--------|:-------------------------------------------------------------------| -| [stack deploy](stack_deploy.md) | Deploy a new stack or update an existing stack | -| [stack ls](stack_ls.md) | List stacks in the swarm | -| [stack ps](stack_ps.md) | List the tasks in the stack | -| [stack rm](stack_rm.md) | Remove the stack from the swarm | -| [stack services](stack_services.md) | List the services in the stack | - -### Plugin commands - -| Command | Description | -|:--------|:-------------------------------------------------------------------| -| [plugin create](plugin_create.md) | Create a plugin from a rootfs and configuration | -| [plugin disable](plugin_disable.md) | Disable a plugin | -| [plugin enable](plugin_enable.md) | Enable a plugin | -| [plugin inspect](plugin_inspect.md) | Display detailed information on a plugin | -| [plugin install](plugin_install.md) | Install a plugin | -| [plugin ls](plugin_ls.md) | List plugins | -| [plugin push](plugin_push.md) | Push a plugin to a 
registry | -| [plugin rm](plugin_rm.md) | Remove a plugin | -| [plugin set](plugin_set.md) | Change settings for a plugin | diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/info.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/info.md deleted file mode 100644 index 798c3f04d..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/info.md +++ /dev/null @@ -1,244 +0,0 @@ ---- -title: "info" -description: "The info command description and usage" -keywords: "display, docker, information" ---- - - - -# info - -```markdown -Usage: docker info [OPTIONS] - -Display system-wide information - -Options: - -f, --format string Format the output using the given Go template - --help Print usage -``` - -## Description - -This command displays system wide information regarding the Docker installation. -Information displayed includes the kernel version, number of containers and images. -The number of images shown is the number of unique images. The same image tagged -under different names is counted only once. - -If a format is specified, the given template will be executed instead of the -default format. Go's [text/template](http://golang.org/pkg/text/template/) package -describes all the details of the format. - -Depending on the storage driver in use, additional information can be shown, such -as pool name, data file, metadata file, data space used, total data space, metadata -space used, and total metadata space. - -The data file is where the images are stored and the metadata file is where the -meta data regarding those images are stored. When run for the first time Docker -allocates a certain amount of data space and meta data space from the space -available on the volume where `/var/lib/docker` is mounted. - -## Examples - -### Show output - -The example below shows the output for a daemon running on Red Hat Enterprise Linux, -using the `devicemapper` storage driver. 
As can be seen in the output, additional -information about the `devicemapper` storage driver is shown: - -```bash -$ docker info - -Containers: 14 - Running: 3 - Paused: 1 - Stopped: 10 -Images: 52 -Server Version: 1.10.3 -Storage Driver: devicemapper - Pool Name: docker-202:2-25583803-pool - Pool Blocksize: 65.54 kB - Base Device Size: 10.74 GB - Backing Filesystem: xfs - Data file: /dev/loop0 - Metadata file: /dev/loop1 - Data Space Used: 1.68 GB - Data Space Total: 107.4 GB - Data Space Available: 7.548 GB - Metadata Space Used: 2.322 MB - Metadata Space Total: 2.147 GB - Metadata Space Available: 2.145 GB - Udev Sync Supported: true - Deferred Removal Enabled: false - Deferred Deletion Enabled: false - Deferred Deleted Device Count: 0 - Data loop file: /var/lib/docker/devicemapper/devicemapper/data - Metadata loop file: /var/lib/docker/devicemapper/devicemapper/metadata - Library Version: 1.02.107-RHEL7 (2015-12-01) -Execution Driver: native-0.2 -Logging Driver: json-file -Plugins: - Volume: local - Network: null host bridge -Kernel Version: 3.10.0-327.el7.x86_64 -Operating System: Red Hat Enterprise Linux Server 7.2 (Maipo) -OSType: linux -Architecture: x86_64 -CPUs: 1 -Total Memory: 991.7 MiB -Name: ip-172-30-0-91.ec2.internal -ID: I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S -Docker Root Dir: /var/lib/docker -Debug mode (client): false -Debug mode (server): false -Username: gordontheturtle -Registry: https://index.docker.io/v1/ -Insecure registries: - myinsecurehost:5000 - 127.0.0.0/8 -``` - -### Show debugging output - -Here is a sample output for a daemon running on Ubuntu, using the overlay2 -storage driver and a node that is part of a 2-node swarm: - -```bash -$ docker -D info - -Containers: 14 - Running: 3 - Paused: 1 - Stopped: 10 -Images: 52 -Server Version: 1.13.0 -Storage Driver: overlay2 - Backing Filesystem: extfs - Supports d_type: true - Native Overlay Diff: false -Logging Driver: json-file -Cgroup Driver: cgroupfs -Plugins: - 
Volume: local - Network: bridge host macvlan null overlay -Swarm: active - NodeID: rdjq45w1op418waxlairloqbm - Is Manager: true - ClusterID: te8kdyw33n36fqiz74bfjeixd - Managers: 1 - Nodes: 2 - Orchestration: - Task History Retention Limit: 5 - Raft: - Snapshot Interval: 10000 - Number of Old Snapshots to Retain: 0 - Heartbeat Tick: 1 - Election Tick: 3 - Dispatcher: - Heartbeat Period: 5 seconds - CA Configuration: - Expiry Duration: 3 months - Node Address: 172.16.66.128 172.16.66.129 - Manager Addresses: - 172.16.66.128:2477 -Runtimes: runc -Default Runtime: runc -Init Binary: docker-init -containerd version: 8517738ba4b82aff5662c97ca4627e7e4d03b531 -runc version: ac031b5bf1cc92239461125f4c1ffb760522bbf2 -init version: N/A (expected: v0.13.0) -Security Options: - apparmor - seccomp - Profile: default -Kernel Version: 4.4.0-31-generic -Operating System: Ubuntu 16.04.1 LTS -OSType: linux -Architecture: x86_64 -CPUs: 2 -Total Memory: 1.937 GiB -Name: ubuntu -ID: H52R:7ZR6:EIIA:76JG:ORIY:BVKF:GSFU:HNPG:B5MK:APSC:SZ3Q:N326 -Docker Root Dir: /var/lib/docker -Debug Mode (client): true -Debug Mode (server): true - File Descriptors: 30 - Goroutines: 123 - System Time: 2016-11-12T17:24:37.955404361-08:00 - EventsListeners: 0 -Http Proxy: http://test:test@proxy.example.com:8080 -Https Proxy: https://test:test@proxy.example.com:8080 -No Proxy: localhost,127.0.0.1,docker-registry.somecorporation.com -Registry: https://index.docker.io/v1/ -WARNING: No swap limit support -Labels: - storage=ssd - staging=true -Experimental: false -Insecure Registries: - 127.0.0.0/8 -Registry Mirrors: - http://192.168.1.2/ - http://registry-mirror.example.com:5000/ -Live Restore Enabled: false -``` - -The global `-D` option causes all `docker` commands to output debug information. 
- -### Format the output - -You can also specify the output format: - -```bash -$ docker info --format '{{json .}}' - -{"ID":"I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S","Containers":14, ...} -``` - -### Run `docker info` on Windows - -Here is a sample output for a daemon running on Windows Server 2016: - -```none -E:\docker>docker info - -Containers: 1 - Running: 0 - Paused: 0 - Stopped: 1 -Images: 17 -Server Version: 1.13.0 -Storage Driver: windowsfilter - Windows: -Logging Driver: json-file -Plugins: - Volume: local - Network: nat null overlay -Swarm: inactive -Default Isolation: process -Kernel Version: 10.0 14393 (14393.206.amd64fre.rs1_release.160912-1937) -Operating System: Windows Server 2016 Datacenter -OSType: windows -Architecture: x86_64 -CPUs: 8 -Total Memory: 3.999 GiB -Name: WIN-V0V70C0LU5P -ID: NYMS:B5VK:UMSL:FVDZ:EWB5:FKVK:LPFL:FJMQ:H6FT:BZJ6:L2TD:XH62 -Docker Root Dir: C:\control -Debug Mode (client): false -Debug Mode (server): false -Registry: https://index.docker.io/v1/ -Insecure Registries: - 127.0.0.0/8 -Registry Mirrors: - http://192.168.1.2/ - http://registry-mirror.example.com:5000/ -Live Restore Enabled: false -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/inspect.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/inspect.md deleted file mode 100644 index 9ce4f5f51..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/inspect.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -title: "inspect" -description: "The inspect command description and usage" -keywords: "inspect, container, json" ---- - - - -# inspect - -```markdown -Usage: docker inspect [OPTIONS] NAME|ID [NAME|ID...] - -Return low-level information on Docker object(s) (e.g. 
container, image, volume, -network, node, service, or task) identified by name or ID - -Options: - -f, --format Format the output using the given Go template - --help Print usage - -s, --size Display total file sizes if the type is container - --type Return JSON for specified type -``` - -## Description - -Docker inspect provides detailed information on constructs controlled by Docker. - -By default, `docker inspect` will render results in a JSON array. - -## Request a custom response format (--format) - -If a format is specified, the given template will be executed for each result. - -Go's [text/template](http://golang.org/pkg/text/template/) package -describes all the details of the format. - -## Specify target type (--type) - -`--type container|image|node|network|secret|service|volume|task|plugin` - -The `docker inspect` command matches any type of object by either ID or name. -In some cases multiple types of objects (for example, a container and a volume) -exist with the same name, making the result ambiguous. - -To restrict `docker inspect` to a specific type of object, use the `--type` -option. - -The following example inspects a _volume_ named "myvolume" - -```bash -$ docker inspect --type=volume myvolume -``` - -## Examples - -### Get an instance's IP address - -For the most part, you can pick out any field from the JSON in a fairly -straightforward manner. 
- -```bash -$ docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $INSTANCE_ID -``` - -### Get an instance's MAC address - -```bash -$ docker inspect --format='{{range .NetworkSettings.Networks}}{{.MacAddress}}{{end}}' $INSTANCE_ID -``` - -### Get an instance's log path - -```bash -$ docker inspect --format='{{.LogPath}}' $INSTANCE_ID -``` - -### Get an instance's image name - -```bash -$ docker inspect --format='{{.Config.Image}}' $INSTANCE_ID -``` - -### List all port bindings - -You can loop over arrays and maps in the results to produce simple text -output: - -```bash -$ docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID -``` - -### Find a specific port mapping - -The `.Field` syntax doesn't work when the field name begins with a -number, but the template language's `index` function does. The -`.NetworkSettings.Ports` section contains a map of the internal port -mappings to a list of external address/port objects. To grab just the -numeric public port, you use `index` to find the specific port map, and -then `index` 0 contains the first object inside of that. Then we ask for -the `HostPort` field to get the public address. - -```bash -$ docker inspect --format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID -``` - -### Get a subsection in JSON format - -If you request a field which is itself a structure containing other -fields, by default you get a Go-style dump of the inner values. -Docker adds a template function, `json`, which can be applied to get -results in JSON format. 
- -```bash -$ docker inspect --format='{{json .Config}}' $INSTANCE_ID -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/kill.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/kill.md deleted file mode 100644 index 97b15add9..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/kill.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: "kill" -description: "The kill command description and usage" -keywords: "container, kill, signal" ---- - - - -# kill - -```markdown -Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...] - -Kill one or more running containers - -Options: - --help Print usage - -s, --signal string Signal to send to the container (default "KILL") -``` - -## Description - -The main process inside the container will be sent `SIGKILL`, or any -signal specified with option `--signal`. - -> **Note**: `ENTRYPOINT` and `CMD` in the *shell* form run as a subcommand of -> `/bin/sh -c`, which does not pass signals. This means that the executable is -> not the container’s PID 1 and does not receive Unix signals. diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/load.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/load.md deleted file mode 100644 index 3ce6c19e2..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/load.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: "load" -description: "The load command description and usage" -keywords: "stdin, tarred, repository" ---- - - - -# load - -```markdown -Usage: docker load [OPTIONS] - -Load an image from a tar archive or STDIN - -Options: - --help Print usage - -i, --input string Read from tar archive file, instead of STDIN. - The tarball may be compressed with gzip, bzip, or xz - -q, --quiet Suppress the load output but still outputs the imported images -``` -## Description - -`docker load` loads a tarred repository from a file or the standard input stream. 
-It restores both images and tags. - -## Examples - -```bash -$ docker images - -REPOSITORY TAG IMAGE ID CREATED SIZE - -$ docker load < busybox.tar.gz - -Loaded image: busybox:latest -$ docker images -REPOSITORY TAG IMAGE ID CREATED SIZE -busybox latest 769b9341d937 7 weeks ago 2.489 MB - -$ docker load --input fedora.tar - -Loaded image: fedora:rawhide - -Loaded image: fedora:20 - -$ docker images - -REPOSITORY TAG IMAGE ID CREATED SIZE -busybox latest 769b9341d937 7 weeks ago 2.489 MB -fedora rawhide 0d20aec6529d 7 weeks ago 387 MB -fedora 20 58394af37342 7 weeks ago 385.5 MB -fedora heisenbug 58394af37342 7 weeks ago 385.5 MB -fedora latest 58394af37342 7 weeks ago 385.5 MB -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/login.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/login.md deleted file mode 100644 index 0b8e69728..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/login.md +++ /dev/null @@ -1,158 +0,0 @@ ---- -title: "login" -description: "The login command description and usage" -keywords: "registry, login, image" ---- - - - -# login - -```markdown -Usage: docker login [OPTIONS] [SERVER] - -Log in to a Docker registry. -If no server is specified, the default is defined by the daemon. - -Options: - --help Print usage - -p, --password string Password - -u, --username string Username -``` - -## Description - -Login to a registry. - -### Login to a self-hosted registry - -If you want to login to a self-hosted registry you can specify this by -adding the server name. - -```bash -$ docker login localhost:8080 -``` - -### Privileged user requirement - -`docker login` requires user to use `sudo` or be `root`, except when: - -1. connecting to a remote daemon, such as a `docker-machine` provisioned `docker engine`. -2. user is added to the `docker` group. This will impact the security of your system; the `docker` group is `root` equivalent. 
See [Docker Daemon Attack Surface](https://docs.docker.com/security/security/#docker-daemon-attack-surface) for details. - -You can log into any public or private repository for which you have -credentials. When you log in, the command stores encoded credentials in -`$HOME/.docker/config.json` on Linux or `%USERPROFILE%/.docker/config.json` on Windows. - -### Credentials store - -The Docker Engine can keep user credentials in an external credentials store, -such as the native keychain of the operating system. Using an external store -is more secure than storing credentials in the Docker configuration file. - -To use a credentials store, you need an external helper program to interact -with a specific keychain or external store. Docker requires the helper -program to be in the client's host `$PATH`. - -This is the list of currently available credentials helpers and where -you can download them from: - -- D-Bus Secret Service: https://github.com/docker/docker-credential-helpers/releases -- Apple macOS keychain: https://github.com/docker/docker-credential-helpers/releases -- Microsoft Windows Credential Manager: https://github.com/docker/docker-credential-helpers/releases - -You need to specify the credentials store in `$HOME/.docker/config.json` -to tell the docker engine to use it. The value of the config property should be -the suffix of the program to use (i.e. everything after `docker-credential-`). -For example, to use `docker-credential-osxkeychain`: - -```json -{ - "credsStore": "osxkeychain" -} -``` - -If you are currently logged in, run `docker logout` to remove -the credentials from the file and run `docker login` again. - -### Credential helper protocol - -Credential helpers can be any program or script that follows a very simple protocol. -This protocol is heavily inspired by Git, but it differs in the information shared. - -The helpers always use the first argument in the command to identify the action. 
-There are only three possible values for that argument: `store`, `get`, and `erase`. - -The `store` command takes a JSON payload from the standard input. That payload carries -the server address, to identify the credential, the user name, and either a password -or an identity token. - -```json -{ - "ServerURL": "https://index.docker.io/v1", - "Username": "david", - "Secret": "passw0rd1" -} -``` - -If the secret being stored is an identity token, the Username should be set to -`<token>`. - -The `store` command can write error messages to `STDOUT` that the docker engine -will show if there was an issue. - -The `get` command takes a string payload from the standard input. That payload carries -the server address that the docker engine needs credentials for. This is -an example of that payload: `https://index.docker.io/v1`. - -The `get` command writes a JSON payload to `STDOUT`. Docker reads the user name -and password from this payload: - -```json -{ - "Username": "david", - "Secret": "passw0rd1" -} -``` - -The `erase` command takes a string payload from `STDIN`. That payload carries -the server address that the docker engine wants to remove credentials for. This is -an example of that payload: `https://index.docker.io/v1`. - -The `erase` command can write error messages to `STDOUT` that the docker engine -will show if there was an issue. - -### Credential helpers - -Credential helpers are similar to the credential store above, but act as the -designated programs to handle credentials for *specific registries*. The default -credential store (`credsStore` or the config file itself) will not be used for -operations concerning credentials of the specified registries. - -### Logging out - -If you are currently logged in, run `docker logout` to remove -the credentials from the default store. - -Credential helpers are specified in a similar way to `credsStore`, but -allow for multiple helpers to be configured at a time. 
Keys specify the -registry domain, and values specify the suffix of the program to use -(i.e. everything after `docker-credential-`). -For example: - -```json -{ - "credHelpers": { - "registry.example.com": "registryhelper", - "awesomereg.example.org": "hip-star", - "unicorn.example.io": "vcbait" - } -} -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/logout.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/logout.md deleted file mode 100644 index 1e150eb84..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/logout.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: "logout" -description: "The logout command description and usage" -keywords: "logout, docker, registry" ---- - - - -# logout - -```markdown -Usage: docker logout [SERVER] - -Log out from a Docker registry. -If no server is specified, the default is defined by the daemon. - -Options: - --help Print usage -``` - -## Examples - -```bash -$ docker logout localhost:8080 -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/logs.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/logs.md deleted file mode 100644 index 75f25f765..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/logs.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: "logs" -description: "The logs command description and usage" -keywords: "logs, retrieve, docker" ---- - - - -# logs - -```markdown -Usage: docker logs [OPTIONS] CONTAINER - -Fetch the logs of a container - -Options: - --details Show extra details provided to logs - -f, --follow Follow log output - --help Print usage - --since string Show logs since timestamp (e.g. 2013-01-02T13:23:37) or relative (e.g. 42m for 42 minutes) - --tail string Number of lines to show from the end of the logs (default "all") - -t, --timestamps Show timestamps -``` - -## Description - -The `docker logs` command batch-retrieves logs present at the time of execution. 
- -> **Note**: this command is only functional for containers that are started with -> the `json-file` or `journald` logging driver. - -For more information about selecting and configuring logging drivers, refer to -[Configure logging drivers](https://docs.docker.com/engine/admin/logging/overview/). - -The `docker logs --follow` command will continue streaming the new output from -the container's `STDOUT` and `STDERR`. - -Passing a negative number or a non-integer to `--tail` is invalid and the -value is set to `all` in that case. - -The `docker logs --timestamps` command will add an [RFC3339Nano timestamp](https://golang.org/pkg/time/#pkg-constants) -, for example `2014-09-16T06:17:46.000000000Z`, to each -log entry. To ensure that the timestamps are aligned the -nano-second part of the timestamp will be padded with zero when necessary. - -The `docker logs --details` command will add on extra attributes, such as -environment variables and labels, provided to `--log-opt` when creating the -container. - -The `--since` option shows only the container logs generated after -a given date. You can specify the date as an RFC 3339 date, a UNIX -timestamp, or a Go duration string (e.g. `1m30s`, `3h`). Besides RFC3339 date -format you may also use RFC3339Nano, `2006-01-02T15:04:05`, -`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local -timezone on the client will be used if you do not provide either a `Z` or a -`+-00:00` timezone offset at the end of the timestamp. When providing Unix -timestamps enter seconds[.nanoseconds], where seconds is the number of seconds -that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap -seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a -fraction of a second no more than nine digits long. You can combine the -`--since` option with either or both of the `--follow` or `--tail` options. 
diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/network.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/network.md deleted file mode 100644 index 4555740da..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/network.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: "network" -description: "The network command description and usage" -keywords: "network" ---- - - - -# network - -```markdown -Usage: docker network COMMAND - -Manage networks - -Options: - --help Print usage - -Commands: - connect Connect a container to a network - create Create a network - disconnect Disconnect a container from a network - inspect Display detailed information on one or more networks - ls List networks - prune Remove all unused networks - rm Remove one or more networks - -Run 'docker network COMMAND --help' for more information on a command. -``` - -## Description - -Manage networks. You can use subcommands to create, inspect, list, remove, -prune, connect, and disconnect networks. 
- -## Related commands - -* [network create](network_create.md) -* [network inspect](network_inspect.md) -* [network list](network_list.md) -* [network rm](network_rm.md) -* [network prune](network_prune.md) -* [network connect](network_connect.md) -* [network disconnect](network_disconnect.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/network_connect.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/network_connect.md deleted file mode 100644 index ba01fc6ad..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/network_connect.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -title: "network connect" -description: "The network connect command description and usage" -keywords: "network, connect, user-defined" ---- - - - -# network connect - -```markdown -Usage: docker network connect [OPTIONS] NETWORK CONTAINER - -Connect a container to a network - -Options: - --alias value Add network-scoped alias for the container (default []) - --help Print usage - --ip string IPv4 address (e.g., 172.30.100.104) - --ip6 string IPv6 address (e.g., 2001:db8::33) - --link value Add link to another container (default []) - --link-local-ip value Add a link-local address for the container (default []) -``` - -## Description - -Connects a container to a network. You can connect a container by name -or by ID. Once connected, the container can communicate with other containers in -the same network. - -## Examples - -### Connect a running container to a network - -```bash -$ docker network connect multi-host-network container1 -``` - -### Connect a container to a network when it starts - -You can also use the `docker run --network=` option to start a container and immediately connect it to a network. 
- -```bash -$ docker run -itd --network=multi-host-network busybox -``` - -### Specify the IP address a container will use on a given network - -You can specify the IP address you want to be assigned to the container's interface. - -```bash -$ docker network connect --ip 10.10.36.122 multi-host-network container2 -``` - -### Use the legacy `--link` option - -You can use `--link` option to link another container with a preferred alias - -```bash -$ docker network connect --link container1:c1 multi-host-network container2 -``` - -### Create a network alias for a container - -`--alias` option can be used to resolve the container by another name in the network -being connected to. - -```bash -$ docker network connect --alias db --alias mysql multi-host-network container2 -``` - -### Network implications of stopping, pausing, or restarting containers - -You can pause, restart, and stop containers that are connected to a network. -A container connects to its configured networks when it runs. - -If specified, the container's IP address(es) is reapplied when a stopped -container is restarted. If the IP address is no longer available, the container -fails to start. One way to guarantee that the IP address is available is -to specify an `--ip-range` when creating the network, and choose the static IP -address(es) from outside that range. This ensures that the IP address is not -given to another container while this container is not on the network. - -```bash -$ docker network create --subnet 172.20.0.0/16 --ip-range 172.20.240.0/20 multi-host-network -``` - -```bash -$ docker network connect --ip 172.20.128.2 multi-host-network container2 -``` - -To verify the container is connected, use the `docker network inspect` command. Use `docker network disconnect` to remove a container from the network. - -Once connected in network, containers can communicate using only another -container's IP address or name. 
For `overlay` networks or custom plugins that -support multi-host connectivity, containers connected to the same multi-host -network but launched from different Engines can also communicate in this way. - -You can connect a container to one or more networks. The networks need not be the same type. For example, you can connect a single container bridge and overlay networks. - -## Related commands - -* [network inspect](network_inspect.md) -* [network create](network_create.md) -* [network disconnect](network_disconnect.md) -* [network ls](network_ls.md) -* [network rm](network_rm.md) -* [network prune](network_prune.md) -* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) -* [Work with networks](https://docs.docker.com/engine/userguide/networking/work-with-networks/) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/network_create.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/network_create.md deleted file mode 100644 index 4b95c5e50..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/network_create.md +++ /dev/null @@ -1,224 +0,0 @@ ---- -title: "network create" -description: "The network create command description and usage" -keywords: "network, create" ---- - - - -# network create - -```markdown -Usage: docker network create [OPTIONS] NETWORK - -Create a network - -Options: - --attachable Enable manual container attachment - --ingress Specify the network provides the routing-mesh - --aux-address value Auxiliary IPv4 or IPv6 addresses used by Network - driver (default map[]) - -d, --driver string Driver to manage the Network (default "bridge") - --gateway value IPv4 or IPv6 Gateway for the master subnet (default []) - --help Print usage - --internal Restrict external access to the network - --ip-range value Allocate container ip from a sub-range (default []) - --ipam-driver string IP Address Management Driver (default "default") - --ipam-opt value 
Set IPAM driver specific options (default map[]) - --ipv6 Enable IPv6 networking - --label value Set metadata on a network (default []) - -o, --opt value Set driver specific options (default map[]) - --subnet value Subnet in CIDR format that represents a - network segment (default []) -``` - -## Description - -Creates a new network. The `DRIVER` accepts `bridge` or `overlay` which are the -built-in network drivers. If you have installed a third party or your own custom -network driver you can specify that `DRIVER` here also. If you don't specify the -`--driver` option, the command automatically creates a `bridge` network for you. -When you install Docker Engine it creates a `bridge` network automatically. This -network corresponds to the `docker0` bridge that Engine has traditionally relied -on. When you launch a new container with `docker run` it automatically connects to -this bridge network. You cannot remove this default bridge network, but you can -create new ones using the `network create` command. - -```bash -$ docker network create -d bridge my-bridge-network -``` - -Bridge networks are isolated networks on a single Engine installation. If you -want to create a network that spans multiple Docker hosts each running an -Engine, you must create an `overlay` network. Unlike `bridge` networks, overlay -networks require some pre-existing conditions before you can create one. These -conditions are: - -* Access to a key-value store. Engine supports Consul, Etcd, and ZooKeeper (Distributed store) key-value stores. -* A cluster of hosts with connectivity to the key-value store. -* A properly configured Engine `daemon` on each host in the cluster. - -The `dockerd` options that support the `overlay` network are: - -* `--cluster-store` -* `--cluster-store-opt` -* `--cluster-advertise` - -To read more about these options and how to configure them, see ["*Get started -with multi-host network*"](https://docs.docker.com/engine/userguide/networking/get-started-overlay). 
- -While not required, it is a good idea to install Docker Swarm to -manage the cluster that makes up your network. Swarm provides sophisticated -discovery and server management tools that can assist your implementation. - -Once you have prepared the `overlay` network prerequisites you simply choose a -Docker host in the cluster and issue the following to create the network: - -```bash -$ docker network create -d overlay my-multihost-network -``` - -Network names must be unique. The Docker daemon attempts to identify naming -conflicts but this is not guaranteed. It is the user's responsibility to avoid -name conflicts. - -## Examples - -### Connect containers - -When you start a container, use the `--network` flag to connect it to a network. -This example adds the `busybox` container to the `mynet` network: - -```bash -$ docker run -itd --network=mynet busybox -``` - -If you want to add a container to a network after the container is already -running, use the `docker network connect` subcommand. - -You can connect multiple containers to the same network. Once connected, the -containers can communicate using only another container's IP address or name. -For `overlay` networks or custom plugins that support multi-host connectivity, -containers connected to the same multi-host network but launched from different -Engines can also communicate in this way. - -You can disconnect a container from a network using the `docker network -disconnect` command. - -### Specify advanced options - -When you create a network, Engine creates a non-overlapping subnetwork for the -network by default. This subnetwork is not a subdivision of an existing -network. It is purely for ip-addressing purposes. You can override this default -and specify subnetwork values directly using the `--subnet` option. 
On a -`bridge` network you can only create a single subnet: - -```bash -$ docker network create --driver=bridge --subnet=192.168.0.0/16 br0 -``` - -Additionally, you also specify the `--gateway` `--ip-range` and `--aux-address` -options. - -```bash -$ docker network create \ - --driver=bridge \ - --subnet=172.28.0.0/16 \ - --ip-range=172.28.5.0/24 \ - --gateway=172.28.5.254 \ - br0 -``` - -If you omit the `--gateway` flag the Engine selects one for you from inside a -preferred pool. For `overlay` networks and for network driver plugins that -support it you can create multiple subnetworks. - -```bash -$ docker network create -d overlay \ - --subnet=192.168.0.0/16 \ - --subnet=192.170.0.0/16 \ - --gateway=192.168.0.100 \ - --gateway=192.170.0.100 \ - --ip-range=192.168.1.0/24 \ - --aux-address="my-router=192.168.1.5" --aux-address="my-switch=192.168.1.6" \ - --aux-address="my-printer=192.170.1.5" --aux-address="my-nas=192.170.1.6" \ - my-multihost-network -``` - -Be sure that your subnetworks do not overlap. If they do, the network create -fails and Engine returns an error. - -### Bridge driver options - -When creating a custom network, the default network driver (i.e. `bridge`) has -additional options that can be passed. 
The following are those options and the -equivalent docker daemon flags used for docker0 bridge: - -| Option | Equivalent | Description | -|--------------------------------------------------|-------------|-------------------------------------------------------| -| `com.docker.network.bridge.name` | - | bridge name to be used when creating the Linux bridge | -| `com.docker.network.bridge.enable_ip_masquerade` | `--ip-masq` | Enable IP masquerading | -| `com.docker.network.bridge.enable_icc` | `--icc` | Enable or Disable Inter Container Connectivity | -| `com.docker.network.bridge.host_binding_ipv4` | `--ip` | Default IP when binding container ports | -| `com.docker.network.driver.mtu` | `--mtu` | Set the containers network MTU | - -The following arguments can be passed to `docker network create` for any -network driver, again with their approximate equivalents to `docker daemon`. - -| Argument | Equivalent | Description | -|--------------|----------------|--------------------------------------------| -| `--gateway` | - | IPv4 or IPv6 Gateway for the master subnet | -| `--ip-range` | `--fixed-cidr` | Allocate IPs from a range | -| `--internal` | - | Restrict external access to the network | -| `--ipv6` | `--ipv6` | Enable IPv6 networking | -| `--subnet` | `--bip` | Subnet for network | - -For example, let's use `-o` or `--opt` options to specify an IP address binding -when publishing ports: - -```bash -$ docker network create \ - -o "com.docker.network.bridge.host_binding_ipv4"="172.19.0.1" \ - simple-network -``` - -### Network internal mode - -By default, when you connect a container to an `overlay` network, Docker also -connects a bridge network to it to provide external connectivity. If you want -to create an externally isolated `overlay` network, you can specify the -`--internal` option. - -### Network ingress mode - -You can create the network which will be used to provide the routing-mesh in the -swarm cluster. 
You do so by specifying `--ingress` when creating the network. Only -one ingress network can be created at a time. The network can be removed only -if no services depend on it. Any option available when creating an overlay network -is also available when creating the ingress network, besides the `--attachable` option. - -```bash -$ docker network create -d overlay \ - --subnet=10.11.0.0/16 \ - --ingress \ - --opt com.docker.network.mtu=9216 \ - --opt encrypted=true \ - my-ingress-network -``` - -## Related commands - -* [network inspect](network_inspect.md) -* [network connect](network_connect.md) -* [network disconnect](network_disconnect.md) -* [network ls](network_ls.md) -* [network rm](network_rm.md) -* [network prune](network_prune.md) -* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/network_disconnect.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/network_disconnect.md deleted file mode 100644 index e855894d2..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/network_disconnect.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: "network disconnect" -description: "The network disconnect command description and usage" -keywords: "network, disconnect, user-defined" ---- - - - -# network disconnect - -```markdown -Usage: docker network disconnect [OPTIONS] NETWORK CONTAINER - -Disconnect a container from a network - -Options: - -f, --force Force the container to disconnect from a network - --help Print usage -``` - -## Description - -Disconnects a container from a network. The container must be running to -disconnect it from the network. 
- -## Examples - -```bash - $ docker network disconnect multi-host-network container1 -``` - - -## Related commands - -* [network inspect](network_inspect.md) -* [network connect](network_connect.md) -* [network create](network_create.md) -* [network ls](network_ls.md) -* [network rm](network_rm.md) -* [network prune](network_prune.md) -* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/network_inspect.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/network_inspect.md deleted file mode 100644 index 1a856ddcb..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/network_inspect.md +++ /dev/null @@ -1,307 +0,0 @@ ---- -title: "network inspect" -description: "The network inspect command description and usage" -keywords: "network, inspect, user-defined" ---- - - - -# network inspect - -```markdown -Usage: docker network inspect [OPTIONS] NETWORK [NETWORK...] - -Display detailed information on one or more networks - -Options: - -f, --format string Format the output using the given Go template - --help Print usage -``` - -## Description - -Returns information about one or more networks. By default, this command renders -all results in a JSON object. - -## Examples - -## Inspect the `bridge` network - -Connect two containers to the default `bridge` network: - -```bash -$ sudo docker run -itd --name=container1 busybox -f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27 - -$ sudo docker run -itd --name=container2 busybox -bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727 -``` - -The `network inspect` command shows the containers, by id, in its -results. For networks backed by multi-host network driver, such as Overlay, -this command also shows the container endpoints in other hosts in the -cluster. These endpoints are represented as "ep-{endpoint-id}" in the output. 
-However, for swarm mode networks, only the endpoints that are local to the -node are shown. - -You can specify an alternate format to execute a given -template for each result. Go's -[text/template](http://golang.org/pkg/text/template/) package describes all the -details of the format. - -```none -$ sudo docker network inspect bridge - -[ - { - "Name": "bridge", - "Id": "b2b1a2cba717161d984383fd68218cf70bbbd17d328496885f7c921333228b0f", - "Created": "2016-10-19T04:33:30.360899459Z", - "Scope": "local", - "Driver": "bridge", - "IPAM": { - "Driver": "default", - "Config": [ - { - "Subnet": "172.17.42.1/16", - "Gateway": "172.17.42.1" - } - ] - }, - "Internal": false, - "Containers": { - "bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727": { - "Name": "container2", - "EndpointID": "0aebb8fcd2b282abe1365979536f21ee4ceaf3ed56177c628eae9f706e00e019", - "MacAddress": "02:42:ac:11:00:02", - "IPv4Address": "172.17.0.2/16", - "IPv6Address": "" - }, - "f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27": { - "Name": "container1", - "EndpointID": "a00676d9c91a96bbe5bcfb34f705387a33d7cc365bac1a29e4e9728df92d10ad", - "MacAddress": "02:42:ac:11:00:01", - "IPv4Address": "172.17.0.1/16", - "IPv6Address": "" - } - }, - "Options": { - "com.docker.network.bridge.default_bridge": "true", - "com.docker.network.bridge.enable_icc": "true", - "com.docker.network.bridge.enable_ip_masquerade": "true", - "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", - "com.docker.network.bridge.name": "docker0", - "com.docker.network.driver.mtu": "1500" - }, - "Labels": {} - } -] -``` - -### Inspect a user-defined network - -Create and inspect a user-defined network: - -```bash -$ docker network create simple-network - -69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a -``` - -```none -$ docker network inspect simple-network - -[ - { - "Name": "simple-network", - "Id": "69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a", - "Created": 
"2016-10-19T04:33:30.360899459Z", - "Scope": "local", - "Driver": "bridge", - "IPAM": { - "Driver": "default", - "Config": [ - { - "Subnet": "172.22.0.0/16", - "Gateway": "172.22.0.1" - } - ] - }, - "Containers": {}, - "Options": {}, - "Labels": {} - } -] -``` - -### Inspect the `ingress` network - -For swarm mode overlay networks `network inspect` also shows the IP address and node name -of the peers. Peers are the nodes in the swarm cluster which have at least one task attached -to the network. Node name is of the format `-`. - -```none -$ docker network inspect ingress - -[ - { - "Name": "ingress", - "Id": "j0izitrut30h975vk4m1u5kk3", - "Created": "2016-11-08T06:49:59.803387552Z", - "Scope": "swarm", - "Driver": "overlay", - "EnableIPv6": false, - "IPAM": { - "Driver": "default", - "Options": null, - "Config": [ - { - "Subnet": "10.255.0.0/16", - "Gateway": "10.255.0.1" - } - ] - }, - "Internal": false, - "Attachable": false, - "Containers": { - "ingress-sbox": { - "Name": "ingress-endpoint", - "EndpointID": "40e002d27b7e5d75f60bc72199d8cae3344e1896abec5eddae9743755fe09115", - "MacAddress": "02:42:0a:ff:00:03", - "IPv4Address": "10.255.0.3/16", - "IPv6Address": "" - } - }, - "Options": { - "com.docker.network.driver.overlay.vxlanid_list": "256" - }, - "Labels": {}, - "Peers": [ - { - "Name": "net-1-1d22adfe4d5c", - "IP": "192.168.33.11" - }, - { - "Name": "net-2-d55d838b34af", - "IP": "192.168.33.12" - }, - { - "Name": "net-3-8473f8140bd9", - "IP": "192.168.33.13" - } - ] - } -] -``` - -### Using `verbose` option for `network inspect` - -`docker network inspect --verbose` for swarm mode overlay networks shows service-specific -details such as the service's VIP and port mappings. It also shows IPs of service tasks, -and the IPs of the nodes where the tasks are running. - -Following is an example output for a overlay network `ov1` that has one service `s1` -attached to. service `s1` in this case has three replicas. 
- -```bash -$ docker network inspect --verbose ov1 -[ - { - "Name": "ov1", - "Id": "ybmyjvao9vtzy3oorxbssj13b", - "Created": "2017-03-13T17:04:39.776106792Z", - "Scope": "swarm", - "Driver": "overlay", - "EnableIPv6": false, - "IPAM": { - "Driver": "default", - "Options": null, - "Config": [ - { - "Subnet": "10.0.0.0/24", - "Gateway": "10.0.0.1" - } - ] - }, - "Internal": false, - "Attachable": false, - "Containers": { - "020403bd88a15f60747fd25d1ad5fa1272eb740e8a97fc547d8ad07b2f721c5e": { - "Name": "s1.1.pjn2ik0sfgkfzed3h0s00gs9o", - "EndpointID": "ad16946f416562d658f3bb30b9830d73ad91ccf6feae44411269cd0ff674714e", - "MacAddress": "02:42:0a:00:00:04", - "IPv4Address": "10.0.0.4/24", - "IPv6Address": "" - } - }, - "Options": { - "com.docker.network.driver.overlay.vxlanid_list": "4097" - }, - "Labels": {}, - "Peers": [ - { - "Name": "net-3-5d3cfd30a58c", - "IP": "192.168.33.13" - }, - { - "Name": "net-1-6ecbc0040a73", - "IP": "192.168.33.11" - }, - { - "Name": "net-2-fb80208efd75", - "IP": "192.168.33.12" - } - ], - "Services": { - "s1": { - "VIP": "10.0.0.2", - "Ports": [], - "LocalLBIndex": 257, - "Tasks": [ - { - "Name": "s1.2.q4hcq2aiiml25ubtrtg4q1txt", - "EndpointID": "040879b027e55fb658e8b60ae3b87c6cdac7d291e86a190a3b5ac6567b26511a", - "EndpointIP": "10.0.0.5", - "Info": { - "Host IP": "192.168.33.11" - } - }, - { - "Name": "s1.3.yawl4cgkp7imkfx469kn9j6lm", - "EndpointID": "106edff9f120efe44068b834e1cddb5b39dd4a3af70211378b2f7a9e562bbad8", - "EndpointIP": "10.0.0.3", - "Info": { - "Host IP": "192.168.33.12" - } - }, - { - "Name": "s1.1.pjn2ik0sfgkfzed3h0s00gs9o", - "EndpointID": "ad16946f416562d658f3bb30b9830d73ad91ccf6feae44411269cd0ff674714e", - "EndpointIP": "10.0.0.4", - "Info": { - "Host IP": "192.168.33.13" - } - } - ] - } - } - } -] -``` - -## Related commands - -* [network disconnect ](network_disconnect.md) -* [network connect](network_connect.md) -* [network create](network_create.md) -* [network ls](network_ls.md) -* [network rm](network_rm.md) -* 
[network prune](network_prune.md) -* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/network_ls.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/network_ls.md deleted file mode 100644 index 8bb8a2c48..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/network_ls.md +++ /dev/null @@ -1,250 +0,0 @@ ---- -title: "network ls" -description: "The network ls command description and usage" -keywords: "network, list, user-defined" ---- - - - -# docker network ls - -```markdown -Usage: docker network ls [OPTIONS] - -List networks - -Aliases: - ls, list - -Options: - -f, --filter filter Provide filter values (e.g. 'driver=bridge') - --format string Pretty-print networks using a Go template - --help Print usage - --no-trunc Do not truncate the output - -q, --quiet Only display network IDs -``` - -## Description - -Lists all the networks the Engine `daemon` knows about. This includes the -networks that span across multiple hosts in a cluster. 
- -## Examples - -### List all networks - -```bash -$ sudo docker network ls -NETWORK ID NAME DRIVER SCOPE -7fca4eb8c647 bridge bridge local -9f904ee27bf5 none null local -cf03ee007fb4 host host local -78b03ee04fc4 multi-host overlay swarm -``` - -Use the `--no-trunc` option to display the full network id: - -```bash -$ docker network ls --no-trunc -NETWORK ID NAME DRIVER SCOPE -18a2866682b85619a026c81b98a5e375bd33e1b0936a26cc497c283d27bae9b3 none null local -c288470c46f6c8949c5f7e5099b5b7947b07eabe8d9a27d79a9cbf111adcbf47 host host local -7b369448dccbf865d397c8d2be0cda7cf7edc6b0945f77d2529912ae917a0185 bridge bridge local -95e74588f40db048e86320c6526440c504650a1ff3e9f7d60a497c4d2163e5bd foo bridge local -63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 dev bridge local -``` - -### Filtering - -The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there -is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). -Multiple filter flags are combined as an `OR` filter. For example, -`-f type=custom -f type=builtin` returns both `custom` and `builtin` networks. - -The currently supported filters are: - -* driver -* id (network's id) -* label (`label=` or `label==`) -* name (network's name) -* scope (`swarm|global|local`) -* type (`custom|builtin`) - -#### Driver - -The `driver` filter matches networks based on their driver. - -The following example matches networks with the `bridge` driver: - -```bash -$ docker network ls --filter driver=bridge -NETWORK ID NAME DRIVER SCOPE -db9db329f835 test1 bridge local -f6e212da9dfd test2 bridge local -``` - -#### ID - -The `id` filter matches on all or part of a network's ID. - -The following filter matches all networks with an ID containing the -`63d1ff1f77b0...` string. 
- -```bash -$ docker network ls --filter id=63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 -NETWORK ID NAME DRIVER SCOPE -63d1ff1f77b0 dev bridge local -``` - -You can also filter for a substring in an ID as this shows: - -```bash -$ docker network ls --filter id=95e74588f40d -NETWORK ID NAME DRIVER SCOPE -95e74588f40d foo bridge local - -$ docker network ls --filter id=95e -NETWORK ID NAME DRIVER SCOPE -95e74588f40d foo bridge local -``` - -#### Label - -The `label` filter matches networks based on the presence of a `label` alone or a `label` and a -value. - -The following filter matches networks with the `usage` label regardless of its value. - -```bash -$ docker network ls -f "label=usage" -NETWORK ID NAME DRIVER SCOPE -db9db329f835 test1 bridge local -f6e212da9dfd test2 bridge local -``` - -The following filter matches networks with the `usage` label with the `prod` value. - -```bash -$ docker network ls -f "label=usage=prod" -NETWORK ID NAME DRIVER SCOPE -f6e212da9dfd test2 bridge local -``` - -#### Name - -The `name` filter matches on all or part of a network's name. - -The following filter matches all networks with a name containing the `foobar` string. - -```bash -$ docker network ls --filter name=foobar -NETWORK ID NAME DRIVER SCOPE -06e7eef0a170 foobar bridge local -``` - -You can also filter for a substring in a name as this shows: - -```bash -$ docker network ls --filter name=foo -NETWORK ID NAME DRIVER SCOPE -95e74588f40d foo bridge local -06e7eef0a170 foobar bridge local -``` - -#### Scope - -The `scope` filter matches networks based on their scope. 
- -The following example matches networks with the `swarm` scope: - -```bash -$ docker network ls --filter scope=swarm -NETWORK ID NAME DRIVER SCOPE -xbtm0v4f1lfh ingress overlay swarm -ic6r88twuu92 swarmnet overlay swarm -``` - -The following example matches networks with the `local` scope: - -```bash -$ docker network ls --filter scope=local -NETWORK ID NAME DRIVER SCOPE -e85227439ac7 bridge bridge local -0ca0e19443ed host host local -ca13cc149a36 localnet bridge local -f9e115d2de35 none null local -``` - -#### Type - -The `type` filter supports two values; `builtin` displays predefined networks -(`bridge`, `none`, `host`), whereas `custom` displays user defined networks. - -The following filter matches all user defined networks: - -```bash -$ docker network ls --filter type=custom -NETWORK ID NAME DRIVER SCOPE -95e74588f40d foo bridge local -63d1ff1f77b0 dev bridge local -``` - -By having this flag it allows for batch cleanup. For example, use this filter -to delete all user defined networks: - -```bash -$ docker network rm `docker network ls --filter type=custom -q` -``` - -A warning will be issued when trying to remove a network that has containers -attached. - -### Formatting - -The formatting options (`--format`) pretty-prints networks output -using a Go template. - -Valid placeholders for the Go template are listed below: - -Placeholder | Description --------------|------------------------------------------------------------------------------------------ -`.ID` | Network ID -`.Name` | Network name -`.Driver` | Network driver -`.Scope` | Network scope (local, global) -`.IPv6` | Whether IPv6 is enabled on the network or not. -`.Internal` | Whether the network is internal or not. -`.Labels` | All labels assigned to the network. -`.Label` | Value of a specific label for this network. 
For example `{{.Label "project.version"}}` -`.CreatedAt` | Time when the network was created - -When using the `--format` option, the `network ls` command will either -output the data exactly as the template declares or, when using the -`table` directive, includes column headers as well. - -The following example uses a template without headers and outputs the -`ID` and `Driver` entries separated by a colon for all networks: - -```bash -$ docker network ls --format "{{.ID}}: {{.Driver}}" -afaaab448eb2: bridge -d1584f8dc718: host -391df270dc66: null -``` - -## Related commands - -* [network disconnect ](network_disconnect.md) -* [network connect](network_connect.md) -* [network create](network_create.md) -* [network inspect](network_inspect.md) -* [network rm](network_rm.md) -* [network prune](network_prune.md) -* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/network_prune.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/network_prune.md deleted file mode 100644 index 12d1caac0..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/network_prune.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -title: "network prune" -description: "Remove unused networks" -keywords: "network, prune, delete" ---- - -# network prune - -```markdown -Usage: docker network prune [OPTIONS] - -Remove all unused networks - -Options: - --filter filter Provide filter values (e.g. 'until=') - -f, --force Do not prompt for confirmation - --help Print usage -``` - -## Description - -Remove all unused networks. Unused networks are those which are not referenced -by any containers. - -## Examples - -```bash -$ docker network prune - -WARNING! This will remove all networks not used by at least one container. -Are you sure you want to continue? 
[y/N] y -Deleted Networks: -n1 -n2 -``` - -### Filtering - -The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more -than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) - -The currently supported filters are: - -* until (``) - only remove networks created before given timestamp - -The `until` filter can be Unix timestamps, date formatted -timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed -relative to the daemon machine’s time. Supported formats for date -formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, -`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local -timezone on the daemon will be used if you do not provide either a `Z` or a -`+-00:00` timezone offset at the end of the timestamp. When providing Unix -timestamps enter seconds[.nanoseconds], where seconds is the number of seconds -that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap -seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a -fraction of a second no more than nine digits long. - -The following removes networks created more than 5 minutes ago. 
Note that -system networks such as `bridge`, `host`, and `none` will never be pruned: - -```none -$ docker network ls - -NETWORK ID NAME DRIVER SCOPE -7430df902d7a bridge bridge local -ea92373fd499 foo-1-day-ago bridge local -ab53663ed3c7 foo-1-min-ago bridge local -97b91972bc3b host host local -f949d337b1f5 none null local - -$ docker network prune --force --filter until=5m - -Deleted Networks: -foo-1-day-ago - -$ docker network ls - -NETWORK ID NAME DRIVER SCOPE -7430df902d7a bridge bridge local -ab53663ed3c7 foo-1-min-ago bridge local -97b91972bc3b host host local -f949d337b1f5 none null local -``` - -## Related commands - -* [network disconnect ](network_disconnect.md) -* [network connect](network_connect.md) -* [network create](network_create.md) -* [network ls](network_ls.md) -* [network inspect](network_inspect.md) -* [network rm](network_rm.md) -* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) -* [system df](system_df.md) -* [container prune](container_prune.md) -* [image prune](image_prune.md) -* [volume prune](volume_prune.md) -* [system prune](system_prune.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/network_rm.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/network_rm.md deleted file mode 100644 index aab487a04..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/network_rm.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: "network rm" -description: "the network rm command description and usage" -keywords: "network, rm, user-defined" ---- - - - -# network rm - -```markdown -Usage: docker network rm NETWORK [NETWORK...] - -Remove one or more networks - -Aliases: - rm, remove - -Options: - --help Print usage -``` - -## Description - -Removes one or more networks by name or identifier. To remove a network, -you must first disconnect any containers connected to it. 
- -## Examples - -### Remove a network - -To remove the network named 'my-network': - -```bash - $ docker network rm my-network -``` - -### Remove multiple networks - -To delete multiple networks in a single `docker network rm` command, provide -multiple network names or ids. The following example deletes a network with id -`3695c422697f` and a network named `my-network`: - -```bash - $ docker network rm 3695c422697f my-network -``` - -When you specify multiple networks, the command attempts to delete each in turn. -If the deletion of one network fails, the command continues to the next on the -list and tries to delete that. The command reports success or failure for each -deletion. - -## Related commands - -* [network disconnect ](network_disconnect.md) -* [network connect](network_connect.md) -* [network create](network_create.md) -* [network ls](network_ls.md) -* [network inspect](network_inspect.md) -* [network prune](network_prune.md) -* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/node.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/node.md deleted file mode 100644 index 3a7d4b3a7..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/node.md +++ /dev/null @@ -1,42 +0,0 @@ - ---- -title: "node" -description: "The node command description and usage" -keywords: "node" ---- - - - -# node - -```markdown -Usage: docker node COMMAND - -Manage Swarm nodes - -Options: - --help Print usage - -Commands: - demote Demote one or more nodes from manager in the swarm - inspect Display detailed information on one or more nodes - ls List nodes in the swarm - promote Promote one or more nodes to manager in the swarm - ps List tasks running on one or more nodes, defaults to current node - rm Remove one or more nodes from the swarm - update Update a node - -Run 'docker node COMMAND --help' for more information on a 
command. -``` - -## Description - -Manage nodes. - diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/node_demote.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/node_demote.md deleted file mode 100644 index e6e59d894..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/node_demote.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: "node demote" -description: "The node demote command description and usage" -keywords: "node, demote" ---- - - - -# node demote - -```markdown -Usage: docker node demote NODE [NODE...] - -Demote one or more nodes from manager in the swarm - -Options: - --help Print usage - -``` - -## Description - -Demotes an existing manager so that it is no longer a manager. This command -targets a docker engine that is a manager in the swarm. - - -## Examples - -```bash -$ docker node demote -``` - -## Related commands - -* [node inspect](node_inspect.md) -* [node ls](node_ls.md) -* [node promote](node_promote.md) -* [node ps](node_ps.md) -* [node rm](node_rm.md) -* [node update](node_update.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/node_inspect.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/node_inspect.md deleted file mode 100644 index 6d7dc739e..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/node_inspect.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -title: "node inspect" -description: "The node inspect command description and usage" -keywords: "node, inspect" ---- - - - -# node inspect - -```markdown -Usage: docker node inspect [OPTIONS] self|NODE [NODE...] - -Display detailed information on one or more nodes - -Options: - -f, --format string Format the output using the given Go template - --help Print usage - --pretty Print the information in a human friendly format -``` - -## Description - -Returns information about a node. By default, this command renders all results -in a JSON array. 
You can specify an alternate format to execute a -given template for each result. Go's -[text/template](http://golang.org/pkg/text/template/) package describes all the -details of the format. - -## Examples - -### Inspect a node - -```none -$ docker node inspect swarm-manager - -[ -{ - "ID": "e216jshn25ckzbvmwlnh5jr3g", - "Version": { - "Index": 10 - }, - "CreatedAt": "2016-06-16T22:52:44.9910662Z", - "UpdatedAt": "2016-06-16T22:52:45.230878043Z", - "Spec": { - "Role": "manager", - "Availability": "active" - }, - "Description": { - "Hostname": "swarm-manager", - "Platform": { - "Architecture": "x86_64", - "OS": "linux" - }, - "Resources": { - "NanoCPUs": 1000000000, - "MemoryBytes": 1039843328 - }, - "Engine": { - "EngineVersion": "1.12.0", - "Plugins": [ - { - "Type": "Volume", - "Name": "local" - }, - { - "Type": "Network", - "Name": "overlay" - }, - { - "Type": "Network", - "Name": "null" - }, - { - "Type": "Network", - "Name": "host" - }, - { - "Type": "Network", - "Name": "bridge" - }, - { - "Type": "Network", - "Name": "overlay" - } - ] - } - }, - "Status": { - "State": "ready", - "Addr": "168.0.32.137" - }, - "ManagerStatus": { - "Leader": true, - "Reachability": "reachable", - "Addr": "168.0.32.137:2377" - } -} -] -``` - -### Specify an output format - -```none -$ docker node inspect --format '{{ .ManagerStatus.Leader }}' self - -false - -$ docker node inspect --pretty self -ID: e216jshn25ckzbvmwlnh5jr3g -Hostname: swarm-manager -Joined at: 2016-06-16 22:52:44.9910662 +0000 utc -Status: - State: Ready - Availability: Active - Address: 172.17.0.2 -Manager Status: - Address: 172.17.0.2:2377 - Raft Status: Reachable - Leader: Yes -Platform: - Operating System: linux - Architecture: x86_64 -Resources: - CPUs: 4 - Memory: 7.704 GiB -Plugins: - Network: overlay, bridge, null, host, overlay - Volume: local -Engine Version: 1.12.0 -``` - -## Related commands - -* [node demote](node_demote.md) -* [node ls](node_ls.md) -* [node promote](node_promote.md) -* [node 
ps](node_ps.md) -* [node rm](node_rm.md) -* [node update](node_update.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/node_ls.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/node_ls.md deleted file mode 100644 index 8162e11b5..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/node_ls.md +++ /dev/null @@ -1,169 +0,0 @@ ---- -title: "node ls" -description: "The node ls command description and usage" -keywords: "node, list" ---- - - - -# node ls - -```markdown -Usage: docker node ls [OPTIONS] - -List nodes in the swarm - -Aliases: - ls, list - -Options: - -f, --filter filter Filter output based on conditions provided - --format string Pretty-print nodes using a Go template - --help Print usage - -q, --quiet Only display IDs -``` - -## Description - -Lists all the nodes that the Docker Swarm manager knows about. You can filter -using the `-f` or `--filter` flag. Refer to the [filtering](#filtering) section -for more information about available filter options. - -## Examples - -```bash -$ docker node ls - -ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS -1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active -38ciaotwjuritcdtn9npbnkuz swarm-worker1 Ready Active -e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader -``` -> **Note**: -> In the above example output, there is a hidden column of `.Self` that indicates if the -> node is the same node as the current docker daemon. A `*` (e.g., `e216jshn25ckzbvmwlnh5jr3g *`) -> means this node is the current docker daemon. - - -### Filtering - -The filtering flag (`-f` or `--filter`) format is of "key=value". 
If there is more -than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) - -The currently supported filters are: - -* [id](node_ls.md#id) -* [label](node_ls.md#label) -* [membership](node_ls.md#membership) -* [name](node_ls.md#name) -* [role](node_ls.md#role) - -#### id - -The `id` filter matches all or part of a node's id. - -```bash -$ docker node ls -f id=1 - -ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS -1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active -``` - -#### label - -The `label` filter matches nodes based on engine labels and on the presence of a `label` alone or a `label` and a value. Node labels are currently not used for filtering. - -The following filter matches nodes with the `foo` label regardless of its value. - -```bash -$ docker node ls -f "label=foo" - -ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS -1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active -``` - -#### membersip - -The `membership` filter matches nodes based on the presence of a `membership` and a value -`accepted` or `pending`. - -The following filter matches nodes with the `membership` of `accepted`. - -```bash -$ docker node ls -f "membership=accepted" - -ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS -1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active -38ciaotwjuritcdtn9npbnkuz swarm-worker1 Ready Active -``` - -#### name - -The `name` filter matches on all or part of a node hostname. - -The following filter matches the nodes with a name equal to `swarm-master` string. - -```bash -$ docker node ls -f name=swarm-manager1 - -ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS -e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader -``` - -#### role - -The `role` filter matches nodes based on the presence of a `role` and a value `worker` or `manager`. - -The following filter matches nodes with the `manager` role. 
- -```bash -$ docker node ls -f "role=manager" - -ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS -e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader -``` - -### Formatting - -The formatting options (`--format`) pretty-prints nodes output -using a Go template. - -Valid placeholders for the Go template are listed below: - -Placeholder | Description ------------------|------------------------------------------------------------------------------------------ -`.ID` | Node ID -`.Self` | Node of the daemon (`true/false`, `true`indicates that the node is the same as current docker daemon) -`.Hostname` | Node hostname -`.Status` | Node status -`.Availability` | Node availability ("active", "pause", or "drain") -`.ManagerStatus` | Manager status of the node - -When using the `--format` option, the `node ls` command will either -output the data exactly as the template declares or, when using the -`table` directive, includes column headers as well. - -The following example uses a template without headers and outputs the -`ID` and `Hostname` entries separated by a colon for all nodes: - -```bash -$ docker node ls --format "{{.ID}}: {{.Hostname}}" -e216jshn25ckzbvmwlnh5jr3g: swarm-manager1 -`` - - -## Related commands - -* [node demote](node_demote.md) -* [node inspect](node_inspect.md) -* [node promote](node_promote.md) -* [node ps](node_ps.md) -* [node rm](node_rm.md) -* [node update](node_update.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/node_promote.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/node_promote.md deleted file mode 100644 index 1ebbe9550..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/node_promote.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: "node promote" -description: "The node promote command description and usage" -keywords: "node, promote" ---- - - - -# node promote - -```markdown -Usage: docker node promote NODE [NODE...] 
- -Promote one or more nodes to manager in the swarm - -Options: - --help Print usage -``` - -## Description - -Promotes a node to manager. This command targets a docker engine that is a -manager in the swarm. - -## Examples - -```bash -$ docker node promote -``` - -## Related commands - -* [node demote](node_demote.md) -* [node inspect](node_inspect.md) -* [node ls](node_ls.md) -* [node ps](node_ps.md) -* [node rm](node_rm.md) -* [node update](node_update.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/node_ps.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/node_ps.md deleted file mode 100644 index 0bf76e0d8..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/node_ps.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -title: "node ps" -description: "The node ps command description and usage" -keywords: node, tasks, ps -aliases: ["/engine/reference/commandline/node_tasks/"] ---- - - - -# node ps - -```markdown -Usage: docker node ps [OPTIONS] [NODE...] - -List tasks running on one or more nodes, defaults to current node. - -Options: - -f, --filter filter Filter output based on conditions provided - --format string Pretty-print tasks using a Go template - --help Print usage - --no-resolve Do not map IDs to Names - --no-trunc Do not truncate output - -q, --quiet Only display task IDs -``` - -## Description - -Lists all the tasks on a Node that Docker knows about. You can filter using the `-f` or `--filter` flag. Refer to the [filtering](#filtering) section for more information about available filter options. 
- -## Examples - -```bash -$ docker node ps swarm-manager1 -NAME IMAGE NODE DESIRED STATE CURRENT STATE -redis.1.7q92v0nr1hcgts2amcjyqg3pq redis:3.0.6 swarm-manager1 Running Running 5 hours -redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 29 seconds -redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds -redis.9.dkkual96p4bb3s6b10r7coxxt redis:3.0.6 swarm-manager1 Running Running 5 seconds -redis.10.0tgctg8h8cech4w0k0gwrmr23 redis:3.0.6 swarm-manager1 Running Running 5 seconds -``` - -### Filtering - -The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more -than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) - -The currently supported filters are: - -* [name](#name) -* [id](#id) -* [label](#label) -* [desired-state](#desired-state) - -#### name - -The `name` filter matches on all or part of a task's name. - -The following filter matches all tasks with a name containing the `redis` string. - -```bash -$ docker node ps -f name=redis swarm-manager1 - -NAME IMAGE NODE DESIRED STATE CURRENT STATE -redis.1.7q92v0nr1hcgts2amcjyqg3pq redis:3.0.6 swarm-manager1 Running Running 5 hours -redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 29 seconds -redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds -redis.9.dkkual96p4bb3s6b10r7coxxt redis:3.0.6 swarm-manager1 Running Running 5 seconds -redis.10.0tgctg8h8cech4w0k0gwrmr23 redis:3.0.6 swarm-manager1 Running Running 5 seconds -``` - -#### id - -The `id` filter matches a task's id. - -```bash -$ docker node ps -f id=bg8c07zzg87di2mufeq51a2qp swarm-manager1 - -NAME IMAGE NODE DESIRED STATE CURRENT STATE -redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds -``` - -#### label - -The `label` filter matches tasks based on the presence of a `label` alone or a `label` and a -value. 
- -The following filter matches tasks with the `usage` label regardless of its value. - -```bash -$ docker node ps -f "label=usage" - -NAME IMAGE NODE DESIRED STATE CURRENT STATE -redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 10 minutes -redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 9 minutes -``` - - -#### desired-state - -The `desired-state` filter can take the values `running`, `shutdown`, or `accepted`. - - -### Formatting - -The formatting options (`--format`) pretty-prints tasks output -using a Go template. - -Valid placeholders for the Go template are listed below: - -Placeholder | Description -----------------|------------------------------------------------------------------------------------------ -`.Name` | Task name -`.Image` | Task image -`.Node` | Node ID -`.DesiredState` | Desired state of the task (`running`, `shutdown`, or `accepted`) -`.CurrentState` | Current state of the task -`.Error` | Error -`.Ports` | Task published ports - -When using the `--format` option, the `node ps` command will either -output the data exactly as the template declares or, when using the -`table` directive, includes column headers as well. 
- -The following example uses a template without headers and outputs the -`Name` and `Image` entries separated by a colon for all tasks: - -```bash -$ docker node ps --format "{{.Name}}: {{.Image}}" -top.1: busybox -top.2: busybox -top.3: busybox -``` - -## Related commands - -* [node demote](node_demote.md) -* [node inspect](node_inspect.md) -* [node ls](node_ls.md) -* [node promote](node_promote.md) -* [node rm](node_rm.md) -* [node update](node_update.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/node_rm.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/node_rm.md deleted file mode 100644 index c2fdd4d15..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/node_rm.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: "node rm" -description: "The node rm command description and usage" -keywords: "node, remove" ---- - - - -# node rm - -```markdown -Usage: docker node rm [OPTIONS] NODE [NODE...] - -Remove one or more nodes from the swarm - -Aliases: - rm, remove - -Options: - -f, --force Force remove a node from the swarm - --help Print usage -``` - -## Description - -When run from a manager node, removes the specified nodes from a swarm. - - -## Examples - -### Remove a stopped node from the swarm - -```bash -$ docker node rm swarm-node-02 - -Node swarm-node-02 removed from swarm -``` -### Attempt to remove a running node from a swarm - -Removes the specified nodes from the swarm, but only if the nodes are in the -down state. If you attempt to remove an active node you will receive an error: - -```non -$ docker node rm swarm-node-03 - -Error response from daemon: rpc error: code = 9 desc = node swarm-node-03 is not -down and can't be removed -``` - -### Forcibly remove an inaccessible node from a swarm - -If you lose access to a worker node or need to shut it down because it has been -compromised or is not behaving as expected, you can use the `--force` option. 
-This may cause transient errors or interruptions, depending on the type of task -being run on the node. - -```bash -$ docker node rm --force swarm-node-03 - -Node swarm-node-03 removed from swarm -``` - -A manager node must be demoted to a worker node (using `docker node demote`) -before you can remove it from the swarm. - -## Related commands - -* [node demote](node_demote.md) -* [node inspect](node_inspect.md) -* [node ls](node_ls.md) -* [node promote](node_promote.md) -* [node ps](node_ps.md) -* [node update](node_update.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/node_update.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/node_update.md deleted file mode 100644 index 11117c7a9..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/node_update.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: "node update" -description: "The node update command description and usage" -keywords: "resources, update, dynamically" ---- - - - -# update - -```markdown -Usage: docker node update [OPTIONS] NODE - -Update a node - -Options: - --availability string Availability of the node ("active"|"pause"|"drain") - --help Print usage - --label-add value Add or update a node label (key=value) (default []) - --label-rm value Remove a node label if exists (default []) - --role string Role of the node ("worker"|"manager") -``` - -## Description - -Update metadata about a node, such as its availability, labels, or roles. - -## Examples - -### Add label metadata to a node - -Add metadata to a swarm node using node labels. You can specify a node label as -a key with an empty value: - -``` bash -$ docker node update --label-add foo worker1 -``` - -To add multiple labels to a node, pass the `--label-add` flag for each label: - -```bash -$ docker node update --label-add foo --label-add bar worker1 -``` - -When you [create a service](service_create.md), -you can use node labels as a constraint. 
A constraint limits the nodes where the -scheduler deploys tasks for a service. - -For example, to add a `type` label to identify nodes where the scheduler should -deploy message queue service tasks: - -``` bash -$ docker node update --label-add type=queue worker1 -``` - -The labels you set for nodes using `docker node update` apply only to the node -entity within the swarm. Do not confuse them with the docker daemon labels for -[dockerd](https://docs.docker.com/engine/userguide/labels-custom-metadata/#daemon-labels). - -For more information about labels, refer to [apply custom -metadata](https://docs.docker.com/engine/userguide/labels-custom-metadata/). - -## Related commands - -* [node demote](node_demote.md) -* [node inspect](node_inspect.md) -* [node ls](node_ls.md) -* [node promote](node_promote.md) -* [node ps](node_ps.md) -* [node rm](node_rm.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/pause.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/pause.md deleted file mode 100644 index 5bb652b92..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/pause.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: "pause" -description: "The pause command description and usage" -keywords: "cgroups, container, suspend, SIGSTOP" ---- - - - -# pause - -```markdown -Usage: docker pause CONTAINER [CONTAINER...] - -Pause all processes within one or more containers - -Options: - --help Print usage -``` - -## Description - -The `docker pause` command suspends all processes in the specified containers. -On Linux, this uses the cgroups freezer. Traditionally, when suspending a process -the `SIGSTOP` signal is used, which is observable by the process being suspended. -With the cgroups freezer the process is unaware, and unable to capture, -that it is being suspended, and subsequently resumed. On Windows, only Hyper-V -containers can be paused. 
- -See the -[cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) -for further details. - -## Examples - -```bash -$ docker pause my_container -``` - -## Related commands - -* [unpause](unpause.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin.md deleted file mode 100644 index 75082477d..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: "plugin" -description: "The plugin command description and usage" -keywords: "plugin" ---- - - - -# plugin - -```markdown -Usage: docker plugin COMMAND - -Manage plugins - -Options: - --help Print usage - -Commands: - create Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory. - disable Disable a plugin - enable Enable a plugin - inspect Display detailed information on one or more plugins - install Install a plugin - ls List plugins - push Push a plugin to a registry - rm Remove one or more plugins - set Change settings for a plugin - upgrade Upgrade an existing plugin - -Run 'docker plugin COMMAND --help' for more information on a command. - -``` - -## Description - -Manage plugins. diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_create.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_create.md deleted file mode 100644 index 6f1754326..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_create.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: "plugin create" -description: "the plugin create command description and usage" -keywords: "plugin, create" ---- - - - -# plugin create - -```markdown -Usage: docker plugin create [OPTIONS] PLUGIN PLUGIN-DATA-DIR - -Create a plugin from a rootfs and configuration. 
Plugin data directory must contain config.json and rootfs directory. - -Options: - --compress Compress the context using gzip - --help Print usage -``` - -## Description - -Creates a plugin. Before creating the plugin, prepare the plugin's root filesystem as well as -[the config.json](../../extend/config.md) - -## Examples - -The following example shows how to create a sample `plugin`. - -```bash -$ ls -ls /home/pluginDir - -4 -rw-r--r-- 1 root root 431 Nov 7 01:40 config.json -0 drwxr-xr-x 19 root root 420 Nov 7 01:40 rootfs - -$ docker plugin create plugin /home/pluginDir - -plugin - -$ docker plugin ls - -ID NAME TAG DESCRIPTION ENABLED -672d8144ec02 plugin latest A sample plugin for Docker false -``` - -The plugin can subsequently be enabled for local use or pushed to the public registry. - -## Related commands - -* [plugin disable](plugin_disable.md) -* [plugin enable](plugin_enable.md) -* [plugin inspect](plugin_inspect.md) -* [plugin install](plugin_install.md) -* [plugin ls](plugin_ls.md) -* [plugin push](plugin_push.md) -* [plugin rm](plugin_rm.md) -* [plugin set](plugin_set.md) -* [plugin upgrade](plugin_upgrade.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_disable.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_disable.md deleted file mode 100644 index 2ff81887f..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_disable.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: "plugin disable" -description: "the plugin disable command description and usage" -keywords: "plugin, disable" ---- - - - -# plugin disable - -```markdown -Usage: docker plugin disable [OPTIONS] PLUGIN - -Disable a plugin - -Options: - -f, --force Force the disable of an active plugin - --help Print usage -``` - -## Description - -Disables a plugin. The plugin must be installed before it can be disabled, -see [`docker plugin install`](plugin_install.md). 
Without the `-f` option, -a plugin that has references (eg, volumes, networks) cannot be disabled. - -## Examples - -The following example shows that the `sample-volume-plugin` plugin is installed -and enabled: - -```bash -$ docker plugin ls - -ID NAME TAG DESCRIPTION ENABLED -69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true -``` - -To disable the plugin, use the following command: - -```bash -$ docker plugin disable tiborvass/sample-volume-plugin - -tiborvass/sample-volume-plugin - -$ docker plugin ls - -ID NAME TAG DESCRIPTION ENABLED -69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker false -``` - -## Related commands - -* [plugin create](plugin_create.md) -* [plugin enable](plugin_enable.md) -* [plugin inspect](plugin_inspect.md) -* [plugin install](plugin_install.md) -* [plugin ls](plugin_ls.md) -* [plugin push](plugin_push.md) -* [plugin rm](plugin_rm.md) -* [plugin set](plugin_set.md) -* [plugin upgrade](plugin_upgrade.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_enable.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_enable.md deleted file mode 100644 index 2098a115a..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_enable.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: "plugin enable" -description: "the plugin enable command description and usage" -keywords: "plugin, enable" ---- - - - -# plugin enable - -```markdown -Usage: docker plugin enable [OPTIONS] PLUGIN - -Enable a plugin - -Options: - --help Print usage - --timeout int HTTP client timeout (in seconds) -``` - -## Description - -Enables a plugin. The plugin must be installed before it can be enabled, -see [`docker plugin install`](plugin_install.md). 
- -## Examples - -The following example shows that the `sample-volume-plugin` plugin is installed, -but disabled: - -```bash -$ docker plugin ls - -ID NAME TAG DESCRIPTION ENABLED -69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker false -``` - -To enable the plugin, use the following command: - -```bash -$ docker plugin enable tiborvass/sample-volume-plugin - -tiborvass/sample-volume-plugin - -$ docker plugin ls - -ID NAME TAG DESCRIPTION ENABLED -69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true -``` - -## Related commands - -* [plugin create](plugin_create.md) -* [plugin disable](plugin_disable.md) -* [plugin inspect](plugin_inspect.md) -* [plugin install](plugin_install.md) -* [plugin ls](plugin_ls.md) -* [plugin push](plugin_push.md) -* [plugin rm](plugin_rm.md) -* [plugin set](plugin_set.md) -* [plugin upgrade](plugin_upgrade.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_inspect.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_inspect.md deleted file mode 100644 index c0e6573cb..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_inspect.md +++ /dev/null @@ -1,167 +0,0 @@ ---- -title: "plugin inspect" -description: "The plugin inspect command description and usage" -keywords: "plugin, inspect" ---- - - - -# plugin inspect - -```markdown -Usage: docker plugin inspect [OPTIONS] PLUGIN [PLUGIN...] - -Display detailed information on one or more plugins - -Options: - -f, --format string Format the output using the given Go template - --help Print usage -``` - -## Description - -Returns information about a plugin. By default, this command renders all results -in a JSON array. 
- -## Examples - - -```none -$ docker plugin inspect tiborvass/sample-volume-plugin:latest - -{ - "Id": "8c74c978c434745c3ade82f1bc0acf38d04990eaf494fa507c16d9f1daa99c21", - "Name": "tiborvass/sample-volume-plugin:latest", - "PluginReference": "tiborvas/sample-volume-plugin:latest", - "Enabled": true, - "Config": { - "Mounts": [ - { - "Name": "", - "Description": "", - "Settable": null, - "Source": "/data", - "Destination": "/data", - "Type": "bind", - "Options": [ - "shared", - "rbind" - ] - }, - { - "Name": "", - "Description": "", - "Settable": null, - "Source": null, - "Destination": "/foobar", - "Type": "tmpfs", - "Options": null - } - ], - "Env": [ - "DEBUG=1" - ], - "Args": null, - "Devices": null - }, - "Manifest": { - "ManifestVersion": "v0", - "Description": "A test plugin for Docker", - "Documentation": "https://docs.docker.com/engine/extend/plugins/", - "Interface": { - "Types": [ - "docker.volumedriver/1.0" - ], - "Socket": "plugins.sock" - }, - "Entrypoint": [ - "plugin-sample-volume-plugin", - "/data" - ], - "Workdir": "", - "User": { - }, - "Network": { - "Type": "host" - }, - "Capabilities": null, - "Mounts": [ - { - "Name": "", - "Description": "", - "Settable": null, - "Source": "/data", - "Destination": "/data", - "Type": "bind", - "Options": [ - "shared", - "rbind" - ] - }, - { - "Name": "", - "Description": "", - "Settable": null, - "Source": null, - "Destination": "/foobar", - "Type": "tmpfs", - "Options": null - } - ], - "Devices": [ - { - "Name": "device", - "Description": "a host device to mount", - "Settable": null, - "Path": "/dev/cpu_dma_latency" - } - ], - "Env": [ - { - "Name": "DEBUG", - "Description": "If set, prints debug messages", - "Settable": null, - "Value": "1" - } - ], - "Args": { - "Name": "args", - "Description": "command line arguments", - "Settable": null, - "Value": [ - - ] - } - } -} -``` - -(output formatted for readability) - -### Formatting the output - -```bash -$ docker plugin inspect -f '{{.Id}}' 
tiborvass/sample-volume-plugin:latest - -8c74c978c434745c3ade82f1bc0acf38d04990eaf494fa507c16d9f1daa99c21 -``` - - -## Related commands - -* [plugin create](plugin_create.md) -* [plugin enable](plugin_enable.md) -* [plugin disable](plugin_disable.md) -* [plugin install](plugin_install.md) -* [plugin ls](plugin_ls.md) -* [plugin push](plugin_push.md) -* [plugin rm](plugin_rm.md) -* [plugin set](plugin_set.md) -* [plugin upgrade](plugin_upgrade.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_install.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_install.md deleted file mode 100644 index 78d9a61b7..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_install.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: "plugin install" -description: "the plugin install command description and usage" -keywords: "plugin, install" ---- - - - -# plugin install - -```markdown -Usage: docker plugin install [OPTIONS] PLUGIN [KEY=VALUE...] - -Install a plugin - -Options: - --alias string Local name for plugin - --disable Do not enable the plugin on install - --disable-content-trust Skip image verification (default true) - --grant-all-permissions Grant all permissions necessary to run the plugin - --help Print usage -``` - -## Description - -Installs and enables a plugin. Docker looks first for the plugin on your Docker -host. If the plugin does not exist locally, then the plugin is pulled from -the registry. Note that the minimum required registry version to distribute -plugins is 2.3.0 - -## Examples - -The following example installs `vieus/sshfs` plugin and [sets](plugin_set.md) its -`DEBUG` environment variable to `1`. To install, `pull` the plugin from Docker -Hub and prompt the user to accept the list of privileges that the plugin needs, -set the plugin's parameters and enable the plugin. 
- -```bash -$ docker plugin install vieux/sshfs DEBUG=1 - -Plugin "vieux/sshfs" is requesting the following privileges: - - network: [host] - - device: [/dev/fuse] - - capabilities: [CAP_SYS_ADMIN] -Do you grant the above permissions? [y/N] y -vieux/sshfs -``` - -After the plugin is installed, it appears in the list of plugins: - -```bash -$ docker plugin ls - -ID NAME TAG DESCRIPTION ENABLED -69553ca1d123 vieux/sshfs latest sshFS plugin for Docker true -``` - -## Related commands - -* [plugin create](plugin_create.md) -* [plugin disable](plugin_disable.md) -* [plugin enable](plugin_enable.md) -* [plugin inspect](plugin_inspect.md) -* [plugin ls](plugin_ls.md) -* [plugin push](plugin_push.md) -* [plugin rm](plugin_rm.md) -* [plugin set](plugin_set.md) -* [plugin upgrade](plugin_upgrade.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_ls.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_ls.md deleted file mode 100644 index e5793dc21..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_ls.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -title: "plugin ls" -description: "The plugin ls command description and usage" -keywords: "plugin, list" ---- - - - -# plugin ls - -```markdown -Usage: docker plugin ls [OPTIONS] - -List plugins - -Aliases: - ls, list - -Options: - -f, --filter filter Provide filter values (e.g. 'enabled=true') - --format string Pretty-print plugins using a Go template - --help Print usage - --no-trunc Don't truncate output - -q, --quiet Only display plugin IDs -``` - -## Description - -Lists all the plugins that are currently installed. You can install plugins -using the [`docker plugin install`](plugin_install.md) command. -You can also filter using the `-f` or `--filter` flag. -Refer to the [filtering](#filtering) section for more information about available filter options. 
- -## Examples - -```bash -$ docker plugin ls - -ID NAME TAG DESCRIPTION ENABLED -69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true -``` - -### Filtering - -The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more -than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) - -The currently supported filters are: - -* enabled (boolean - true or false, 0 or 1) -* capability (string - currently `volumedriver`, `networkdriver`, `ipamdriver`, or `authz`) - -#### enabled - -The `enabled` filter matches on plugins enabled or disabled. - -#### capability - -The `capability` filter matches on plugin capabilities. One plugin -might have multiple capabilities. Currently `volumedriver`, `networkdriver`, -`ipamdriver`, and `authz` are supported capabilities. - -```bash -$ docker plugin install --disable tiborvass/no-remove - -tiborvass/no-remove - -$ docker plugin ls --filter enabled=true - -NAME TAG DESCRIPTION ENABLED -``` - - -### Formatting - -The formatting options (`--format`) pretty-prints plugins output -using a Go template. - -Valid placeholders for the Go template are listed below: - -Placeholder | Description ----------------|------------------------------------------------------------------------------------------ -`.ID` | Plugin ID -`.Name` | Plugin name -`.Description` | Plugin description -`.Enabled` | Whether plugin is enabled or not -`.PluginReference` | The reference used to push/pull from a registry - -When using the `--format` option, the `plugin ls` command will either -output the data exactly as the template declares or, when using the -`table` directive, includes column headers as well. 
- -The following example uses a template without headers and outputs the -`ID` and `Name` entries separated by a colon for all plugins: - -```bash -$ docker plugin ls --format "{{.ID}}: {{.Name}}" - -4be01827a72e: tiborvass/no-remove -``` - - -## Related commands - -* [plugin create](plugin_create.md) -* [plugin disable](plugin_disable.md) -* [plugin enable](plugin_enable.md) -* [plugin inspect](plugin_inspect.md) -* [plugin install](plugin_install.md) -* [plugin push](plugin_push.md) -* [plugin rm](plugin_rm.md) -* [plugin set](plugin_set.md) -* [plugin upgrade](plugin_upgrade.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_push.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_push.md deleted file mode 100644 index f27a49894..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_push.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: "plugin push" -description: "the plugin push command description and usage" -keywords: "plugin, push" ---- - - - -```markdown -Usage: docker plugin push [OPTIONS] PLUGIN[:TAG] - -Push a plugin to a registry - -Options: - --disable-content-trust Skip image signing (default true) - --help Print usage -``` - -## Description - -After you have created a plugin using `docker plugin create` and the plugin is -ready for distribution, use `docker plugin push` to share your images to Docker -Hub or a self-hosted registry. - -Registry credentials are managed by [docker login](login.md). - -## Examples - -The following example shows how to push a sample `user/plugin`. 
- -```bash -$ docker plugin ls - -ID NAME TAG DESCRIPTION ENABLED -69553ca1d456 user/plugin latest A sample plugin for Docker false -$ docker plugin push user/plugin -``` - -## Related commands - -* [plugin create](plugin_create.md) -* [plugin disable](plugin_disable.md) -* [plugin enable](plugin_enable.md) -* [plugin inspect](plugin_inspect.md) -* [plugin install](plugin_install.md) -* [plugin ls](plugin_ls.md) -* [plugin rm](plugin_rm.md) -* [plugin set](plugin_set.md) -* [plugin upgrade](plugin_upgrade.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_rm.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_rm.md deleted file mode 100644 index c820c869f..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_rm.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: "plugin rm" -description: "the plugin rm command description and usage" -keywords: "plugin, rm" ---- - - - -# plugin rm - -```markdown -Usage: docker plugin rm [OPTIONS] PLUGIN [PLUGIN...] - -Remove one or more plugins - -Aliases: - rm, remove - -Options: - -f, --force Force the removal of an active plugin - --help Print usage -``` - -## Description - -Removes a plugin. You cannot remove a plugin if it is enabled, you must disable -a plugin using the [`docker plugin disable`](plugin_disable.md) before removing -it (or use --force, use of force is not recommended, since it can affect -functioning of running containers using the plugin). 
- -## Examples - -The following example disables and removes the `sample-volume-plugin:latest` -plugin: - -```bash -$ docker plugin disable tiborvass/sample-volume-plugin -tiborvass/sample-volume-plugin - -$ docker plugin rm tiborvass/sample-volume-plugin:latest -tiborvass/sample-volume-plugin -``` - -## Related commands - -* [plugin create](plugin_create.md) -* [plugin disable](plugin_disable.md) -* [plugin enable](plugin_enable.md) -* [plugin inspect](plugin_inspect.md) -* [plugin install](plugin_install.md) -* [plugin ls](plugin_ls.md) -* [plugin push](plugin_push.md) -* [plugin set](plugin_set.md) -* [plugin upgrade](plugin_upgrade.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_set.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_set.md deleted file mode 100644 index 5092b0b0c..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_set.md +++ /dev/null @@ -1,167 +0,0 @@ ---- -title: "plugin set" -description: "the plugin set command description and usage" -keywords: "plugin, set" ---- - - - -# plugin set - -```markdown -Usage: docker plugin set PLUGIN KEY=VALUE [KEY=VALUE...] - -Change settings for a plugin - -Options: - --help Print usage -``` - -## Description - -Change settings for a plugin. The plugin must be disabled. - -The settings currently supported are: - * env variables - * source of mounts - * path of devices - * args - -## What is settable ? - -Look at the plugin manifest, it's easy to see what fields are settable, -by looking at the `Settable` field. - -Here is an extract of a plugin manifest: - -``` -{ - "config": { - ... 
- "args": { - "name": "myargs", - "settable": ["value"], - "value": ["foo", "bar"] - }, - "env": [ - { - "name": "DEBUG", - "settable": ["value"], - "value": "0" - }, - { - "name": "LOGGING", - "value": "1" - } - ], - "devices": [ - { - "name": "mydevice", - "path": "/dev/foo", - "settable": ["path"] - } - ], - "mounts": [ - { - "destination": "/baz", - "name": "mymount", - "options": ["rbind"], - "settable": ["source"], - "source": "/foo", - "type": "bind" - } - ], - ... - } -} -``` - -In this example, we can see that the `value` of the `DEBUG` environment variable is settable, -the `source` of the `mymount` mount is also settable. Same for the `path` of `mydevice` and `value` of `myargs`. - -On the contrary, the `LOGGING` environment variable doesn't have any settable field, which implies that user cannot tweak it. - -## Examples - -### Change an environment variable - -The following example change the env variable `DEBUG` on the -`sample-volume-plugin` plugin. - -```bash -$ docker plugin inspect -f {{.Settings.Env}} tiborvass/sample-volume-plugin -[DEBUG=0] - -$ docker plugin set tiborvass/sample-volume-plugin DEBUG=1 - -$ docker plugin inspect -f {{.Settings.Env}} tiborvass/sample-volume-plugin -[DEBUG=1] -``` - -### Change the source of a mount - -The following example change the source of the `mymount` mount on -the `myplugin` plugin. - -```bash -$ docker plugin inspect -f '{{with $mount := index .Settings.Mounts 0}}{{$mount.Source}}{{end}}' myplugin -/foo - -$ docker plugins set myplugin mymount.source=/bar - -$ docker plugin inspect -f '{{with $mount := index .Settings.Mounts 0}}{{$mount.Source}}{{end}}' myplugin -/bar -``` - -> **Note**: Since only `source` is settable in `mymount`, -> `docker plugins set mymount=/bar myplugin` would work too. - -### Change a device path - -The following example change the path of the `mydevice` device on -the `myplugin` plugin. 
- -```bash -$ docker plugin inspect -f '{{with $device := index .Settings.Devices 0}}{{$device.Path}}{{end}}' myplugin -/dev/foo - -$ docker plugins set myplugin mydevice.path=/dev/bar - -$ docker plugin inspect -f '{{with $device := index .Settings.Devices 0}}{{$device.Path}}{{end}}' myplugin -/dev/bar -``` - -> **Note**: Since only `path` is settable in `mydevice`, -> `docker plugins set mydevice=/dev/bar myplugin` would work too. - -### Change the source of the arguments - -The following example change the value of the args on the `myplugin` plugin. - -```bash -$ docker plugin inspect -f '{{.Settings.Args}}' myplugin -["foo", "bar"] - -$ docker plugins set myplugin myargs="foo bar baz" - -$ docker plugin inspect -f '{{.Settings.Args}}' myplugin -["foo", "bar", "baz"] -``` - -## Related commands - -* [plugin create](plugin_create.md) -* [plugin disable](plugin_disable.md) -* [plugin enable](plugin_enable.md) -* [plugin inspect](plugin_inspect.md) -* [plugin install](plugin_install.md) -* [plugin ls](plugin_ls.md) -* [plugin push](plugin_push.md) -* [plugin rm](plugin_rm.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_upgrade.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_upgrade.md deleted file mode 100644 index 38191fff7..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/plugin_upgrade.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: "plugin upgrade" -description: "the plugin upgrade command description and usage" -keywords: "plugin, upgrade" ---- - - - -# plugin upgrade - -```markdown -Usage: docker plugin upgrade [OPTIONS] PLUGIN [REMOTE] - -Upgrade a plugin - -Options: - --disable-content-trust Skip image verification (default true) - --grant-all-permissions Grant all permissions necessary to run the plugin - --help Print usage - --skip-remote-check Do not check if specified remote plugin matches existing plugin image -``` - -## Description - -Upgrades an existing 
plugin to the specified remote plugin image. If no remote -is specified, Docker will re-pull the current image and use the updated version. -All existing references to the plugin will continue to work. -The plugin must be disabled before running the upgrade. - -## Examples - -The following example installs `vieus/sshfs` plugin, uses it to create and use -a volume, then upgrades the plugin. - -```bash -$ docker plugin install vieux/sshfs DEBUG=1 - -Plugin "vieux/sshfs:next" is requesting the following privileges: - - network: [host] - - device: [/dev/fuse] - - capabilities: [CAP_SYS_ADMIN] -Do you grant the above permissions? [y/N] y -vieux/sshfs:next - -$ docker volume create -d vieux/sshfs:next -o sshcmd=root@1.2.3.4:/tmp/shared -o password=XXX sshvolume -sshvolume -$ docker run -it -v sshvolume:/data alpine sh -c "touch /data/hello" -$ docker plugin disable -f vieux/sshfs:next -viex/sshfs:next - -# Here docker volume ls doesn't show 'sshfsvolume', since the plugin is disabled -$ docker volume ls -DRIVER VOLUME NAME - -$ docker plugin upgrade vieux/sshfs:next vieux/sshfs:next -Plugin "vieux/sshfs:next" is requesting the following privileges: - - network: [host] - - device: [/dev/fuse] - - capabilities: [CAP_SYS_ADMIN] -Do you grant the above permissions? 
[y/N] y -Upgrade plugin vieux/sshfs:next to vieux/sshfs:next -$ docker plugin enable vieux/sshfs:next -viex/sshfs:next -$ docker volume ls -DRIVER VOLUME NAME -viuex/sshfs:next sshvolume -$ docker run -it -v sshvolume:/data alpine sh -c "ls /data" -hello -``` - -## Related commands - -* [plugin create](plugin_create.md) -* [plugin disable](plugin_disable.md) -* [plugin enable](plugin_enable.md) -* [plugin inspect](plugin_inspect.md) -* [plugin install](plugin_install.md) -* [plugin ls](plugin_ls.md) -* [plugin push](plugin_push.md) -* [plugin rm](plugin_rm.md) -* [plugin set](plugin_set.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/port.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/port.md deleted file mode 100644 index c38763ea3..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/port.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: "port" -description: "The port command description and usage" -keywords: "port, mapping, container" ---- - - - -# port - -```markdown -Usage: docker port CONTAINER [PRIVATE_PORT[/PROTO]] - -List port mappings or a specific mapping for the container - -Options: - --help Print usage -``` - -## Examples - -### Show all mapped ports - -You can find out all the ports mapped by not specifying a `PRIVATE_PORT`, or -just a specific mapping: - -```bash -$ docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -b650456536c7 busybox:latest top 54 minutes ago Up 54 minutes 0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp test -$ docker port test -7890/tcp -> 0.0.0.0:4321 -9876/tcp -> 0.0.0.0:1234 -$ docker port test 7890/tcp -0.0.0.0:4321 -$ docker port test 7890/udp -2014/06/24 11:53:36 Error: No public port '7890/udp' published for test -$ docker port test 7890 -0.0.0.0:4321 -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/ps.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/ps.md deleted file mode 100644 index 
51bab4834..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/ps.md +++ /dev/null @@ -1,432 +0,0 @@ ---- -title: "ps" -description: "The ps command description and usage" -keywords: "container, running, list" ---- - - - -# ps - -```markdown -Usage: docker ps [OPTIONS] - -List containers - -Options: - -a, --all Show all containers (default shows just running) - -f, --filter value Filter output based on conditions provided (default []) - - ancestor=([:tag]||) - containers created from an image or a descendant. - - before=(|) - - expose=([/]|/[]) - - exited= an exit code of - - health=(starting|healthy|unhealthy|none) - - id= a container's ID - - isolation=(`default`|`process`|`hyperv`) (Windows daemon only) - - is-task=(true|false) - - label= or label== - - name= a container's name - - network=(|) - - publish=([/]|/[]) - - since=(|) - - status=(created|restarting|removing|running|paused|exited) - - volume=(|) - --format string Pretty-print containers using a Go template - --help Print usage - -n, --last int Show n last created containers (includes all states) (default -1) - -l, --latest Show the latest created container (includes all states) - --no-trunc Don't truncate output - -q, --quiet Only display numeric IDs - -s, --size Display total file sizes -``` - -## Examples - -### Prevent truncating output - -Running `docker ps --no-trunc` showing 2 linked containers. - -```bash -$ docker ps - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds 3300-3310/tcp webapp -d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db -``` - -### Show both running and stopped containers - -The `docker ps` command only shows running containers by default. To see all -containers, use the `-a` (or `--all`) flag: - -```bash -$ docker ps -a -``` - -`docker ps` groups exposed ports into a single range if possible. 
E.g., a -container that exposes TCP ports `100, 101, 102` displays `100-102/tcp` in -the `PORTS` column. - -### Filtering - -The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more -than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`) - -The currently supported filters are: - -* id (container's id) -* label (`label=` or `label==`) -* name (container's name) -* exited (int - the code of exited containers. Only useful with `--all`) -* status (`created|restarting|running|removing|paused|exited|dead`) -* ancestor (`[:]`, `` or ``) - filters containers that were created from the given image or a descendant. -* before (container's id or name) - filters containers created before given id or name -* since (container's id or name) - filters containers created since given id or name -* isolation (`default|process|hyperv`) (Windows daemon only) -* volume (volume name or mount point) - filters containers that mount volumes. -* network (network id or name) - filters containers connected to the provided network -* health (starting|healthy|unhealthy|none) - filters containers based on healthcheck status -* publish=(container's published port) - filters published ports by containers -* expose=(container's exposed port) - filters exposed ports by containers - -#### label - -The `label` filter matches containers based on the presence of a `label` alone or a `label` and a -value. - -The following filter matches containers with the `color` label regardless of its value. - -```bash -$ docker ps --filter "label=color" - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -673394ef1d4c busybox "top" 47 seconds ago Up 45 seconds nostalgic_shockley -d85756f57265 busybox "top" 52 seconds ago Up 51 seconds high_albattani -``` - -The following filter matches containers with the `color` label with the `blue` value. 
- -```bash -$ docker ps --filter "label=color=blue" - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -d85756f57265 busybox "top" About a minute ago Up About a minute high_albattani -``` - -#### name - -The `name` filter matches on all or part of a container's name. - -The following filter matches all containers with a name containing the `nostalgic_stallman` string. - -```bash -$ docker ps --filter "name=nostalgic_stallman" - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -9b6247364a03 busybox "top" 2 minutes ago Up 2 minutes nostalgic_stallman -``` - -You can also filter for a substring in a name as this shows: - -```bash -$ docker ps --filter "name=nostalgic" - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -715ebfcee040 busybox "top" 3 seconds ago Up 1 second i_am_nostalgic -9b6247364a03 busybox "top" 7 minutes ago Up 7 minutes nostalgic_stallman -673394ef1d4c busybox "top" 38 minutes ago Up 38 minutes nostalgic_shockley -``` - -#### exited - -The `exited` filter matches containers by exist status code. For example, to -filter for containers that have exited successfully: - -```bash -$ docker ps -a --filter 'exited=0' - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -ea09c3c82f6e registry:latest /srv/run.sh 2 weeks ago Exited (0) 2 weeks ago 127.0.0.1:5000->5000/tcp desperate_leakey -106ea823fe4e fedora:latest /bin/sh -c 'bash -l' 2 weeks ago Exited (0) 2 weeks ago determined_albattani -48ee228c9464 fedora:20 bash 2 weeks ago Exited (0) 2 weeks ago tender_torvalds -``` - -#### Filter by exit signal - -You can use a filter to locate containers that exited with status of `137` -meaning a `SIGKILL(9)` killed them. 
- -```none -$ docker ps -a --filter 'exited=137' - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -b3e1c0ed5bfe ubuntu:latest "sleep 1000" 12 seconds ago Exited (137) 5 seconds ago grave_kowalevski -a2eb5558d669 redis:latest "/entrypoint.sh redi 2 hours ago Exited (137) 2 hours ago sharp_lalande -``` - -Any of these events result in a `137` status: - -* the `init` process of the container is killed manually -* `docker kill` kills the container -* Docker daemon restarts which kills all running containers - -#### status - -The `status` filter matches containers by status. You can filter using -`created`, `restarting`, `running`, `removing`, `paused`, `exited` and `dead`. For example, -to filter for `running` containers: - -```bash -$ docker ps --filter status=running - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -715ebfcee040 busybox "top" 16 minutes ago Up 16 minutes i_am_nostalgic -d5c976d3c462 busybox "top" 23 minutes ago Up 23 minutes top -9b6247364a03 busybox "top" 24 minutes ago Up 24 minutes nostalgic_stallman -``` - -To filter for `paused` containers: - -```bash -$ docker ps --filter status=paused - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -673394ef1d4c busybox "top" About an hour ago Up About an hour (Paused) nostalgic_shockley -``` - -#### ancestor - -The `ancestor` filter matches containers based on its image or a descendant of -it. The filter supports the following image representation: - -- image -- image:tag -- image:tag@digest -- short-id -- full-id - -If you don't specify a `tag`, the `latest` tag is used. 
For example, to filter -for containers that use the latest `ubuntu` image: - -```bash -$ docker ps --filter ancestor=ubuntu - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -919e1179bdb8 ubuntu-c1 "top" About a minute ago Up About a minute admiring_lovelace -5d1e4a540723 ubuntu-c2 "top" About a minute ago Up About a minute admiring_sammet -82a598284012 ubuntu "top" 3 minutes ago Up 3 minutes sleepy_bose -bab2a34ba363 ubuntu "top" 3 minutes ago Up 3 minutes focused_yonath -``` - -Match containers based on the `ubuntu-c1` image which, in this case, is a child -of `ubuntu`: - -```bash -$ docker ps --filter ancestor=ubuntu-c1 - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -919e1179bdb8 ubuntu-c1 "top" About a minute ago Up About a minute admiring_lovelace -``` - -Match containers based on the `ubuntu` version `12.04.5` image: - -```bash -$ docker ps --filter ancestor=ubuntu:12.04.5 - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -82a598284012 ubuntu:12.04.5 "top" 3 minutes ago Up 3 minutes sleepy_bose -``` - -The following matches containers based on the layer `d0e008c6cf02` or an image -that have this layer in its layer stack. - -```bash -$ docker ps --filter ancestor=d0e008c6cf02 - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -82a598284012 ubuntu:12.04.5 "top" 3 minutes ago Up 3 minutes sleepy_bose -``` - -#### Create time - -##### before - -The `before` filter shows only containers created before the container with -given id or name. 
For example, having these containers created: - -```bash -$ docker ps - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -9c3527ed70ce busybox "top" 14 seconds ago Up 15 seconds desperate_dubinsky -4aace5031105 busybox "top" 48 seconds ago Up 49 seconds focused_hamilton -6e63f6ff38b0 busybox "top" About a minute ago Up About a minute distracted_fermat -``` - -Filtering with `before` would give: - -```bash -$ docker ps -f before=9c3527ed70ce - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -4aace5031105 busybox "top" About a minute ago Up About a minute focused_hamilton -6e63f6ff38b0 busybox "top" About a minute ago Up About a minute distracted_fermat -``` - -##### since - -The `since` filter shows only containers created since the container with given -id or name. For example, with the same containers as in `before` filter: - -```bash -$ docker ps -f since=6e63f6ff38b0 - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -9c3527ed70ce busybox "top" 10 minutes ago Up 10 minutes desperate_dubinsky -4aace5031105 busybox "top" 10 minutes ago Up 10 minutes focused_hamilton -``` - -#### volume - -The `volume` filter shows only containers that mount a specific volume or have -a volume mounted in a specific path: - -```bash -$ docker ps --filter volume=remote-volume --format "table {{.ID}}\t{{.Mounts}}" -CONTAINER ID MOUNTS -9c3527ed70ce remote-volume - -$ docker ps --filter volume=/data --format "table {{.ID}}\t{{.Mounts}}" -CONTAINER ID MOUNTS -9c3527ed70ce remote-volume -``` - -#### network - -The `network` filter shows only containers that are connected to a network with -a given name or id. - -The following filter matches all containers that are connected to a network -with a name containing `net1`. 
- -```bash -$ docker run -d --net=net1 --name=test1 ubuntu top -$ docker run -d --net=net2 --name=test2 ubuntu top - -$ docker ps --filter network=net1 - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -9d4893ed80fe ubuntu "top" 10 minutes ago Up 10 minutes test1 -``` - -The network filter matches on both the network's name and id. The following -example shows all containers that are attached to the `net1` network, using -the network id as a filter; - -```bash -$ docker network inspect --format "{{.ID}}" net1 - -8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5 - -$ docker ps --filter network=8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5 - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -9d4893ed80fe ubuntu "top" 10 minutes ago Up 10 minutes test1 -``` - -#### publish and expose - -The `publish` and `expose` filters show only containers that have published or exposed port with a given port -number, port range, and/or protocol. The default protocol is `tcp` when not specified. 
- -The following filter matches all containers that have published port of 80: - -```bash -$ docker run -d --publish=80 busybox top -$ docker run -d --expose=8080 busybox top - -$ docker ps -a - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -9833437217a5 busybox "top" 5 seconds ago Up 4 seconds 8080/tcp dreamy_mccarthy -fc7e477723b7 busybox "top" 50 seconds ago Up 50 seconds 0.0.0.0:32768->80/tcp admiring_roentgen - -$ docker ps --filter publish=80 - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -fc7e477723b7 busybox "top" About a minute ago Up About a minute 0.0.0.0:32768->80/tcp admiring_roentgen -``` - -The following filter matches all containers that have exposed TCP port in the range of `8000-8080`: -```bash -$ docker ps --filter expose=8000-8080/tcp - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -9833437217a5 busybox "top" 21 seconds ago Up 19 seconds 8080/tcp dreamy_mccarthy -``` - -The following filter matches all containers that have exposed UDP port `80`: -```bash -$ docker ps --filter publish=80/udp - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -``` - -### Formatting - -The formatting option (`--format`) pretty-prints container output using a Go -template. - -Valid placeholders for the Go template are listed below: - -Placeholder | Description ---------------|---------------------------------------------------------------------------------------------------- -`.ID` | Container ID -`.Image` | Image ID -`.Command` | Quoted command -`.CreatedAt` | Time when the container was created. -`.RunningFor` | Elapsed time since the container was started. -`.Ports` | Exposed ports. -`.Status` | Container status. -`.Size` | Container disk size. -`.Names` | Container names. -`.Labels` | All labels assigned to the container. -`.Label` | Value of a specific label for this container. For example `'{{.Label "com.docker.swarm.cpu"}}'` -`.Mounts` | Names of the volumes mounted in this container. 
-`.Networks` | Names of the networks attached to this container. - -When using the `--format` option, the `ps` command will either output the data -exactly as the template declares or, when using the `table` directive, includes -column headers as well. - -The following example uses a template without headers and outputs the `ID` and -`Command` entries separated by a colon for all running containers: - -```bash -$ docker ps --format "{{.ID}}: {{.Command}}" - -a87ecb4f327c: /bin/sh -c #(nop) MA -01946d9d34d8: /bin/sh -c #(nop) MA -c1d3b0166030: /bin/sh -c yum -y up -41d50ecd2f57: /bin/sh -c #(nop) MA -``` - -To list all running containers with their labels in a table format you can use: - -```bash -$ docker ps --format "table {{.ID}}\t{{.Labels}}" - -CONTAINER ID LABELS -a87ecb4f327c com.docker.swarm.node=ubuntu,com.docker.swarm.storage=ssd -01946d9d34d8 -c1d3b0166030 com.docker.swarm.node=debian,com.docker.swarm.cpu=6 -41d50ecd2f57 com.docker.swarm.node=fedora,com.docker.swarm.cpu=3,com.docker.swarm.storage=ssd -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/pull.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/pull.md deleted file mode 100644 index 7bf3df836..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/pull.md +++ /dev/null @@ -1,254 +0,0 @@ ---- -title: "pull" -description: "The pull command description and usage" -keywords: "pull, image, hub, docker" ---- - - - -# pull - -```markdown -Usage: docker pull [OPTIONS] NAME[:TAG|@DIGEST] - -Pull an image or a repository from a registry - -Options: - -a, --all-tags Download all tagged images in the repository - --disable-content-trust Skip image verification (default true) - --help Print usage -``` - -## Description - -Most of your images will be created on top of a base image from the -[Docker Hub](https://hub.docker.com) registry. 
- -[Docker Hub](https://hub.docker.com) contains many pre-built images that you -can `pull` and try without needing to define and configure your own. - -To download a particular image, or set of images (i.e., a repository), -use `docker pull`. - -### Proxy configuration - -If you are behind an HTTP proxy server, for example in corporate settings, -before open a connect to registry, you may need to configure the Docker -daemon's proxy settings, using the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` -environment variables. To set these environment variables on a host using -`systemd`, refer to the [control and configure Docker with systemd](https://docs.docker.com/engine/admin/systemd/#http-proxy) -for variables configuration. - -### Concurrent downloads - -By default the Docker daemon will pull three layers of an image at a time. -If you are on a low bandwidth connection this may cause timeout issues and you may want to lower -this via the `--max-concurrent-downloads` daemon option. See the -[daemon documentation](dockerd.md) for more details. - -## Examples - -### Pull an image from Docker Hub - -To download a particular image, or set of images (i.e., a repository), use -`docker pull`. If no tag is provided, Docker Engine uses the `:latest` tag as a -default. This command pulls the `debian:latest` image: - -```bash -$ docker pull debian - -Using default tag: latest -latest: Pulling from library/debian -fdd5d7827f33: Pull complete -a3ed95caeb02: Pull complete -Digest: sha256:e7d38b3517548a1c71e41bffe9c8ae6d6d29546ce46bf62159837aad072c90aa -Status: Downloaded newer image for debian:latest -``` - -Docker images can consist of multiple layers. In the example above, the image -consists of two layers; `fdd5d7827f33` and `a3ed95caeb02`. - -Layers can be reused by images. For example, the `debian:jessie` image shares -both layers with `debian:latest`. 
Pulling the `debian:jessie` image therefore -only pulls its metadata, but not its layers, because all layers are already -present locally: - -```bash -$ docker pull debian:jessie - -jessie: Pulling from library/debian -fdd5d7827f33: Already exists -a3ed95caeb02: Already exists -Digest: sha256:a9c958be96d7d40df920e7041608f2f017af81800ca5ad23e327bc402626b58e -Status: Downloaded newer image for debian:jessie -``` - -To see which images are present locally, use the [`docker images`](images.md) -command: - -```bash -$ docker images - -REPOSITORY TAG IMAGE ID CREATED SIZE -debian jessie f50f9524513f 5 days ago 125.1 MB -debian latest f50f9524513f 5 days ago 125.1 MB -``` - -Docker uses a content-addressable image store, and the image ID is a SHA256 -digest covering the image's configuration and layers. In the example above, -`debian:jessie` and `debian:latest` have the same image ID because they are -actually the *same* image tagged with different names. Because they are the -same image, their layers are stored only once and do not consume extra disk -space. - -For more information about images, layers, and the content-addressable store, -refer to [understand images, containers, and storage drivers](https://docs.docker.com/engine/userguide/storagedriver/imagesandcontainers/). - - -### Pull an image by digest (immutable identifier) - -So far, you've pulled images by their name (and "tag"). Using names and tags is -a convenient way to work with images. When using tags, you can `docker pull` an -image again to make sure you have the most up-to-date version of that image. -For example, `docker pull ubuntu:14.04` pulls the latest version of the Ubuntu -14.04 image. - -In some cases you don't want images to be updated to newer versions, but prefer -to use a fixed version of an image. Docker enables you to pull an image by its -*digest*. When pulling an image by digest, you specify *exactly* which version -of an image to pull. 
Doing so, allows you to "pin" an image to that version, -and guarantee that the image you're using is always the same. - -To know the digest of an image, pull the image first. Let's pull the latest -`ubuntu:14.04` image from Docker Hub: - -```bash -$ docker pull ubuntu:14.04 - -14.04: Pulling from library/ubuntu -5a132a7e7af1: Pull complete -fd2731e4c50c: Pull complete -28a2f68d1120: Pull complete -a3ed95caeb02: Pull complete -Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 -Status: Downloaded newer image for ubuntu:14.04 -``` - -Docker prints the digest of the image after the pull has finished. In the example -above, the digest of the image is: - - sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 - -Docker also prints the digest of an image when *pushing* to a registry. This -may be useful if you want to pin to a version of the image you just pushed. - -A digest takes the place of the tag when pulling an image, for example, to -pull the above image by digest, run the following command: - -```bash -$ docker pull ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 - -sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2: Pulling from library/ubuntu -5a132a7e7af1: Already exists -fd2731e4c50c: Already exists -28a2f68d1120: Already exists -a3ed95caeb02: Already exists -Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 -Status: Downloaded newer image for ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 -``` - -Digest can also be used in the `FROM` of a Dockerfile, for example: - -```Dockerfile -FROM ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 -MAINTAINER some maintainer -``` - -> **Note**: Using this feature "pins" an image to a specific version in time. -> Docker will therefore not pull updated versions of an image, which may include -> security updates. 
If you want to pull an updated image, you need to change the -> digest accordingly. - - -### Pull from a different registry - -By default, `docker pull` pulls images from [Docker Hub](https://hub.docker.com). It is also possible to -manually specify the path of a registry to pull from. For example, if you have -set up a local registry, you can specify its path to pull from it. A registry -path is similar to a URL, but does not contain a protocol specifier (`https://`). - -The following command pulls the `testing/test-image` image from a local registry -listening on port 5000 (`myregistry.local:5000`): - -```bash -$ docker pull myregistry.local:5000/testing/test-image -``` - -Registry credentials are managed by [docker login](login.md). - -Docker uses the `https://` protocol to communicate with a registry, unless the -registry is allowed to be accessed over an insecure connection. Refer to the -[insecure registries](dockerd.md#insecure-registries) section for more information. - - -### Pull a repository with multiple images - -By default, `docker pull` pulls a *single* image from the registry. A repository -can contain multiple images. To pull all images from a repository, provide the -`-a` (or `--all-tags`) option when using `docker pull`. - -This command pulls all images from the `fedora` repository: - -```bash -$ docker pull --all-tags fedora - -Pulling repository fedora -ad57ef8d78d7: Download complete -105182bb5e8b: Download complete -511136ea3c5a: Download complete -73bd853d2ea5: Download complete -.... - -Status: Downloaded newer image for fedora -``` - -After the pull has completed use the `docker images` command to see the -images that were pulled. 
The example below shows all the `fedora` images -that are present locally: - -```bash -$ docker images fedora - -REPOSITORY TAG IMAGE ID CREATED SIZE -fedora rawhide ad57ef8d78d7 5 days ago 359.3 MB -fedora 20 105182bb5e8b 5 days ago 372.7 MB -fedora heisenbug 105182bb5e8b 5 days ago 372.7 MB -fedora latest 105182bb5e8b 5 days ago 372.7 MB -``` - -### Cancel a pull - -Killing the `docker pull` process, for example by pressing `CTRL-c` while it is -running in a terminal, will terminate the pull operation. - -```bash -$ docker pull fedora - -Using default tag: latest -latest: Pulling from library/fedora -a3ed95caeb02: Pulling fs layer -236608c7b546: Pulling fs layer -^C -``` - -> **Note**: Technically, the Engine terminates a pull operation when the -> connection between the Docker Engine daemon and the Docker Engine client -> initiating the pull is lost. If the connection with the Engine daemon is -> lost for other reasons than a manual interaction, the pull is also aborted. diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/push.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/push.md deleted file mode 100644 index 61c37139f..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/push.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: "push" -description: "The push command description and usage" -keywords: "share, push, image" ---- - - - -# push - -```markdown -Usage: docker push [OPTIONS] NAME[:TAG] - -Push an image or a repository to a registry - -Options: - --disable-content-trust Skip image signing (default true) - --help Print usage -``` - -## Description - -Use `docker push` to share your images to the [Docker Hub](https://hub.docker.com) -registry or to a self-hosted one. - -Refer to the [`docker tag`](tag.md) reference for more information about valid -image and tag names. 
- -Killing the `docker push` process, for example by pressing `CTRL-c` while it is -running in a terminal, terminates the push operation. - -Progress bars are shown during docker push, which show the uncompressed size. The -actual amount of data that's pushed will be compressed before sending, so the uploaded - size will not be reflected by the progress bar. - -Registry credentials are managed by [docker login](login.md). - -### Concurrent uploads - -By default the Docker daemon will push five layers of an image at a time. -If you are on a low bandwidth connection this may cause timeout issues and you may want to lower -this via the `--max-concurrent-uploads` daemon option. See the -[daemon documentation](dockerd.md) for more details. - -## Examples - -### Push a new image to a registry - -First save the new image by finding the container ID (using [`docker ps`](ps.md)) -and then committing it to a new image name. Note that only `a-z0-9-_.` are -allowed when naming images: - -```bash -$ docker commit c16378f943fe rhel-httpd -``` - -Now, push the image to the registry using the image ID. In this example the -registry is on host named `registry-host` and listening on port `5000`. To do -this, tag the image with the host name or IP address, and the port of the -registry: - -```bash -$ docker tag rhel-httpd registry-host:5000/myadmin/rhel-httpd - -$ docker push registry-host:5000/myadmin/rhel-httpd -``` - -Check that this worked by running: - -```bash -$ docker images -``` - -You should see both `rhel-httpd` and `registry-host:5000/myadmin/rhel-httpd` -listed. 
diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/rename.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/rename.md deleted file mode 100644 index 90268a2a2..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/rename.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: "rename" -description: "The rename command description and usage" -keywords: "rename, docker, container" ---- - - - -# rename - -```markdown -Usage: docker rename CONTAINER NEW_NAME - -Rename a container - -Options: - --help Print usage -``` - -## Description - -The `docker rename` command renames a container. - -## Examples - -```bash -$ docker rename my_container my_new_container -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/restart.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/restart.md deleted file mode 100644 index a2796afe3..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/restart.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: "restart" -description: "The restart command description and usage" -keywords: "restart, container, Docker" ---- - - - -# restart - -```markdown -Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...] - -Restart one or more containers - -Options: - --help Print usage - -t, --time int Seconds to wait for stop before killing the container (default 10) -``` - -## Examples - -```bash -$ docker restart my_container -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/rm.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/rm.md deleted file mode 100644 index 8ee5b2874..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/rm.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: "rm" -description: "The rm command description and usage" -keywords: "remove, Docker, container" ---- - - - -# rm - -```markdown -Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...] 
- -Remove one or more containers - -Options: - -f, --force Force the removal of a running container (uses SIGKILL) - --help Print usage - -l, --link Remove the specified link - -v, --volumes Remove the volumes associated with the container -``` - -## Examples - -### Remove a container - -This will remove the container referenced under the link -`/redis`. - -```bash -$ docker rm /redis - -/redis -``` - -### Remove a link specified with `--link` on the default bridge network - -This will remove the underlying link between `/webapp` and the `/redis` -containers on the default bridge network, removing all network communication -between the two containers. This does not apply when `--link` is used with -user-specified networks. - -```bash -$ docker rm --link /webapp/redis - -/webapp/redis -``` - -### Force-remove a running container - -This command will force-remove a running container. - -```bash -$ docker rm --force redis - -redis -``` - -The main process inside the container referenced under the link `redis` will receive -`SIGKILL`, then the container will be removed. - -### Remove all stopped containers - -```bash -$ docker rm $(docker ps -a -q) -``` - -This command will delete all stopped containers. The command -`docker ps -a -q` will return all existing container IDs and pass them to -the `rm` command which will delete them. Any running containers will not be -deleted. - -### Remove a container and its volumes - -```bash -$ docker rm -v redis -redis -``` - -This command will remove the container and any volumes associated with it. -Note that if a volume was specified with a name, it will not be removed. - -### Remove a container and selectively remove volumes - -```bash -$ docker create -v awesome:/foo -v /bar --name hello redis -hello -$ docker rm -v hello -``` - -In this example, the volume for `/foo` will remain intact, but the volume for -`/bar` will be removed. The same behavior holds for volumes inherited with -`--volumes-from`. 
diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/rmi.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/rmi.md deleted file mode 100644 index 28e21d439..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/rmi.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: "rmi" -description: "The rmi command description and usage" -keywords: "remove, image, Docker" ---- - - - -# rmi - -```markdown -Usage: docker rmi [OPTIONS] IMAGE [IMAGE...] - -Remove one or more images - -Options: - -f, --force Force removal of the image - --help Print usage - --no-prune Do not delete untagged parents -``` - -## Examples - -You can remove an image using its short or long ID, its tag, or its digest. If -an image has one or more tag referencing it, you must remove all of them before -the image is removed. Digest references are removed automatically when an image -is removed by tag. - -```bash -$ docker images - -REPOSITORY TAG IMAGE ID CREATED SIZE -test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) -test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) -test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) - -$ docker rmi fd484f19954f - -Error: Conflict, cannot delete image fd484f19954f because it is tagged in multiple repositories, use -f to force -2013/12/11 05:47:16 Error: failed to remove one or more images - -$ docker rmi test1 - -Untagged: test1:latest - -$ docker rmi test2 - -Untagged: test2:latest - - -$ docker images - -REPOSITORY TAG IMAGE ID CREATED SIZE -test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) - -$ docker rmi test - -Untagged: test:latest -Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 -``` - -If you use the `-f` flag and specify the image's short or long ID, then this -command untags and removes all images that match the specified ID. 
- -```bash -$ docker images - -REPOSITORY TAG IMAGE ID CREATED SIZE -test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) -test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) -test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) - -$ docker rmi -f fd484f19954f - -Untagged: test1:latest -Untagged: test:latest -Untagged: test2:latest -Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 -``` - -An image pulled by digest has no tag associated with it: - -```bash -$ docker images --digests - -REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE -localhost:5000/test/busybox sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 4986bf8c1536 9 weeks ago 2.43 MB -``` - -To remove an image using its digest: - -```bash -$ docker rmi localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf -Untagged: localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf -Deleted: 4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125 -Deleted: ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2 -Deleted: df7546f9f060a2268024c8a230d8639878585defcc1bc6f79d2728a13957871b -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/run.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/run.md deleted file mode 100644 index a189ccd5a..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/run.md +++ /dev/null @@ -1,803 +0,0 @@ ---- -title: "run" -description: "The run command description and usage" -keywords: "run, command, container" ---- - - - -# run - -```markdown -Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] 
- -Run a command in a new container - -Options: - --add-host value Add a custom host-to-IP mapping (host:ip) (default []) - -a, --attach value Attach to STDIN, STDOUT or STDERR (default []) - --blkio-weight value Block IO (relative weight), between 10 and 1000 - --blkio-weight-device value Block IO weight (relative device weight) (default []) - --cap-add value Add Linux capabilities (default []) - --cap-drop value Drop Linux capabilities (default []) - --cgroup-parent string Optional parent cgroup for the container - --cidfile string Write the container ID to the file - --cpu-count int The number of CPUs available for execution by the container. - Windows daemon only. On Windows Server containers, this is - approximated as a percentage of total CPU usage. - --cpu-percent int Limit percentage of CPU available for execution - by the container. Windows daemon only. - The processor resource controls are mutually - exclusive, the order of precedence is CPUCount - first, then CPUShares, and CPUPercent last. 
- --cpu-period int Limit CPU CFS (Completely Fair Scheduler) period - --cpu-quota int Limit CPU CFS (Completely Fair Scheduler) quota - -c, --cpu-shares int CPU shares (relative weight) - --cpus NanoCPUs Number of CPUs (default 0.000) - --cpu-rt-period int Limit the CPU real-time period in microseconds - --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds - --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) - --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) - -d, --detach Run container in background and print container ID - --detach-keys string Override the key sequence for detaching a container - --device value Add a host device to the container (default []) - --device-cgroup-rule value Add a rule to the cgroup allowed devices list - --device-read-bps value Limit read rate (bytes per second) from a device (default []) - --device-read-iops value Limit read rate (IO per second) from a device (default []) - --device-write-bps value Limit write rate (bytes per second) to a device (default []) - --device-write-iops value Limit write rate (IO per second) to a device (default []) - --disable-content-trust Skip image verification (default true) - --dns value Set custom DNS servers (default []) - --dns-option value Set DNS options (default []) - --dns-search value Set custom DNS search domains (default []) - --entrypoint string Overwrite the default ENTRYPOINT of the image - -e, --env value Set environment variables (default []) - --env-file value Read in a file of environment variables (default []) - --expose value Expose a port or a range of ports (default []) - --group-add value Add additional groups to join (default []) - --health-cmd string Command to run to check health - --health-interval duration Time between running the check (ns|us|ms|s|m|h) (default 0s) - --health-retries int Consecutive failures needed to report unhealthy - --health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) (default 
0s) - --health-start-period duration Start period for the container to initialize before counting retries towards unstable (ns|us|ms|s|m|h) (default 0s) - --help Print usage - -h, --hostname string Container host name - --init Run an init inside the container that forwards signals and reaps processes - -i, --interactive Keep STDIN open even if not attached - --io-maxbandwidth string Maximum IO bandwidth limit for the system drive (Windows only) - (Windows only). The format is ``. - Unit is optional and can be `b` (bytes per second), - `k` (kilobytes per second), `m` (megabytes per second), - or `g` (gigabytes per second). If you omit the unit, - the system uses bytes per second. - --io-maxbandwidth and --io-maxiops are mutually exclusive options. - --io-maxiops uint Maximum IOps limit for the system drive (Windows only) - --ip string IPv4 address (e.g., 172.30.100.104) - --ip6 string IPv6 address (e.g., 2001:db8::33) - --ipc string IPC namespace to use - --isolation string Container isolation technology - --kernel-memory string Kernel memory limit - -l, --label value Set meta data on a container (default []) - --label-file value Read in a line delimited file of labels (default []) - --link value Add link to another container (default []) - --link-local-ip value Container IPv4/IPv6 link-local addresses (default []) - --log-driver string Logging driver for the container - --log-opt value Log driver options (default []) - --mac-address string Container MAC address (e.g., 92:d0:c6:0a:29:33) - -m, --memory string Memory limit - --memory-reservation string Memory soft limit - --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap - --memory-swappiness int Tune container memory swappiness (0 to 100) (default -1) - --mount value Attach a filesystem mount to the container (default []) - --name string Assign a name to the container - --network-alias value Add network-scoped alias for the container (default []) - --network string Connect a 
container to a network - 'bridge': create a network stack on the default Docker bridge - 'none': no networking - 'container:': reuse another container's network stack - 'host': use the Docker host network stack - '|': connect to a user-defined network - --no-healthcheck Disable any container-specified HEALTHCHECK - --oom-kill-disable Disable OOM Killer - --oom-score-adj int Tune host's OOM preferences (-1000 to 1000) - --pid string PID namespace to use - --pids-limit int Tune container pids limit (set -1 for unlimited) - --privileged Give extended privileges to this container - -p, --publish value Publish a container's port(s) to the host (default []) - -P, --publish-all Publish all exposed ports to random ports - --read-only Mount the container's root filesystem as read only - --restart string Restart policy to apply when a container exits (default "no") - Possible values are : no, on-failure[:max-retry], always, unless-stopped - --rm Automatically remove the container when it exits - --runtime string Runtime to use for this container - --security-opt value Security Options (default []) - --shm-size bytes Size of /dev/shm - The format is ``. `number` must be greater than `0`. - Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), - or `g` (gigabytes). If you omit the unit, the system uses bytes. 
- --sig-proxy Proxy received signals to the process (default true) - --stop-signal string Signal to stop a container (default "SIGTERM") - --stop-timeout=10 Timeout (in seconds) to stop a container - --storage-opt value Storage driver options for the container (default []) - --sysctl value Sysctl options (default map[]) - --tmpfs value Mount a tmpfs directory (default []) - -t, --tty Allocate a pseudo-TTY - --ulimit value Ulimit options (default []) - -u, --user string Username or UID (format: [:]) - --userns string User namespace to use - 'host': Use the Docker host user namespace - '': Use the Docker daemon user namespace specified by `--userns-remap` option. - --uts string UTS namespace to use - -v, --volume value Bind mount a volume (default []). The format - is `[host-src:]container-dest[:]`. - The comma-delimited `options` are [rw|ro], - [z|Z], [[r]shared|[r]slave|[r]private], - [delegated|cached|consistent], and - [nocopy]. The 'host-src' is an absolute path - or a name value. - --volume-driver string Optional volume driver for the container - --volumes-from value Mount volumes from the specified container(s) (default []) - -w, --workdir string Working directory inside the container -``` - -## Description - -The `docker run` command first `creates` a writeable container layer over the -specified image, and then `starts` it using the specified command. That is, -`docker run` is equivalent to the API `/containers/create` then -`/containers/(id)/start`. A stopped container can be restarted with all its -previous changes intact using `docker start`. See `docker ps -a` to view a list -of all containers. - -The `docker run` command can be used in combination with `docker commit` to -[*change the command that a container runs*](commit.md). There is additional detailed information about `docker run` in the [Docker run reference](../run.md). 
- -For information on connecting a container to a network, see the ["*Docker network overview*"](https://docs.docker.com/engine/userguide/networking/). - -## Examples - -### Assign name and allocate pseudo-TTY (--name, -it) - -```bash -$ docker run --name test -it debian - -root@d6c0fe130dba:/# exit 13 -$ echo $? -13 -$ docker ps -a | grep test -d6c0fe130dba debian:7 "/bin/bash" 26 seconds ago Exited (13) 17 seconds ago test -``` - -This example runs a container named `test` using the `debian:latest` -image. The `-it` instructs Docker to allocate a pseudo-TTY connected to -the container's stdin; creating an interactive `bash` shell in the container. -In the example, the `bash` shell is quit by entering -`exit 13`. This exit code is passed on to the caller of -`docker run`, and is recorded in the `test` container's metadata. - -### Capture container ID (--cidfile) - -```bash -$ docker run --cidfile /tmp/docker_test.cid ubuntu echo "test" -``` - -This will create a container and print `test` to the console. The `cidfile` -flag makes Docker attempt to create a new file and write the container ID to it. -If the file exists already, Docker will return an error. Docker will close this -file when `docker run` exits. - -### Full container capabilities (--privileged) - -```bash -$ docker run -t -i --rm ubuntu bash -root@bc338942ef20:/# mount -t tmpfs none /mnt -mount: permission denied -``` - -This will *not* work, because by default, most potentially dangerous kernel -capabilities are dropped; including `cap_sys_admin` (which is required to mount -filesystems). However, the `--privileged` flag will allow it to run: - -```bash -$ docker run -t -i --privileged ubuntu bash -root@50e3f57e16e6:/# mount -t tmpfs none /mnt -root@50e3f57e16e6:/# df -h -Filesystem Size Used Avail Use% Mounted on -none 1.9G 0 1.9G 0% /mnt -``` - -The `--privileged` flag gives *all* capabilities to the container, and it also -lifts all the limitations enforced by the `device` cgroup controller. 
In other -words, the container can then do almost everything that the host can do. This -flag exists to allow special use-cases, like running Docker within Docker. - -### Set working directory (-w) - -```bash -$ docker run -w /path/to/dir/ -i -t ubuntu pwd -``` - -The `-w` lets the command being executed inside directory given, here -`/path/to/dir/`. If the path does not exist it is created inside the container. - -### Set storage driver options per container - -```bash -$ docker run -it --storage-opt size=120G fedora /bin/bash -``` - -This (size) will allow to set the container rootfs size to 120G at creation time. -This option is only available for the `devicemapper`, `btrfs`, `overlay2`, -`windowsfilter` and `zfs` graph drivers. -For the `devicemapper`, `btrfs`, `windowsfilter` and `zfs` graph drivers, -user cannot pass a size less than the Default BaseFS Size. -For the `overlay2` storage driver, the size option is only available if the -backing fs is `xfs` and mounted with the `pquota` mount option. -Under these conditions, user can pass any size less then the backing fs size. - -### Mount tmpfs (--tmpfs) - -```bash -$ docker run -d --tmpfs /run:rw,noexec,nosuid,size=65536k my_image -``` - -The `--tmpfs` flag mounts an empty tmpfs into the container with the `rw`, -`noexec`, `nosuid`, `size=65536k` options. - -### Mount volume (-v, --read-only) - -```bash -$ docker run -v `pwd`:`pwd` -w `pwd` -i -t ubuntu pwd -``` - -The `-v` flag mounts the current working directory into the container. The `-w` -lets the command being executed inside the current working directory, by -changing into the directory to the value returned by `pwd`. So this -combination executes the command using the container, but inside the -current working directory. - -```bash -$ docker run -v /doesnt/exist:/foo -w /foo -i -t ubuntu bash -``` - -When the host directory of a bind-mounted volume doesn't exist, Docker -will automatically create this directory on the host for you. 
In the -example above, Docker will create the `/doesnt/exist` -folder before starting your container. - -```bash -$ docker run --read-only -v /icanwrite busybox touch /icanwrite/here -``` - -Volumes can be used in combination with `--read-only` to control where -a container writes files. The `--read-only` flag mounts the container's root -filesystem as read only prohibiting writes to locations other than the -specified volumes for the container. - -```bash -$ docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v /path/to/static-docker-binary:/usr/bin/docker busybox sh -``` - -By bind-mounting the docker unix socket and statically linked docker -binary (refer to [get the linux binary]( -https://docs.docker.com/engine/installation/binaries/#/get-the-linux-binary)), -you give the container the full access to create and manipulate the host's -Docker daemon. - -On Windows, the paths must be specified using Windows-style semantics. - -```powershell -PS C:\> docker run -v c:\foo:c:\dest microsoft/nanoserver cmd /s /c type c:\dest\somefile.txt -Contents of file - -PS C:\> docker run -v c:\foo:d: microsoft/nanoserver cmd /s /c type d:\somefile.txt -Contents of file -``` - -The following examples will fail when using Windows-based containers, as the -destination of a volume or bind-mount inside the container must be one of: -a non-existing or empty directory; or a drive other than C:. Further, the source -of a bind mount must be a local directory, not a file. - -```powershell -net use z: \\remotemachine\share -docker run -v z:\foo:c:\dest ... -docker run -v \\uncpath\to\directory:c:\dest ... -docker run -v c:\foo\somefile.txt:c:\dest ... -docker run -v c:\foo:c: ... -docker run -v c:\foo:c:\existing-directory-with-contents ... 
-``` - -For in-depth information about volumes, refer to [manage data in containers](https://docs.docker.com/engine/tutorials/dockervolumes/) - - -### Add bind-mounts or volumes using the --mount flag - -The `--mount` flag allows you to mount volumes, host-directories and `tmpfs` -mounts in a container. - -The `--mount` flag supports most options that are supported by the `-v` or the -`--volume` flag, but uses a different syntax. For in-depth information on the -`--mount` flag, and a comparison between `--volume` and `--mount`, refer to -the [service create command reference](service_create.md#add-bind-mounts-or-volumes). - -Even though there is no plan to deprecate `--volume`, usage of `--mount` is recommended. - -Examples: - -```bash -$ docker run --read-only --mount type=volume,target=/icanwrite busybox touch /icanwrite/here -``` - -```bash -$ docker run -t -i --mount type=bind,src=/data,dst=/data busybox sh -``` - -### Publish or expose port (-p, --expose) - -```bash -$ docker run -p 127.0.0.1:80:8080 ubuntu bash -``` - -This binds port `8080` of the container to port `80` on `127.0.0.1` of the host -machine. The [Docker User -Guide](https://docs.docker.com/engine/userguide/networking/default_network/dockerlinks/) -explains in detail how to manipulate ports in Docker. - -```bash -$ docker run --expose 80 ubuntu bash -``` - -This exposes port `80` of the container without publishing the port to the host -system's interfaces. - -### Set environment variables (-e, --env, --env-file) - -```bash -$ docker run -e MYVAR1 --env MYVAR2=foo --env-file ./env.list ubuntu bash -``` - -This sets simple (non-array) environmental variables in the container. For -illustration all three -flags are shown here. Where `-e`, `--env` take an environment variable and -value, or if no `=` is provided, then that variable's current value, set via -`export`, is passed through (i.e. `$MYVAR1` from the host is set to `$MYVAR1` -in the container). 
When no `=` is provided and that variable is not defined -in the client's environment then that variable will be removed from the -container's list of environment variables. All three flags, `-e`, `--env` and -`--env-file` can be repeated. - -Regardless of the order of these three flags, the `--env-file` are processed -first, and then `-e`, `--env` flags. This way, the `-e` or `--env` will -override variables as needed. - -```bash -$ cat ./env.list -TEST_FOO=BAR -$ docker run --env TEST_FOO="This is a test" --env-file ./env.list busybox env | grep TEST_FOO -TEST_FOO=This is a test -``` - -The `--env-file` flag takes a filename as an argument and expects each line -to be in the `VAR=VAL` format, mimicking the argument passed to `--env`. Comment -lines need only be prefixed with `#` - -An example of a file passed with `--env-file` - -```bash -$ cat ./env.list -TEST_FOO=BAR - -# this is a comment -TEST_APP_DEST_HOST=10.10.0.127 -TEST_APP_DEST_PORT=8888 -_TEST_BAR=FOO -TEST_APP_42=magic -helloWorld=true -123qwe=bar -org.spring.config=something - -# pass through this variable from the caller -TEST_PASSTHROUGH -$ TEST_PASSTHROUGH=howdy docker run --env-file ./env.list busybox env -PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -HOSTNAME=5198e0745561 -TEST_FOO=BAR -TEST_APP_DEST_HOST=10.10.0.127 -TEST_APP_DEST_PORT=8888 -_TEST_BAR=FOO -TEST_APP_42=magic -helloWorld=true -TEST_PASSTHROUGH=howdy -HOME=/root -123qwe=bar -org.spring.config=something - -$ docker run --env-file ./env.list busybox env -PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -HOSTNAME=5198e0745561 -TEST_FOO=BAR -TEST_APP_DEST_HOST=10.10.0.127 -TEST_APP_DEST_PORT=8888 -_TEST_BAR=FOO -TEST_APP_42=magic -helloWorld=true -TEST_PASSTHROUGH= -HOME=/root -123qwe=bar -org.spring.config=something -``` - -### Set metadata on container (-l, --label, --label-file) - -A label is a `key=value` pair that applies metadata to a container. 
To label a container with two labels: - -```bash -$ docker run -l my-label --label com.example.foo=bar ubuntu bash -``` - -The `my-label` key doesn't specify a value so the label defaults to an empty -string(`""`). To add multiple labels, repeat the label flag (`-l` or `--label`). - -The `key=value` must be unique to avoid overwriting the label value. If you -specify labels with identical keys but different values, each subsequent value -overwrites the previous. Docker uses the last `key=value` you supply. - -Use the `--label-file` flag to load multiple labels from a file. Delimit each -label in the file with an EOL mark. The example below loads labels from a -labels file in the current directory: - -```bash -$ docker run --label-file ./labels ubuntu bash -``` - -The label-file format is similar to the format for loading environment -variables. (Unlike environment variables, labels are not visible to processes -running inside a container.) The following example illustrates a label-file -format: - -```none -com.example.label1="a label" - -# this is a comment -com.example.label2=another\ label -com.example.label3 -``` - -You can load multiple label-files by supplying multiple `--label-file` flags. - -For additional information on working with labels, see [*Labels - custom -metadata in Docker*](https://docs.docker.com/engine/userguide/labels-custom-metadata/) in the Docker User -Guide. - -### Connect a container to a network (--network) - -When you start a container use the `--network` flag to connect it to a network. -This adds the `busybox` container to the `my-net` network. - -```bash -$ docker run -itd --network=my-net busybox -``` - -You can also choose the IP addresses for the container with `--ip` and `--ip6` -flags when you start the container on a user-defined network. - -```bash -$ docker run -itd --network=my-net --ip=10.10.9.75 busybox -``` - -If you want to add a running container to a network use the `docker network connect` subcommand. 
- -You can connect multiple containers to the same network. Once connected, the -containers can communicate easily need only another container's IP address -or name. For `overlay` networks or custom plugins that support multi-host -connectivity, containers connected to the same multi-host network but launched -from different Engines can also communicate in this way. - -> **Note**: Service discovery is unavailable on the default bridge network. -> Containers can communicate via their IP addresses by default. To communicate -> by name, they must be linked. - -You can disconnect a container from a network using the `docker network -disconnect` command. - -### Mount volumes from container (--volumes-from) - -```bash -$ docker run --volumes-from 777f7dc92da7 --volumes-from ba8c0c54f0f2:ro -i -t ubuntu pwd -``` - -The `--volumes-from` flag mounts all the defined volumes from the referenced -containers. Containers can be specified by repetitions of the `--volumes-from` -argument. The container ID may be optionally suffixed with `:ro` or `:rw` to -mount the volumes in read-only or read-write mode, respectively. By default, -the volumes are mounted in the same mode (read write or read only) as -the reference container. - -Labeling systems like SELinux require that proper labels are placed on volume -content mounted into a container. Without a label, the security system might -prevent the processes running inside the container from using the content. By -default, Docker does not change the labels set by the OS. - -To change the label in the container context, you can add either of two suffixes -`:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file -objects on the shared volumes. The `z` option tells Docker that two containers -share the volume content. As a result, Docker labels the content with a shared -content label. Shared volume labels allow all containers to read/write content. 
-The `Z` option tells Docker to label the content with a private unshared label. -Only the current container can use a private volume. - -### Attach to STDIN/STDOUT/STDERR (-a) - -The `-a` flag tells `docker run` to bind to the container's `STDIN`, `STDOUT` -or `STDERR`. This makes it possible to manipulate the output and input as -needed. - -```bash -$ echo "test" | docker run -i -a stdin ubuntu cat - -``` - -This pipes data into a container and prints the container's ID by attaching -only to the container's `STDIN`. - -```bash -$ docker run -a stderr ubuntu echo test -``` - -This isn't going to print anything unless there's an error because we've -only attached to the `STDERR` of the container. The container's logs -still store what's been written to `STDERR` and `STDOUT`. - -```bash -$ cat somefile | docker run -i -a stdin mybuilder dobuild -``` - -This is how piping a file into a container could be done for a build. -The container's ID will be printed after the build is done and the build -logs could be retrieved using `docker logs`. This is -useful if you need to pipe a file or something else into a container and -retrieve the container's ID once the container has finished running. - -### Add host device to container (--device) - -```bash -$ docker run --device=/dev/sdc:/dev/xvdc \ - --device=/dev/sdd --device=/dev/zero:/dev/nulo \ - -i -t \ - ubuntu ls -l /dev/{xvdc,sdd,nulo} - -brw-rw---- 1 root disk 8, 2 Feb 9 16:05 /dev/xvdc -brw-rw---- 1 root disk 8, 3 Feb 9 16:05 /dev/sdd -crw-rw-rw- 1 root root 1, 5 Feb 9 16:05 /dev/nulo -``` - -It is often necessary to directly expose devices to a container. The `--device` -option enables that. For example, a specific block storage device or loop -device or audio device can be added to an otherwise unprivileged container -(without the `--privileged` flag) and have the application directly access it. - -By default, the container will be able to `read`, `write` and `mknod` these devices. 
-This can be overridden using a third `:rwm` set of options to each `--device` -flag: - -```bash -$ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc - -Command (m for help): q -$ docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk /dev/xvdc -You will not be able to write the partition table. - -Command (m for help): q - -$ docker run --device=/dev/sda:/dev/xvdc:rw --rm -it ubuntu fdisk /dev/xvdc - -Command (m for help): q - -$ docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk /dev/xvdc -fdisk: unable to open /dev/xvdc: Operation not permitted -``` - -> **Note**: `--device` cannot be safely used with ephemeral devices. Block devices -> that may be removed should not be added to untrusted containers with -> `--device`. - -### Restart policies (--restart) - -Use Docker's `--restart` to specify a container's *restart policy*. A restart -policy controls whether the Docker daemon restarts a container after exit. -Docker supports the following restart policies: - -| Policy | Result | -|-------------------|-----------------------------------------| -| `no` | Do not automatically restart the container when it exits. This is the default. | -| `failure` | Restart only if the container exits with a non-zero exit status. Optionally, limit the number of restart retries the Docker daemon attempts. | -| `always` | Always restart the container regardless of the exit status. When you specify always, the Docker daemon will try to restart the container indefinitely. The container will also always start on daemon startup, regardless of the current state of the container. | - -```bash -$ docker run --restart=always redis -``` - -This will run the `redis` container with a restart policy of **always** -so that if the container exits, Docker will restart it. - -More detailed information on restart policies can be found in the -[Restart Policies (--restart)](../run.md#restart-policies-restart) -section of the Docker run reference page. 
- -### Add entries to container hosts file (--add-host) - -You can add other hosts into a container's `/etc/hosts` file by using one or -more `--add-host` flags. This example adds a static address for a host named -`docker`: - -```bash -$ docker run --add-host=docker:10.180.0.1 --rm -it debian - -root@f38c87f2a42d:/# ping docker -PING docker (10.180.0.1): 48 data bytes -56 bytes from 10.180.0.1: icmp_seq=0 ttl=254 time=7.600 ms -56 bytes from 10.180.0.1: icmp_seq=1 ttl=254 time=30.705 ms -^C--- docker ping statistics --- -2 packets transmitted, 2 packets received, 0% packet loss -round-trip min/avg/max/stddev = 7.600/19.152/30.705/11.553 ms -``` - -Sometimes you need to connect to the Docker host from within your -container. To enable this, pass the Docker host's IP address to -the container using the `--add-host` flag. To find the host's address, -use the `ip addr show` command. - -The flags you pass to `ip addr show` depend on whether you are -using IPv4 or IPv6 networking in your containers. Use the following -flags for IPv4 address retrieval for a network device named `eth0`: - -```bash -$ HOSTIP=`ip -4 addr show scope global dev eth0 | grep inet | awk '{print \$2}' | cut -d / -f 1` -$ docker run --add-host=docker:${HOSTIP} --rm -it debian -``` - -For IPv6 use the `-6` flag instead of the `-4` flag. For other network -devices, replace `eth0` with the correct device name (for example `docker0` -for the bridge device). - -### Set ulimits in container (--ulimit) - -Since setting `ulimit` settings in a container requires extra privileges not -available in the default container, you can set these using the `--ulimit` flag. -`--ulimit` is specified with a soft and hard limit as such: -`=[:]`, for example: - -```bash -$ docker run --ulimit nofile=1024:1024 --rm debian sh -c "ulimit -n" -1024 -``` - -> **Note**: If you do not provide a `hard limit`, the `soft limit` will be used -> for both values. 
If no `ulimits` are set, they will be inherited from -> the default `ulimits` set on the daemon. `as` option is disabled now. -> In other words, the following script is not supported: -> -> ```bash -> $ docker run -it --ulimit as=1024 fedora /bin/bash` -> ``` - -The values are sent to the appropriate `syscall` as they are set. -Docker doesn't perform any byte conversion. Take this into account when setting the values. - -#### For `nproc` usage - -Be careful setting `nproc` with the `ulimit` flag as `nproc` is designed by Linux to set the -maximum number of processes available to a user, not to a container. For example, start four -containers with `daemon` user: - -```bash -$ docker run -d -u daemon --ulimit nproc=3 busybox top - -$ docker run -d -u daemon --ulimit nproc=3 busybox top - -$ docker run -d -u daemon --ulimit nproc=3 busybox top - -$ docker run -d -u daemon --ulimit nproc=3 busybox top -``` - -The 4th container fails and reports "[8] System error: resource temporarily unavailable" error. -This fails because the caller set `nproc=3` resulting in the first three containers using up -the three processes quota set for the `daemon` user. - -### Stop container with signal (--stop-signal) - -The `--stop-signal` flag sets the system call signal that will be sent to the container to exit. -This signal can be a valid unsigned number that matches a position in the kernel's syscall table, for instance 9, -or a signal name in the format SIGNAME, for instance SIGKILL. - -### Optional security options (--security-opt) - -On Windows, this flag can be used to specify the `credentialspec` option. -The `credentialspec` must be in the format `file://spec.txt` or `registry://keyname`. - -### Stop container with timeout (--stop-timeout) - -The `--stop-timeout` flag sets the timeout (in seconds) that a pre-defined (see `--stop-signal`) system call -signal that will be sent to the container to exit. After timeout elapses the container will be killed with SIGKILL. 
- -### Specify isolation technology for container (--isolation) - -This option is useful in situations where you are running Docker containers on -Windows. The `--isolation ` option sets a container's isolation technology. -On Linux, the only supported is the `default` option which uses -Linux namespaces. These two commands are equivalent on Linux: - -```bash -$ docker run -d busybox top -$ docker run -d --isolation default busybox top -``` - -On Windows, `--isolation` can take one of these values: - - -| Value | Description | -|-----------|--------------------------------------------------------------------------------------------| -| `default` | Use the value specified by the Docker daemon's `--exec-opt` or system default (see below). | -| `process` | Shared-kernel namespace isolation (not supported on Windows client operating systems). | -| `hyperv` | Hyper-V hypervisor partition-based isolation. | - -The default isolation on Windows server operating systems is `process`. The default (and only supported) -isolation on Windows client operating systems is `hyperv`. An attempt to start a container on a client -operating system with `--isolation process` will fail. 
- -On Windows server, assuming the default configuration, these commands are equivalent -and result in `process` isolation: - -```PowerShell -PS C:\> docker run -d microsoft/nanoserver powershell echo process -PS C:\> docker run -d --isolation default microsoft/nanoserver powershell echo process -PS C:\> docker run -d --isolation process microsoft/nanoserver powershell echo process -``` - -If you have set the `--exec-opt isolation=hyperv` option on the Docker `daemon`, or -are running against a Windows client-based daemon, these commands are equivalent and -result in `hyperv` isolation: - -```PowerShell -PS C:\> docker run -d microsoft/nanoserver powershell echo hyperv -PS C:\> docker run -d --isolation default microsoft/nanoserver powershell echo hyperv -PS C:\> docker run -d --isolation hyperv microsoft/nanoserver powershell echo hyperv -``` - -### Configure namespaced kernel parameters (sysctls) at runtime - -The `--sysctl` sets namespaced kernel parameters (sysctls) in the -container. For example, to turn on IP forwarding in the containers -network namespace, run this command: - -```bash -$ docker run --sysctl net.ipv4.ip_forward=1 someimage -``` - -> **Note**: Not all sysctls are namespaced. Docker does not support changing sysctls -> inside of a container that also modify the host system. As the kernel -> evolves we expect to see more sysctls become namespaced. - -#### Currently supported sysctls - -- `IPC Namespace`: - - ```none - kernel.msgmax, kernel.msgmnb, kernel.msgmni, kernel.sem, kernel.shmall, kernel.shmmax, kernel.shmmni, kernel.shm_rmid_forced - Sysctls beginning with fs.mqueue.* - ``` - - If you use the `--ipc=host` option these sysctls will not be allowed. - -- `Network Namespace`: - - Sysctls beginning with net.* - - If you use the `--network=host` option using these sysctls will not be allowed. 
diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/save.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/save.md deleted file mode 100644 index cba7385e1..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/save.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: "save" -description: "The save command description and usage" -keywords: "tarred, repository, backup" ---- - - - -# save - -```markdown -Usage: docker save [OPTIONS] IMAGE [IMAGE...] - -Save one or more images to a tar archive (streamed to STDOUT by default) - -Options: - --help Print usage - -o, --output string Write to a file, instead of STDOUT -``` - -## Description - -Produces a tarred repository to the standard output stream. -Contains all parent layers, and all tags + versions, or specified `repo:tag`, for -each argument provided. - -## Examples - -### Create a backup that can then be used with `docker load`. - -```bash -$ docker save busybox > busybox.tar - -$ ls -sh busybox.tar - -2.7M busybox.tar - -$ docker save --output busybox.tar busybox - -$ ls -sh busybox.tar - -2.7M busybox.tar - -$ docker save -o fedora-all.tar fedora - -$ docker save -o fedora-latest.tar fedora:latest -``` - -### Cherry-pick particular tags - -You can even cherry-pick particular tags of an image repository. 
- -```bash -$ docker save -o ubuntu.tar ubuntu:lucid ubuntu:saucy -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/search.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/search.md deleted file mode 100644 index f645c7860..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/search.md +++ /dev/null @@ -1,149 +0,0 @@ ---- -title: "search" -description: "The search command description and usage" -keywords: "search, hub, images" ---- - - - -# search - -```markdown -Usage: docker search [OPTIONS] TERM - -Search the Docker Hub for images - -Options: - -f, --filter value Filter output based on conditions provided (default []) - - is-automated=(true|false) - - is-official=(true|false) - - stars= - image has at least 'number' stars - --help Print usage - --limit int Max number of search results (default 25) - --no-trunc Don't truncate output -``` - -## Description - -Search [Docker Hub](https://hub.docker.com) for images - -See [*Find Public Images on Docker Hub*](https://docs.docker.com/engine/tutorials/dockerrepos/#searching-for-images) for -more details on finding shared images from the command line. - -> **Note**: Search queries return a maximum of 25 results. - -## Examples - -### Search images by name - -This example displays images with a name containing 'busybox': - -```none -$ docker search busybox - -NAME DESCRIPTION STARS OFFICIAL AUTOMATED -busybox Busybox base image. 316 [OK] -progrium/busybox 50 [OK] -radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] -odise/busybox-python 2 [OK] -azukiapp/busybox This image is meant to be used as the base... 2 [OK] -ofayau/busybox-jvm Prepare busybox to install a 32 bits JVM. 1 [OK] -shingonoide/archlinux-busybox Arch Linux, a lightweight and flexible Lin... 
1 [OK] -odise/busybox-curl 1 [OK] -ofayau/busybox-libc32 Busybox with 32 bits (and 64 bits) libs 1 [OK] -peelsky/zulu-openjdk-busybox 1 [OK] -skomma/busybox-data Docker image suitable for data volume cont... 1 [OK] -elektritter/busybox-teamspeak Lightweight teamspeak3 container based on... 1 [OK] -socketplane/busybox 1 [OK] -oveits/docker-nginx-busybox This is a tiny NginX docker image based on... 0 [OK] -ggtools/busybox-ubuntu Busybox ubuntu version with extra goodies 0 [OK] -nikfoundas/busybox-confd Minimal busybox based distribution of confd 0 [OK] -openshift/busybox-http-app 0 [OK] -jllopis/busybox 0 [OK] -swyckoff/busybox 0 [OK] -powellquiring/busybox 0 [OK] -williamyeh/busybox-sh Docker image for BusyBox's sh 0 [OK] -simplexsys/busybox-cli-powered Docker busybox images, with a few often us... 0 [OK] -fhisamoto/busybox-java Busybox java 0 [OK] -scottabernethy/busybox 0 [OK] -marclop/busybox-solr -``` - -### Display non-truncated description (--no-trunc) - -This example displays images with a name containing 'busybox', -at least 3 stars and the description isn't truncated in the output: - -```bash -$ docker search --stars=3 --no-trunc busybox -NAME DESCRIPTION STARS OFFICIAL AUTOMATED -busybox Busybox base image. 325 [OK] -progrium/busybox 50 [OK] -radial/busyboxplus Full-chain, Internet enabled, busybox made from scratch. Comes in git and cURL flavors. 8 [OK] -``` - -### Limit search results (--limit) - -The flag `--limit` is the maximum number of results returned by a search. This value could -be in the range between 1 and 100. The default value of `--limit` is 25. - - -### Filtering - -The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more -than one filter, then pass multiple flags (e.g. 
`--filter "foo=bar" --filter "bif=baz"`) - -The currently supported filters are: - -* stars (int - number of stars the image has) -* is-automated (true|false) - is the image automated or not -* is-official (true|false) - is the image official or not - - -#### stars - -This example displays images with a name containing 'busybox' and at -least 3 stars: - -```bash -$ docker search --filter stars=3 busybox - -NAME DESCRIPTION STARS OFFICIAL AUTOMATED -busybox Busybox base image. 325 [OK] -progrium/busybox 50 [OK] -radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] -``` - - -#### is-automated - -This example displays images with a name containing 'busybox' -and are automated builds: - -```bash -$ docker search --filter is-automated busybox - -NAME DESCRIPTION STARS OFFICIAL AUTOMATED -progrium/busybox 50 [OK] -radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] -``` - -#### is-official - -This example displays images with a name containing 'busybox', at least -3 stars and are official builds: - -```bash -$ docker search --filter "is-official=true" --filter "stars=3" busybox - -NAME DESCRIPTION STARS OFFICIAL AUTOMATED -progrium/busybox 50 [OK] -radial/busyboxplus Full-chain, Internet enabled, busybox made... 
8 [OK] -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/secret.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/secret.md deleted file mode 100644 index 50734407a..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/secret.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: "secret" -description: "The secret command description and usage" -keywords: "secret" ---- - - - -# secret - -```markdown -Usage: docker secret COMMAND - -Manage Docker secrets - -Options: - --help Print usage - -Commands: - create Create a secret from a file or STDIN as content - inspect Display detailed information on one or more secrets - ls List secrets - rm Remove one or more secrets - -Run 'docker secret COMMAND --help' for more information on a command. - -``` - -## Description - -Manage secrets. - -## Related commands - -* [secret create](secret_create.md) -* [secret inspect](secret_inspect.md) -* [secret list](secret_list.md) -* [secret rm](secret_rm.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/secret_create.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/secret_create.md deleted file mode 100644 index e534dde55..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/secret_create.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: "secret create" -description: "The secret create command description and usage" -keywords: ["secret, create"] ---- - - - -# secret create - -```Markdown -Usage: docker secret create [OPTIONS] SECRET file|- - -Create a secret from a file or STDIN as content - -Options: - --help Print usage - -l, --label list Secret labels (default []) -``` - -## Description - -Creates a secret using standard input or from a file for the secret content. You must run this command on a manager node. 
- -For detailed information about using secrets, refer to [manage sensitive data with Docker secrets](https://docs.docker.com/engine/swarm/secrets/). - -## Examples - -### Create a secret - -```bash -$ echo | docker secret create my_secret - - -onakdyv307se2tl7nl20anokv - -$ docker secret ls - -ID NAME CREATED UPDATED -onakdyv307se2tl7nl20anokv my_secret 6 seconds ago 6 seconds ago -``` - -### Create a secret with a file - -```bash -$ docker secret create my_secret ./secret.json - -dg426haahpi5ezmkkj5kyl3sn - -$ docker secret ls - -ID NAME CREATED UPDATED -dg426haahpi5ezmkkj5kyl3sn my_secret 7 seconds ago 7 seconds ago -``` - -### Create a secret with labels - -```bash -$ docker secret create --label env=dev \ - --label rev=20170324 \ - my_secret ./secret.json - -eo7jnzguqgtpdah3cm5srfb97 -``` - -```none -$ docker secret inspect my_secret - -[ - { - "ID": "eo7jnzguqgtpdah3cm5srfb97", - "Version": { - "Index": 17 - }, - "CreatedAt": "2017-03-24T08:15:09.735271783Z", - "UpdatedAt": "2017-03-24T08:15:09.735271783Z", - "Spec": { - "Name": "my_secret", - "Labels": { - "env": "dev", - "rev": "20170324" - } - } - } -] -``` - - -## Related commands - -* [secret inspect](secret_inspect.md) -* [secret ls](secret_ls.md) -* [secret rm](secret_rm.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/secret_inspect.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/secret_inspect.md deleted file mode 100644 index cecf3c1dd..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/secret_inspect.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -title: "secret inspect" -description: "The secret inspect command description and usage" -keywords: ["secret, inspect"] ---- - - - -# secret inspect - -```Markdown -Usage: docker secret inspect [OPTIONS] SECRET [SECRET...] 
- -Display detailed information on one or more secrets - -Options: - -f, --format string Format the output using the given Go template - --help Print usage -``` - -## Description - -Inspects the specified secret. This command has to be run targeting a manager -node. - -By default, this renders all results in a JSON array. If a format is specified, -the given template will be executed for each result. - -Go's [text/template](http://golang.org/pkg/text/template/) package -describes all the details of the format. - -For detailed information about using secrets, refer to [manage sensitive data with Docker secrets](https://docs.docker.com/engine/swarm/secrets/). - -## Examples - -### Inspect a secret by name or ID - -You can inspect a secret, either by its *name*, or *ID* - -For example, given the following secret: - -```bash -$ docker secret ls - -ID NAME CREATED UPDATED -eo7jnzguqgtpdah3cm5srfb97 my_secret 3 minutes ago 3 minutes ago -``` - -```none -$ docker secret inspect secret.json - -[ - { - "ID": "eo7jnzguqgtpdah3cm5srfb97", - "Version": { - "Index": 17 - }, - "CreatedAt": "2017-03-24T08:15:09.735271783Z", - "UpdatedAt": "2017-03-24T08:15:09.735271783Z", - "Spec": { - "Name": "my_secret", - "Labels": { - "env": "dev", - "rev": "20170324" - } - } - } -] -``` - -### Formatting - -You can use the --format option to obtain specific information about a -secret. The following example command outputs the creation time of the -secret. 
- -```bash -$ docker secret inspect --format='{{.CreatedAt}}' eo7jnzguqgtpdah3cm5srfb97 - -2017-03-24 08:15:09.735271783 +0000 UTC -``` - - -## Related commands - -* [secret create](secret_create.md) -* [secret ls](secret_ls.md) -* [secret rm](secret_rm.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/secret_ls.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/secret_ls.md deleted file mode 100644 index 9b60227b8..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/secret_ls.md +++ /dev/null @@ -1,157 +0,0 @@ ---- -title: "secret ls" -description: "The secret ls command description and usage" -keywords: ["secret, ls"] ---- - - - -# secret ls - -```Markdown -Usage: docker secret ls [OPTIONS] - -List secrets - -Aliases: - ls, list - -Options: - -f, --filter filter Filter output based on conditions provided - --format string Pretty-print secrets using a Go template - --help Print usage - -q, --quiet Only display IDs -``` - -## Description - -Run this command on a manager node to list the secrets in the swarm. - -For detailed information about using secrets, refer to [manage sensitive data with Docker secrets](https://docs.docker.com/engine/swarm/secrets/). - -## Examples - -```bash -$ docker secret ls - -ID NAME CREATED UPDATED -6697bflskwj1998km1gnnjr38 q5s5570vtvnimefos1fyeo2u2 6 weeks ago 6 weeks ago -9u9hk4br2ej0wgngkga6rp4hq my_secret 5 weeks ago 5 weeks ago -mem02h8n73mybpgqjf0kfi1n0 test_secret 3 seconds ago 3 seconds ago -``` - -### Filtering - -The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more -than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) - -The currently supported filters are: - -* [id](secret_ls.md#id) (secret's ID) -* [label](secret_ls.md#label) (`label=` or `label==`) -* [name](secret_ls.md#name) (secret's name) - -#### id - -The `id` filter matches all or prefix of a secret's id. 
- -```bash -$ docker secret ls -f "id=6697bflskwj1998km1gnnjr38" - -ID NAME CREATED UPDATED -6697bflskwj1998km1gnnjr38 q5s5570vtvnimefos1fyeo2u2 6 weeks ago 6 weeks ago -``` - -#### label - -The `label` filter matches secrets based on the presence of a `label` alone or -a `label` and a value. - -The following filter matches all secrets with a `project` label regardless of -its value: - -```bash -$ docker secret ls --filter label=project - -ID NAME CREATED UPDATED -mem02h8n73mybpgqjf0kfi1n0 test_secret About an hour ago About an hour ago -``` - -The following filter matches only services with the `project` label with the -`project-a` value. - -```bash -$ docker service ls --filter label=project=test - -ID NAME CREATED UPDATED -mem02h8n73mybpgqjf0kfi1n0 test_secret About an hour ago About an hour ago -``` - -#### name - -The `name` filter matches on all or prefix of a secret's name. - -The following filter matches secret with a name containing a prefix of `test`. - -```bash -$ docker secret ls --filter name=test_secret - -ID NAME CREATED UPDATED -mem02h8n73mybpgqjf0kfi1n0 test_secret About an hour ago About an hour ago -``` - -### Format the output - -The formatting option (`--format`) pretty prints secrets output -using a Go template. - -Valid placeholders for the Go template are listed below: - -| Placeholder | Description | -| ------------ | ------------------------------------------------------------------------------------ | -| `.ID` | Secret ID | -| `.Name` | Secret name | -| `.CreatedAt` | Time when the secret was created | -| `.UpdatedAt` | Time when the secret was updated | -| `.Labels` | All labels assigned to the secret | -| `.Label` | Value of a specific label for this secret. For example `{{.Label "secret.ssh.key"}}` | - -When using the `--format` option, the `secret ls` command will either -output the data exactly as the template declares or, when using the -`table` directive, will include column headers as well. 
- -The following example uses a template without headers and outputs the -`ID` and `Name` entries separated by a colon for all images: - -```bash -$ docker secret ls --format "{{.ID}}: {{.Name}}" - -77af4d6b9913: secret-1 -b6fa739cedf5: secret-2 -78a85c484f71: secret-3 -``` - -To list all secrets with their name and created date in a table format you -can use: - -```bash -$ docker secret ls --format "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}" - -ID NAME CREATED -77af4d6b9913 secret-1 5 minutes ago -b6fa739cedf5 secret-2 3 hours ago -78a85c484f71 secret-3 10 days ago -``` - -## Related commands - -* [secret create](secret_create.md) -* [secret inspect](secret_inspect.md) -* [secret rm](secret_rm.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/secret_rm.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/secret_rm.md deleted file mode 100644 index 1e10350f9..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/secret_rm.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: "secret rm" -description: "The secret rm command description and usage" -keywords: ["secret, rm"] ---- - - - -# secret rm - -```Markdown -Usage: docker secret rm SECRET [SECRET...] - -Remove one or more secrets - -Aliases: - rm, remove - -Options: - --help Print usage -``` - -## Description - -Removes the specified secrets from the swarm. This command has to be run -targeting a manager node. - -For detailed information about using secrets, refer to [manage sensitive data with Docker secrets](https://docs.docker.com/engine/swarm/secrets/). - -## Examples - -This example removes a secret: - -```bash -$ docker secret rm secret.json -sapth4csdo5b6wz2p5uimh5xg -``` - -> **Warning**: Unlike `docker rm`, this command does not ask for confirmation -> before removing a secret. 
- - -## Related commands - -* [secret create](secret_create.md) -* [secret inspect](secret_inspect.md) -* [secret ls](secret_ls.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/service.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/service.md deleted file mode 100644 index 7ae0224ac..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/service.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: "service" -description: "The service command description and usage" -keywords: "service" ---- - - - -# service - -```markdown -Usage: docker service COMMAND - -Manage services - -Options: - --help Print usage - -Commands: - create Create a new service - inspect Display detailed information on one or more services - logs Fetch the logs of a service - ls List services - ps List the tasks of one or more services - rm Remove one or more services - scale Scale one or multiple replicated services - update Update a service - -Run 'docker service COMMAND --help' for more information on a command. -``` - -## Description - -Manage services. - diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_create.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_create.md deleted file mode 100644 index 082dffb82..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_create.md +++ /dev/null @@ -1,846 +0,0 @@ ---- -title: "service create" -description: "The service create command description and usage" -keywords: "service, create" ---- - - - -# service create - -```Markdown -Usage: docker service create [OPTIONS] IMAGE [COMMAND] [ARG...] 
- -Create a new service - -Options: - --constraint list Placement constraints - --container-label list Container labels - -d, --detach Exit immediately instead of waiting for the service to converge (default true) - --dns list Set custom DNS servers - --dns-option list Set DNS options - --dns-search list Set custom DNS search domains - --endpoint-mode string Endpoint mode (vip or dnsrr) (default "vip") - --entrypoint command Overwrite the default ENTRYPOINT of the image - -e, --env list Set environment variables - --env-file list Read in a file of environment variables - --group list Set one or more supplementary user groups for the container - --health-cmd string Command to run to check health - --health-interval duration Time between running the check (ns|us|ms|s|m|h) - --health-retries int Consecutive failures needed to report unhealthy - --health-start-period duration Start period for the container to initialize before counting retries towards unstable (ns|us|ms|s|m|h) - --health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) - --help Print usage - --host list Set one or more custom host-to-IP mappings (host:ip) - --hostname string Container hostname - -l, --label list Service labels - --limit-cpu decimal Limit CPUs - --limit-memory bytes Limit Memory - --log-driver string Logging driver for service - --log-opt list Logging driver options - --mode string Service mode (replicated or global) (default "replicated") - --mount mount Attach a filesystem mount to the service - --name string Service name - --network list Network attachments - --no-healthcheck Disable any container-specified HEALTHCHECK - --placement-pref pref Add a placement preference - -p, --publish port Publish a port as a node port - -q, --quiet Suppress progress output - --read-only Mount the container's root filesystem as read only - --replicas uint Number of tasks - --reserve-cpu decimal Reserve CPUs - --reserve-memory bytes Reserve Memory - --restart-condition string 
Restart when condition is met ("none"|"on-failure"|"any") (default "any") - --restart-delay duration Delay between restart attempts (ns|us|ms|s|m|h) (default 5s) - --restart-max-attempts uint Maximum number of restarts before giving up - --restart-window duration Window used to evaluate the restart policy (ns|us|ms|s|m|h) - --rollback-delay duration Delay between task rollbacks (ns|us|ms|s|m|h) (default 0s) - --rollback-failure-action string Action on rollback failure ("pause"|"continue") (default "pause") - --rollback-max-failure-ratio float Failure rate to tolerate during a rollback (default 0) - --rollback-monitor duration Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h) (default 5s) - --rollback-order string Rollback order ("start-first"|"stop-first") (default "stop-first") - --rollback-parallelism uint Maximum number of tasks rolled back simultaneously (0 to roll back all at once) (default 1) - --secret secret Specify secrets to expose to the service - --stop-grace-period duration Time to wait before force killing a container (ns|us|ms|s|m|h) (default 10s) - --stop-signal string Signal to stop the container - -t, --tty Allocate a pseudo-TTY - --update-delay duration Delay between updates (ns|us|ms|s|m|h) (default 0s) - --update-failure-action string Action on update failure ("pause"|"continue"|"rollback") (default "pause") - --update-max-failure-ratio float Failure rate to tolerate during an update (default 0) - --update-monitor duration Duration after each task update to monitor for failure (ns|us|ms|s|m|h) (default 5s) - --update-order string Update order ("start-first"|"stop-first") (default "stop-first") - --update-parallelism uint Maximum number of tasks updated simultaneously (0 to update all at once) (default 1) - -u, --user string Username or UID (format: [:]) - --with-registry-auth Send registry authentication details to swarm agents - -w, --workdir string Working directory inside the container -``` - -## Description - 
-Creates a service as described by the specified parameters. You must run this -command on a manager node. - -## Examples - -### Create a service - -```bash -$ docker service create --name redis redis:3.0.6 - -dmu1ept4cxcfe8k8lhtux3ro3 - -$ docker service create --mode global --name redis2 redis:3.0.6 - -a8q9dasaafudfs8q8w32udass - -$ docker service ls - -ID NAME MODE REPLICAS IMAGE -dmu1ept4cxcf redis replicated 1/1 redis:3.0.6 -a8q9dasaafud redis2 global 1/1 redis:3.0.6 -``` - -### Create a service with 5 replica tasks (--replicas) - -Use the `--replicas` flag to set the number of replica tasks for a replicated -service. The following command creates a `redis` service with `5` replica tasks: - -```bash -$ docker service create --name redis --replicas=5 redis:3.0.6 - -4cdgfyky7ozwh3htjfw0d12qv -``` - -The above command sets the *desired* number of tasks for the service. Even -though the command returns immediately, actual scaling of the service may take -some time. The `REPLICAS` column shows both the *actual* and *desired* number -of replica tasks for the service. - -In the following example the desired state is `5` replicas, but the current -number of `RUNNING` tasks is `3`: - -```bash -$ docker service ls - -ID NAME MODE REPLICAS IMAGE -4cdgfyky7ozw redis replicated 3/5 redis:3.0.7 -``` - -Once all the tasks are created and `RUNNING`, the actual number of tasks is -equal to the desired number: - -```bash -$ docker service ls - -ID NAME MODE REPLICAS IMAGE -4cdgfyky7ozw redis replicated 5/5 redis:3.0.7 -``` - -### Create a service with secrets - -Use the `--secret` flag to give a container access to a -[secret](secret_create.md). 
- -Create a service specifying a secret: - -```bash -$ docker service create --name redis --secret secret.json redis:3.0.6 - -4cdgfyky7ozwh3htjfw0d12qv -``` - -Create a service specifying the secret, target, user/group ID and mode: - -```bash -$ docker service create --name redis \ - --secret source=ssh-key,target=ssh \ - --secret source=app-key,target=app,uid=1000,gid=1001,mode=0400 \ - redis:3.0.6 - -4cdgfyky7ozwh3htjfw0d12qv -``` - -Secrets are located in `/run/secrets` in the container. If no target is -specified, the name of the secret will be used as the in memory file in the -container. If a target is specified, that will be the filename. In the -example above, two files will be created: `/run/secrets/ssh` and -`/run/secrets/app` for each of the secret targets specified. - -### Create a service with a rolling update policy - -```bash -$ docker service create \ - --replicas 10 \ - --name redis \ - --update-delay 10s \ - --update-parallelism 2 \ - redis:3.0.6 -``` - -When you run a [service update](service_update.md), the scheduler updates a -maximum of 2 tasks at a time, with `10s` between updates. For more information, -refer to the [rolling updates -tutorial](https://docs.docker.com/engine/swarm/swarm-tutorial/rolling-update/). - -### Set environment variables (-e, --env) - -This sets environmental variables for all tasks in a service. For example: - -```bash -$ docker service create --name redis_2 --replicas 5 --env MYVAR=foo redis:3.0.6 -``` - -### Create a service with specific hostname (--hostname) - -This option sets the docker service containers hostname to a specific string. -For example: - -```bash -$ docker service create --name redis --hostname myredis redis:3.0.6 -``` - -### Set metadata on a service (-l, --label) - -A label is a `key=value` pair that applies metadata to a service. 
To label a -service with two labels: - -```bash -$ docker service create \ - --name redis_2 \ - --label com.example.foo="bar" - --label bar=baz \ - redis:3.0.6 -``` - -For more information about labels, refer to [apply custom -metadata](https://docs.docker.com/engine/userguide/labels-custom-metadata/). - -### Add bind-mounts or volumes - -Docker supports two different kinds of mounts, which allow containers to read to -or write from files or directories on other containers or the host operating -system. These types are _data volumes_ (often referred to simply as volumes) and -_bind-mounts_. - -Additionally, Docker supports `tmpfs` mounts. - -A **bind-mount** makes a file or directory on the host available to the -container it is mounted within. A bind-mount may be either read-only or -read-write. For example, a container might share its host's DNS information by -means of a bind-mount of the host's `/etc/resolv.conf` or a container might -write logs to its host's `/var/log/myContainerLogs` directory. If you use -bind-mounts and your host and containers have different notions of permissions, -access controls, or other such details, you will run into portability issues. - -A **named volume** is a mechanism for decoupling persistent data needed by your -container from the image used to create the container and from the host machine. -Named volumes are created and managed by Docker, and a named volume persists -even when no container is currently using it. Data in named volumes can be -shared between a container and the host machine, as well as between multiple -containers. Docker uses a _volume driver_ to create, manage, and mount volumes. -You can back up or restore volumes using Docker commands. - -A **tmpfs** mounts a tmpfs inside a container for volatile data. - -Consider a situation where your image starts a lightweight web server. You could -use that image as a base image, copy in your website's HTML files, and package -that into another image. 
Each time your website changed, you'd need to update -the new image and redeploy all of the containers serving your website. A better -solution is to store the website in a named volume which is attached to each of -your web server containers when they start. To update the website, you just -update the named volume. - -For more information about named volumes, see -[Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/). - -The following table describes options which apply to both bind-mounts and named -volumes in a service: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
OptionRequiredDescription
types -

The type of mount, can be either volume, bind, or tmpfs. Defaults to volume if no type is specified. -

    -
  • volume: mounts a [managed volume](volume_create.md) into the container.
  • -
  • bind: bind-mounts a directory or file from the host into the container.
  • -
  • tmpfs: mount a tmpfs in the container
  • -

-
src or sourcefor type=bind only -
    -
  • - type=volume: src is an optional way to specify the name of the volume (for example, src=my-volume). - If the named volume does not exist, it is automatically created. If no src is specified, the volume is - assigned a random name which is guaranteed to be unique on the host, but may not be unique cluster-wide. - A randomly-named volume has the same lifecycle as its container and is destroyed when the container - is destroyed (which is upon service update, or when scaling or re-balancing the service) -
  • -
  • - type=bind: src is required, and specifies an absolute path to the file or directory to bind-mount - (for example, src=/path/on/host/). An error is produced if the file or directory does not exist. -
  • -
  • - type=tmpfs: src is not supported. -
  • -
-

dst or destination or target

yes -

Mount path inside the container, for example /some/path/in/container/. - If the path does not exist in the container's filesystem, the Engine creates - a directory at the specified location before mounting the volume or bind-mount.

-

readonly or ro

-

The Engine mounts binds and volumes read-write unless the readonly option - is given when mounting the bind or volume. -

    -
  • true or 1 or no value: Mounts the bind or volume read-only.
  • -
  • false or 0: Mounts the bind or volume read-write.
  • -

-
consistency -

The consistency requirements for the mount; one of -

    -
  • default: Equivalent to consistent.
  • -
  • consistent: Full consistency. The container runtime and the host maintain an identical view of the mount at all times.
  • -
  • cached: The host's view of the mount is authoritative. There may be delays before updates made on the host are visible within a container.
  • -
  • delegated: The container runtime's view of the mount is authoritative. There may be delays before updates made in a container are visible on the host.
  • -
-

-
- -

#### Bind Propagation

-Bind propagation refers to whether or not mounts created within a given
-bind-mount or named volume can be propagated to replicas of that mount. Consider
-a mount point `/mnt`, which is also mounted on `/tmp`. The propagation settings
-control whether a mount on `/tmp/a` would also be available on `/mnt/a`. Each
-propagation setting has a recursive counterpoint. In the case of recursion,
-consider that `/tmp/a` is also mounted as `/foo`. The propagation settings
-control whether `/mnt/a` and/or `/tmp/a` would exist.
-
-The `bind-propagation` option defaults to `rprivate` for both bind-mounts and
-volume mounts, and is only configurable for bind-mounts. In other words, named
-volumes do not support bind propagation.
-
-- **`shared`**: Sub-mounts of the original mount are exposed to replica mounts,
-               and sub-mounts of replica mounts are also propagated to the
-               original mount.
-- **`slave`**: similar to a shared mount, but only in one direction. If the
-               original mount exposes a sub-mount, the replica mount can see it.
-               However, if the replica mount exposes a sub-mount, the original
-               mount cannot see it.
-- **`private`**: The mount is private. Sub-mounts within it are not exposed to
-                replica mounts, and sub-mounts of replica mounts are not
-                exposed to the original mount.
-- **`rshared`**: The same as shared, but the propagation also extends to and from
-                mount points nested within any of the original or replica mount
-                points.
-- **`rslave`**: The same as `slave`, but the propagation also extends to and from
-                mount points nested within any of the original or replica mount
-                points.
-- **`rprivate`**: The default. The same as `private`, meaning that no mount points
-                 anywhere within the original or replica mount points propagate
-                 in either direction.
-
-For more information about bind propagation, see the
-[Linux kernel documentation for shared subtree](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). 
- -#### Options for Named Volumes - -The following options can only be used for named volumes (`type=volume`); - - - - - - - - - - - - - - - - - - - - - - - -
OptionDescription
volume-driver -

Name of the volume-driver plugin to use for the volume. Defaults to - "local", to use the local volume driver to create the volume if the - volume does not exist.

-
volume-label - One or more custom metadata ("labels") to apply to the volume upon - creation. For example, - `volume-label=mylabel=hello-world,my-other-label=hello-mars`. For more - information about labels, refer to - apply custom metadata. -
volume-nocopy - By default, if you attach an empty volume to a container, and files or - directories already existed at the mount-path in the container (dst), - the Engine copies those files and directories into the volume, allowing - the host to access them. Set `volume-nocopy` to disable copying files - from the container's filesystem to the volume and mount the empty volume. - - A value is optional: - -
    -
  • true or 1: Default if you do not provide a value. Disables copying.
  • -
  • false or 0: Enables copying.
  • -
-
volume-opt - Options specific to a given volume driver, which will be passed to the - driver when creating the volume. Options are provided as a comma-separated - list of key/value pairs, for example, - volume-opt=some-option=some-value,volume-opt=some-other-option=some-other-value. - For available options for a given driver, refer to that driver's - documentation. -
- - -#### Options for tmpfs - -The following options can only be used for tmpfs mounts (`type=tmpfs`); - - - - - - - - - - - - - - - -
OptionDescription
tmpfs-sizeSize of the tmpfs mount in bytes. Unlimited by default in Linux.
tmpfs-modeFile mode of the tmpfs in octal. (e.g. "700" or "0700".) Defaults to "1777" in Linux.
- - -#### Differences between "--mount" and "--volume" - -The `--mount` flag supports most options that are supported by the `-v` -or `--volume` flag for `docker run`, with some important exceptions: - -- The `--mount` flag allows you to specify a volume driver and volume driver - options *per volume*, without creating the volumes in advance. In contrast, - `docker run` allows you to specify a single volume driver which is shared - by all volumes, using the `--volume-driver` flag. - -- The `--mount` flag allows you to specify custom metadata ("labels") for a volume, - before the volume is created. - -- When you use `--mount` with `type=bind`, the host-path must refer to an *existing* - path on the host. The path will not be created for you and the service will fail - with an error if the path does not exist. - -- The `--mount` flag does not allow you to relabel a volume with `Z` or `z` flags, - which are used for `selinux` labeling. - -#### Create a service using a named volume - -The following example creates a service that uses a named volume: - -```bash -$ docker service create \ - --name my-service \ - --replicas 3 \ - --mount type=volume,source=my-volume,destination=/path/in/container,volume-label="color=red",volume-label="shape=round" \ - nginx:alpine -``` - -For each replica of the service, the engine requests a volume named "my-volume" -from the default ("local") volume driver where the task is deployed. If the -volume does not exist, the engine creates a new volume and applies the "color" -and "shape" labels. - -When the task is started, the volume is mounted on `/path/in/container/` inside -the container. - -Be aware that the default ("local") volume is a locally scoped volume driver. -This means that depending on where a task is deployed, either that task gets a -*new* volume named "my-volume", or shares the same "my-volume" with other tasks -of the same service. 
Multiple containers writing to a single shared volume can -cause data corruption if the software running inside the container is not -designed to handle concurrent processes writing to the same location. Also take -into account that containers can be re-scheduled by the Swarm orchestrator and -be deployed on a different node. - -#### Create a service that uses an anonymous volume - -The following command creates a service with three replicas with an anonymous -volume on `/path/in/container`: - -```bash -$ docker service create \ - --name my-service \ - --replicas 3 \ - --mount type=volume,destination=/path/in/container \ - nginx:alpine -``` - -In this example, no name (`source`) is specified for the volume, so a new volume -is created for each task. This guarantees that each task gets its own volume, -and volumes are not shared between tasks. Anonymous volumes are removed after -the task using them is complete. - -#### Create a service that uses a bind-mounted host directory - -The following example bind-mounts a host directory at `/path/in/container` in -the containers backing the service: - -```bash -$ docker service create \ - --name my-service \ - --mount type=bind,source=/path/on/host,destination=/path/in/container \ - nginx:alpine -``` - -### Set service mode (--mode) - -The service mode determines whether this is a _replicated_ service or a _global_ -service. A replicated service runs as many tasks as specified, while a global -service runs on each active node in the swarm. - -The following command creates a global service: - -```bash -$ docker service create \ - --name redis_2 \ - --mode global \ - redis:3.0.6 -``` - -### Specify service constraints (--constraint) - -You can limit the set of nodes where a task can be scheduled by defining -constraint expressions. Multiple constraints find nodes that satisfy every -expression (AND match). 
Constraints can match node or Docker Engine labels as -follows: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
node attributematchesexample
node.idNode IDnode.id == 2ivku8v2gvtg4
node.hostnameNode hostnamenode.hostname != node-2
node.roleNode rolenode.role == manager
node.labelsuser defined node labelsnode.labels.security == high
engine.labelsDocker Engine's labelsengine.labels.operatingsystem == ubuntu 14.04
- - -`engine.labels` apply to Docker Engine labels like operating system, -drivers, etc. Swarm administrators add `node.labels` for operational purposes by -using the [`docker node update`](node_update.md) command. - -For example, the following limits tasks for the redis service to nodes where the -node type label equals queue: - -```bash -$ docker service create \ - --name redis_2 \ - --constraint 'node.labels.type == queue' \ - redis:3.0.6 -``` - -### Specify service placement preferences (--placement-pref) - -You can set up the service to divide tasks evenly over different categories of -nodes. One example of where this can be useful is to balance tasks over a set -of datacenters or availability zones. The example below illustrates this: - -```bash -$ docker service create \ - --replicas 9 \ - --name redis_2 \ - --placement-pref 'spread=node.labels.datacenter' \ - redis:3.0.6 -``` - -This uses `--placement-pref` with a `spread` strategy (currently the only -supported strategy) to spread tasks evenly over the values of the `datacenter` -node label. In this example, we assume that every node has a `datacenter` node -label attached to it. If there are three different values of this label among -nodes in the swarm, one third of the tasks will be placed on the nodes -associated with each value. This is true even if there are more nodes with one -value than another. For example, consider the following set of nodes: - -- Three nodes with `node.labels.datacenter=east` -- Two nodes with `node.labels.datacenter=south` -- One node with `node.labels.datacenter=west` - -Since we are spreading over the values of the `datacenter` label and the -service has 9 replicas, 3 replicas will end up in each datacenter. There are -three nodes associated with the value `east`, so each one will get one of the -three replicas reserved for this value. 
There are two nodes with the value -`south`, and the three replicas for this value will be divided between them, -with one receiving two replicas and another receiving just one. Finally, `west` -has a single node that will get all three replicas reserved for `west`. - -If the nodes in one category (for example, those with -`node.labels.datacenter=south`) can't handle their fair share of tasks due to -constraints or resource limitations, the extra tasks will be assigned to other -nodes instead, if possible. - -Both engine labels and node labels are supported by placement preferences. The -example above uses a node label, because the label is referenced with -`node.labels.datacenter`. To spread over the values of an engine label, use -`--placement-pref spread=engine.labels.`. - -It is possible to add multiple placement preferences to a service. This -establishes a hierarchy of preferences, so that tasks are first divided over -one category, and then further divided over additional categories. One example -of where this may be useful is dividing tasks fairly between datacenters, and -then splitting the tasks within each datacenter over a choice of racks. To add -multiple placement preferences, specify the `--placement-pref` flag multiple -times. The order is significant, and the placement preferences will be applied -in the order given when making scheduling decisions. - -The following example sets up a service with multiple placement preferences. -Tasks are spread first over the various datacenters, and then over racks -(as indicated by the respective labels): - -```bash -$ docker service create \ - --replicas 9 \ - --name redis_2 \ - --placement-pref 'spread=node.labels.datacenter' \ - --placement-pref 'spread=node.labels.rack' \ - redis:3.0.6 -``` - -When updating a service with `docker service update`, `--placement-pref-add` -appends a new placement preference after all existing placement preferences. 
-`--placement-pref-rm` removes an existing placement preference that matches the -argument. - -### Attach a service to an existing network (--network) - -You can use overlay networks to connect one or more services within the swarm. - -First, create an overlay network on a manager node the docker network create -command: - -```bash -$ docker network create --driver overlay my-network - -etjpu59cykrptrgw0z0hk5snf -``` - -After you create an overlay network in swarm mode, all manager nodes have -access to the network. - -When you create a service and pass the --network flag to attach the service to -the overlay network: - -```bash -$ docker service create \ - --replicas 3 \ - --network my-network \ - --name my-web \ - nginx - -716thylsndqma81j6kkkb5aus -``` - -The swarm extends my-network to each node running the service. - -Containers on the same network can access each other using -[service discovery](https://docs.docker.com/engine/swarm/networking/#use-swarm-mode-service-discovery). - -### Publish service ports externally to the swarm (-p, --publish) - -You can publish service ports to make them available externally to the swarm -using the `--publish` flag: - -```bash -$ docker service create --publish : nginx -``` - -For example: - -```bash -$ docker service create --name my_web --replicas 3 --publish 8080:80 nginx -``` - -When you publish a service port, the swarm routing mesh makes the service -accessible at the target port on every node regardless if there is a task for -the service running on the node. For more information refer to -[Use swarm mode routing mesh](https://docs.docker.com/engine/swarm/ingress/). - -### Publish a port for TCP only or UDP only - -By default, when you publish a port, it is a TCP port. You can -specifically publish a UDP port instead of or in addition to a TCP port. When -you publish both TCP and UDP ports, Docker 1.12.2 and earlier require you to -add the suffix `/tcp` for TCP ports. Otherwise it is optional. 
- -#### TCP only - -The following two commands are equivalent. - -```bash -$ docker service create --name dns-cache -p 53:53 dns-cache - -$ docker service create --name dns-cache -p 53:53/tcp dns-cache -``` - -#### TCP and UDP - -```bash -$ docker service create --name dns-cache -p 53:53/tcp -p 53:53/udp dns-cache -``` - -#### UDP only - -```bash -$ docker service create --name dns-cache -p 53:53/udp dns-cache -``` - -### Create services using templates - -You can use templates for some flags of `service create`, using the syntax -provided by the Go's [text/template](http://golang.org/pkg/text/template/) package. - -The supported flags are the following : - -- `--hostname` -- `--mount` -- `--env` - -Valid placeholders for the Go template are listed below: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
PlaceholderDescription
.Service.IDService ID
.Service.NameService name
.Service.LabelsService labels
.Node.IDNode ID
.Task.IDTask ID
.Task.NameTask name
.Task.SlotTask slot
- - -#### Template example - -In this example, we are going to set the template of the created containers based on the -service's name and the node's ID where it sits. - -```bash -$ docker service create --name hosttempl \ - --hostname="{{.Node.ID}}-{{.Service.Name}}"\ - busybox top - -va8ew30grofhjoychbr6iot8c - -$ docker service ps va8ew30grofhjoychbr6iot8c - -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -wo41w8hg8qan hosttempl.1 busybox:latest@sha256:29f5d56d12684887bdfa50dcd29fc31eea4aaf4ad3bec43daf19026a7ce69912 2e7a8a9c4da2 Running Running about a minute ago - -$ docker inspect --format="{{.Config.Hostname}}" hosttempl.1.wo41w8hg8qanxwjwsg4kxpprj - -x3ti0erg11rjpg64m75kej2mz-hosttempl -``` - -## Related commands - -* [service inspect](service_inspect.md) -* [service logs](service_logs.md) -* [service ls](service_ls.md) -* [service rm](service_rm.md) -* [service scale](service_scale.md) -* [service ps](service_ps.md) -* [service update](service_update.md) - - diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_inspect.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_inspect.md deleted file mode 100644 index 24c593cec..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_inspect.md +++ /dev/null @@ -1,170 +0,0 @@ ---- -title: "service inspect" -description: "The service inspect command description and usage" -keywords: "service, inspect" ---- - - - -# service inspect - -```Markdown -Usage: docker service inspect [OPTIONS] SERVICE [SERVICE...] - -Display detailed information on one or more services - -Options: - -f, --format string Format the output using the given Go template - --help Print usage - --pretty Print the information in a human friendly format -``` - -## Description - -Inspects the specified service. This command has to be run targeting a manager -node. - -By default, this renders all results in a JSON array. 
If a format is specified, -the given template will be executed for each result. - -Go's [text/template](http://golang.org/pkg/text/template/) package -describes all the details of the format. - -## Examples - -### Inspect a service by name or ID - -You can inspect a service, either by its *name*, or *ID* - -For example, given the following service; - -```bash -$ docker service ls -ID NAME MODE REPLICAS IMAGE -dmu1ept4cxcf redis replicated 3/3 redis:3.0.6 -``` - -Both `docker service inspect redis`, and `docker service inspect dmu1ept4cxcf` -produce the same result: - -```none -$ docker service inspect redis - -[ - { - "ID": "dmu1ept4cxcfe8k8lhtux3ro3", - "Version": { - "Index": 12 - }, - "CreatedAt": "2016-06-17T18:44:02.558012087Z", - "UpdatedAt": "2016-06-17T18:44:02.558012087Z", - "Spec": { - "Name": "redis", - "TaskTemplate": { - "ContainerSpec": { - "Image": "redis:3.0.6" - }, - "Resources": { - "Limits": {}, - "Reservations": {} - }, - "RestartPolicy": { - "Condition": "any", - "MaxAttempts": 0 - }, - "Placement": {} - }, - "Mode": { - "Replicated": { - "Replicas": 1 - } - }, - "UpdateConfig": {}, - "EndpointSpec": { - "Mode": "vip" - } - }, - "Endpoint": { - "Spec": {} - } - } -] -``` - -```bash -$ docker service inspect dmu1ept4cxcf - -[ - { - "ID": "dmu1ept4cxcfe8k8lhtux3ro3", - "Version": { - "Index": 12 - }, - ... 
- } -] -``` - -### Formatting - -You can print the inspect output in a human-readable format instead of the default -JSON output, by using the `--pretty` option: - -```bash -$ docker service inspect --pretty frontend - -ID: c8wgl7q4ndfd52ni6qftkvnnp -Name: frontend -Labels: - - org.example.projectname=demo-app -Service Mode: REPLICATED - Replicas: 5 -Placement: -UpdateConfig: - Parallelism: 0 - On failure: pause - Max failure ratio: 0 -ContainerSpec: - Image: nginx:alpine -Resources: -Networks: net1 -Endpoint Mode: vip -Ports: - PublishedPort = 4443 - Protocol = tcp - TargetPort = 443 - PublishMode = ingress -``` - -You can also use `--format pretty` for the same effect. - - -#### Find the number of tasks running as part of a service - -The `--format` option can be used to obtain specific information about a -service. For example, the following command outputs the number of replicas -of the "redis" service. - -```bash -$ docker service inspect --format='{{.Spec.Mode.Replicated.Replicas}}' redis - -10 -``` - - -## Related commands - -* [service create](service_create.md) -* [service logs](service_logs.md) -* [service ls](service_ls.md) -* [service rm](service_rm.md) -* [service scale](service_scale.md) -* [service ps](service_ps.md) -* [service update](service_update.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_logs.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_logs.md deleted file mode 100644 index fd328d0f6..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_logs.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: "service logs" -description: "The service logs command description and usage" -keywords: "service, task, logs" ---- - - - -# service logs - -```Markdown -Usage: docker service logs [OPTIONS] SERVICE|TASK - -Fetch the logs of a service or task - -Options: - -f, --follow Follow log output - --help Print usage - --no-resolve Do not map IDs to Names in output 
- --no-task-ids Do not include task IDs in output - --no-trunc Do not truncate output - --since string Show logs since timestamp - --tail string Number of lines to show from the end of the logs (default "all") - -t, --timestamps Show timestamps -``` - -## Description - -The `docker service logs` command batch-retrieves logs present at the time of execution. - -The `docker service logs` command can be used with either the name or ID of a -service, or with the ID of a task. If a service is passed, it will display logs -for all of the containers in that service. If a task is passed, it will only -display logs from that particular task. - -> **Note**: This command is only functional for services that are started with -> the `json-file` or `journald` logging driver. - -For more information about selecting and configuring logging drivers, refer to -[Configure logging drivers](https://docs.docker.com/engine/admin/logging/overview/). - -The `docker service logs --follow` command will continue streaming the new output from -the service's `STDOUT` and `STDERR`. - -Passing a negative number or a non-integer to `--tail` is invalid and the -value is set to `all` in that case. - -The `docker service logs --timestamps` command will add an [RFC3339Nano timestamp](https://golang.org/pkg/time/#pkg-constants) -, for example `2014-09-16T06:17:46.000000000Z`, to each -log entry. To ensure that the timestamps are aligned the -nano-second part of the timestamp will be padded with zero when necessary. - -The `docker service logs --details` command will add on extra attributes, such as -environment variables and labels, provided to `--log-opt` when creating the -service. - -The `--since` option shows only the service logs generated after -a given date. You can specify the date as an RFC 3339 date, a UNIX -timestamp, or a Go duration string (e.g. `1m30s`, `3h`). 
Besides RFC3339 date -format you may also use RFC3339Nano, `2006-01-02T15:04:05`, -`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local -timezone on the client will be used if you do not provide either a `Z` or a -`+-00:00` timezone offset at the end of the timestamp. When providing Unix -timestamps enter seconds[.nanoseconds], where seconds is the number of seconds -that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap -seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a -fraction of a second no more than nine digits long. You can combine the -`--since` option with either or both of the `--follow` or `--tail` options. - -## Related commands - -* [service create](service_create.md) -* [service inspect](service_inspect.md) -* [service ls](service_ls.md) -* [service rm](service_rm.md) -* [service scale](service_scale.md) -* [service ps](service_ps.md) -* [service update](service_update.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_ls.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_ls.md deleted file mode 100644 index c222c0485..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_ls.md +++ /dev/null @@ -1,164 +0,0 @@ ---- -title: "service ls" -description: "The service ls command description and usage" -keywords: "service, ls" ---- - - - -# service ls - -```Markdown -Usage: docker service ls [OPTIONS] - -List services - -Aliases: - ls, list - -Options: - -f, --filter filter Filter output based on conditions provided - --format string Pretty-print services using a Go template - --help Print usage - -q, --quiet Only display IDs -``` - -## Description - -This command when run targeting a manager, lists services are running in the -swarm. 
- -## Examples - -On a manager node: - -```bash -$ docker service ls - -ID NAME MODE REPLICAS IMAGE -c8wgl7q4ndfd frontend replicated 5/5 nginx:alpine -dmu1ept4cxcf redis replicated 3/3 redis:3.0.6 -iwe3278osahj mongo global 7/7 mongo:3.3 -``` - -The `REPLICAS` column shows both the *actual* and *desired* number of tasks for -the service. - -### Filtering - -The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more -than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) - -The currently supported filters are: - -* [id](service_ls.md#id) -* [label](service_ls.md#label) -* [mode](service_ls.md#mode) -* [name](service_ls.md#name) - -#### id - -The `id` filter matches all or part of a service's id. - -```bash -$ docker service ls -f "id=0bcjw" -ID NAME MODE REPLICAS IMAGE -0bcjwfh8ychr redis replicated 1/1 redis:3.0.6 -``` - -#### label - -The `label` filter matches services based on the presence of a `label` alone or -a `label` and a value. - -The following filter matches all services with a `project` label regardless of -its value: - -```bash -$ docker service ls --filter label=project -ID NAME MODE REPLICAS IMAGE -01sl1rp6nj5u frontend2 replicated 1/1 nginx:alpine -36xvvwwauej0 frontend replicated 5/5 nginx:alpine -74nzcxxjv6fq backend replicated 3/3 redis:3.0.6 -``` - -The following filter matches only services with the `project` label with the -`project-a` value. - -```bash -$ docker service ls --filter label=project=project-a -ID NAME MODE REPLICAS IMAGE -36xvvwwauej0 frontend replicated 5/5 nginx:alpine -74nzcxxjv6fq backend replicated 3/3 redis:3.0.6 -``` - -#### mode - -The `mode` filter matches on the mode (either `replicated` or `global`) of a service. - -The following filter matches only `global` services. 
- -```bash -$ docker service ls --filter mode=global -ID NAME MODE REPLICAS IMAGE -w7y0v2yrn620 top global 1/1 busybox -``` - -#### name - -The `name` filter matches on all or part of a service's name. - -The following filter matches services with a name containing `redis`. - -```bash -$ docker service ls --filter name=redis -ID NAME MODE REPLICAS IMAGE -0bcjwfh8ychr redis replicated 1/1 redis:3.0.6 -``` - -### Formatting - -The formatting options (`--format`) pretty-prints services output -using a Go template. - -Valid placeholders for the Go template are listed below: - -Placeholder | Description -------------|------------------------------------------------------------------------------------------ -`.ID` | Service ID -`.Name` | Service name -`.Mode` | Service mode (replicated, global) -`.Replicas` | Service replicas -`.Image` | Service image -`.Ports` | Service ports published in ingress mode - -When using the `--format` option, the `service ls` command will either -output the data exactly as the template declares or, when using the -`table` directive, includes column headers as well. 
- -The following example uses a template without headers and outputs the -`ID`, `Mode`, and `Replicas` entries separated by a colon for all services: - -```bash -$ docker service ls --format "{{.ID}}: {{.Mode}} {{.Replicas}}" - -0zmvwuiu3vue: replicated 10/10 -fm6uf97exkul: global 5/5 -``` - -## Related commands - -* [service create](service_create.md) -* [service inspect](service_inspect.md) -* [service logs](service_logs.md) -* [service rm](service_rm.md) -* [service scale](service_scale.md) -* [service ps](service_ps.md) -* [service update](service_update.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_ps.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_ps.md deleted file mode 100644 index 51e8604c7..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_ps.md +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: "service ps" -description: "The service ps command description and usage" -keywords: "service, tasks, ps" -aliases: ["/engine/reference/commandline/service_tasks/"] ---- - - - -# service ps - -```Markdown -Usage: docker service ps [OPTIONS] SERVICE [SERVICE...] - -List the tasks of one or more services - -Options: - -f, --filter filter Filter output based on conditions provided - --format string Pretty-print tasks using a Go template - --help Print usage - --no-resolve Do not map IDs to Names - --no-trunc Do not truncate output - -q, --quiet Only display task IDs -``` - -## Description - -Lists the tasks that are running as part of the specified services. This command -has to be run targeting a manager node. 
- -## Examples - -### List the tasks that are part of a service - -The following command shows all the tasks that are part of the `redis` service: - -```bash -$ docker service ps redis - -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -0qihejybwf1x redis.1 redis:3.0.5 manager1 Running Running 8 seconds -bk658fpbex0d redis.2 redis:3.0.5 worker2 Running Running 9 seconds -5ls5s5fldaqg redis.3 redis:3.0.5 worker1 Running Running 9 seconds -8ryt076polmc redis.4 redis:3.0.5 worker1 Running Running 9 seconds -1x0v8yomsncd redis.5 redis:3.0.5 manager1 Running Running 8 seconds -71v7je3el7rr redis.6 redis:3.0.5 worker2 Running Running 9 seconds -4l3zm9b7tfr7 redis.7 redis:3.0.5 worker2 Running Running 9 seconds -9tfpyixiy2i7 redis.8 redis:3.0.5 worker1 Running Running 9 seconds -3w1wu13yupln redis.9 redis:3.0.5 manager1 Running Running 8 seconds -8eaxrb2fqpbn redis.10 redis:3.0.5 manager1 Running Running 8 seconds -``` - -In addition to _running_ tasks, the output also shows the task history. 
For -example, after updating the service to use the `redis:3.0.6` image, the output -may look like this: - -```bash -$ docker service ps redis - -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -50qe8lfnxaxk redis.1 redis:3.0.6 manager1 Running Running 6 seconds ago -ky2re9oz86r9 \_ redis.1 redis:3.0.5 manager1 Shutdown Shutdown 8 seconds ago -3s46te2nzl4i redis.2 redis:3.0.6 worker2 Running Running less than a second ago -nvjljf7rmor4 \_ redis.2 redis:3.0.6 worker2 Shutdown Rejected 23 seconds ago "No such image: redis@sha256:6…" -vtiuz2fpc0yb \_ redis.2 redis:3.0.5 worker2 Shutdown Shutdown 1 second ago -jnarweeha8x4 redis.3 redis:3.0.6 worker1 Running Running 3 seconds ago -vs448yca2nz4 \_ redis.3 redis:3.0.5 worker1 Shutdown Shutdown 4 seconds ago -jf1i992619ir redis.4 redis:3.0.6 worker1 Running Running 10 seconds ago -blkttv7zs8ee \_ redis.4 redis:3.0.5 worker1 Shutdown Shutdown 11 seconds ago -``` - -The number of items in the task history is determined by the -`--task-history-limit` option that was set when initializing the swarm. You can -change the task history retention limit using the -[`docker swarm update`](swarm_update.md) command. - -When deploying a service, docker resolves the digest for the service's -image, and pins the service to that digest. The digest is not shown by -default, but is printed if `--no-trunc` is used. 
The `--no-trunc` option -also shows the non-truncated task ID, and error-messages, as can be seen below; - -```bash -$ docker service ps --no-trunc redis - -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -50qe8lfnxaxksi9w2a704wkp7 redis.1 redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842 manager1 Running Running 5 minutes ago -ky2re9oz86r9556i2szb8a8af \_ redis.1 redis:3.0.5@sha256:f8829e00d95672c48c60f468329d6693c4bdd28d1f057e755f8ba8b40008682e worker2 Shutdown Shutdown 5 minutes ago -bk658fpbex0d57cqcwoe3jthu redis.2 redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842 worker2 Running Running 5 seconds -nvjljf7rmor4htv7l8rwcx7i7 \_ redis.2 redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842 worker2 Shutdown Rejected 5 minutes ago "No such image: redis@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842" -``` - -### Filtering - -The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there -is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). -Multiple filter flags are combined as an `OR` filter. For example, -`-f name=redis.1 -f name=redis.7` returns both `redis.1` and `redis.7` tasks. - -The currently supported filters are: - -* [id](#id) -* [name](#name) -* [node](#node) -* [desired-state](#desired-state) - - -#### id - -The `id` filter matches on all or a prefix of a task's ID. - -```bash -$ docker service ps -f "id=8" redis - -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -8ryt076polmc redis.4 redis:3.0.6 worker1 Running Running 9 seconds -8eaxrb2fqpbn redis.10 redis:3.0.6 manager1 Running Running 8 seconds -``` - -#### name - -The `name` filter matches on task names. 
- -```bash -$ docker service ps -f "name=redis.1" redis -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -qihejybwf1x5 redis.1 redis:3.0.6 manager1 Running Running 8 seconds -``` - - -#### node - -The `node` filter matches on a node name or a node ID. - -```bash -$ docker service ps -f "node=manager1" redis -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -0qihejybwf1x redis.1 redis:3.0.6 manager1 Running Running 8 seconds -1x0v8yomsncd redis.5 redis:3.0.6 manager1 Running Running 8 seconds -3w1wu13yupln redis.9 redis:3.0.6 manager1 Running Running 8 seconds -8eaxrb2fqpbn redis.10 redis:3.0.6 manager1 Running Running 8 seconds -``` - -#### desired-state - -The `desired-state` filter can take the values `running`, `shutdown`, or `accepted`. - -### Formatting - -The formatting options (`--format`) pretty-prints tasks output -using a Go template. - -Valid placeholders for the Go template are listed below: - -Placeholder | Description -----------------|------------------------------------------------------------------------------------------ -`.ID` | Task ID -`.Name` | Task name -`.Image` | Task image -`.Node` | Node ID -`.DesiredState` | Desired state of the task (`running`, `shutdown`, or `accepted`) -`.CurrentState` | Current state of the task -`.Error` | Error -`.Ports` | Task published ports - -When using the `--format` option, the `service ps` command will either -output the data exactly as the template declares or, when using the -`table` directive, includes column headers as well. 
- -The following example uses a template without headers and outputs the -`Name` and `Image` entries separated by a colon for all tasks: - -```bash -$ docker service ps --format "{{.Name}}: {{.Image}}" top -top.1: busybox -top.2: busybox -top.3: busybox -``` - -## Related commands - -* [service create](service_create.md) -* [service inspect](service_inspect.md) -* [service logs](service_logs.md) -* [service ls](service_ls.md) -* [service rm](service_rm.md) -* [service scale](service_scale.md) -* [service update](service_update.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_rm.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_rm.md deleted file mode 100644 index 448f2c3b2..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_rm.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: "service rm" -description: "The service rm command description and usage" -keywords: "service, rm" ---- - - - -# service rm - -```Markdown -Usage: docker service rm SERVICE [SERVICE...] - -Remove one or more services - -Aliases: - rm, remove - -Options: - --help Print usage -``` - -## Description - -Removes the specified services from the swarm. This command has to be run -targeting a manager node. - -## Examples - -Remove the `redis` service: - -```bash -$ docker service rm redis - -redis - -$ docker service ls - -ID NAME MODE REPLICAS IMAGE -``` - -> **Warning**: Unlike `docker rm`, this command does not ask for confirmation -> before removing a running service. 
- -## Related commands - -* [service create](service_create.md) -* [service inspect](service_inspect.md) -* [service logs](service_logs.md) -* [service ls](service_ls.md) -* [service scale](service_scale.md) -* [service ps](service_ps.md) -* [service update](service_update.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_scale.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_scale.md deleted file mode 100644 index a3aef5fd3..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_scale.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: "service scale" -description: "The service scale command description and usage" -keywords: "service, scale" ---- - - - -# service scale - -```markdown -Usage: docker service scale SERVICE=REPLICAS [SERVICE=REPLICAS...] - -Scale one or multiple replicated services - -Options: - --help Print usage -``` - -## Description - -The scale command enables you to scale one or more replicated services either up -or down to the desired number of replicas. This command cannot be applied on -services which are global mode. The command will return immediately, but the -actual scaling of the service may take some time. To stop all replicas of a -service while keeping the service active in the swarm you can set the scale to 0. - -## Examples - -### Scale a single service - -The following command scales the "frontend" service to 50 tasks. - -```bash -$ docker service scale frontend=50 - -frontend scaled to 50 -``` - -The following command tries to scale a global service to 10 tasks and returns an error. - -```bash -$ docker service create --mode global --name backend backend:latest - -b4g08uwuairexjub6ome6usqh - -$ docker service scale backend=10 - -backend: scale can only be used with replicated mode -``` - -Directly afterwards, run `docker service ls`, to see the actual number of -replicas. 
- -```bash -$ docker service ls --filter name=frontend - -ID NAME MODE REPLICAS IMAGE -3pr5mlvu3fh9 frontend replicated 15/50 nginx:alpine -``` - -You can also scale a service using the [`docker service update`](service_update.md) -command. The following commands are equivalent: - -```bash -$ docker service scale frontend=50 -$ docker service update --replicas=50 frontend -``` - -### Scale multiple services - -The `docker service scale` command allows you to set the desired number of -tasks for multiple services at once. The following example scales both the -backend and frontend services: - -```bash -$ docker service scale backend=3 frontend=5 - -backend scaled to 3 -frontend scaled to 5 - -$ docker service ls - -ID NAME MODE REPLICAS IMAGE -3pr5mlvu3fh9 frontend replicated 5/5 nginx:alpine -74nzcxxjv6fq backend replicated 3/3 redis:3.0.6 -``` - -## Related commands - -* [service create](service_create.md) -* [service inspect](service_inspect.md) -* [service logs](service_logs.md) -* [service ls](service_ls.md) -* [service rm](service_rm.md) -* [service ps](service_ps.md) -* [service update](service_update.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_update.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_update.md deleted file mode 100644 index fae6b0af8..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/service_update.md +++ /dev/null @@ -1,265 +0,0 @@ ---- -title: "service update" -description: "The service update command description and usage" -keywords: "service, update" ---- - - - -# service update - -```Markdown -Usage: docker service update [OPTIONS] SERVICE - -Update a service - -Options: - --args command Service command args - --constraint-add list Add or update a placement constraint - --constraint-rm list Remove a constraint - --container-label-add list Add or update a container label - --container-label-rm list Remove a container label by its key - -d, 
--detach Exit immediately instead of waiting for the service to converge (default true) - --dns-add list Add or update a custom DNS server - --dns-option-add list Add or update a DNS option - --dns-option-rm list Remove a DNS option - --dns-rm list Remove a custom DNS server - --dns-search-add list Add or update a custom DNS search domain - --dns-search-rm list Remove a DNS search domain - --endpoint-mode string Endpoint mode (vip or dnsrr) - --entrypoint command Overwrite the default ENTRYPOINT of the image - --env-add list Add or update an environment variable - --env-rm list Remove an environment variable - --force Force update even if no changes require it - --group-add list Add an additional supplementary user group to the container - --group-rm list Remove a previously added supplementary user group from the container - --health-cmd string Command to run to check health - --health-interval duration Time between running the check (ns|us|ms|s|m|h) - --health-retries int Consecutive failures needed to report unhealthy - --health-start-period duration Start period for the container to initialize before counting retries towards unstable (ns|us|ms|s|m|h) - --health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) - --help Print usage - --host-add list Add or update a custom host-to-IP mapping (host:ip) - --host-rm list Remove a custom host-to-IP mapping (host:ip) - --hostname string Container hostname - --image string Service image tag - --label-add list Add or update a service label - --label-rm list Remove a label by its key - --limit-cpu decimal Limit CPUs - --limit-memory bytes Limit Memory - --log-driver string Logging driver for service - --log-opt list Logging driver options - --mount-add mount Add or update a mount on a service - --mount-rm list Remove a mount by its target path - --network-add list Add a network - --network-rm list Remove a network - --no-healthcheck Disable any container-specified HEALTHCHECK - --placement-pref-add 
pref Add a placement preference - --placement-pref-rm pref Remove a placement preference - --publish-add port Add or update a published port - --publish-rm port Remove a published port by its target port - -q, --quiet Suppress progress output - --read-only Mount the container's root filesystem as read only - --replicas uint Number of tasks - --reserve-cpu decimal Reserve CPUs - --reserve-memory bytes Reserve Memory - --restart-condition string Restart when condition is met ("none"|"on-failure"|"any") - --restart-delay duration Delay between restart attempts (ns|us|ms|s|m|h) - --restart-max-attempts uint Maximum number of restarts before giving up - --restart-window duration Window used to evaluate the restart policy (ns|us|ms|s|m|h) - --rollback Rollback to previous specification - --rollback-delay duration Delay between task rollbacks (ns|us|ms|s|m|h) - --rollback-failure-action string Action on rollback failure ("pause"|"continue") - --rollback-max-failure-ratio float Failure rate to tolerate during a rollback - --rollback-monitor duration Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h) - --rollback-order string Rollback order ("start-first"|"stop-first") (default "stop-first") - --rollback-parallelism uint Maximum number of tasks rolled back simultaneously (0 to roll back all at once) - --secret-add secret Add or update a secret on a service - --secret-rm list Remove a secret - --stop-grace-period duration Time to wait before force killing a container (ns|us|ms|s|m|h) - --stop-signal string Signal to stop the container - -t, --tty Allocate a pseudo-TTY - --update-delay duration Delay between updates (ns|us|ms|s|m|h) - --update-failure-action string Action on update failure ("pause"|"continue"|"rollback") - --update-max-failure-ratio float Failure rate to tolerate during an update - --update-monitor duration Duration after each task update to monitor for failure (ns|us|ms|s|m|h) - --update-order string Update order 
("start-first"|"stop-first") - --update-parallelism uint Maximum number of tasks updated simultaneously (0 to update all at once) - -u, --user string Username or UID (format: [:]) - --with-registry-auth Send registry authentication details to swarm agents - -w, --workdir string Working directory inside the container -``` - -## Description - -Updates a service as described by the specified parameters. This command has to be run targeting a manager node. -The parameters are the same as [`docker service create`](service_create.md). Please look at the description there -for further information. - -Normally, updating a service will only cause the service's tasks to be replaced with new ones if a change to the -service requires recreating the tasks for it to take effect. For example, only changing the -`--update-parallelism` setting will not recreate the tasks, because the individual tasks are not affected by this -setting. However, the `--force` flag will cause the tasks to be recreated anyway. This can be used to perform a -rolling restart without any changes to the service parameters. - -## Examples - -### Update a service - -```bash -$ docker service update --limit-cpu 2 redis -``` - -### Perform a rolling restart with no parameter changes - -```bash -$ docker service update --force --update-parallelism 1 --update-delay 30s redis -``` - -In this example, the `--force` flag causes the service's tasks to be shut down -and replaced with new ones even though none of the other parameters would -normally cause that to happen. The `--update-parallelism 1` setting ensures -that only one task is replaced at a time (this is the default behavior). The -`--update-delay 30s` setting introduces a 30 second delay between tasks, so -that the rolling restart happens gradually. - -### Add or remove mounts - -Use the `--mount-add` or `--mount-rm` options add or remove a service's bind-mounts -or volumes. 
- -The following example creates a service which mounts the `test-data` volume to -`/somewhere`. The next step updates the service to also mount the `other-volume` -volume to `/somewhere-else`volume, The last step unmounts the `/somewhere` mount -point, effectively removing the `test-data` volume. Each command returns the -service name. - -- The `--mount-add` flag takes the same parameters as the `--mount` flag on - `service create`. Refer to the [volumes and - bind-mounts](service_create.md#volumes-and-bind-mounts-mount) section in the - `service create` reference for details. - -- The `--mount-rm` flag takes the `target` path of the mount. - -```bash -$ docker service create \ - --name=myservice \ - --mount \ - type=volume,source=test-data,target=/somewhere \ - nginx:alpine \ - myservice - -myservice - -$ docker service update \ - --mount-add \ - type=volume,source=other-volume,target=/somewhere-else \ - myservice - -myservice - -$ docker service update --mount-rm /somewhere myservice - -myservice -``` - -### Rolling back to the previous version of a service - -Use the `--rollback` option to roll back to the previous version of the service. - -This will revert the service to the configuration that was in place before the most recent `docker service update` command. - -The following example updates the number of replicas for the service from 4 to 5, and then rolls back to the previous configuration. - -```bash -$ docker service update --replicas=5 web - -web - -$ docker service ls - -ID NAME MODE REPLICAS IMAGE -80bvrzp6vxf3 web replicated 0/5 nginx:alpine - -``` -Roll back the `web` service... 
- -```bash -$ docker service update --rollback web - -web - -$ docker service ls - -ID NAME MODE REPLICAS IMAGE -80bvrzp6vxf3 web replicated 0/4 nginx:alpine - -``` - -Other options can be combined with `--rollback` as well, for example, `--update-delay 0s` to execute the rollback without a delay between tasks: - -```bash -$ docker service update \ - --rollback \ - --update-delay 0s - web - -web - -``` - -Services can also be set up to roll back to the previous version automatically -when an update fails. To set up a service for automatic rollback, use -`--update-failure-action=rollback`. A rollback will be triggered if the fraction -of the tasks which failed to update successfully exceeds the value given with -`--update-max-failure-ratio`. - -The rate, parallelism, and other parameters of a rollback operation are -determined by the values passed with the following flags: - -- `--rollback-delay` -- `--rollback-failure-action` -- `--rollback-max-failure-ratio` -- `--rollback-monitor` -- `--rollback-parallelism` - -For example, a service set up with `--update-parallelism 1 --rollback-parallelism 3` -will update one task at a time during a normal update, but during a rollback, 3 -tasks at a time will get rolled back. These rollback parameters are respected both -during automatic rollbacks and for rollbacks initiated manually using `--rollback`. - -### Add or remove secrets - -Use the `--secret-add` or `--secret-rm` options add or remove a service's -secrets. - -The following example adds a secret named `ssh-2` and removes `ssh-1`: - -```bash -$ docker service update \ - --secret-add source=ssh-2,target=ssh-2 \ - --secret-rm ssh-1 \ - myservice -``` - -### Update services using templates - -Some flags of `service update` support the use of templating. -See [`service create`](./service_create.md#templating) for the reference. 
- -## Related commands - -* [service create](service_create.md) -* [service inspect](service_inspect.md) -* [service logs](service_logs.md) -* [service ls](service_ls.md) -* [service ps](service_ps.md) -* [service rm](service_rm.md) -* [service scale](service_scale.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/stack.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/stack.md deleted file mode 100644 index 94e3e252f..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/stack.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: "stack" -description: "The stack command description and usage" -keywords: "stack" ---- - - - -# stack - -```markdown -Usage: docker stack COMMAND - -Manage Docker stacks - -Options: - --help Print usage - -Commands: - deploy Deploy a new stack or update an existing stack - ls List stacks - ps List the tasks in the stack - rm Remove the stack - services List the services in the stack - -Run 'docker stack COMMAND --help' for more information on a command. -``` - -## Description - -Manage stacks. 
- diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/stack_deploy.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/stack_deploy.md deleted file mode 100644 index d57ef0f76..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/stack_deploy.md +++ /dev/null @@ -1,107 +0,0 @@ ---- -title: "stack deploy" -description: "The stack deploy command description and usage" -keywords: "stack, deploy, up" ---- - - - -# stack deploy - -```markdown -Usage: docker stack deploy [OPTIONS] STACK - -Deploy a new stack or update an existing stack - -Aliases: - deploy, up - -Options: - --bundle-file string Path to a Distributed Application Bundle file - -c, --compose-file string Path to a Compose file - --help Print usage - --prune Prune services that are no longer referenced - --with-registry-auth Send registry authentication details to Swarm agents -``` - -## Description - -Create and update a stack from a `compose` or a `dab` file on the swarm. This command -has to be run targeting a manager node. - -## Examples - -### Compose file - -The `deploy` command supports compose file version `3.0` and above." 
- -```bash -$ docker stack deploy --compose-file docker-compose.yml vossibility - -Ignoring unsupported options: links - -Creating network vossibility_vossibility -Creating network vossibility_default -Creating service vossibility_nsqd -Creating service vossibility_logstash -Creating service vossibility_elasticsearch -Creating service vossibility_kibana -Creating service vossibility_ghollector -Creating service vossibility_lookupd -``` - -You can verify that the services were correctly created - -```bash -$ docker service ls - -ID NAME MODE REPLICAS IMAGE -29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 -4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 -4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa -7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 -9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe -axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba -``` - -### DAB file - -```bash -$ docker stack deploy --bundle-file vossibility-stack.dab vossibility - -Loading bundle from vossibility-stack.dab -Creating service vossibility_elasticsearch -Creating service vossibility_kibana -Creating service vossibility_logstash -Creating service vossibility_lookupd -Creating service vossibility_nsqd -Creating service vossibility_vossibility-collector -``` - -You can verify that the services were correctly created: - -```bash -$ docker service ls - -ID NAME MODE REPLICAS IMAGE -29bv0vnlm903 vossibility_lookupd replicated 1/1 
nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 -4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 -4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa -7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 -9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe -axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba -``` - -## Related commands - -* [stack ls](stack_ls.md) -* [stack ps](stack_ps.md) -* [stack rm](stack_rm.md) -* [stack services](stack_services.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/stack_ls.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/stack_ls.md deleted file mode 100644 index 567d947ba..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/stack_ls.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: "stack ls" -description: "The stack ls command description and usage" -keywords: "stack, ls" ---- - - - -# stack ls - -```markdown -Usage: docker stack ls - -List stacks - -Aliases: - ls, list - -Options: - --help Print usage -``` - -## Description - -Lists the stacks. 
- -## Examples - -The following command shows all stacks and some additional information: - -```bash -$ docker stack ls - -ID SERVICES -vossibility-stack 6 -myapp 2 -``` - -## Related commands - -* [stack deploy](stack_deploy.md) -* [stack ps](stack_ps.md) -* [stack rm](stack_rm.md) -* [stack services](stack_services.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/stack_ps.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/stack_ps.md deleted file mode 100644 index 901b46b22..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/stack_ps.md +++ /dev/null @@ -1,230 +0,0 @@ ---- -title: "stack ps" -description: "The stack ps command description and usage" -keywords: "stack, ps" ---- - - - -# stack ps - -```markdown -Usage: docker stack ps [OPTIONS] STACK - -List the tasks in the stack - -Options: - -f, --filter filter Filter output based on conditions provided - --format string Pretty-print tasks using a Go template - --help Print usage - --no-resolve Do not map IDs to Names - --no-trunc Do not truncate output - -q, --quiet Only display task IDs -``` - -## Description - -Lists the tasks that are running as part of the specified stack. This -command has to be run targeting a manager node. 
- -## Examples - -### List the tasks that are part of a stack - -The following command shows all the tasks that are part of the `voting` stack: - -```bash -$ docker stack ps voting -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -xim5bcqtgk1b voting_worker.1 dockersamples/examplevotingapp_worker:latest node2 Running Running 2 minutes ago -q7yik0ks1in6 voting_result.1 dockersamples/examplevotingapp_result:before node1 Running Running 2 minutes ago -rx5yo0866nfx voting_vote.1 dockersamples/examplevotingapp_vote:before node3 Running Running 2 minutes ago -tz6j82jnwrx7 voting_db.1 postgres:9.4 node1 Running Running 2 minutes ago -w48spazhbmxc voting_redis.1 redis:alpine node2 Running Running 3 minutes ago -6jj1m02freg1 voting_visualizer.1 dockersamples/visualizer:stable node1 Running Running 2 minutes ago -kqgdmededccb voting_vote.2 dockersamples/examplevotingapp_vote:before node2 Running Running 2 minutes ago -t72q3z038jeh voting_redis.2 redis:alpine node3 Running Running 3 minutes ago -``` - -### Filtering - -The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there -is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). -Multiple filter flags are combined as an `OR` filter. For example, -`-f name=redis.1 -f name=redis.7` returns both `redis.1` and `redis.7` tasks. - -The currently supported filters are: - -* [id](#id) -* [name](#name) -* [node](#node) -* [desired-state](#desired-state) - -#### id - -The `id` filter matches on all or a prefix of a task's ID. - -```bash -$ docker stack ps -f "id=t" voting -ID NAME IMAGE NODE DESIRED STATE CURRENTSTATE ERROR PORTS -tz6j82jnwrx7 voting_db.1 postgres:9.4 node1 Running Running 14 minutes ago -t72q3z038jeh voting_redis.2 redis:alpine node3 Running Running 14 minutes ago -``` - -#### name - -The `name` filter matches on task names. 
- -```bash -$ docker stack ps -f "name=voting_redis" voting -ID NAME IMAGE NODE DESIRED STATE CURRENTSTATE ERROR PORTS -w48spazhbmxc voting_redis.1 redis:alpine node2 Running Running 17 minutes ago -t72q3z038jeh voting_redis.2 redis:alpine node3 Running Running 17 minutes ago -``` - -#### node - -The `node` filter matches on a node name or a node ID. - -```bash -$ docker stack ps -f "node=node1" voting -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -q7yik0ks1in6 voting_result.1 dockersamples/examplevotingapp_result:before node1 Running Running 18 minutes ago -tz6j82jnwrx7 voting_db.1 postgres:9.4 node1 Running Running 18 minutes ago -6jj1m02freg1 voting_visualizer.1 dockersamples/visualizer:stable node1 Running Running 18 minutes ago -``` - -#### desired-state - -The `desired-state` filter can take the values `running`, `shutdown`, or `accepted`. - -```bash -$ docker stack ps -f "desired-state=running" voting -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -xim5bcqtgk1b voting_worker.1 dockersamples/examplevotingapp_worker:latest node2 Running Running 21 minutes ago -q7yik0ks1in6 voting_result.1 dockersamples/examplevotingapp_result:before node1 Running Running 21 minutes ago -rx5yo0866nfx voting_vote.1 dockersamples/examplevotingapp_vote:before node3 Running Running 21 minutes ago -tz6j82jnwrx7 voting_db.1 postgres:9.4 node1 Running Running 21 minutes ago -w48spazhbmxc voting_redis.1 redis:alpine node2 Running Running 21 minutes ago -6jj1m02freg1 voting_visualizer.1 dockersamples/visualizer:stable node1 Running Running 21 minutes ago -kqgdmededccb voting_vote.2 dockersamples/examplevotingapp_vote:before node2 Running Running 21 minutes ago -t72q3z038jeh voting_redis.2 redis:alpine node3 Running Running 21 minutes ago -``` - -### Formatting - -The formatting options (`--format`) pretty-prints tasks output using a Go template. 
- -Valid placeholders for the Go template are listed below: - -Placeholder | Description -----------------|------------------------------------------------------------------------------------------ -`.ID` | Task ID -`.Name` | Task name -`.Image` | Task image -`.Node` | Node ID -`.DesiredState` | Desired state of the task (`running`, `shutdown`, or `accepted`) -`.CurrentState` | Current state of the task -`.Error` | Error -`.Ports` | Task published ports - -When using the `--format` option, the `stack ps` command will either -output the data exactly as the template declares or, when using the -`table` directive, includes column headers as well. - -The following example uses a template without headers and outputs the -`Name` and `Image` entries separated by a colon for all tasks: - -```bash -$ docker stack ps --format "{{.Name}}: {{.Image}}" voting -voting_worker.1: dockersamples/examplevotingapp_worker:latest -voting_result.1: dockersamples/examplevotingapp_result:before -voting_vote.1: dockersamples/examplevotingapp_vote:before -voting_db.1: postgres:9.4 -voting_redis.1: redis:alpine -voting_visualizer.1: dockersamples/visualizer:stable -voting_vote.2: dockersamples/examplevotingapp_vote:before -voting_redis.2: redis:alpine -``` - -### Do not map IDs to Names - -The `--no-resolve` option shows IDs for task name, without mapping IDs to Names. 
- -```bash -$ docker stack ps --no-resolve voting -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -xim5bcqtgk1b 10z9fjfqzsxnezo4hb81p8mqg.1 dockersamples/examplevotingapp_worker:latest qaqt4nrzo775jrx6detglho01 Running Running 30 minutes ago -q7yik0ks1in6 hbxltua1na7mgqjnidldv5m65.1 dockersamples/examplevotingapp_result:before mxpaef1tlh23s052erw88a4w5 Running Running 30 minutes ago -rx5yo0866nfx qyprtqw1g5nrki557i974ou1d.1 dockersamples/examplevotingapp_vote:before kanqcxfajd1r16wlnqcblobmm Running Running 31 minutes ago -tz6j82jnwrx7 122f0xxngg17z52be7xspa72x.1 postgres:9.4 mxpaef1tlh23s052erw88a4w5 Running Running 31 minutes ago -w48spazhbmxc tg61x8myx563ueo3urmn1ic6m.1 redis:alpine qaqt4nrzo775jrx6detglho01 Running Running 31 minutes ago -6jj1m02freg1 8cqlyi444kzd3panjb7edh26v.1 dockersamples/visualizer:stable mxpaef1tlh23s052erw88a4w5 Running Running 31 minutes ago -kqgdmededccb qyprtqw1g5nrki557i974ou1d.2 dockersamples/examplevotingapp_vote:before qaqt4nrzo775jrx6detglho01 Running Running 31 minutes ago -t72q3z038jeh tg61x8myx563ueo3urmn1ic6m.2 redis:alpine kanqcxfajd1r16wlnqcblobmm Running Running 31 minutes ago -``` - -### Do not truncate output - -When deploying a service, docker resolves the digest for the service's -image, and pins the service to that digest. The digest is not shown by -default, but is printed if `--no-trunc` is used. 
The `--no-trunc` option -also shows the non-truncated task IDs, and error-messages, as can be seen below: - -```bash -$ docker stack ps --no-trunc voting -ID NAME IMAGE NODE DESIRED STATE CURREN STATE ERROR PORTS -xim5bcqtgk1bxqz91jzo4a1s5 voting_worker.1 dockersamples/examplevotingapp_worker:latest@sha256:3e4ddf59c15f432280a2c0679c4fc5a2ee5a797023c8ef0d3baf7b1385e9fed node2 Running Runnin 32 minutes ago -q7yik0ks1in6kv32gg6y6yjf7 voting_result.1 dockersamples/examplevotingapp_result:before@sha256:83b56996e930c292a6ae5187fda84dd6568a19d97cdb933720be15c757b7463 node1 Running Runnin 32 minutes ago -rx5yo0866nfxc58zf4irsss6n voting_vote.1 dockersamples/examplevotingapp_vote:before@sha256:8e64b182c87de902f2b72321c89b4af4e2b942d76d0b772532ff27ec4c6ebf6 node3 Running Runnin 32 minutes ago -tz6j82jnwrx7n2offljp3mn03 voting_db.1 postgres:9.4@sha256:6046af499eae34d2074c0b53f9a8b404716d415e4a03e68bc1d2f8064f2b027 node1 Running Runnin 32 minutes ago -w48spazhbmxcmbjfi54gs7x90 voting_redis.1 redis:alpine@sha256:9cd405cd1ec1410eaab064a1383d0d8854d1ef74a54e1e4a92fb4ec7bdc3ee7 node2 Running Runnin 32 minutes ago -6jj1m02freg1n3z9n1evrzsbl voting_visualizer.1 dockersamples/visualizer:stable@sha256:f924ad66c8e94b10baaf7bdb9cd491ef4e982a1d048a56a17e02bf5945401e5 node1 Running Runnin 32 minutes ago -kqgdmededccbhz2wuc0e9hx7g voting_vote.2 dockersamples/examplevotingapp_vote:before@sha256:8e64b182c87de902f2b72321c89b4af4e2b942d76d0b772532ff27ec4c6ebf6 node2 Running Runnin 32 minutes ago -t72q3z038jehe1wbh9gdum076 voting_redis.2 redis:alpine@sha256:9cd405cd1ec1410eaab064a1383d0d8854d1ef74a54e1e4a92fb4ec7bdc3ee7 node3 Running Runnin 32 minutes ago -``` - -### Only display task IDs - -The `-q ` or `--quiet` option only shows IDs of the tasks in the stack. 
-This example outputs all task IDs of the "voting" stack; - -```bash -$ docker stack ps -q voting -xim5bcqtgk1b -q7yik0ks1in6 -rx5yo0866nfx -tz6j82jnwrx7 -w48spazhbmxc -6jj1m02freg1 -kqgdmededccb -t72q3z038jeh -``` - -This option can be used to perform batch operations. For example, you can use -the task IDs as input for other commands, such as `docker inspect`. The -following example inspects all tasks of the "voting" stack; - -```bash -$ docker inspect $(docker stack ps -q voting) - -[ - { - "ID": "xim5bcqtgk1b1gk0krq1", - "Version": { -(...) -``` - -## Related commands - -* [stack deploy](stack_deploy.md) -* [stack ls](stack_ls.md) -* [stack rm](stack_rm.md) -* [stack services](stack_services.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/stack_rm.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/stack_rm.md deleted file mode 100644 index a1854ae6f..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/stack_rm.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: "stack rm" -description: "The stack rm command description and usage" -keywords: "stack, rm, remove, down" ---- - - - -# stack rm - -```markdown -Usage: docker stack rm STACK [STACK...] - -Remove one or more stacks - -Aliases: - rm, remove, down - -Options: - --help Print usage -``` - -## Description - -Remove the stack from the swarm. This command has to be run targeting -a manager node. - -## Examples - -### Remove a stack - -This will remove the stack with the name `myapp`. Services, networks, and secrets associated with the stack will be removed. - -```bash -$ docker stack rm myapp - -Removing service myapp_redis -Removing service myapp_web -Removing service myapp_lb -Removing network myapp_default -Removing network myapp_frontend -``` - -### Remove multiple stacks - -This will remove all the specified stacks, `myapp` and `vossibility`. Services, networks, and secrets associated with all the specified stacks will be removed. 
- -```bash -$ docker stack rm myapp vossibility - -Removing service myapp_redis -Removing service myapp_web -Removing service myapp_lb -Removing network myapp_default -Removing network myapp_frontend -Removing service vossibility_nsqd -Removing service vossibility_logstash -Removing service vossibility_elasticsearch -Removing service vossibility_kibana -Removing service vossibility_ghollector -Removing service vossibility_lookupd -Removing network vossibility_default -Removing network vossibility_vossibility -``` - -## Related commands - -* [stack deploy](stack_deploy.md) -* [stack ls](stack_ls.md) -* [stack ps](stack_ps.md) -* [stack services](stack_services.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/stack_services.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/stack_services.md deleted file mode 100644 index b45047d40..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/stack_services.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: "stack services" -description: "The stack services command description and usage" -keywords: "stack, services" -advisory: "experimental" ---- - - - -# stack services (experimental) - -```markdown -Usage: docker stack services [OPTIONS] STACK - -List the services in the stack - -Options: - -f, --filter filter Filter output based on conditions provided - --format string Pretty-print services using a Go template - --help Print usage - -q, --quiet Only display IDs -``` - -## Description - -Lists the services that are running as part of the specified stack. This -command has to be run targeting a manager node. 
- -## Examples - -The following command shows all services in the `myapp` stack: - -```bash -$ docker stack services myapp - -ID NAME REPLICAS IMAGE COMMAND -7be5ei6sqeye myapp_web 1/1 nginx@sha256:23f809e7fd5952e7d5be065b4d3643fbbceccd349d537b62a123ef2201bc886f -dn7m7nhhfb9y myapp_db 1/1 mysql@sha256:a9a5b559f8821fe73d58c3606c812d1c044868d42c63817fa5125fd9d8b7b539 -``` - -### Filtering - -The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there -is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). -Multiple filter flags are combined as an `OR` filter. - -The following command shows both the `web` and `db` services: - -```bash -$ docker stack services --filter name=myapp_web --filter name=myapp_db myapp - -ID NAME REPLICAS IMAGE COMMAND -7be5ei6sqeye myapp_web 1/1 nginx@sha256:23f809e7fd5952e7d5be065b4d3643fbbceccd349d537b62a123ef2201bc886f -dn7m7nhhfb9y myapp_db 1/1 mysql@sha256:a9a5b559f8821fe73d58c3606c812d1c044868d42c63817fa5125fd9d8b7b539 -``` - -The currently supported filters are: - -* id / ID (`--filter id=7be5ei6sqeye`, or `--filter ID=7be5ei6sqeye`) -* name (`--filter name=myapp_web`) -* label (`--filter label=key=value`) - -### Formatting - -The formatting options (`--format`) pretty-prints services output -using a Go template. - -Valid placeholders for the Go template are listed below: - -Placeholder | Description -------------|------------------------------------------------------------------------------------------ -`.ID` | Service ID -`.Name` | Service name -`.Mode` | Service mode (replicated, global) -`.Replicas` | Service replicas -`.Image` | Service image - -When using the `--format` option, the `stack services` command will either -output the data exactly as the template declares or, when using the -`table` directive, includes column headers as well. 
- -The following example uses a template without headers and outputs the -`ID`, `Mode`, and `Replicas` entries separated by a colon for all services: - -```bash -$ docker stack services --format "{{.ID}}: {{.Mode}} {{.Replicas}}" - -0zmvwuiu3vue: replicated 10/10 -fm6uf97exkul: global 5/5 -``` - - -## Related commands - -* [stack deploy](stack_deploy.md) -* [stack ls](stack_ls.md) -* [stack ps](stack_ps.md) -* [stack rm](stack_rm.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/start.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/start.md deleted file mode 100644 index aa672289e..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/start.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: "start" -description: "The start command description and usage" -keywords: "Start, container, stopped" ---- - - - -# start - -```markdown -Usage: docker start [OPTIONS] CONTAINER [CONTAINER...] - -Start one or more stopped containers - -Options: - -a, --attach Attach STDOUT/STDERR and forward signals - --detach-keys string Override the key sequence for detaching a container - --help Print usage - -i, --interactive Attach container's STDIN -``` - -## Examples - -```bash -$ docker start my_container -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/stats.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/stats.md deleted file mode 100644 index f5c058524..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/stats.md +++ /dev/null @@ -1,140 +0,0 @@ ---- -title: "stats" -description: "The stats command description and usage" -keywords: "container, resource, statistics" ---- - - - -# stats - -```markdown -Usage: docker stats [OPTIONS] [CONTAINER...] 
- -Display a live stream of container(s) resource usage statistics - -Options: - -a, --all Show all containers (default shows just running) - --format string Pretty-print images using a Go template - --help Print usage - --no-stream Disable streaming stats and only pull the first result -``` - -## Description - -The `docker stats` command returns a live data stream for running containers. To limit data to one or more specific containers, specify a list of container names or ids separated by a space. You can specify a stopped container but stopped containers do not return any data. - -If you want more detailed information about a container's resource usage, use the `/containers/(id)/stats` API endpoint. - -## Examples - -Running `docker stats` on all running containers against a Linux daemon. - -```bash -$ docker stats -CONTAINER CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O -1285939c1fd3 0.07% 796 KiB / 64 MiB 1.21% 788 B / 648 B 3.568 MB / 512 KB -9c76f7834ae2 0.07% 2.746 MiB / 64 MiB 4.29% 1.266 KB / 648 B 12.4 MB / 0 B -d1ea048f04e4 0.03% 4.583 MiB / 64 MiB 6.30% 2.854 KB / 648 B 27.7 MB / 0 B -``` - -Running `docker stats` on multiple containers by name and id against a Linux daemon. - -```bash -$ docker stats fervent_panini 5acfcb1b4fd1 -CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O -5acfcb1b4fd1 0.00% 115.2 MiB/1.045 GiB 11.03% 1.422 kB/648 B -fervent_panini 0.02% 11.08 MiB/1.045 GiB 1.06% 648 B/648 B -``` - -Running `docker stats` with customized format on all (Running and Stopped) containers. 
- -```bash -$ docker stats --all --format "table {{.ID}}\t{{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}" -CONTAINER ID NAME CPU % MEM USAGE / LIMIT -c9dfa83f0317f87637d5b7e67aa4223337d947215c5a9947e697e4f7d3e0f834 ecstatic_noether 0.00% 56KiB / 15.57GiB -8f92d01cf3b29b4f5fca4cd33d907e05def7af5a3684711b20a2369d211ec67f stoic_goodall 0.07% 32.86MiB / 15.57GiB -38dd23dba00f307d53d040c1d18a91361bbdcccbf592315927d56cf13d8b7343 drunk_visvesvaraya 0.00% 0B / 0B -5a8b07ec4cc52823f3cbfdb964018623c1ba307bce2c057ccdbde5f4f6990833 big_heisenberg 0.00% 0B / 0B -``` - -`drunk_visvesvaraya` and `big_heisenberg` are stopped containers in the above example. - -Running `docker stats` on all running containers against a Windows daemon. - -```powershell -PS E:\> docker stats -CONTAINER CPU % PRIV WORKING SET NET I/O BLOCK I/O -09d3bb5b1604 6.61% 38.21 MiB 17.1 kB / 7.73 kB 10.7 MB / 3.57 MB -9db7aa4d986d 9.19% 38.26 MiB 15.2 kB / 7.65 kB 10.6 MB / 3.3 MB -3f214c61ad1d 0.00% 28.64 MiB 64 kB / 6.84 kB 4.42 MB / 6.93 MB -``` - -Running `docker stats` on multiple containers by name and id against a Windows daemon. - -```powershell -PS E:\> docker ps -a -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -3f214c61ad1d nanoserver "cmd" 2 minutes ago Up 2 minutes big_minsky -9db7aa4d986d windowsservercore "cmd" 2 minutes ago Up 2 minutes mad_wilson -09d3bb5b1604 windowsservercore "cmd" 2 minutes ago Up 2 minutes affectionate_easley - -PS E:\> docker stats 3f214c61ad1d mad_wilson -CONTAINER CPU % PRIV WORKING SET NET I/O BLOCK I/O -3f214c61ad1d 0.00% 46.25 MiB 76.3 kB / 7.92 kB 10.3 MB / 14.7 MB -mad_wilson 9.59% 40.09 MiB 27.6 kB / 8.81 kB 17 MB / 20.1 MB -``` - -### Formatting - -The formatting option (`--format`) pretty prints container output -using a Go template. 
- -Valid placeholders for the Go template are listed below: - -Placeholder | Description ------------- | -------------------------------------------- -`.Container` | Container name or ID (user input) -`.Name` | Container name -`.ID` | Container ID -`.CPUPerc` | CPU percentage -`.MemUsage` | Memory usage -`.NetIO` | Network IO -`.BlockIO` | Block IO -`.MemPerc` | Memory percentage (Not available on Windows) -`.PIDs` | Number of PIDs (Not available on Windows) - - -When using the `--format` option, the `stats` command either -outputs the data exactly as the template declares or, when using the -`table` directive, includes column headers as well. - -The following example uses a template without headers and outputs the -`Container` and `CPUPerc` entries separated by a colon for all images: - -```bash -$ docker stats --format "{{.Container}}: {{.CPUPerc}}" - -09d3bb5b1604: 6.61% -9db7aa4d986d: 9.19% -3f214c61ad1d: 0.00% -``` - -To list all containers statistics with their name, CPU percentage and memory -usage in a table format you can use: - -```bash -$ docker stats --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}" - -CONTAINER CPU % PRIV WORKING SET -1285939c1fd3 0.07% 796 KiB / 64 MiB -9c76f7834ae2 0.07% 2.746 MiB / 64 MiB -d1ea048f04e4 0.03% 4.583 MiB / 64 MiB -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/stop.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/stop.md deleted file mode 100644 index dc00b38af..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/stop.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: "stop" -description: "The stop command description and usage" -keywords: "stop, SIGKILL, SIGTERM" ---- - - - -# stop - -```markdown -Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] 
- -Stop one or more running containers - -Options: - --help Print usage - -t, --time int Seconds to wait for stop before killing it (default 10) -``` - -## Description - -The main process inside the container will receive `SIGTERM`, and after a grace -period, `SIGKILL`. - -## Examples - -```bash -$ docker stop my_container -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm.md deleted file mode 100644 index 395db6905..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: "swarm" -description: "The swarm command description and usage" -keywords: "swarm" ---- - - - -# swarm - -```markdown -Usage: docker swarm COMMAND - -Manage Swarm - -Options: - --help Print usage - -Commands: - init Initialize a swarm - join Join a swarm as a node and/or manager - join-token Manage join tokens - leave Leave the swarm - unlock Unlock swarm - unlock-key Manage the unlock key - update Update the swarm - -Run 'docker swarm COMMAND --help' for more information on a command. -``` - -## Description - -Manage the swarm. 
diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_init.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_init.md deleted file mode 100644 index f4c6348e8..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_init.md +++ /dev/null @@ -1,157 +0,0 @@ ---- -title: "swarm init" -description: "The swarm init command description and usage" -keywords: "swarm, init" ---- - - - -# swarm init - -```markdown -Usage: docker swarm init [OPTIONS] - -Initialize a swarm - -Options: - --advertise-addr string Advertised address (format: [:port]) - --autolock Enable manager autolocking (requiring an unlock key to start a stopped manager) - --availability string Availability of the node ("active"|"pause"|"drain") (default "active") - --cert-expiry duration Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s) - --dispatcher-heartbeat duration Dispatcher heartbeat period (ns|us|ms|s|m|h) (default 5s) - --external-ca external-ca Specifications of one or more certificate signing endpoints - --force-new-cluster Force create a new cluster from current state - --help Print usage - --listen-addr node-addr Listen address (format: [:port]) (default 0.0.0.0:2377) - --max-snapshots uint Number of additional Raft snapshots to retain - --snapshot-interval uint Number of log entries between Raft snapshots (default 10000) - --task-history-limit int Task history retention limit (default 5) -``` - -## Description - -Initialize a swarm. The docker engine targeted by this command becomes a manager -in the newly created single-node swarm. - -## Examples - -```bash -$ docker swarm init --advertise-addr 192.168.99.121 -Swarm initialized: current node (bvz81updecsj6wjz393c09vti) is now a manager. 
- -To add a worker to this swarm, run the following command: - - docker swarm join \ - --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx \ - 172.17.0.2:2377 - -To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions. -``` - -`docker swarm init` generates two random tokens, a worker token and a manager token. When you join -a new node to the swarm, the node joins as a worker or manager node based upon the token you pass -to [swarm join](swarm_join.md). - -After you create the swarm, you can display or rotate the token using -[swarm join-token](swarm_join_token.md). - -### `--autolock` - -This flag enables automatic locking of managers with an encryption key. The -private keys and data stored by all managers will be protected by the -encryption key printed in the output, and will not be accessible without it. -Thus, it is very important to store this key in order to activate a manager -after it restarts. The key can be passed to `docker swarm unlock` to reactivate -the manager. Autolock can be disabled by running -`docker swarm update --autolock=false`. After disabling it, the encryption key -is no longer required to start the manager, and it will start up on its own -without user intervention. - -### `--cert-expiry` - -This flag sets the validity period for node certificates. - -### `--dispatcher-heartbeat` - -This flag sets the frequency with which nodes are told to use as a -period to report their health. - -### `--external-ca` - -This flag sets up the swarm to use an external CA to issue node certificates. The value takes -the form `protocol=X,url=Y`. The value for `protocol` specifies what protocol should be used -to send signing requests to the external CA. Currently, the only supported value is `cfssl`. -The URL specifies the endpoint where signing requests should be submitted. 
- -### `--force-new-cluster` - -This flag forces an existing node that was part of a quorum that was lost to restart as a single node Manager without losing its data. - -### `--listen-addr` - -The node listens for inbound swarm manager traffic on this address. The default is to listen on -0.0.0.0:2377. It is also possible to specify a network interface to listen on that interface's -address; for example `--listen-addr eth0:2377`. - -Specifying a port is optional. If the value is a bare IP address or interface -name, the default port 2377 will be used. - -### `--advertise-addr` - -This flag specifies the address that will be advertised to other members of the -swarm for API access and overlay networking. If unspecified, Docker will check -if the system has a single IP address, and use that IP address with the -listening port (see `--listen-addr`). If the system has multiple IP addresses, -`--advertise-addr` must be specified so that the correct address is chosen for -inter-manager communication and overlay networking. - -It is also possible to specify a network interface to advertise that interface's address; -for example `--advertise-addr eth0:2377`. - -Specifying a port is optional. If the value is a bare IP address or interface -name, the default port 2377 will be used. - -### `--task-history-limit` - -This flag sets up task history retention limit. - -### `--max-snapshots` - -This flag sets the number of old Raft snapshots to retain in addition to the -current Raft snapshots. By default, no old snapshots are retained. This option -may be used for debugging, or to store old snapshots of the swarm state for -disaster recovery purposes. - -### `--snapshot-interval` - -This flag specifies how many log entries to allow in between Raft snapshots. -Setting this to a higher number will trigger snapshots less frequently. -Snapshots compact the Raft log and allow for more efficient transfer of the -state to new managers. 
However, there is a performance cost to taking snapshots -frequently. - -### `--availability` - -This flag specifies the availability of the node at the time the node joins a master. -Possible availability values are `active`, `pause`, or `drain`. - -This flag is useful in certain situations. For example, a cluster may want to have -dedicated manager nodes that are not served as worker nodes. This could be achieved -by passing `--availability=drain` to `docker swarm init`. - - -## Related commands - -* [swarm join](swarm_join.md) -* [swarm join-token](swarm_join_token.md) -* [swarm leave](swarm_leave.md) -* [swarm unlock](swarm_unlock.md) -* [swarm unlock-key](swarm_unlock_key.md) -* [swarm update](swarm_update.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join.md deleted file mode 100644 index 4ee11c188..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: "swarm join" -description: "The swarm join command description and usage" -keywords: "swarm, join" ---- - - - -# swarm join - -```markdown -Usage: docker swarm join [OPTIONS] HOST:PORT - -Join a swarm as a node and/or manager - -Options: - --advertise-addr string Advertised address (format: [:port]) - --availability string Availability of the node ("active"|"pause"|"drain") (default "active") - --help Print usage - --listen-addr node-addr Listen address (format: [:port]) (default 0.0.0.0:2377) - --token string Token for entry into the swarm -``` - -## Description - -Join a node to a swarm. The node joins as a manager node or worker node based upon the token you -pass with the `--token` flag. If you pass a manager token, the node joins as a manager. If you -pass a worker token, the node joins as a worker. 
- -## Examples - -### Join a node to swarm as a manager - -The example below demonstrates joining a manager node using a manager token. - -```bash -$ docker swarm join --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 192.168.99.121:2377 -This node joined a swarm as a manager. -$ docker node ls -ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS -dkp8vy1dq1kxleu9g4u78tlag * manager2 Ready Active Reachable -dvfxp4zseq4s0rih1selh0d20 manager1 Ready Active Leader -``` - -A cluster should only have 3-7 managers at most, because a majority of managers must be available -for the cluster to function. Nodes that aren't meant to participate in this management quorum -should join as workers instead. Managers should be stable hosts that have static IP addresses. - -### Join a node to swarm as a worker - -The example below demonstrates joining a worker node using a worker token. - -```bash -$ docker swarm join --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx 192.168.99.121:2377 -This node joined a swarm as a worker. -$ docker node ls -ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS -7ln70fl22uw2dvjn2ft53m3q5 worker2 Ready Active -dkp8vy1dq1kxleu9g4u78tlag worker1 Ready Active Reachable -dvfxp4zseq4s0rih1selh0d20 * manager1 Ready Active Leader -``` - -### `--listen-addr value` - -If the node is a manager, it will listen for inbound swarm manager traffic on this -address. The default is to listen on 0.0.0.0:2377. It is also possible to specify a -network interface to listen on that interface's address; for example `--listen-addr eth0:2377`. - -Specifying a port is optional. If the value is a bare IP address, or interface -name, the default port 2377 will be used. - -This flag is generally not necessary when joining an existing swarm. - -### `--advertise-addr value` - -This flag specifies the address that will be advertised to other members of the -swarm for API access. 
If unspecified, Docker will check if the system has a -single IP address, and use that IP address with the listening port (see -`--listen-addr`). If the system has multiple IP addresses, `--advertise-addr` -must be specified so that the correct address is chosen for inter-manager -communication and overlay networking. - -It is also possible to specify a network interface to advertise that interface's address; -for example `--advertise-addr eth0:2377`. - -Specifying a port is optional. If the value is a bare IP address, or interface -name, the default port 2377 will be used. - -This flag is generally not necessary when joining an existing swarm. - -### `--token string` - -Secret value required for nodes to join the swarm - -### `--availability` - -This flag specifies the availability of the node at the time the node joins a master. -Possible availability values are `active`, `pause`, or `drain`. - -This flag is useful in certain situations. For example, a cluster may want to have -dedicated manager nodes that are not served as worker nodes. This could be achieved -by passing `--availability=drain` to `docker swarm join`. 
- - -## Related commands - -* [swarm init](swarm_init.md) -* [swarm join-token](swarm_join_token.md) -* [swarm leave](swarm_leave.md) -* [swarm unlock](swarm_unlock.md) -* [swarm unlock-key](swarm_unlock_key.md) -* [swarm update](swarm_update.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join_token.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join_token.md deleted file mode 100644 index 4638a92fd..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join_token.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -title: "swarm join-token" -description: "The swarm join-token command description and usage" -keywords: "swarm, join-token" ---- - - - -# swarm join-token - -```markdown -Usage: docker swarm join-token [OPTIONS] (worker|manager) - -Manage join tokens - -Options: - --help Print usage - -q, --quiet Only display token - --rotate Rotate join token -``` - -## Description - -Join tokens are secrets that allow a node to join the swarm. There are two -different join tokens available, one for the worker role and one for the manager -role. You pass the token using the `--token` flag when you run -[swarm join](swarm_join.md). Nodes use the join token only when they join the -swarm. - -## Examples - -You can view or rotate the join tokens using `swarm join-token`. 
- -As a convenience, you can pass `worker` or `manager` as an argument to -`join-token` to print the full `docker swarm join` command to join a new node to -the swarm: - -```bash -$ docker swarm join-token worker -To add a worker to this swarm, run the following command: - - docker swarm join \ - --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx \ - 172.17.0.2:2377 - -$ docker swarm join-token manager -To add a manager to this swarm, run the following command: - - docker swarm join \ - --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 \ - 172.17.0.2:2377 -``` - -Use the `--rotate` flag to generate a new join token for the specified role: - -```bash -$ docker swarm join-token --rotate worker -Successfully rotated worker join token. - -To add a worker to this swarm, run the following command: - - docker swarm join \ - --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-b30ljddcqhef9b9v4rs7mel7t \ - 172.17.0.2:2377 -``` - -After using `--rotate`, only the new token will be valid for joining with the specified role. - -The `-q` (or `--quiet`) flag only prints the token: - -```bash -$ docker swarm join-token -q worker - -SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-b30ljddcqhef9b9v4rs7mel7t -``` - -### `--rotate` - -Because tokens allow new nodes to join the swarm, you should keep them secret. -Be particularly careful with manager tokens since they allow new manager nodes -to join the swarm. A rogue manager has the potential to disrupt the operation of -your swarm. - -Rotate your swarm's join token if a token gets checked-in to version control, -stolen, or a node is compromised. You may also want to periodically rotate the -token to ensure any unknown token leaks do not allow a rogue node to join -the swarm. - -To rotate the join token and print the newly generated token, run -`docker swarm join-token --rotate` and pass the role: `manager` or `worker`. 
- -Rotating a join-token means that no new nodes will be able to join the swarm -using the old token. Rotation does not affect existing nodes in the swarm -because the join token is only used for authorizing new nodes joining the swarm. - -### `--quiet` - -Only print the token. Do not print a complete command for joining. - -## Related commands - -* [swarm init](swarm_init.md) -* [swarm join](swarm_join.md) -* [swarm leave](swarm_leave.md) -* [swarm unlock](swarm_unlock.md) -* [swarm unlock-key](swarm_unlock_key.md) -* [swarm update](swarm_update.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_leave.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_leave.md deleted file mode 100644 index c5da595d9..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_leave.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: "swarm leave" -description: "The swarm leave command description and usage" -keywords: "swarm, leave" ---- - - - -# swarm leave - -```markdown -Usage: docker swarm leave [OPTIONS] - -Leave the swarm - -Options: - -f, --force Force this node to leave the swarm, ignoring warnings - --help Print usage -``` - -## Description - -When you run this command on a worker, that worker leaves the swarm. - -You can use the `--force` option on a manager to remove it from the swarm. -However, this does not reconfigure the swarm to ensure that there are enough -managers to maintain a quorum in the swarm. The safe way to remove a manager -from a swarm is to demote it to a worker and then direct it to leave the quorum -without using `--force`. Only use `--force` in situations where the swarm will -no longer be used after the manager leaves, such as in a single-node swarm. 
- -## Examples - -Consider the following swarm, as seen from the manager: - -```bash -$ docker node ls -ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS -7ln70fl22uw2dvjn2ft53m3q5 worker2 Ready Active -dkp8vy1dq1kxleu9g4u78tlag worker1 Ready Active -dvfxp4zseq4s0rih1selh0d20 * manager1 Ready Active Leader -``` - -To remove `worker2`, issue the following command from `worker2` itself: - -```bash -$ docker swarm leave -Node left the default swarm. -``` - -The node will still appear in the node list, and marked as `down`. It no longer -affects swarm operation, but a long list of `down` nodes can clutter the node -list. To remove an inactive node from the list, use the [`node rm`](node_rm.md) -command. - -## Related commands - -* [node rm](node_rm.md) -* [swarm init](swarm_init.md) -* [swarm join](swarm_join.md) -* [swarm join-token](swarm_join_token.md) -* [swarm unlock](swarm_unlock.md) -* [swarm unlock-key](swarm_unlock_key.md) -* [swarm update](swarm_update.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock.md deleted file mode 100644 index b507008c1..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: "swarm unlock" -description: "The swarm unlock command description and usage" -keywords: "swarm, unlock" ---- - - - -# swarm unlock - -```markdown -Usage: docker swarm unlock - -Unlock swarm - -Options: - --help Print usage -``` - -## Description - -Unlocks a locked manager using a user-supplied unlock key. This command must be -used to reactivate a manager after its Docker daemon restarts if the autolock -setting is turned on. The unlock key is printed at the time when autolock is -enabled, and is also available from the `docker swarm unlock-key` command. 
- -## Examples - -```bash -$ docker swarm unlock -Please enter unlock key: -``` - -## Related commands - -* [swarm init](swarm_init.md) -* [swarm join](swarm_join.md) -* [swarm join-token](swarm_join_token.md) -* [swarm leave](swarm_leave.md) -* [swarm unlock-key](swarm_unlock_key.md) -* [swarm update](swarm_update.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock_key.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock_key.md deleted file mode 100644 index 42faae83a..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock_key.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: "swarm unlock-key" -description: "The swarm unlock-keycommand description and usage" -keywords: "swarm, unlock-key" ---- - - - -# swarm unlock-key - -```markdown -Usage: docker swarm unlock-key [OPTIONS] - -Manage the unlock key - -Options: - --help Print usage - -q, --quiet Only display token - --rotate Rotate unlock key -``` - -## Description - -An unlock key is a secret key needed to unlock a manager after its Docker daemon -restarts. These keys are only used when the autolock feature is enabled for the -swarm. - -You can view or rotate the unlock key using `swarm unlock-key`. To view the key, -run the `docker swarm unlock-key` command without any arguments: - -## Examples - -```bash -$ docker swarm unlock-key - -To unlock a swarm manager after it restarts, run the `docker swarm unlock` -command and provide the following key: - - SWMKEY-1-fySn8TY4w5lKcWcJPIpKufejh9hxx5KYwx6XZigx3Q4 - -Please remember to store this key in a password manager, since without it you -will not be able to restart the manager. -``` - -Use the `--rotate` flag to rotate the unlock key to a new, randomly-generated -key: - -```bash -$ docker swarm unlock-key --rotate -Successfully rotated manager unlock key. 
- -To unlock a swarm manager after it restarts, run the `docker swarm unlock` -command and provide the following key: - - SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8 - -Please remember to store this key in a password manager, since without it you -will not be able to restart the manager. -``` - -The `-q` (or `--quiet`) flag only prints the key: - -```bash -$ docker swarm unlock-key -q -SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8 -``` - -### `--rotate` - -This flag rotates the unlock key, replacing it with a new randomly-generated -key. The old unlock key will no longer be accepted. - -### `--quiet` - -Only print the unlock key, without instructions. - -## Related commands - -* [swarm init](swarm_init.md) -* [swarm join](swarm_join.md) -* [swarm join-token](swarm_join_token.md) -* [swarm leave](swarm_leave.md) -* [swarm unlock](swarm_unlock.md) -* [swarm update](swarm_update.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_update.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_update.md deleted file mode 100644 index 981eeba19..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/swarm_update.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: "swarm update" -description: "The swarm update command description and usage" -keywords: "swarm, update" ---- - - - -# swarm update - -```markdown -Usage: docker swarm update [OPTIONS] - -Update the swarm - -Options: - --autolock Change manager autolocking setting (true|false) - --cert-expiry duration Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s) - --dispatcher-heartbeat duration Dispatcher heartbeat period (ns|us|ms|s|m|h) (default 5s) - --external-ca external-ca Specifications of one or more certificate signing endpoints - --help Print usage - --max-snapshots uint Number of additional Raft snapshots to retain - --snapshot-interval uint Number of log entries between Raft snapshots (default 10000) - 
--task-history-limit int Task history retention limit (default 5) -``` - -## Description - -Updates a swarm with new parameter values. This command must target a manager node. - -## Examples - -```bash -$ docker swarm update --cert-expiry 720h -``` - -## Related commands - -* [swarm init](swarm_init.md) -* [swarm join](swarm_join.md) -* [swarm join-token](swarm_join_token.md) -* [swarm leave](swarm_leave.md) -* [swarm unlock](swarm_unlock.md) -* [swarm unlock-key](swarm_unlock_key.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/system.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/system.md deleted file mode 100644 index 2484a4a98..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/system.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: "system" -description: "The system command description and usage" -keywords: "system" ---- - - - -# system - -```markdown -Usage: docker system COMMAND - -Manage Docker - -Options: - --help Print usage - -Commands: - df Show docker disk usage - events Get real time events from the server - info Display system-wide information - prune Remove unused data - -Run 'docker system COMMAND --help' for more information on a command. -``` - -## Description - -Manage Docker. 
diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/system_df.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/system_df.md deleted file mode 100644 index 86cc9896c..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/system_df.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: "system df" -description: "The system df command description and usage" -keywords: "system, data, usage, disk" ---- - - - -# system df - -```markdown -Usage: docker system df [OPTIONS] - -Show docker filesystem usage - -Options: - --help Print usage - -v, --verbose Show detailed information on space usage -``` - -## Description - -The `docker system df` command displays information regarding the -amount of disk space used by the docker daemon. - -## Examples - -By default the command will just show a summary of the data used: - -```bash -$ docker system df - -TYPE TOTAL ACTIVE SIZE RECLAIMABLE -Images 5 2 16.43 MB 11.63 MB (70%) -Containers 2 0 212 B 212 B (100%) -Local Volumes 2 1 36 B 0 B (0%) -``` - -A more detailed view can be requested using the `-v, --verbose` flag: - -```bash -$ docker system df -v - -Images space usage: - -REPOSITORY TAG IMAGE ID CREATED SIZE SHARED SIZE UNIQUE SIZE CONTAINERS -my-curl latest b2789dd875bf 6 minutes ago 11 MB 11 MB 5 B 0 -my-jq latest ae67841be6d0 6 minutes ago 9.623 MB 8.991 MB 632.1 kB 0 - a0971c4015c1 6 minutes ago 11 MB 11 MB 0 B 0 -alpine latest 4e38e38c8ce0 9 weeks ago 4.799 MB 0 B 4.799 MB 1 -alpine 3.3 47cf20d8c26c 9 weeks ago 4.797 MB 4.797 MB 0 B 1 - -Containers space usage: - -CONTAINER ID IMAGE COMMAND LOCAL VOLUMES SIZE CREATED STATUS NAMES -4a7f7eebae0f alpine:latest "sh" 1 0 B 16 minutes ago Exited (0) 5 minutes ago hopeful_yalow -f98f9c2aa1ea alpine:3.3 "sh" 1 212 B 16 minutes ago Exited (0) 48 seconds ago anon-vol - -Local Volumes space usage: - -NAME LINKS SIZE -07c7bdf3e34ab76d921894c2b834f073721fccfbbcba792aa7648e3a7a664c2e 2 36 B -my-named-vol 0 0 B -``` - -* `SHARED 
SIZE` is the amount of space that an image shares with another one (i.e. their common data) -* `UNIQUE SIZE` is the amount of space that is only used by a given image -* `SIZE` is the virtual size of the image, it is the sum of `SHARED SIZE` and `UNIQUE SIZE` - -> **Note**: Network information is not shown because it doesn't consume the disk -> space. - -## Performance - -The `system df` command can be very resource-intensive. It traverses the -filesystem of every image, container, and volume in the system. You should be -careful running this command in systems with lots of images, containers, or -volumes or in systems where some images, containers, or volumes have very large -filesystems with many files. You should also be careful not to run this command -in systems where performance is critical. - -## Related commands -* [system prune](system_prune.md) -* [container prune](container_prune.md) -* [volume prune](volume_prune.md) -* [image prune](image_prune.md) -* [network prune](network_prune.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/system_prune.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/system_prune.md deleted file mode 100644 index 3e5ff413b..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/system_prune.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: "system prune" -description: "Remove unused data" -keywords: "system, prune, delete, remove" ---- - - - -# system prune - -```markdown -Usage: docker system prune [OPTIONS] - -Delete unused data - -Options: - -a, --all Remove all unused images not just dangling ones - --filter filter Provide filter values (e.g. 'until=') - -f, --force Do not prompt for confirmation - --help Print usage -``` - -## Description - -Remove all unused containers, volumes, networks and images (both dangling and unreferenced). - -## Examples - -```bash -$ docker system prune -a - -WARNING! 
This will remove: - - all stopped containers - - all volumes not used by at least one container - - all networks not used by at least one container - - all images without at least one container associated to them -Are you sure you want to continue? [y/N] y -Deleted Containers: -0998aa37185a1a7036b0e12cf1ac1b6442dcfa30a5c9650a42ed5010046f195b -73958bfb884fa81fa4cc6baf61055667e940ea2357b4036acbbe25a60f442a4d - -Deleted Volumes: -named-vol - -Deleted Images: -untagged: my-curl:latest -deleted: sha256:7d88582121f2a29031d92017754d62a0d1a215c97e8f0106c586546e7404447d -deleted: sha256:dd14a93d83593d4024152f85d7c63f76aaa4e73e228377ba1d130ef5149f4d8b -untagged: alpine:3.3 -deleted: sha256:695f3d04125db3266d4ab7bbb3c6b23aa4293923e762aa2562c54f49a28f009f -untagged: alpine:latest -deleted: sha256:ee4603260daafe1a8c2f3b78fd760922918ab2441cbb2853ed5c439e59c52f96 -deleted: sha256:9007f5987db353ec398a223bc5a135c5a9601798ba20a1abba537ea2f8ac765f -deleted: sha256:71fa90c8f04769c9721459d5aa0936db640b92c8c91c9b589b54abd412d120ab -deleted: sha256:bb1c3357b3c30ece26e6604aea7d2ec0ace4166ff34c3616701279c22444c0f3 -untagged: my-jq:latest -deleted: sha256:6e66d724542af9bc4c4abf4a909791d7260b6d0110d8e220708b09e4ee1322e1 -deleted: sha256:07b3fa89d4b17009eb3988dfc592c7d30ab3ba52d2007832dffcf6d40e3eda7f -deleted: sha256:3a88a5c81eb5c283e72db2dbc6d65cbfd8e80b6c89bb6e714cfaaa0eed99c548 - -Total reclaimed space: 13.5 MB -``` - -### Filtering - -The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more -than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) - -The currently supported filters are: - -* until (``) - only remove containers, images, and networks created before given timestamp - -The `until` filter can be Unix timestamps, date formatted -timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed -relative to the daemon machine’s time. 
Supported formats for date -formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, -`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local -timezone on the daemon will be used if you do not provide either a `Z` or a -`+-00:00` timezone offset at the end of the timestamp. When providing Unix -timestamps enter seconds[.nanoseconds], where seconds is the number of seconds -that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap -seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a -fraction of a second no more than nine digits long. - -## Related commands - -* [volume create](volume_create.md) -* [volume ls](volume_ls.md) -* [volume inspect](volume_inspect.md) -* [volume rm](volume_rm.md) -* [volume prune](volume_prune.md) -* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) -* [system df](system_df.md) -* [container prune](container_prune.md) -* [image prune](image_prune.md) -* [network prune](network_prune.md) -* [system prune](system_prune.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/tag.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/tag.md deleted file mode 100644 index 5f9defd8a..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/tag.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: "tag" -description: "The tag command description and usage" -keywords: "tag, name, image" ---- - - - -# tag - -```markdown -Usage: docker tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG] - -Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE - -Options: - --help Print usage -``` - -## Description - -An image name is made up of slash-separated name components, optionally prefixed -by a registry hostname. The hostname must comply with standard DNS rules, but -may not contain underscores. If a hostname is present, it may optionally be -followed by a port number in the format `:8080`. 
If not present, the command -uses Docker's public registry located at `registry-1.docker.io` by default. Name -components may contain lowercase letters, digits and separators. A separator -is defined as a period, one or two underscores, or one or more dashes. A name -component may not start or end with a separator. - -A tag name must be valid ASCII and may contain lowercase and uppercase letters, -digits, underscores, periods and dashes. A tag name may not start with a -period or a dash and may contain a maximum of 128 characters. - -You can group your images together using names and tags, and then upload them -to [*Share Images via Repositories*](https://docs.docker.com/engine/tutorials/dockerrepos/#/contributing-to-docker-hub). - -## Examples - -### Tag an image referenced by ID - -To tag a local image with ID "0e5574283393" into the "fedora" repository with -"version1.0": - -```bash -$ docker tag 0e5574283393 fedora/httpd:version1.0 -``` - -### Tag an image referenced by Name - -To tag a local image with name "httpd" into the "fedora" repository with -"version1.0": - -```bash -$ docker tag httpd fedora/httpd:version1.0 -``` - -Note that since the tag name is not specified, the alias is created for an -existing local version `httpd:latest`. - -### Tag an image referenced by Name and Tag - -To tag a local image with name "httpd" and tag "test" into the "fedora" -repository with "version1.0.test": - -```bash -$ docker tag httpd:test fedora/httpd:version1.0.test -``` - -### Tag an image for a private repository - -To push an image to a private registry and not the central Docker -registry you must tag it with the registry hostname and port (if needed). 
- -```bash -$ docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0 -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/top.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/top.md deleted file mode 100644 index 0a0482877..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/top.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: "top" -description: "The top command description and usage" -keywords: "container, running, processes" ---- - - - -# top - -```markdown -Usage: docker top CONTAINER [ps OPTIONS] - -Display the running processes of a container - -Options: - --help Print usage -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/unpause.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/unpause.md deleted file mode 100644 index 8915a43b4..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/unpause.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: "unpause" -description: "The unpause command description and usage" -keywords: "cgroups, suspend, container" ---- - - - -# unpause - -```markdown -Usage: docker unpause CONTAINER [CONTAINER...] - -Unpause all processes within one or more containers - -Options: - --help Print usage -``` - -## Description - -The `docker unpause` command un-suspends all processes in the specified containers. -On Linux, it does this using the cgroups freezer. - -See the -[cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) -for further details. 
- -## Examples - -```bash -$ docker unpause my_container -``` - -## Related commands - -* [pause](pause.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/update.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/update.md deleted file mode 100644 index 935dc9bf3..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/update.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: "update" -description: "The update command description and usage" -keywords: "resources, update, dynamically" ---- - - - -## update - -```markdown -Usage: docker update [OPTIONS] CONTAINER [CONTAINER...] - -Update configuration of one or more containers - -Options: - --blkio-weight uint16 Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0) - --cpu-period int Limit CPU CFS (Completely Fair Scheduler) period - --cpu-quota int Limit CPU CFS (Completely Fair Scheduler) quota - --cpu-rt-period int Limit the CPU real-time period in microseconds - --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds - -c, --cpu-shares int CPU shares (relative weight) - --cpus decimal Number of CPUs (default 0.000) - --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) - --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) - --help Print usage - --kernel-memory string Kernel memory limit - -m, --memory string Memory limit - --memory-reservation string Memory soft limit - --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap - --restart string Restart policy to apply when a container exits -``` - -## Description - -The `docker update` command dynamically updates container configuration. -You can use this command to prevent containers from consuming too many -resources from their Docker host. With a single command, you can place -limits on a single container or on many. 
To specify more than one container, -provide space-separated list of container names or IDs. - -With the exception of the `--kernel-memory` option, you can specify these -options on a running or a stopped container. On kernel version older than -4.6, you can only update `--kernel-memory` on a stopped container or on -a running container with kernel memory initialized. - -## Examples - -The following sections illustrate ways to use this command. - -### Update a container's cpu-shares - -To limit a container's cpu-shares to 512, first identify the container -name or ID. You can use `docker ps` to find these values. You can also -use the ID returned from the `docker run` command. Then, do the following: - -```bash -$ docker update --cpu-shares 512 abebf7571666 -``` - -### Update a container with cpu-shares and memory - -To update multiple resource configurations for multiple containers: - -```bash -$ docker update --cpu-shares 512 -m 300M abebf7571666 hopeful_morse -``` - -### Update a container's kernel memory constraints - -You can update a container's kernel memory limit using the `--kernel-memory` -option. On kernel version older than 4.6, this option can be updated on a -running container only if the container was started with `--kernel-memory`. -If the container was started *without* `--kernel-memory` you need to stop -the container before updating kernel memory. - -For example, if you started a container with this command: - -```bash -$ docker run -dit --name test --kernel-memory 50M ubuntu bash -``` - -You can update kernel memory while the container is running: - -```bash -$ docker update --kernel-memory 80M test -``` - -If you started a container *without* kernel memory initialized: - -```bash -$ docker run -dit --name test2 --memory 300M ubuntu bash -``` - -Update kernel memory of running container `test2` will fail. You need to stop -the container before updating the `--kernel-memory` setting. The next time you -start it, the container uses the new value. 
- -Kernel version newer than (include) 4.6 does not have this limitation, you -can use `--kernel-memory` the same way as other options. - -### Update a container's restart policy - -You can change a container's restart policy on a running container. The new -restart policy takes effect instantly after you run `docker update` on a -container. - -To update restart policy for one or more containers: - -```bash -$ docker update --restart=on-failure:3 abebf7571666 hopeful_morse -``` - -Note that if the container is started with "--rm" flag, you cannot update the restart -policy for it. The `AutoRemove` and `RestartPolicy` are mutually exclusive for the -container. diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/version.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/version.md deleted file mode 100644 index b15d13b97..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/version.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: "version" -description: "The version command description and usage" -keywords: "version, architecture, api" ---- - - - -# version - -```markdown -Usage: docker version [OPTIONS] - -Show the Docker version information - -Options: - -f, --format string Format the output using the given Go template - --help Print usage -``` - -## Description - -By default, this will render all version information in an easy to read -layout. If a format is specified, the given template will be executed instead. - -Go's [text/template](http://golang.org/pkg/text/template/) package -describes all the details of the format. 
- -## Examples - -### Default output - -```bash -$ docker version - -Client: -Version: 1.8.0 -API version: 1.20 -Go version: go1.4.2 -Git commit: f5bae0a -Built: Tue Jun 23 17:56:00 UTC 2015 -OS/Arch: linux/amd64 - -Server: -Version: 1.8.0 -API version: 1.20 -Go version: go1.4.2 -Git commit: f5bae0a -Built: Tue Jun 23 17:56:00 UTC 2015 -OS/Arch: linux/amd64 -``` - -### Get the server version - -```bash -$ docker version --format '{{.Server.Version}}' - -1.8.0 -``` - -### Dump raw JSON data - -```bash -$ docker version --format '{{json .}}' - -{"Client":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"},"ServerOK":true,"Server":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","KernelVersion":"3.13.2-gentoo","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"}} -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/volume.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/volume.md deleted file mode 100644 index d5dd9c592..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/volume.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: "volume" -description: "The volume command description and usage" -keywords: "volume" ---- - - - -# volume - -```markdown -Usage: docker volume COMMAND - -Manage volumes - -Options: - --help Print usage - -Commands: - create Create a volume - inspect Display detailed information on one or more volumes - ls List volumes - prune Remove all unused volumes - rm Remove one or more volumes - -Run 'docker volume COMMAND --help' for more information on a command. -``` - -## Description - -Manage volumes. You can use subcommands to create, inspect, list, remove, or -prune volumes. 
- -## Related commands - -* [volume create](volume_create.md) -* [volume inspect](volume_inspect.md) -* [volume list](volume_list.md) -* [volume rm](volume_rm.md) -* [volume prune](volume_prune.md) -* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/volume_create.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/volume_create.md deleted file mode 100644 index b1eed37b5..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/volume_create.md +++ /dev/null @@ -1,125 +0,0 @@ ---- -title: "volume create" -description: "The volume create command description and usage" -keywords: "volume, create" ---- - - - -# volume create - -```markdown -Usage: docker volume create [OPTIONS] [VOLUME] - -Create a volume - -Options: - -d, --driver string Specify volume driver name (default "local") - --help Print usage - --label value Set metadata for a volume (default []) - -o, --opt value Set driver specific options (default map[]) -``` - -## Description - -Creates a new volume that containers can consume and store data in. If a name is -not specified, Docker generates a random name. - -## Examples - -Create a volume and then configure the container to use it: - -```bash -$ docker volume create hello - -hello - -$ docker run -d -v hello:/world busybox ls /world -``` - -The mount is created inside the container's `/world` directory. Docker does not -support relative paths for mount points inside the container. - -Multiple containers can use the same volume in the same time period. This is -useful if two containers need access to shared data. For example, if one -container writes and the other reads the data. - -Volume names must be unique among drivers. This means you cannot use the same -volume name with two different drivers. 
If you attempt this `docker` returns an -error: - -```none -A volume named "hello" already exists with the "some-other" driver. Choose a different volume name. -``` - -If you specify a volume name already in use on the current driver, Docker -assumes you want to re-use the existing volume and does not return an error. - -### Driver-specific options - -Some volume drivers may take options to customize the volume creation. Use the -`-o` or `--opt` flags to pass driver options: - -```bash -$ docker volume create --driver fake \ - --opt tardis=blue \ - --opt timey=wimey \ - foo -``` - -These options are passed directly to the volume driver. Options for -different volume drivers may do different things (or nothing at all). - -The built-in `local` driver on Windows does not support any options. - -The built-in `local` driver on Linux accepts options similar to the linux -`mount` command. You can provide multiple options by passing the `--opt` flag -multiple times. Some `mount` options (such as the `o` option) can take a -comma-separated list of options. Complete list of available mount options can be -found [here](http://man7.org/linux/man-pages/man8/mount.8.html). - -For example, the following creates a `tmpfs` volume called `foo` with a size of -100 megabyte and `uid` of 1000. 
- -```bash -$ docker volume create --driver local \ - --opt type=tmpfs \ - --opt device=tmpfs \ - --opt o=size=100m,uid=1000 \ - foo -``` - -Another example that uses `btrfs`: - -```bash -$ docker volume create --driver local \ - --opt type=btrfs \ - --opt device=/dev/sda2 \ - foo -``` - -Another example that uses `nfs` to mount the `/path/to/dir` in `rw` mode from -`192.168.1.1`: - -```bash -$ docker volume create --driver local \ - --opt type=nfs \ - --opt o=addr=192.168.1.1,rw \ - --opt device=:/path/to/dir \ - foo -``` - -## Related commands - -* [volume inspect](volume_inspect.md) -* [volume ls](volume_ls.md) -* [volume rm](volume_rm.md) -* [volume prune](volume_prune.md) -* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/volume_inspect.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/volume_inspect.md deleted file mode 100644 index bbdc6bd3e..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/volume_inspect.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: "volume inspect" -description: "The volume inspect command description and usage" -keywords: "volume, inspect" ---- - - - -# volume inspect - -```markdown -Usage: docker volume inspect [OPTIONS] VOLUME [VOLUME...] - -Display detailed information on one or more volumes - -Options: - -f, --format string Format the output using the given Go template - --help Print usage -``` - -## Description - -Returns information about a volume. By default, this command renders all results -in a JSON array. You can specify an alternate format to execute a -given template for each result. Go's -[text/template](http://golang.org/pkg/text/template/) package describes all the -details of the format. 
- -## Examples - -```bash -$ docker volume create -85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d -$ docker volume inspect 85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d -[ - { - "Name": "85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d", - "Driver": "local", - "Mountpoint": "/var/lib/docker/volumes/85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d/_data", - "Status": null - } -] - -$ docker volume inspect --format '{{ .Mountpoint }}' 85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d -/var/lib/docker/volumes/85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d/_data -``` - -## Related commands - -* [volume create](volume_create.md) -* [volume ls](volume_ls.md) -* [volume rm](volume_rm.md) -* [volume prune](volume_prune.md) -* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/volume_ls.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/volume_ls.md deleted file mode 100644 index 713922d60..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/volume_ls.md +++ /dev/null @@ -1,199 +0,0 @@ ---- -title: "volume ls" -description: "The volume ls command description and usage" -keywords: "volume, list" ---- - - - -# volume ls - -```markdown -Usage: docker volume ls [OPTIONS] - -List volumes - -Aliases: - ls, list - -Options: - -f, --filter value Provide filter values (e.g. 'dangling=true') (default []) - - dangling= a volume if referenced or not - - driver= a volume's driver name - - label= or label== - - name= a volume's name - --format string Pretty-print volumes using a Go template - --help Print usage - -q, --quiet Only display volume names -``` - -## Description - -List all the volumes known to Docker. You can filter using the `-f` or -`--filter` flag. 
Refer to the [filtering](#filtering) section for more -information about available filter options. - -## Examples - -### Create a volume -```bash -$ docker volume create rosemary - -rosemary - -$ docker volume create tyler - -tyler - -$ docker volume ls - -DRIVER VOLUME NAME -local rosemary -local tyler -``` - -### Filtering - -The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more -than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) - -The currently supported filters are: - -* dangling (boolean - true or false, 0 or 1) -* driver (a volume driver's name) -* label (`label=` or `label==`) -* name (a volume's name) - -#### dangling - -The `dangling` filter matches on all volumes not referenced by any containers - -```bash -$ docker run -d -v tyler:/tmpwork busybox - -f86a7dd02898067079c99ceacd810149060a70528eff3754d0b0f1a93bd0af18 -$ docker volume ls -f dangling=true -DRIVER VOLUME NAME -local rosemary -``` - -#### driver - -The `driver` filter matches volumes based on their driver. - -The following example matches volumes that are created with the `local` driver: - -```bash -$ docker volume ls -f driver=local - -DRIVER VOLUME NAME -local rosemary -local tyler -``` - -#### label - -The `label` filter matches volumes based on the presence of a `label` alone or -a `label` and a value. - -First, let's create some volumes to illustrate this; - -```bash -$ docker volume create the-doctor --label is-timelord=yes - -the-doctor -$ docker volume create daleks --label is-timelord=no - -daleks -``` - -The following example filter matches volumes with the `is-timelord` label -regardless of its value. - -```bash -$ docker volume ls --filter label=is-timelord - -DRIVER VOLUME NAME -local daleks -local the-doctor -``` - -As the above example demonstrates, both volumes with `is-timelord=yes`, and -`is-timelord=no` are returned. 
- -Filtering on both `key` *and* `value` of the label, produces the expected result: - -```bash -$ docker volume ls --filter label=is-timelord=yes - -DRIVER VOLUME NAME -local the-doctor -``` - -Specifying multiple label filter produces an "and" search; all conditions -should be met; - -```bash -$ docker volume ls --filter label=is-timelord=yes --filter label=is-timelord=no - -DRIVER VOLUME NAME -``` - -#### name - -The `name` filter matches on all or part of a volume's name. - -The following filter matches all volumes with a name containing the `rose` string. - -```bash -$ docker volume ls -f name=rose - -DRIVER VOLUME NAME -local rosemary -``` - -### Formatting - -The formatting options (`--format`) pretty-prints volumes output -using a Go template. - -Valid placeholders for the Go template are listed below: - -Placeholder | Description ---------------|------------------------------------------------------------------------------------------ -`.Name` | Network name -`.Driver` | Network driver -`.Scope` | Network scope (local, global) -`.Mountpoint` | Whether the network is internal or not. -`.Labels` | All labels assigned to the volume. -`.Label` | Value of a specific label for this volume. For example `{{.Label "project.version"}}` - -When using the `--format` option, the `volume ls` command will either -output the data exactly as the template declares or, when using the -`table` directive, includes column headers as well. 
- -The following example uses a template without headers and outputs the -`Name` and `Driver` entries separated by a colon for all volumes: - -```bash -$ docker volume ls --format "{{.Name}}: {{.Driver}}" - -vol1: local -vol2: local -vol3: local -``` - -## Related commands - -* [volume create](volume_create.md) -* [volume inspect](volume_inspect.md) -* [volume rm](volume_rm.md) -* [volume prune](volume_prune.md) -* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/volume_prune.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/volume_prune.md deleted file mode 100644 index a9c4b70fc..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/volume_prune.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: "volume prune" -description: "Remove unused volumes" -keywords: "volume, prune, delete" ---- - - - -# volume prune - -```markdown -Usage: docker volume prune [OPTIONS] - -Remove all unused volumes - -Options: - -f, --force Do not prompt for confirmation - --help Print usage -``` - -## Description - -Remove all unused volumes. Unused volumes are those which are not referenced by any containers - -## Examples - -```bash -$ docker volume prune - -WARNING! This will remove all volumes not used by at least one container. -Are you sure you want to continue? 
[y/N] y -Deleted Volumes: -07c7bdf3e34ab76d921894c2b834f073721fccfbbcba792aa7648e3a7a664c2e -my-named-vol - -Total reclaimed space: 36 B -``` - -## Related commands - -* [volume create](volume_create.md) -* [volume ls](volume_ls.md) -* [volume inspect](volume_inspect.md) -* [volume rm](volume_rm.md) -* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) -* [system df](system_df.md) -* [container prune](container_prune.md) -* [image prune](image_prune.md) -* [network prune](network_prune.md) -* [system prune](system_prune.md) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/volume_rm.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/volume_rm.md deleted file mode 100644 index a1f2d3b9f..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/volume_rm.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: "volume rm" -description: "the volume rm command description and usage" -keywords: "volume, rm" ---- - - - -# volume rm - -```markdown -Usage: docker volume rm [OPTIONS] VOLUME [VOLUME...] - -Remove one or more volumes - -Aliases: - rm, remove - -Options: - -f, --force Force the removal of one or more volumes - --help Print usage -``` - -## Description - -Remove one or more volumes. You cannot remove a volume that is in use by a container. 
- -## Examples - -```bash - $ docker volume rm hello - hello -``` - -## Related commands - -* [volume create](volume_create.md) -* [volume inspect](volume_inspect.md) -* [volume ls](volume_ls.md) -* [volume prune](volume_prune.md) -* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/commandline/wait.md b/fn/vendor/github.com/docker/docker/docs/reference/commandline/wait.md deleted file mode 100644 index ee8f9ab24..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/commandline/wait.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: "wait" -description: "The wait command description and usage" -keywords: "container, stop, wait" ---- - - - -# wait - -```markdown -Usage: docker wait CONTAINER [CONTAINER...] - -Block until one or more containers stop, then print their exit codes - -Options: - --help Print usage -``` - -> **Note**: `docker wait` returns `0` when run against a container which had -> already exited before the `docker wait` command was run. - -## Examples - -Start a container in the background. - -```bash -$ docker run -dit --name=my_container ubuntu bash -``` - -Run `docker wait`, which should block until the container exits. - -```bash -$ docker wait my_container -``` - -In another terminal, stop the first container. The `docker wait` command above -returns the exit code. - -```bash -$ docker stop my_container -``` - -This is the same `docker wait` command from above, but it now exits, returning -`0`. 
- -```bash -$ docker wait my_container - -0 -``` diff --git a/fn/vendor/github.com/docker/docker/docs/reference/glossary.md b/fn/vendor/github.com/docker/docker/docs/reference/glossary.md deleted file mode 100644 index f829ad54c..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/glossary.md +++ /dev/null @@ -1,374 +0,0 @@ ---- -title: "Docker Glossary" -description: "Glossary of terms used around Docker" -keywords: "glossary, docker, terms, definitions" ---- - - - -# Glossary - -A list of terms used around the Docker project. - -## aufs - -aufs (advanced multi layered unification filesystem) is a Linux [filesystem](#filesystem) that -Docker supports as a storage backend. It implements the -[union mount](http://en.wikipedia.org/wiki/Union_mount) for Linux file systems. - -## base image - -An image that has no parent is a **base image**. - -## boot2docker - -[boot2docker](http://boot2docker.io/) is a lightweight Linux distribution made -specifically to run Docker containers. The boot2docker management tool for Mac and Windows was deprecated and replaced by [`docker-machine`](#machine) which you can install with the Docker Toolbox. - -## bridge - -In terms of generic networking, a bridge is a Link Layer device which forwards -traffic between network segments. A bridge can be a hardware device or a -software device running within a host machine's kernel. - -In terms of Docker, a bridge network uses a software bridge which allows -containers connected to the same bridge network to communicate, while providing -isolation from containers which are not connected to that bridge network. -The Docker bridge driver automatically installs rules in the host machine so -that containers on different bridge networks cannot communicate directly with -each other. - -The default bridge network, which is also named `bridge`, behaves differently -from user-defined bridge networks. 
Containers connected to the default `bridge` -network can communicate with each other across the bridge by IP address but -cannot resolve each other's container name to an IP address unless they are -explicitly linked using the `--link` flag to `docker run`. - -For more information about Docker networking, see -[Understand container communication](https://docs.docker.com/engine/userguide/networking/default_network/container-communication/). - -## btrfs - -btrfs (B-tree file system) is a Linux [filesystem](#filesystem) that Docker -supports as a storage backend. It is a [copy-on-write](http://en.wikipedia.org/wiki/Copy-on-write) -filesystem. - -## build - -build is the process of building Docker images using a [Dockerfile](#dockerfile). -The build uses a Dockerfile and a "context". The context is the set of files in the -directory in which the image is built. - -## cgroups - -cgroups is a Linux kernel feature that limits, accounts for, and isolates -the resource usage (CPU, memory, disk I/O, network, etc.) of a collection -of processes. Docker relies on cgroups to control and isolate resource limits. - -*Also known as : control groups* - -## Compose - -[Compose](https://github.com/docker/compose) is a tool for defining and -running complex applications with Docker. With compose, you define a -multi-container application in a single file, then spin your -application up in a single command which does everything that needs to -be done to get it running. - -*Also known as : docker-compose, fig* - -## copy-on-write - -Docker uses a -[copy-on-write](https://docs.docker.com/engine/userguide/storagedriver/imagesandcontainers/#/the-copy-on-write-strategy) -technique and a [union file system](#union-file-system) for both images and -containers to optimize resources and speed performance. Multiple copies of an -entity share the same instance and each one makes only specific changes to its -unique layer. 
- -Multiple containers can share access to the same image, and make -container-specific changes on a writable layer which is deleted when -the container is removed. This speeds up container start times and performance. - -Images are essentially layers of filesystems typically predicated on a base -image under a writable layer, and built up with layers of differences from the -base image. This minimizes the footprint of the image and enables shared -development. - -For more about copy-on-write in the context of Docker, see [Understand images, -containers, and storage -drivers](https://docs.docker.com/engine/userguide/storagedriver/imagesandcontainers/). - -## container - -A container is a runtime instance of a [docker image](#image). - -A Docker container consists of - -- A Docker image -- Execution environment -- A standard set of instructions - -The concept is borrowed from Shipping Containers, which define a standard to ship -goods globally. Docker defines a standard to ship software. - -## data volume - -A data volume is a specially-designated directory within one or more containers -that bypasses the Union File System. Data volumes are designed to persist data, -independent of the container's life cycle. Docker therefore never automatically -delete volumes when you remove a container, nor will it "garbage collect" -volumes that are no longer referenced by a container. - - -## Docker - -The term Docker can refer to - -- The Docker project as a whole, which is a platform for developers and sysadmins to -develop, ship, and run applications -- The docker daemon process running on the host which manages images and containers - - -## Docker for Mac - -[Docker for Mac](https://docs.docker.com/docker-for-mac/) is an easy-to-install, -lightweight Docker development environment designed specifically for the Mac. A -native Mac application, Docker for Mac uses the macOS Hypervisor framework, -networking, and filesystem. 
It's the best solution if you want to build, debug, -test, package, and ship Dockerized applications on a Mac. Docker for Mac -supersedes [Docker Toolbox](#toolbox) as state-of-the-art Docker on macOS. - - -## Docker for Windows - -[Docker for Windows](https://docs.docker.com/docker-for-windows/) is an -easy-to-install, lightweight Docker development environment designed -specifically for Windows 10 systems that support Microsoft Hyper-V -(Professional, Enterprise and Education). Docker for Windows uses Hyper-V for -virtualization, and runs as a native Windows app. It works with Windows Server -2016, and gives you the ability to set up and run Windows containers as well as -the standard Linux containers, with an option to switch between the two. Docker -for Windows is the best solution if you want to build, debug, test, package, and -ship Dockerized applications from Windows machines. Docker for Windows -supersedes [Docker Toolbox](#toolbox) as state-of-the-art Docker on Windows. - -## Docker Hub - -The [Docker Hub](https://hub.docker.com/) is a centralized resource for working with -Docker and its components. It provides the following services: - -- Docker image hosting -- User authentication -- Automated image builds and work-flow tools such as build triggers and web hooks -- Integration with GitHub and Bitbucket - - -## Dockerfile - -A Dockerfile is a text document that contains all the commands you would -normally execute manually in order to build a Docker image. Docker can -build images automatically by reading the instructions from a Dockerfile. - -## filesystem - -A file system is the method an operating system uses to name files -and assign them locations for efficient storage and retrieval. - -Examples : - -- Linux : ext4, aufs, btrfs, zfs -- Windows : NTFS -- macOS : HFS+ - -## image - -Docker images are the basis of [containers](#container). 
An Image is an -ordered collection of root filesystem changes and the corresponding -execution parameters for use within a container runtime. An image typically -contains a union of layered filesystems stacked on top of each other. An image -does not have state and it never changes. - -## libcontainer - -libcontainer provides a native Go implementation for creating containers with -namespaces, cgroups, capabilities, and filesystem access controls. It allows -you to manage the lifecycle of the container performing additional operations -after the container is created. - -## libnetwork - -libnetwork provides a native Go implementation for creating and managing container -network namespaces and other network resources. It manage the networking lifecycle -of the container performing additional operations after the container is created. - -## link - -links provide a legacy interface to connect Docker containers running on the -same host to each other without exposing the hosts' network ports. Use the -Docker networks feature instead. - -## Machine - -[Machine](https://github.com/docker/machine) is a Docker tool which -makes it really easy to create Docker hosts on your computer, on -cloud providers and inside your own data center. It creates servers, -installs Docker on them, then configures the Docker client to talk to them. - -*Also known as : docker-machine* - -## node - -A [node](https://docs.docker.com/engine/swarm/how-swarm-mode-works/nodes/) is a physical or virtual -machine running an instance of the Docker Engine in swarm mode. - -**Manager nodes** perform swarm management and orchestration duties. By default -manager nodes are also worker nodes. - -**Worker nodes** execute tasks. - -## overlay network driver - -Overlay network driver provides out of the box multi-host network connectivity -for docker containers in a cluster. 
- -## overlay storage driver - -OverlayFS is a [filesystem](#filesystem) service for Linux which implements a -[union mount](http://en.wikipedia.org/wiki/Union_mount) for other file systems. -It is supported by the Docker daemon as a storage driver. - -## registry - -A Registry is a hosted service containing [repositories](#repository) of [images](#image) -which responds to the Registry API. - -The default registry can be accessed using a browser at [Docker Hub](#docker-hub) -or using the `docker search` command. - -## repository - -A repository is a set of Docker images. A repository can be shared by pushing it -to a [registry](#registry) server. The different images in the repository can be -labeled using [tags](#tag). - -Here is an example of the shared [nginx repository](https://hub.docker.com/_/nginx/) -and its [tags](https://hub.docker.com/r/library/nginx/tags/) - - -## service - -A [service](https://docs.docker.com/engine/swarm/how-swarm-mode-works/services/) is the definition of how -you want to run your application containers in a swarm. At the most basic level -a service defines which container image to run in the swarm and which commands -to run in the container. For orchestration purposes, the service defines the -"desired state", meaning how many containers to run as tasks and constraints for -deploying the containers. - -Frequently a service is a microservice within the context of some larger -application. Examples of services might include an HTTP server, a database, or -any other type of executable program that you wish to run in a distributed -environment. - -## service discovery - -Swarm mode [service discovery](https://docs.docker.com/engine/swarm/networking/#use-swarm-mode-service-discovery) is a DNS component -internal to the swarm that automatically assigns each service on an overlay -network in the swarm a VIP and DNS entry. 
Containers on the network share DNS -mappings for the service via gossip so any container on the network can access -the service via its service name. - -You don’t need to expose service-specific ports to make the service available to -other services on the same overlay network. The swarm’s internal load balancer -automatically distributes requests to the service VIP among the active tasks. - -## swarm - -A [swarm](https://docs.docker.com/engine/swarm/) is a cluster of one or more Docker Engines running in [swarm mode](#swarm-mode). - -## Docker Swarm - -Do not confuse [Docker Swarm](https://github.com/docker/swarm) with the [swarm mode](#swarm-mode) features in Docker Engine. - -Docker Swarm is the name of a standalone native clustering tool for Docker. -Docker Swarm pools together several Docker hosts and exposes them as a single -virtual Docker host. It serves the standard Docker API, so any tool that already -works with Docker can now transparently scale up to multiple hosts. - -*Also known as : docker-swarm* - -## swarm mode - -[Swarm mode](https://docs.docker.com/engine/swarm/) refers to cluster management and orchestration -features embedded in Docker Engine. When you initialize a new swarm (cluster) or -join nodes to a swarm, the Docker Engine runs in swarm mode. - -## tag - -A tag is a label applied to a Docker image in a [repository](#repository). -tags are how various images in a repository are distinguished from each other. - -*Note : This label is not related to the key=value labels set for docker daemon* - -## task - -A [task](https://docs.docker.com/engine/swarm/how-swarm-mode-works/services/#/tasks-and-scheduling) is the -atomic unit of scheduling within a swarm. A task carries a Docker container and -the commands to run inside the container. Manager nodes assign tasks to worker -nodes according to the number of replicas set in the service scale. - -The diagram below illustrates the relationship of services to tasks and -containers. 
- -![services diagram](https://docs.docker.com/engine/swarm/images/services-diagram.png) - -## Toolbox - -[Docker Toolbox](https://docs.docker.com/toolbox/overview/) is a legacy -installer for Mac and Windows users. It uses Oracle VirtualBox for -virtualization. - -For Macs running OS X El Capitan 10.11 and newer macOS releases, [Docker for -Mac](https://docs.docker.com/docker-for-mac/) is the better solution. - -For Windows 10 systems that support Microsoft Hyper-V (Professional, Enterprise -and Education), [Docker for -Windows](https://docs.docker.com/docker-for-windows/) is the better solution. - -## Union file system - -Union file systems implement a [union -mount](https://en.wikipedia.org/wiki/Union_mount) and operate by creating -layers. Docker uses union file systems in conjunction with -[copy-on-write](#copy-on-write) techniques to provide the building blocks for -containers, making them very lightweight and fast. - -For more on Docker and union file systems, see [Docker and AUFS in -practice](https://docs.docker.com/engine/userguide/storagedriver/aufs-driver/), -[Docker and Btrfs in -practice](https://docs.docker.com/engine/userguide/storagedriver/btrfs-driver/), -and [Docker and OverlayFS in -practice](https://docs.docker.com/engine/userguide/storagedriver/overlayfs-driver/) - -Example implementations of union file systems are -[UnionFS](https://en.wikipedia.org/wiki/UnionFS), -[AUFS](https://en.wikipedia.org/wiki/Aufs), and -[Btrfs](https://btrfs.wiki.kernel.org/index.php/Main_Page). - -## virtual machine - -A virtual machine is a program that emulates a complete computer and imitates dedicated hardware. -It shares physical hardware resources with other users but isolates the operating system. The -end user has the same experience on a Virtual Machine as they would have on dedicated hardware. - -Compared to containers, a virtual machine is heavier to run, provides more isolation, -gets its own set of resources and does minimal sharing. 
- -*Also known as : VM* diff --git a/fn/vendor/github.com/docker/docker/docs/reference/index.md b/fn/vendor/github.com/docker/docker/docs/reference/index.md deleted file mode 100644 index f24c342df..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/index.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "Engine reference" -description: "Docker Engine reference" -keywords: "Engine" ---- - - - -# Engine reference - -* [Dockerfile reference](builder.md) -* [Docker run reference](run.md) -* [Command line reference](commandline/index.md) -* [API Reference](https://docs.docker.com/engine/api/) diff --git a/fn/vendor/github.com/docker/docker/docs/reference/run.md b/fn/vendor/github.com/docker/docker/docs/reference/run.md deleted file mode 100644 index f5deddfec..000000000 --- a/fn/vendor/github.com/docker/docker/docs/reference/run.md +++ /dev/null @@ -1,1601 +0,0 @@ ---- -title: "Docker run reference" -description: "Configure containers at runtime" -keywords: "docker, run, configure, runtime" ---- - - - -# Docker run reference - -Docker runs processes in isolated containers. A container is a process -which runs on a host. The host may be local or remote. When an operator -executes `docker run`, the container process that runs is isolated in -that it has its own file system, its own networking, and its own -isolated process tree separate from the host. - -This page details how to use the `docker run` command to define the -container's resources at runtime. - -## General form - -The basic `docker run` command takes this form: - - $ docker run [OPTIONS] IMAGE[:TAG|@DIGEST] [COMMAND] [ARG...] - -The `docker run` command must specify an [*IMAGE*](glossary.md#image) -to derive the container from. 
An image developer can define image -defaults related to: - - * detached or foreground running - * container identification - * network settings - * runtime constraints on CPU and memory - -With the `docker run [OPTIONS]` an operator can add to or override the -image defaults set by a developer. And, additionally, operators can -override nearly all the defaults set by the Docker runtime itself. The -operator's ability to override image and Docker runtime defaults is why -[*run*](commandline/run.md) has more options than any -other `docker` command. - -To learn how to interpret the types of `[OPTIONS]`, see [*Option -types*](commandline/cli.md#option-types). - -> **Note**: Depending on your Docker system configuration, you may be -> required to preface the `docker run` command with `sudo`. To avoid -> having to use `sudo` with the `docker` command, your system -> administrator can create a Unix group called `docker` and add users to -> it. For more information about this configuration, refer to the Docker -> installation documentation for your operating system. - - -## Operator exclusive options - -Only the operator (the person executing `docker run`) can set the -following options. 
- - - [Detached vs foreground](#detached-vs-foreground) - - [Detached (-d)](#detached--d) - - [Foreground](#foreground) - - [Container identification](#container-identification) - - [Name (--name)](#name---name) - - [PID equivalent](#pid-equivalent) - - [IPC settings (--ipc)](#ipc-settings---ipc) - - [Network settings](#network-settings) - - [Restart policies (--restart)](#restart-policies---restart) - - [Clean up (--rm)](#clean-up---rm) - - [Runtime constraints on resources](#runtime-constraints-on-resources) - - [Runtime privilege and Linux capabilities](#runtime-privilege-and-linux-capabilities) - -## Detached vs foreground - -When starting a Docker container, you must first decide if you want to -run the container in the background in a "detached" mode or in the -default foreground mode: - - -d=false: Detached mode: Run container in the background, print new container id - -### Detached (-d) - -To start a container in detached mode, you use `-d=true` or just `-d` option. By -design, containers started in detached mode exit when the root process used to -run the container exits. A container in detached mode cannot be automatically -removed when it stops, this means you cannot use the `--rm` option with `-d` option. - -Do not pass a `service x start` command to a detached container. For example, this -command attempts to start the `nginx` service. - - $ docker run -d -p 80:80 my_image service nginx start - -This succeeds in starting the `nginx` service inside the container. However, it -fails the detached container paradigm in that, the root process (`service nginx -start`) returns and the detached container stops as designed. As a result, the -`nginx` service is started but could not be used. Instead, to start a process -such as the `nginx` web server do the following: - - $ docker run -d -p 80:80 my_image nginx -g 'daemon off;' - -To do input/output with a detached container use network connections or shared -volumes. 
These are required because the container is no longer listening to the -command line where `docker run` was run. - -To reattach to a detached container, use `docker` -[*attach*](commandline/attach.md) command. - -### Foreground - -In foreground mode (the default when `-d` is not specified), `docker -run` can start the process in the container and attach the console to -the process's standard input, output, and standard error. It can even -pretend to be a TTY (this is what most command line executables expect) -and pass along signals. All of that is configurable: - - -a=[] : Attach to `STDIN`, `STDOUT` and/or `STDERR` - -t : Allocate a pseudo-tty - --sig-proxy=true: Proxy all received signals to the process (non-TTY mode only) - -i : Keep STDIN open even if not attached - -If you do not specify `-a` then Docker will [attach to both stdout and stderr -]( https://github.com/docker/docker/blob/4118e0c9eebda2412a09ae66e90c34b85fae3275/runconfig/opts/parse.go#L267). -You can specify to which of the three standard streams (`STDIN`, `STDOUT`, -`STDERR`) you'd like to connect instead, as in: - - $ docker run -a stdin -a stdout -i -t ubuntu /bin/bash - -For interactive processes (like a shell), you must use `-i -t` together in -order to allocate a tty for the container process. `-i -t` is often written `-it` -as you'll see in later examples. Specifying `-t` is forbidden when the client -standard output is redirected or piped, such as in: - - $ echo test | docker run -i busybox cat - ->**Note**: A process running as PID 1 inside a container is treated ->specially by Linux: it ignores any signal with the default action. ->So, the process will not terminate on `SIGINT` or `SIGTERM` unless it is ->coded to do so. 
- -## Container identification - -### Name (--name) - -The operator can identify a container in three ways: - -| Identifier type | Example value | -| --------------------- | ------------------------------------------------------------------ | -| UUID long identifier | "f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778" | -| UUID short identifier | "f78375b1c487" | -| Name | "evil_ptolemy" | - -The UUID identifiers come from the Docker daemon. If you do not assign a -container name with the `--name` option, then the daemon generates a random -string name for you. Defining a `name` can be a handy way to add meaning to a -container. If you specify a `name`, you can use it when referencing the -container within a Docker network. This works for both background and foreground -Docker containers. - -> **Note**: Containers on the default bridge network must be linked to -> communicate by name. - -### PID equivalent - -Finally, to help with automation, you can have Docker write the -container ID out to a file of your choosing. This is similar to how some -programs might write out their process ID to a file (you've seen them as -PID files): - - --cidfile="": Write the container ID to the file - -### Image[:tag] - -While not strictly a means of identifying a container, you can specify a version of an -image you'd like to run the container with by adding `image[:tag]` to the command. For -example, `docker run ubuntu:14.04`. - -### Image[@digest] - -Images using the v2 or later image format have a content-addressable identifier -called a digest. As long as the input used to generate the image is unchanged, -the digest value is predictable and referenceable. 
- -The following example runs a container from the `alpine` image with the -`sha256:9cacb71397b640eca97488cf08582ae4e4068513101088e9f96c9814bfda95e0` digest: - - $ docker run alpine@sha256:9cacb71397b640eca97488cf08582ae4e4068513101088e9f96c9814bfda95e0 date - -## PID settings (--pid) - - --pid="" : Set the PID (Process) Namespace mode for the container, - 'container:': joins another container's PID namespace - 'host': use the host's PID namespace inside the container - -By default, all containers have the PID namespace enabled. - -PID namespace provides separation of processes. The PID Namespace removes the -view of the system processes, and allows process ids to be reused including -pid 1. - -In certain cases you want your container to share the host's process namespace, -basically allowing processes within the container to see all of the processes -on the system. For example, you could build a container with debugging tools -like `strace` or `gdb`, but want to use these tools when debugging processes -within the container. - -### Example: run htop inside a container - -Create this Dockerfile: - -``` -FROM alpine:latest -RUN apk add --update htop && rm -rf /var/cache/apk/* -CMD ["htop"] -``` - -Build the Dockerfile and tag the image as `myhtop`: - -```bash -$ docker build -t myhtop . -``` - -Use the following command to run `htop` inside a container: - -``` -$ docker run -it --rm --pid=host myhtop -``` - -Joining another container's pid namespace can be used for debugging that container. 
- -### Example - -Start a container running a redis server: - -```bash -$ docker run --name my-redis -d redis -``` - -Debug the redis container by running another container that has strace in it: - -```bash -$ docker run -it --pid=container:my-redis my_strace_docker_image bash -$ strace -p 1 -``` - -## UTS settings (--uts) - - --uts="" : Set the UTS namespace mode for the container, - 'host': use the host's UTS namespace inside the container - -The UTS namespace is for setting the hostname and the domain that is visible -to running processes in that namespace. By default, all containers, including -those with `--network=host`, have their own UTS namespace. The `host` setting will -result in the container using the same UTS namespace as the host. Note that -`--hostname` is invalid in `host` UTS mode. - -You may wish to share the UTS namespace with the host if you would like the -hostname of the container to change as the hostname of the host changes. A -more advanced use case would be changing the host's hostname from a container. - -## IPC settings (--ipc) - - --ipc="" : Set the IPC mode for the container, - 'container:': reuses another container's IPC namespace - 'host': use the host's IPC namespace inside the container - -By default, all containers have the IPC namespace enabled. - -IPC (POSIX/SysV IPC) namespace provides separation of named shared memory -segments, semaphores and message queues. - -Shared memory segments are used to accelerate inter-process communication at -memory speed, rather than through pipes or through the network stack. Shared -memory is commonly used by databases and custom-built (typically C/OpenMPI, -C++/using boost libraries) high performance applications for scientific -computing and financial services industries. If these types of applications -are broken into multiple containers, you might need to share the IPC mechanisms -of the containers. 
- -## Network settings - - --dns=[] : Set custom dns servers for the container - --network="bridge" : Connect a container to a network - 'bridge': create a network stack on the default Docker bridge - 'none': no networking - 'container:': reuse another container's network stack - 'host': use the Docker host network stack - '|': connect to a user-defined network - --network-alias=[] : Add network-scoped alias for the container - --add-host="" : Add a line to /etc/hosts (host:IP) - --mac-address="" : Sets the container's Ethernet device's MAC address - --ip="" : Sets the container's Ethernet device's IPv4 address - --ip6="" : Sets the container's Ethernet device's IPv6 address - --link-local-ip=[] : Sets one or more container's Ethernet device's link local IPv4/IPv6 addresses - -By default, all containers have networking enabled and they can make any -outgoing connections. The operator can completely disable networking -with `docker run --network none` which disables all incoming and outgoing -networking. In cases like this, you would perform I/O through files or -`STDIN` and `STDOUT` only. - -Publishing ports and linking to other containers only works with the default (bridge). The linking feature is a legacy feature. You should always prefer using Docker network drivers over linking. - -Your container will use the same DNS servers as the host by default, but -you can override this with `--dns`. - -By default, the MAC address is generated using the IP address allocated to the -container. You can set the container's MAC address explicitly by providing a -MAC address via the `--mac-address` parameter (format:`12:34:56:78:9a:bc`).Be -aware that Docker does not check if manually specified MAC addresses are unique. - -Supported networks : - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NetworkDescription
none - No networking in the container. -
bridge (default) - Connect the container to the bridge via veth interfaces. -
host - Use the host's network stack inside the container. -
container:<name|id> - Use the network stack of another container, specified via - its name or id. -
NETWORK - Connects the container to a user created network (using docker network create command) -
- -#### Network: none - -With the network set to `none` a container will not have -access to any external routes. The container will still have a -`loopback` interface enabled in the container but it does not have any -routes to external traffic. - -#### Network: bridge - -With the network set to `bridge` a container will use docker's -default networking setup. A bridge is set up on the host, commonly named -`docker0`, and a pair of `veth` interfaces will be created for the -container. One side of the `veth` pair will remain on the host attached -to the bridge while the other side of the pair will be placed inside the -container's namespaces in addition to the `loopback` interface. An IP -address will be allocated for containers on the bridge's network and -traffic will be routed through this bridge to the container. - -Containers can communicate via their IP addresses by default. To communicate by -name, they must be linked. - -#### Network: host - -With the network set to `host` a container will share the host's -network stack and all interfaces from the host will be available to the -container. The container's hostname will match the hostname on the host -system. Note that `--mac-address` is invalid in `host` netmode. Even in `host` -network mode a container has its own UTS namespace by default. As such -`--hostname` is allowed in `host` network mode and will only change the -hostname inside the container. -Similar to `--hostname`, the `--add-host`, `--dns`, `--dns-search`, and -`--dns-option` options can be used in `host` network mode. These options update -`/etc/hosts` or `/etc/resolv.conf` inside the container. No changes are made to -`/etc/hosts` and `/etc/resolv.conf` on the host. - -Compared to the default `bridge` mode, the `host` mode gives *significantly* -better networking performance since it uses the host's native networking stack -whereas the bridge has to go through one level of virtualization through the -docker daemon. 
It is recommended to run containers in this mode when their -networking performance is critical, for example, a production Load Balancer -or a High Performance Web Server. - -> **Note**: `--network="host"` gives the container full access to local system -> services such as D-bus and is therefore considered insecure. - -#### Network: container - -With the network set to `container` a container will share the -network stack of another container. The other container's name must be -provided in the format of `--network container:`. Note that `--add-host` -`--hostname` `--dns` `--dns-search` `--dns-option` and `--mac-address` are -invalid in `container` netmode, and `--publish` `--publish-all` `--expose` are -also invalid in `container` netmode. - -Example running a Redis container with Redis binding to `localhost` then -running the `redis-cli` command and connecting to the Redis server over the -`localhost` interface. - - $ docker run -d --name redis example/redis --bind 127.0.0.1 - $ # use the redis container's network stack to access localhost - $ docker run --rm -it --network container:redis example/redis-cli -h 127.0.0.1 - -#### User-defined network - -You can create a network using a Docker network driver or an external network -driver plugin. You can connect multiple containers to the same network. Once -connected to a user-defined network, the containers can communicate easily using -only another container's IP address or name. - -For `overlay` networks or custom plugins that support multi-host connectivity, -containers connected to the same multi-host network but launched from different -Engines can also communicate in this way. 
- -The following example creates a network using the built-in `bridge` network -driver and running a container in the created network - -``` -$ docker network create -d bridge my-net -$ docker run --network=my-net -itd --name=container3 busybox -``` - -### Managing /etc/hosts - -Your container will have lines in `/etc/hosts` which define the hostname of the -container itself as well as `localhost` and a few other common things. The -`--add-host` flag can be used to add additional lines to `/etc/hosts`. - - $ docker run -it --add-host db-static:86.75.30.9 ubuntu cat /etc/hosts - 172.17.0.22 09d03f76bf2c - fe00::0 ip6-localnet - ff00::0 ip6-mcastprefix - ff02::1 ip6-allnodes - ff02::2 ip6-allrouters - 127.0.0.1 localhost - ::1 localhost ip6-localhost ip6-loopback - 86.75.30.9 db-static - -If a container is connected to the default bridge network and `linked` -with other containers, then the container's `/etc/hosts` file is updated -with the linked container's name. - -> **Note** Since Docker may live update the container’s `/etc/hosts` file, there -may be situations when processes inside the container can end up reading an -empty or incomplete `/etc/hosts` file. In most cases, retrying the read again -should fix the problem. - -## Restart policies (--restart) - -Using the `--restart` flag on Docker run you can specify a restart policy for -how a container should or should not be restarted on exit. - -When a restart policy is active on a container, it will be shown as either `Up` -or `Restarting` in [`docker ps`](commandline/ps.md). It can also be -useful to use [`docker events`](commandline/events.md) to see the -restart policy in effect. - -Docker supports the following restart policies: - - - - - - - - - - - - - - - - - - - - - - - - - - -
PolicyResult
no - Do not automatically restart the container when it exits. This is the - default. -
- - on-failure[:max-retries] - - - Restart only if the container exits with a non-zero exit status. - Optionally, limit the number of restart retries the Docker - daemon attempts. -
always - Always restart the container regardless of the exit status. - When you specify always, the Docker daemon will try to restart - the container indefinitely. The container will also always start - on daemon startup, regardless of the current state of the container. -
unless-stopped - Always restart the container regardless of the exit status, but - do not start it on daemon startup if the container has been put - to a stopped state before. -
- -An ever increasing delay (double the previous delay, starting at 100 -milliseconds) is added before each restart to prevent flooding the server. -This means the daemon will wait for 100 ms, then 200 ms, 400, 800, 1600, -and so on until either the `on-failure` limit is hit, or when you `docker stop` -or `docker rm -f` the container. - -If a container is successfully restarted (the container is started and runs -for at least 10 seconds), the delay is reset to its default value of 100 ms. - -You can specify the maximum amount of times Docker will try to restart the -container when using the **on-failure** policy. The default is that Docker -will try forever to restart the container. The number of (attempted) restarts -for a container can be obtained via [`docker inspect`](commandline/inspect.md). For example, to get the number of restarts -for container "my-container"; - - {% raw %} - $ docker inspect -f "{{ .RestartCount }}" my-container - # 2 - {% endraw %} - -Or, to get the last time the container was (re)started; - - {% raw %} - $ docker inspect -f "{{ .State.StartedAt }}" my-container - # 2015-03-04T23:47:07.691840179Z - {% endraw %} - - -Combining `--restart` (restart policy) with the `--rm` (clean up) flag results -in an error. On container restart, attached clients are disconnected. See the -examples on using the [`--rm` (clean up)](#clean-up-rm) flag later in this page. - -### Examples - - $ docker run --restart=always redis - -This will run the `redis` container with a restart policy of **always** -so that if the container exits, Docker will restart it. - - $ docker run --restart=on-failure:10 redis - -This will run the `redis` container with a restart policy of **on-failure** -and a maximum restart count of 10. If the `redis` container exits with a -non-zero exit status more than 10 times in a row Docker will abort trying to -restart the container. Providing a maximum restart limit is only valid for the -**on-failure** policy. 
- -## Exit Status - -The exit code from `docker run` gives information about why the container -failed to run or why it exited. When `docker run` exits with a non-zero code, -the exit codes follow the `chroot` standard, see below: - -**_125_** if the error is with Docker daemon **_itself_** - - $ docker run --foo busybox; echo $? - # flag provided but not defined: --foo - See 'docker run --help'. - 125 - -**_126_** if the **_contained command_** cannot be invoked - - $ docker run busybox /etc; echo $? - # docker: Error response from daemon: Container command '/etc' could not be invoked. - 126 - -**_127_** if the **_contained command_** cannot be found - - $ docker run busybox foo; echo $? - # docker: Error response from daemon: Container command 'foo' not found or does not exist. - 127 - -**_Exit code_** of **_contained command_** otherwise - - $ docker run busybox /bin/sh -c 'exit 3'; echo $? - # 3 - -## Clean up (--rm) - -By default a container's file system persists even after the container -exits. This makes debugging a lot easier (since you can inspect the -final state) and you retain all your data by default. But if you are -running short-term **foreground** processes, these container file -systems can really pile up. If instead you'd like Docker to -**automatically clean up the container and remove the file system when -the container exits**, you can add the `--rm` flag: - - --rm=false: Automatically remove the container when it exits (incompatible with -d) - -> **Note**: When you set the `--rm` flag, Docker also removes the volumes -associated with the container when the container is removed. This is similar -to running `docker rm -v my-container`. Only volumes that are specified without a -name are removed. For example, with -`docker run --rm -v /foo -v awesome:/bar busybox top`, the volume for `/foo` will be removed, -but the volume for `/bar` will not. 
Volumes inherited via `--volumes-from` will be removed -with the same logic -- if the original volume was specified with a name it will **not** be removed. - -## Security configuration - --security-opt="label=user:USER" : Set the label user for the container - --security-opt="label=role:ROLE" : Set the label role for the container - --security-opt="label=type:TYPE" : Set the label type for the container - --security-opt="label=level:LEVEL" : Set the label level for the container - --security-opt="label=disable" : Turn off label confinement for the container - --security-opt="apparmor=PROFILE" : Set the apparmor profile to be applied to the container - --security-opt="no-new-privileges:true|false" : Disable/enable container processes from gaining new privileges - --security-opt="seccomp=unconfined" : Turn off seccomp confinement for the container - --security-opt="seccomp=profile.json": White listed syscalls seccomp Json file to be used as a seccomp filter - - -You can override the default labeling scheme for each container by specifying -the `--security-opt` flag. Specifying the level in the following command -allows you to share the same content between containers. - - $ docker run --security-opt label=level:s0:c100,c200 -it fedora bash - -> **Note**: Automatic translation of MLS labels is not currently supported. - -To disable the security labeling for this container versus running with the -`--privileged` flag, use the following command: - - $ docker run --security-opt label=disable -it fedora bash - -If you want a tighter security policy on the processes within a container, -you can specify an alternate type for the container. You could run a container -that is only allowed to listen on Apache ports by executing the following -command: - - $ docker run --security-opt label=type:svirt_apache_t -it centos bash - -> **Note**: You would have to write policy defining a `svirt_apache_t` type. 
- -If you want to prevent your container processes from gaining additional -privileges, you can execute the following command: - - $ docker run --security-opt no-new-privileges -it centos bash - -This means that commands that raise privileges such as `su` or `sudo` will no longer work. -It also causes any seccomp filters to be applied later, after privileges have been dropped -which may mean you can have a more restrictive set of filters. -For more details, see the [kernel documentation](https://www.kernel.org/doc/Documentation/prctl/no_new_privs.txt). - -## Specify an init process - -You can use the `--init` flag to indicate that an init process should be used as -the PID 1 in the container. Specifying an init process ensures the usual -responsibilities of an init system, such as reaping zombie processes, are -performed inside the created container. - -The default init process used is the first `docker-init` executable found in the -system path of the Docker daemon process. This `docker-init` binary, included in -the default installation, is backed by [tini](https://github.com/krallin/tini). - -## Specify custom cgroups - -Using the `--cgroup-parent` flag, you can pass a specific cgroup to run a -container in. This allows you to create and manage cgroups on their own. You can -define custom resources for those cgroups and put containers under a common -parent group. - -## Runtime constraints on resources - -The operator can also adjust the performance parameters of the -container: - -| Option | Description | -| -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | -| `-m`, `--memory=""` | Memory limit (format: `[]`). Number is a positive integer. Unit can be one of `b`, `k`, `m`, or `g`. Minimum is 4M. | -| `--memory-swap=""` | Total memory limit (memory + swap, format: `[]`). Number is a positive integer. 
Unit can be one of `b`, `k`, `m`, or `g`. | -| `--memory-reservation=""` | Memory soft limit (format: `[]`). Number is a positive integer. Unit can be one of `b`, `k`, `m`, or `g`. | -| `--kernel-memory=""` | Kernel memory limit (format: `[]`). Number is a positive integer. Unit can be one of `b`, `k`, `m`, or `g`. Minimum is 4M. | -| `-c`, `--cpu-shares=0` | CPU shares (relative weight) | -| `--cpus=0.000` | Number of CPUs. Number is a fractional number. 0.000 means no limit. | -| `--cpu-period=0` | Limit the CPU CFS (Completely Fair Scheduler) period | -| `--cpuset-cpus=""` | CPUs in which to allow execution (0-3, 0,1) | -| `--cpuset-mems=""` | Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. | -| `--cpu-quota=0` | Limit the CPU CFS (Completely Fair Scheduler) quota | -| `--cpu-rt-period=0` | Limit the CPU real-time period. In microseconds. Requires parent cgroups be set and cannot be higher than parent. Also check rtprio ulimits. | -| `--cpu-rt-runtime=0` | Limit the CPU real-time runtime. In microseconds. Requires parent cgroups be set and cannot be higher than parent. Also check rtprio ulimits. | -| `--blkio-weight=0` | Block IO weight (relative weight) accepts a weight value between 10 and 1000. | -| `--blkio-weight-device=""` | Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`) | -| `--device-read-bps=""` | Limit read rate from a device (format: `:[]`). Number is a positive integer. Unit can be one of `kb`, `mb`, or `gb`. | -| `--device-write-bps=""` | Limit write rate to a device (format: `:[]`). Number is a positive integer. Unit can be one of `kb`, `mb`, or `gb`. | -| `--device-read-iops="" ` | Limit read rate (IO per second) from a device (format: `:`). Number is a positive integer. | -| `--device-write-iops="" ` | Limit write rate (IO per second) to a device (format: `:`). Number is a positive integer. | -| `--oom-kill-disable=false` | Whether to disable OOM Killer for the container or not. 
| -| `--oom-score-adj=0` | Tune container's OOM preferences (-1000 to 1000) | -| `--memory-swappiness=""` | Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. | -| `--shm-size=""` | Size of `/dev/shm`. The format is ``. `number` must be greater than `0`. Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`. | - -### User memory constraints - -We have four ways to set user memory usage: - - - - - - - - - - - - - - - - - - - - - - - - - - -
OptionResult
- memory=inf, memory-swap=inf (default) - - There is no memory limit for the container. The container can use - as much memory as needed. -
memory=L<inf, memory-swap=inf - (specify memory and set memory-swap as -1) The container is - not allowed to use more than L bytes of memory, but can use as much swap - as is needed (if the host supports swap memory). -
memory=L<inf, memory-swap=2*L - (specify memory without memory-swap) The container is not allowed to - use more than L bytes of memory, swap plus memory usage is double - of that. -
- memory=L<inf, memory-swap=S<inf, L<=S - - (specify both memory and memory-swap) The container is not allowed to - use more than L bytes of memory, swap plus memory usage is limited - by S. -
- -Examples: - - $ docker run -it ubuntu:14.04 /bin/bash - -We set nothing about memory, this means the processes in the container can use -as much memory and swap memory as they need. - - $ docker run -it -m 300M --memory-swap -1 ubuntu:14.04 /bin/bash - -We set memory limit and disabled swap memory limit, this means the processes in -the container can use 300M memory and as much swap memory as they need (if the -host supports swap memory). - - $ docker run -it -m 300M ubuntu:14.04 /bin/bash - -We set memory limit only, this means the processes in the container can use -300M memory and 300M swap memory, by default, the total virtual memory size -(--memory-swap) will be set as double of memory, in this case, memory + swap -would be 2*300M, so processes can use 300M swap memory as well. - - $ docker run -it -m 300M --memory-swap 1G ubuntu:14.04 /bin/bash - -We set both memory and swap memory, so the processes in the container can use -300M memory and 700M swap memory. - -Memory reservation is a kind of memory soft limit that allows for greater -sharing of memory. Under normal circumstances, containers can use as much of -the memory as needed and are constrained only by the hard limits set with the -`-m`/`--memory` option. When memory reservation is set, Docker detects memory -contention or low memory and forces containers to restrict their consumption to -a reservation limit. - -Always set the memory reservation value below the hard limit, otherwise the hard -limit takes precedence. A reservation of 0 is the same as setting no -reservation. By default (without reservation set), memory reservation is the -same as the hard memory limit. - -Memory reservation is a soft-limit feature and does not guarantee the limit -won't be exceeded. Instead, the feature attempts to ensure that, when memory is -heavily contended for, memory is allocated based on the reservation hints/setup. 
- -The following example limits the memory (`-m`) to 500M and sets the memory -reservation to 200M. - -```bash -$ docker run -it -m 500M --memory-reservation 200M ubuntu:14.04 /bin/bash -``` - -Under this configuration, when the container consumes memory more than 200M and -less than 500M, the next system memory reclaim attempts to shrink container -memory below 200M. - -The following example sets memory reservation to 1G without a hard memory limit. - -```bash -$ docker run -it --memory-reservation 1G ubuntu:14.04 /bin/bash -``` - -The container can use as much memory as it needs. The memory reservation setting -ensures the container doesn't consume too much memory for a long time, because -every memory reclaim shrinks the container's consumption to the reservation. - -By default, kernel kills processes in a container if an out-of-memory (OOM) -error occurs. To change this behaviour, use the `--oom-kill-disable` option. -Only disable the OOM killer on containers where you have also set the -`-m/--memory` option. If the `-m` flag is not set, this can result in the host -running out of memory and require killing the host's system processes to free -memory. - -The following example limits the memory to 100M and disables the OOM killer for -this container: - - $ docker run -it -m 100M --oom-kill-disable ubuntu:14.04 /bin/bash - -The following example illustrates a dangerous way to use the flag: - - $ docker run -it --oom-kill-disable ubuntu:14.04 /bin/bash - -The container has unlimited memory which can cause the host to run out of memory -and require killing system processes to free memory. The `--oom-score-adj` -parameter can be changed to select the priority of which containers will -be killed when the system is out of memory, with negative scores making them -less likely to be killed, and positive scores more likely. - -### Kernel memory constraints - -Kernel memory is fundamentally different than user memory as kernel memory can't -be swapped out. 
The inability to swap makes it possible for the container to -block system services by consuming too much kernel memory. Kernel memory includes: - - - stack pages - - slab pages - - sockets memory pressure - - tcp memory pressure - -You can setup kernel memory limit to constrain these kinds of memory. For example, -every process consumes some stack pages. By limiting kernel memory, you can -prevent new processes from being created when the kernel memory usage is too high. - -Kernel memory is never completely independent of user memory. Instead, you limit -kernel memory in the context of the user memory limit. Assume "U" is the user memory -limit and "K" the kernel limit. There are three possible ways to set limits: - - - - - - - - - - - - - - - - - - - - - - -
OptionResult
U != 0, K = inf (default) - This is the standard memory limitation mechanism already present before using - kernel memory. Kernel memory is completely ignored. -
U != 0, K < U - Kernel memory is a subset of the user memory. This setup is useful in - deployments where the total amount of memory per-cgroup is overcommitted. - Overcommitting kernel memory limits is definitely not recommended, since the - box can still run out of non-reclaimable memory. - In this case, you can configure K so that the sum of all groups is - never greater than the total memory. Then, freely set U at the expense of - the system's service quality. -
U != 0, K > U - Since kernel memory charges are also fed to the user counter and reclamation - is triggered for the container for both kinds of memory. This configuration - gives the admin a unified view of memory. It is also useful for people - who just want to track kernel memory usage. -
- -Examples: - - $ docker run -it -m 500M --kernel-memory 50M ubuntu:14.04 /bin/bash - -We set memory and kernel memory, so the processes in the container can use -500M memory in total, in this 500M memory, it can be 50M kernel memory tops. - - $ docker run -it --kernel-memory 50M ubuntu:14.04 /bin/bash - -We set kernel memory without **-m**, so the processes in the container can -use as much memory as they want, but they can only use 50M kernel memory. - -### Swappiness constraint - -By default, a container's kernel can swap out a percentage of anonymous pages. -To set this percentage for a container, specify a `--memory-swappiness` value -between 0 and 100. A value of 0 turns off anonymous page swapping. A value of -100 sets all anonymous pages as swappable. By default, if you are not using -`--memory-swappiness`, memory swappiness value will be inherited from the parent. - -For example, you can set: - - $ docker run -it --memory-swappiness=0 ubuntu:14.04 /bin/bash - -Setting the `--memory-swappiness` option is helpful when you want to retain the -container's working set and to avoid swapping performance penalties. - -### CPU share constraint - -By default, all containers get the same proportion of CPU cycles. This proportion -can be modified by changing the container's CPU share weighting relative -to the weighting of all other running containers. - -To modify the proportion from the default of 1024, use the `-c` or `--cpu-shares` -flag to set the weighting to 2 or higher. If 0 is set, the system will ignore the -value and use the default of 1024. - -The proportion will only apply when CPU-intensive processes are running. -When tasks in one container are idle, other containers can use the -left-over CPU time. The actual amount of CPU time will vary depending on -the number of containers running on the system. - -For example, consider three containers, one has a cpu-share of 1024 and -two others have a cpu-share setting of 512. 
When processes in all three -containers attempt to use 100% of CPU, the first container would receive -50% of the total CPU time. If you add a fourth container with a cpu-share -of 1024, the first container only gets 33% of the CPU. The remaining containers -receive 16.5%, 16.5% and 33% of the CPU. - -On a multi-core system, the shares of CPU time are distributed over all CPU -cores. Even if a container is limited to less than 100% of CPU time, it can -use 100% of each individual CPU core. - -For example, consider a system with more than three cores. If you start one -container `{C0}` with `-c=512` running one process, and another container -`{C1}` with `-c=1024` running two processes, this can result in the following -division of CPU shares: - - PID container CPU CPU share - 100 {C0} 0 100% of CPU0 - 101 {C1} 1 100% of CPU1 - 102 {C1} 2 100% of CPU2 - -### CPU period constraint - -The default CPU CFS (Completely Fair Scheduler) period is 100ms. We can use -`--cpu-period` to set the period of CPUs to limit the container's CPU usage. -And usually `--cpu-period` should work with `--cpu-quota`. - -Examples: - - $ docker run -it --cpu-period=50000 --cpu-quota=25000 ubuntu:14.04 /bin/bash - -If there is 1 CPU, this means the container can get 50% CPU worth of run-time every 50ms. - -In addition to use `--cpu-period` and `--cpu-quota` for setting CPU period constraints, -it is possible to specify `--cpus` with a float number to achieve the same purpose. -For example, if there is 1 CPU, then `--cpus=0.5` will achieve the same result as -setting `--cpu-period=50000` and `--cpu-quota=25000` (50% CPU). - -The default value for `--cpus` is `0.000`, which means there is no limit. - -For more information, see the [CFS documentation on bandwidth limiting](https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt). - -### Cpuset constraint - -We can set cpus in which to allow execution for containers. 
- -Examples: - - $ docker run -it --cpuset-cpus="1,3" ubuntu:14.04 /bin/bash - -This means processes in container can be executed on cpu 1 and cpu 3. - - $ docker run -it --cpuset-cpus="0-2" ubuntu:14.04 /bin/bash - -This means processes in container can be executed on cpu 0, cpu 1 and cpu 2. - -We can set mems in which to allow execution for containers. Only effective -on NUMA systems. - -Examples: - - $ docker run -it --cpuset-mems="1,3" ubuntu:14.04 /bin/bash - -This example restricts the processes in the container to only use memory from -memory nodes 1 and 3. - - $ docker run -it --cpuset-mems="0-2" ubuntu:14.04 /bin/bash - -This example restricts the processes in the container to only use memory from -memory nodes 0, 1 and 2. - -### CPU quota constraint - -The `--cpu-quota` flag limits the container's CPU usage. The default 0 value -allows the container to take 100% of a CPU resource (1 CPU). The CFS (Completely Fair -Scheduler) handles resource allocation for executing processes and is default -Linux Scheduler used by the kernel. Set this value to 50000 to limit the container -to 50% of a CPU resource. For multiple CPUs, adjust the `--cpu-quota` as necessary. -For more information, see the [CFS documentation on bandwidth limiting](https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt). - -### Block IO bandwidth (Blkio) constraint - -By default, all containers get the same proportion of block IO bandwidth -(blkio). This proportion is 500. To modify this proportion, change the -container's blkio weight relative to the weighting of all other running -containers using the `--blkio-weight` flag. - -> **Note:** The blkio weight setting is only available for direct IO. Buffered IO -> is not currently supported. - -The `--blkio-weight` flag can set the weighting to a value between 10 to 1000. 
-For example, the commands below create two containers with different blkio -weight: - - $ docker run -it --name c1 --blkio-weight 300 ubuntu:14.04 /bin/bash - $ docker run -it --name c2 --blkio-weight 600 ubuntu:14.04 /bin/bash - -If you do block IO in the two containers at the same time, by, for example: - - $ time dd if=/mnt/zerofile of=test.out bs=1M count=1024 oflag=direct - -You'll find that the proportion of time is the same as the proportion of blkio -weights of the two containers. - -The `--blkio-weight-device="DEVICE_NAME:WEIGHT"` flag sets a specific device weight. -The `DEVICE_NAME:WEIGHT` is a string containing a colon-separated device name and weight. -For example, to set `/dev/sda` device weight to `200`: - - $ docker run -it \ - --blkio-weight-device "/dev/sda:200" \ - ubuntu - -If you specify both the `--blkio-weight` and `--blkio-weight-device`, Docker -uses the `--blkio-weight` as the default weight and uses `--blkio-weight-device` -to override this default with a new value on a specific device. -The following example uses a default weight of `300` and overrides this default -on `/dev/sda` setting that weight to `200`: - - $ docker run -it \ - --blkio-weight 300 \ - --blkio-weight-device "/dev/sda:200" \ - ubuntu - -The `--device-read-bps` flag limits the read rate (bytes per second) from a device. -For example, this command creates a container and limits the read rate to `1mb` -per second from `/dev/sda`: - - $ docker run -it --device-read-bps /dev/sda:1mb ubuntu - -The `--device-write-bps` flag limits the write rate (bytes per second) to a device. -For example, this command creates a container and limits the write rate to `1mb` -per second for `/dev/sda`: - - $ docker run -it --device-write-bps /dev/sda:1mb ubuntu - -Both flags take limits in the `:[unit]` format. Both read -and write rates must be a positive integer. You can specify the rate in `kb` -(kilobytes), `mb` (megabytes), or `gb` (gigabytes). 
- -The `--device-read-iops` flag limits read rate (IO per second) from a device. -For example, this command creates a container and limits the read rate to -`1000` IO per second from `/dev/sda`: - - $ docker run -ti --device-read-iops /dev/sda:1000 ubuntu - -The `--device-write-iops` flag limits write rate (IO per second) to a device. -For example, this command creates a container and limits the write rate to -`1000` IO per second to `/dev/sda`: - - $ docker run -ti --device-write-iops /dev/sda:1000 ubuntu - -Both flags take limits in the `:` format. Both read and -write rates must be a positive integer. - -## Additional groups - --group-add: Add additional groups to run as - -By default, the docker container process runs with the supplementary groups looked -up for the specified user. If one wants to add more to that list of groups, then -one can use this flag: - - $ docker run --rm --group-add audio --group-add nogroup --group-add 777 busybox id - uid=0(root) gid=0(root) groups=10(wheel),29(audio),99(nogroup),777 - -## Runtime privilege and Linux capabilities - - --cap-add: Add Linux capabilities - --cap-drop: Drop Linux capabilities - --privileged=false: Give extended privileges to this container - --device=[]: Allows you to run devices inside the container without the --privileged flag. - -By default, Docker containers are "unprivileged" and cannot, for -example, run a Docker daemon inside a Docker container. This is because -by default a container is not allowed to access any devices, but a -"privileged" container is given access to all devices (see -the documentation on [cgroups devices](https://www.kernel.org/doc/Documentation/cgroup-v1/devices.txt)). - -When the operator executes `docker run --privileged`, Docker will enable -to access to all devices on the host as well as set some configuration -in AppArmor or SELinux to allow the container nearly all the same access to the -host as processes running outside containers on the host. 
Additional -information about running with `--privileged` is available on the -[Docker Blog](http://blog.docker.com/2013/09/docker-can-now-run-within-docker/). - -If you want to limit access to a specific device or devices you can use -the `--device` flag. It allows you to specify one or more devices that -will be accessible within the container. - - $ docker run --device=/dev/snd:/dev/snd ... - -By default, the container will be able to `read`, `write`, and `mknod` these devices. -This can be overridden using a third `:rwm` set of options to each `--device` flag: - - $ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc - - Command (m for help): q - $ docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk /dev/xvdc - You will not be able to write the partition table. - - Command (m for help): q - - $ docker run --device=/dev/sda:/dev/xvdc:w --rm -it ubuntu fdisk /dev/xvdc - crash.... - - $ docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk /dev/xvdc - fdisk: unable to open /dev/xvdc: Operation not permitted - -In addition to `--privileged`, the operator can have fine grain control over the -capabilities using `--cap-add` and `--cap-drop`. By default, Docker has a default -list of capabilities that are kept. The following table lists the Linux capability -options which are allowed by default and can be dropped. - -| Capability Key | Capability Description | -| ---------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| SETPCAP | Modify process capabilities. | -| MKNOD | Create special files using mknod(2). | -| AUDIT_WRITE | Write records to kernel auditing log. | -| CHOWN | Make arbitrary changes to file UIDs and GIDs (see chown(2)). | -| NET_RAW | Use RAW and PACKET sockets. | -| DAC_OVERRIDE | Bypass file read, write, and execute permission checks. 
| -| FOWNER | Bypass permission checks on operations that normally require the file system UID of the process to match the UID of the file. | -| FSETID | Don't clear set-user-ID and set-group-ID permission bits when a file is modified. | -| KILL | Bypass permission checks for sending signals. | -| SETGID | Make arbitrary manipulations of process GIDs and supplementary GID list. | -| SETUID | Make arbitrary manipulations of process UIDs. | -| NET_BIND_SERVICE | Bind a socket to internet domain privileged ports (port numbers less than 1024). | -| SYS_CHROOT | Use chroot(2), change root directory. | -| SETFCAP | Set file capabilities. | - -The next table shows the capabilities which are not granted by default and may be added. - -| Capability Key | Capability Description | -| ---------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| SYS_MODULE | Load and unload kernel modules. | -| SYS_RAWIO | Perform I/O port operations (iopl(2) and ioperm(2)). | -| SYS_PACCT | Use acct(2), switch process accounting on or off. | -| SYS_ADMIN | Perform a range of system administration operations. | -| SYS_NICE | Raise process nice value (nice(2), setpriority(2)) and change the nice value for arbitrary processes. | -| SYS_RESOURCE | Override resource Limits. | -| SYS_TIME | Set system clock (settimeofday(2), stime(2), adjtimex(2)); set real-time (hardware) clock. | -| SYS_TTY_CONFIG | Use vhangup(2); employ various privileged ioctl(2) operations on virtual terminals. | -| AUDIT_CONTROL | Enable and disable kernel auditing; change auditing filter rules; retrieve auditing status and filtering rules. | -| MAC_OVERRIDE | Allow MAC configuration or state changes. Implemented for the Smack LSM. | -| MAC_ADMIN | Override Mandatory Access Control (MAC). Implemented for the Smack Linux Security Module (LSM). | -| NET_ADMIN | Perform various network-related operations. 
| -| SYSLOG | Perform privileged syslog(2) operations. | -| DAC_READ_SEARCH | Bypass file read permission checks and directory read and execute permission checks. | -| LINUX_IMMUTABLE | Set the FS_APPEND_FL and FS_IMMUTABLE_FL i-node flags. | -| NET_BROADCAST | Make socket broadcasts, and listen to multicasts. | -| IPC_LOCK | Lock memory (mlock(2), mlockall(2), mmap(2), shmctl(2)). | -| IPC_OWNER | Bypass permission checks for operations on System V IPC objects. | -| SYS_PTRACE | Trace arbitrary processes using ptrace(2). | -| SYS_BOOT | Use reboot(2) and kexec_load(2), reboot and load a new kernel for later execution. | -| LEASE | Establish leases on arbitrary files (see fcntl(2)). | -| WAKE_ALARM | Trigger something that will wake up the system. | -| BLOCK_SUSPEND | Employ features that can block system suspend. | - -Further reference information is available on the [capabilities(7) - Linux man page](http://man7.org/linux/man-pages/man7/capabilities.7.html) - -Both flags support the value `ALL`, so if the -operator wants to have all capabilities but `MKNOD` they could use: - - $ docker run --cap-add=ALL --cap-drop=MKNOD ... - -For interacting with the network stack, instead of using `--privileged` they -should use `--cap-add=NET_ADMIN` to modify the network interfaces. 
- - $ docker run -it --rm ubuntu:14.04 ip link add dummy0 type dummy - RTNETLINK answers: Operation not permitted - $ docker run -it --rm --cap-add=NET_ADMIN ubuntu:14.04 ip link add dummy0 type dummy - -To mount a FUSE based filesystem, you need to combine both `--cap-add` and -`--device`: - - $ docker run --rm -it --cap-add SYS_ADMIN sshfs sshfs sven@10.10.10.20:/home/sven /mnt - fuse: failed to open /dev/fuse: Operation not permitted - $ docker run --rm -it --device /dev/fuse sshfs sshfs sven@10.10.10.20:/home/sven /mnt - fusermount: mount failed: Operation not permitted - $ docker run --rm -it --cap-add SYS_ADMIN --device /dev/fuse sshfs - # sshfs sven@10.10.10.20:/home/sven /mnt - The authenticity of host '10.10.10.20 (10.10.10.20)' can't be established. - ECDSA key fingerprint is 25:34:85:75:25:b0:17:46:05:19:04:93:b5:dd:5f:c6. - Are you sure you want to continue connecting (yes/no)? yes - sven@10.10.10.20's password: - root@30aa0cfaf1b5:/# ls -la /mnt/src/docker - total 1516 - drwxrwxr-x 1 1000 1000 4096 Dec 4 06:08 . - drwxrwxr-x 1 1000 1000 4096 Dec 4 11:46 .. - -rw-rw-r-- 1 1000 1000 16 Oct 8 00:09 .dockerignore - -rwxrwxr-x 1 1000 1000 464 Oct 8 00:09 .drone.yml - drwxrwxr-x 1 1000 1000 4096 Dec 4 06:11 .git - -rw-rw-r-- 1 1000 1000 461 Dec 4 06:08 .gitignore - .... - -The default seccomp profile will adjust to the selected capabilities, in order to allow -use of facilities allowed by the capabilities, so you should not have to adjust this, -since Docker 1.12. In Docker 1.10 and 1.11 this did not happen and it may be necessary -to use a custom seccomp profile or use `--security-opt seccomp=unconfined` when adding -capabilities. - -## Logging drivers (--log-driver) - -The container can have a different logging driver than the Docker daemon. Use -the `--log-driver=VALUE` with the `docker run` command to configure the -container's logging driver. 
The following options are supported: - -| Driver | Description | -| ----------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `none` | Disables any logging for the container. `docker logs` won't be available with this driver. | -| `json-file` | Default logging driver for Docker. Writes JSON messages to file. No logging options are supported for this driver. | -| `syslog` | Syslog logging driver for Docker. Writes log messages to syslog. | -| `journald` | Journald logging driver for Docker. Writes log messages to `journald`. | -| `gelf` | Graylog Extended Log Format (GELF) logging driver for Docker. Writes log messages to a GELF endpoint like Graylog or Logstash. | -| `fluentd` | Fluentd logging driver for Docker. Writes log messages to `fluentd` (forward input). | -| `awslogs` | Amazon CloudWatch Logs logging driver for Docker. Writes log messages to Amazon CloudWatch Logs | -| `splunk` | Splunk logging driver for Docker. Writes log messages to `splunk` using Event Http Collector. | - -The `docker logs` command is available only for the `json-file` and `journald` -logging drivers. For detailed information on working with logging drivers, see -[Configure a logging driver](https://docs.docker.com/engine/admin/logging/overview/). - - -## Overriding Dockerfile image defaults - -When a developer builds an image from a [*Dockerfile*](builder.md) -or when she commits it, the developer can set a number of default parameters -that take effect when the image starts up as a container. - -Four of the Dockerfile commands cannot be overridden at runtime: `FROM`, -`MAINTAINER`, `RUN`, and `ADD`. Everything else has a corresponding override -in `docker run`. We'll go through what the developer might have set in each -Dockerfile instruction and how the operator can override that setting. 
- - - [CMD (Default Command or Options)](#cmd-default-command-or-options) - - [ENTRYPOINT (Default Command to Execute at Runtime)]( - #entrypoint-default-command-to-execute-at-runtime) - - [EXPOSE (Incoming Ports)](#expose-incoming-ports) - - [ENV (Environment Variables)](#env-environment-variables) - - [HEALTHCHECK](#healthcheck) - - [VOLUME (Shared Filesystems)](#volume-shared-filesystems) - - [USER](#user) - - [WORKDIR](#workdir) - -### CMD (default command or options) - -Recall the optional `COMMAND` in the Docker -commandline: - - $ docker run [OPTIONS] IMAGE[:TAG|@DIGEST] [COMMAND] [ARG...] - -This command is optional because the person who created the `IMAGE` may -have already provided a default `COMMAND` using the Dockerfile `CMD` -instruction. As the operator (the person running a container from the -image), you can override that `CMD` instruction just by specifying a new -`COMMAND`. - -If the image also specifies an `ENTRYPOINT` then the `CMD` or `COMMAND` -get appended as arguments to the `ENTRYPOINT`. - -### ENTRYPOINT (default command to execute at runtime) - - --entrypoint="": Overwrite the default entrypoint set by the image - -The `ENTRYPOINT` of an image is similar to a `COMMAND` because it -specifies what executable to run when the container starts, but it is -(purposely) more difficult to override. The `ENTRYPOINT` gives a -container its default nature or behavior, so that when you set an -`ENTRYPOINT` you can run the container *as if it were that binary*, -complete with default options, and you can pass in more options via the -`COMMAND`. But, sometimes an operator may want to run something else -inside the container, so you can override the default `ENTRYPOINT` at -runtime by using a string to specify the new `ENTRYPOINT`. 
Here is an -example of how to run a shell in a container that has been set up to -automatically run something else (like `/usr/bin/redis-server`): - - $ docker run -it --entrypoint /bin/bash example/redis - -or two examples of how to pass more parameters to that ENTRYPOINT: - - $ docker run -it --entrypoint /bin/bash example/redis -c ls -l - $ docker run -it --entrypoint /usr/bin/redis-cli example/redis --help - -You can reset a container's entrypoint by passing an empty string, for example: - - $ docker run -it --entrypoint="" mysql bash - -> **Note**: Passing `--entrypoint` will clear out any default command set on the -> image (i.e. any `CMD` instruction in the Dockerfile used to build it). - -### EXPOSE (incoming ports) - -The following `run` command options work with container networking: - - --expose=[]: Expose a port or a range of ports inside the container. - These are additional to those exposed by the `EXPOSE` instruction - -P : Publish all exposed ports to the host interfaces - -p=[] : Publish a container's port or a range of ports to the host - format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort - Both hostPort and containerPort can be specified as a - range of ports. When specifying ranges for both, the - number of container ports in the range must match the - number of host ports in the range, for example: - -p 1234-1236:1234-1236/tcp - - When specifying a range for hostPort only, the - containerPort must not be a range. In this case the - container port is published somewhere within the - specified hostPort range. (e.g., `-p 1234-1236:1234/tcp`) - - (use 'docker port' to see the actual mapping) - - --link="" : Add link to another container (<name or id>:alias or <name or id>) - -With the exception of the `EXPOSE` directive, an image developer hasn't -got much control over networking. The `EXPOSE` instruction defines the -initial incoming ports that provide services. These ports are available -to processes inside the container. 
An operator can use the `--expose` -option to add to the exposed ports. - -To expose a container's internal port, an operator can start the -container with the `-P` or `-p` flag. The exposed port is accessible on -the host and the ports are available to any client that can reach the -host. - -The `-P` option publishes all the ports to the host interfaces. Docker -binds each exposed port to a random port on the host. The range of -ports are within an *ephemeral port range* defined by -`/proc/sys/net/ipv4/ip_local_port_range`. Use the `-p` flag to -explicitly map a single port or range of ports. - -The port number inside the container (where the service listens) does -not need to match the port number exposed on the outside of the -container (where clients connect). For example, inside the container an -HTTP service is listening on port 80 (and so the image developer -specifies `EXPOSE 80` in the Dockerfile). At runtime, the port might be -bound to 42800 on the host. To find the mapping between the host ports -and the exposed ports, use `docker port`. - -If the operator uses `--link` when starting a new client container in the -default bridge network, then the client container can access the exposed -port via a private networking interface. -If `--link` is used when starting a container in a user-defined network as -described in [*Docker network overview*](https://docs.docker.com/engine/userguide/networking/), -it will provide a named alias for the container being linked to. - -### ENV (environment variables) - -Docker automatically sets some environment variables when creating a Linux -container. Docker does not set any environment variables when creating a Windows -container. 
- -The following environment variables are set for Linux containers: - -| Variable | Value | -| -------- | ----- | -| `HOME` | Set based on the value of `USER` | -| `HOSTNAME` | The hostname associated with the container | -| `PATH` | Includes popular directories, such as `/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin` | -| `TERM` | `xterm` if the container is allocated a pseudo-TTY | - - -Additionally, the operator can **set any environment variable** in the -container by using one or more `-e` flags, even overriding those mentioned -above, or already defined by the developer with a Dockerfile `ENV`: - -```bash -$ docker run -e "deep=purple" --rm alpine env -PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -HOSTNAME=d2219b854598 -deep=purple -HOME=/root -``` - -```PowerShell -PS C:\> docker run --rm -e "foo=bar" microsoft/nanoserver cmd /s /c set -ALLUSERSPROFILE=C:\ProgramData -APPDATA=C:\Users\ContainerAdministrator\AppData\Roaming -CommonProgramFiles=C:\Program Files\Common Files -CommonProgramFiles(x86)=C:\Program Files (x86)\Common Files -CommonProgramW6432=C:\Program Files\Common Files -COMPUTERNAME=C2FAEFCC8253 -ComSpec=C:\Windows\system32\cmd.exe -foo=bar -LOCALAPPDATA=C:\Users\ContainerAdministrator\AppData\Local -NUMBER_OF_PROCESSORS=8 -OS=Windows_NT -Path=C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem;C:\Windows\System32\WindowsPowerShell\v1.0\;C:\Users\ContainerAdministrator\AppData\Local\Microsoft\WindowsApps -PATHEXT=.COM;.EXE;.BAT;.CMD -PROCESSOR_ARCHITECTURE=AMD64 -PROCESSOR_IDENTIFIER=Intel64 Family 6 Model 62 Stepping 4, GenuineIntel -PROCESSOR_LEVEL=6 -PROCESSOR_REVISION=3e04 -ProgramData=C:\ProgramData -ProgramFiles=C:\Program Files -ProgramFiles(x86)=C:\Program Files (x86) -ProgramW6432=C:\Program Files -PROMPT=$P$G -PUBLIC=C:\Users\Public -SystemDrive=C: -SystemRoot=C:\Windows -TEMP=C:\Users\ContainerAdministrator\AppData\Local\Temp -TMP=C:\Users\ContainerAdministrator\AppData\Local\Temp 
-USERDOMAIN=User Manager -USERNAME=ContainerAdministrator -USERPROFILE=C:\Users\ContainerAdministrator -windir=C:\Windows -``` - -Similarly the operator can set the **HOSTNAME** (Linux) or **COMPUTERNAME** (Windows) with `-h`. - -### HEALTHCHECK - -``` - --health-cmd Command to run to check health - --health-interval Time between running the check - --health-retries Consecutive failures needed to report unhealthy - --health-timeout Maximum time to allow one check to run - --health-start-period Start period for the container to initialize before starting health-retries countdown - --no-healthcheck Disable any container-specified HEALTHCHECK -``` - -Example: - - {% raw %} - $ docker run --name=test -d \ - --health-cmd='stat /etc/passwd || exit 1' \ - --health-interval=2s \ - busybox sleep 1d - $ sleep 2; docker inspect --format='{{.State.Health.Status}}' test - healthy - $ docker exec test rm /etc/passwd - $ sleep 2; docker inspect --format='{{json .State.Health}}' test - { - "Status": "unhealthy", - "FailingStreak": 3, - "Log": [ - { - "Start": "2016-05-25T17:22:04.635478668Z", - "End": "2016-05-25T17:22:04.7272552Z", - "ExitCode": 0, - "Output": " File: /etc/passwd\n Size: 334 \tBlocks: 8 IO Block: 4096 regular file\nDevice: 32h/50d\tInode: 12 Links: 1\nAccess: (0664/-rw-rw-r--) Uid: ( 0/ root) Gid: ( 0/ root)\nAccess: 2015-12-05 22:05:32.000000000\nModify: 2015..." - }, - { - "Start": "2016-05-25T17:22:06.732900633Z", - "End": "2016-05-25T17:22:06.822168935Z", - "ExitCode": 0, - "Output": " File: /etc/passwd\n Size: 334 \tBlocks: 8 IO Block: 4096 regular file\nDevice: 32h/50d\tInode: 12 Links: 1\nAccess: (0664/-rw-rw-r--) Uid: ( 0/ root) Gid: ( 0/ root)\nAccess: 2015-12-05 22:05:32.000000000\nModify: 2015..." 
- }, - { - "Start": "2016-05-25T17:22:08.823956535Z", - "End": "2016-05-25T17:22:08.897359124Z", - "ExitCode": 1, - "Output": "stat: can't stat '/etc/passwd': No such file or directory\n" - }, - { - "Start": "2016-05-25T17:22:10.898802931Z", - "End": "2016-05-25T17:22:10.969631866Z", - "ExitCode": 1, - "Output": "stat: can't stat '/etc/passwd': No such file or directory\n" - }, - { - "Start": "2016-05-25T17:22:12.971033523Z", - "End": "2016-05-25T17:22:13.082015516Z", - "ExitCode": 1, - "Output": "stat: can't stat '/etc/passwd': No such file or directory\n" - } - ] - } - {% endraw %} - -The health status is also displayed in the `docker ps` output. - -### TMPFS (mount tmpfs filesystems) - -```bash ---tmpfs=[]: Create a tmpfs mount with: container-dir[:<options>], - where the options are identical to the Linux - 'mount -t tmpfs -o' command. -``` - -The example below mounts an empty tmpfs into the container with the `rw`, -`noexec`, `nosuid`, and `size=65536k` options. - - $ docker run -d --tmpfs /run:rw,noexec,nosuid,size=65536k my_image - -### VOLUME (shared filesystems) - - -v, --volume=[host-src:]container-dest[:<options>]: Bind mount a volume. - The comma-delimited `options` are [rw|ro], [z|Z], - [[r]shared|[r]slave|[r]private], and [nocopy]. - The 'host-src' is an absolute path or a name value. - - If neither 'rw' nor 'ro' is specified then the volume is mounted in - read-write mode. - - The `nocopy` mode is used to disable automatic copying requested volume - path in the container to the volume storage location. - For named volumes, `copy` is the default mode. Copy modes are not supported - for bind-mounted volumes. - - --volumes-from="": Mount all volumes from the given container(s) - -> **Note**: -> When using systemd to manage the Docker daemon's start and stop, in the systemd -> unit file there is an option to control mount propagation for the Docker daemon -> itself, called `MountFlags`. 
The value of this setting may cause Docker to not -> see mount propagation changes made on the mount point. For example, if this value -> is `slave`, you may not be able to use the `shared` or `rshared` propagation on -> a volume. - -The volumes commands are complex enough to have their own documentation -in section [*Manage data in -containers*](https://docs.docker.com/engine/tutorials/dockervolumes/). A developer can define -one or more `VOLUME`'s associated with an image, but only the operator -can give access from one container to another (or from a container to a -volume mounted on the host). - -The `container-dest` must always be an absolute path such as `/src/docs`. -The `host-src` can either be an absolute path or a `name` value. If you -supply an absolute path for the `host-dir`, Docker bind-mounts to the path -you specify. If you supply a `name`, Docker creates a named volume by that `name`. - -A `name` value must start with an alphanumeric character, -followed by `a-z0-9`, `_` (underscore), `.` (period) or `-` (hyphen). -An absolute path starts with a `/` (forward slash). - -For example, you can specify either `/foo` or `foo` for a `host-src` value. -If you supply the `/foo` value, Docker creates a bind-mount. If you supply -the `foo` specification, Docker creates a named volume. - -### USER - -`root` (id = 0) is the default user within a container. The image developer can -create additional users. Those users are accessible by name. When passing a numeric -ID, the user does not have to exist in the container. - -The developer can set a default user to run the first process with the -Dockerfile `USER` instruction. When starting a container, the operator can override -the `USER` instruction by passing the `-u` option. - - -u="", --user="": Sets the username or UID used and optionally the groupname or GID for the specified command. 
- - The followings examples are all valid: - --user=[ user | user:group | uid | uid:gid | user:gid | uid:group ] - -> **Note:** if you pass a numeric uid, it must be in the range of 0-2147483647. - -### WORKDIR - -The default working directory for running binaries within a container is the -root directory (`/`), but the developer can set a different default with the -Dockerfile `WORKDIR` command. The operator can override this with: - - -w="": Working directory inside the container diff --git a/fn/vendor/github.com/docker/docker/docs/static_files/moby-project-logo.png b/fn/vendor/github.com/docker/docker/docs/static_files/moby-project-logo.png new file mode 100644 index 0000000000000000000000000000000000000000..2914186efdd0d3a6223efed318a9e6f09d79359e GIT binary patch literal 20458 zcmaHzV|ZlY(yo(C%#N)|GMU)6C$>GYZ95ZN6WdP5wrx+$iH*~w4F! z)%8|aJ#|lrO2mlH7DyBnN}?$PCveLIvw4$v z%-AE)xa!-$e`)>4SV5AVMc81}0I9(3ZL?|pnfk@E!OIOw+RU_fX9Xf<1h z%79YQ@CVZ(s}06#ZRBITfB*%`T+~(P`6WVDuURE-mqP+Rzx%n({ks0)?>a5^36>qA zv%(A2V#-n}cV=NWqEP?7#CHIv-!2*^m3qi}5A@@uoJ|JD;gCF|BOOGZxSi)i4Fc=W z4f3@nyY5E2bFG#8`;Uutjy8ug_RTcE-=>rLGH`xUf=HGx%4dhN--l3U@0qZxp#JM0 zbbu>Mn#+>uY~|*zpzo>=Zz`363^r@P{=JZfIpy1(T{1i-UuKHwjNkC76;hGvm4-u> z+ezgS%%JP~=PXwM$POhzyOoY;M{EXZ3n#4H6DRDMTWkj2eb;0BLxJtjyl#4(!lkS> z#XjfNQrGzu=NvdYq5u89JYe0E{q96+Q&o?2!^A*|j zTkL`{wzK)BGmc3@|6|$zdZ;t$&NT#{xW2O4bWzgk&0I~E*G$2%ybJgro9KXP@I&U5 z5@XGqGOeJgcc0ny;_ruK|8eXNH1Jr=nH3{KF4DLJ(=l1T7nCAw27QkMq(5v_-%3uV z=gmsZpUYKhttq%Y|H$iALJ|MhQbsWFSd5up@1v{(*Il( zBm@EK6)bqP&q6?VtSPjCmM z-obN+)IW?U<;}V}^<89CS&`rZBw~q`xjxtSw{xjFJznVx%l~BjcO$3y`>(^BS7xj` zpUSCPa*&q-q0;@q`n`{~zQ|nY#);|h7zl^uBmCFi+Ax1@_+yFK9EF&#Mdq^+i$vIz z>qIlzDlx1XANRkNQn$YFTCDu6{GT{e0kG!*?=;;r9n-3X+-!4NuJC_o93>eUC%(m2 z+)6kIDQYg+&JV0oe$jQe_cMaRC&=n1QPOC8GYtp-7lrh~1l?K87Tk{ivRb^zu(V_N z;70XGU@Z)k)C&-900WdtWNhv2vU@+KHocsKi*9+HKJ41upCgi14i?XrAuIF6BE8G$I#*uwV2&p6r$&l&XWio-5nTW6Ex$O_pPI`@-w-WV8M8 z?(*HsbNZt%6iHgKKs>KhF<;3!^nDufbzjo2*jRuvFY07oX~@}vN+yl5I7)R1&WXVV 
zYSR!Wi{C5zqs{%-N0)PYcAa_jO1qRX?hx3xn+Mn9&HTZkW7aFyYyuFUWar5U4q`RZ^h6tBR1o>=!X{%3t0I4t}brJ&# zTeWs1butYd9L8CjWh`mZ5wr9wk~VN2>cJ3z*4w?Aa`lHUjRPf_S}*vmTqPg!dT&+D zA<}6A{SgB?^2b#^hsi0*Y=Lt1xn`NdM+e%+ll_f&A4{G0y(iYZL2Ca||JK`e)V?9H zQ&9vqh2QpA66db`L%Airz{iorNsy9LKLBrkEIyjK76oC;`8U*D-|cYXTe(_-4@sCI zxG>^puy&K}7d9iIA2P>MeKZ{HRN89vDSFPJh*$vLF9BC!lr5h?sORu=FrOvN6}78t zGlV1@fE10**X}@?2KN4Nf!|yFb0Z1&?!5IQ?OJlR-1dA1T&3RJa8U%bCs6P4IHQ*A zR)^5Wyp=cU3nLU)soPBwp%=PT5hAWd?&8Wb9Fd3xBQ=k=53H6NqFhT5eNqc0lM~9- zs|AtH77Bym{@`f^8v6?LD(0BBZyNqEtdpCQN8a@%- zbp=V3f`YE6pBO_HV11kh-tl!T!!G3pT%~@g4qZxeIIVx?)XORpwQnnH-OLE-6(04--Py$XCG>S@cs?j)G-Ex!^FNt zQfDp7F4?kbaLd~liKsl8=Lkhm&|Halmha!DXsIC^l1&gs=CE3im&@WiDnB_peWYFg zS%#yKYRUE-;*Ma3h;0kkx%bG(HiEc5(}PX_5FORWjvb z5$L#bxOsgVoye*|j7G{7XVC9d3hbR>o^U&8i$3>wNEHTbw_V^qgXfkFz=X;#g7rCB zLT4GVg8>#?GJs#roij;k%TJ%w-HOXL*`Kyo+`|8|ALW=$&FaPkH0Ay(Jvgf1gz1O8 zdxwc#IqlnCSX9~4;VM;~7mq+kBIf74KVQfOR%`V|5hYM9p@w(^dIYTQ+j=SGM{UR_PNN2 z_RCwY`E05VS)>aF&Jx2&jfm*t%;dHj;xQF+U92<^FPaiV@Tz~8a&jm>XNi;)@SIbj z)*Q?+;JS=qrMKs{S#Me{66r6;l#H!q%UisJjV$S#(1@kn4T`0#S|3msT!yQyVT0Ao z;Nq$Xty-H8l8DW+zR+pt+T>jyJk*zs-rxka21`Kw$!Kfx4IJUT~|QZ8$D zV=4>3@$Xhxc0;Et3*7Z7exoI2S-Z~}>zqwfrP+B{Y))=GI>ddgf;Gau!!M~ti-$gn zh~>?)s||hZ!n$g$?$NAJifAZeu1n#SjHe`7whdn+jWtUi%%k?x&g1`9B5{Cbcm z5bm}sWS0=0aLOT7N0kt@9PZK{0lckg6oy~M%TD!keu|kNwP7Z4iEuC4h>G3AdYfe= zo!;YBFuN*EFE+6~GEVx_m@}L;L$0s;y~uPIwQOC-W^jh=d>e}Fsc>$>n;1OAV@L_T z4SSekxoMH*_)aa4mkY{=vQeq_3^y9OP@1ortfcHGhX04-SVr|j-6i&(gu2F1t+dK z7_>9@q~@IAzARc9+T!y3eK+%3T2J|Cfivf=G)YsfSY?R!-Q-b@FqWjdbvVWLZAU1U z&yTV3btvyN$6$tl-C%OQ3y!0~bzb~VV;dBD#$;Wy*&_KHc;ZwcWDwCdsm-E|`KUO~ z?ewL1GMfQ6mTR89u~8_~|4UeGdA6+&4|sqIOKn-W%~#$A&*Rg3d|GA_f%PC`NxyIA zej;COA=-S$fsnf0=}BZ(Se9G0^r`gMU;oZGk;&q7CXOE$MohlOZsevq$4?EL$uBJL{H8)k4^5bx8(uo75$etR%o*XTg!Q z&4H&2TZ!g|SP&Yhq%jR@!ZKGcbiOXXDB70lEX!g=1wl@_Fh{Pk7~TozE7leiY1&K>gJ=1Tw}70Rxmu4mN2gOqQn<;6RY#i0 z9zfNV-TpvmE-asq&;ZbA8$(mn@rmtrJ&{zv_QW^zs)3U(IQ#W_V!DYi@GGa=xV8Ke zqFfDm)?W$}y+W#X-}PaOq>uOy%GK8a9|;Y8i9&5bw@Vg(y?Plw)45nCBPIRf81{s@ 
z5?NXNRJt8J#I2EQPx!x@2w3e!u@xmdN49&SGgx$x0bQQuA!3s~mRQMP<=NH*J&NB3?_1d0XP!ZIG;$z$026%k&_N?oRGz zGwS1&(8pA2F~_w=@4Rvb!hV=N3PSJC50pjI01ikG7>JVTtP%#=-|2XP&_$0syO!mi z&RL82?6cKAFub0~;>bkhw!sRjA{{>Y9JWrjG$|#X_zs*qQpKUpaK~EF+LkwV$}A)j zE&#MD!wbBaa))+gwt+*;NEH~N-K9Eg7VhbMLpuHTtw=K*&*F!Zc{8f2iVR#79R4-B zx0b~yRXf6gA4lmOqJP~ts^0f;8&r_W47N6OwmZEv9Z`Zs7KLHk25DyCUaH1|8`h%y zcAF3v+Af(>)bIXEr&dPFaD5~nc!+=Ku)q;RX-KM9tLDTN3dZ?D`krm2maCVOm=MlS zPx8aCyyc=IeHU;_eLDFpgUdVQ_54FliruuDl-X)a;)Ji{_jUiz;u!~Ag$|hmr^^Fx zuPqsQAJJBz35`g3-+@*ov0q_5{~(q0(A`8RIX^`>c4}8`=dOf&_8gh3eoXBv#I{v| zZhQ51huFAAT+Etl0tuvS%IZ~`47kf~OTLiN=SWDeF#*%eyIiKS? z-6dqxzJ|=Qer?Fi;9W7&j4~%PBF7j@4Qm71i7<8vsgZjvWM@dnUjQ527e}6gR}RHz zHBze4Q`#(UhQl~Yb+@OBiP9MQU)LRUd=~F9ON`l!Uon~*2sYoYGk?bCl-td zu>EJxLNjPh)TU9XW_Pt9Wk;H;vC0AVWQ7XTP5)5t>he7-?g`Hk=~piypH8Urn_qD_ zH_-aQdGQpEZ1@jShkEt8V2unr<9>*CiVUv#^w6S8VyC}N2+NBguFiw2orWatT=DK= z)z_SF_m=ZR2M8D;EO>7NY6;c&@uhs#4GgxlRlAl)Wzd-LIMVbtc1yqhu61sEpbg8+|SuOZxBrto9%{ z9km_Q6WnwRoz5UdNB4dY%taz#5r*#xjzz12l!e@Cr8)c%S{Y&r1lHl;S%#O?;qZS) zA>^wr))ux$&(@;!^2oVB5^wM);`Z#YD0`@cxlnJ)H`H%idN1Xo*UiU?He?VJdO_9b zcH!y#H0WX%pB`CnvX+}EE~O}(vFdFU=zzOE8b-n|{AHtU+8+Q2;vJoZ_Z%H#<84oL z-xcw=9UFALJ+?dnJemz>n9y3tdD2a)EN=GQkhS+=fd~EOwM^UNv&U@su0ujxrO7%R zm)xiYuG=Q6XfdrbXs@cOh~F4oa?gIQa#d+TI{BX>T$jV4gRXnT6>w)TRi!GBg;e!fRP&jElJx`BoOs>`(N-WK(xN6(1 z#jrn-G8ngl4uedr+$20WY@P+?%Pc5M;IXf>?RFflR(igqe)eiTDVXR_#ADKCitchY zmB}a09Od2W#J#>z>)OffreLyfumaW*%=D1I_%dgi#cYS^R*ZN8I~1V>H*NnXSir&t z>*u?YwdyV+rjb{Z>1_5pOsgB4rM=%KoMRGnNNiRQQ-3o33H3U$QUdH3l?_JocOR0^ zs)D6SJoKn*ceF;cr*LJF`!qI=noqNHAF>*^AC12(1l)0p4I{@>J*vzpWFb5^_D{5) zoyuxd?WT*AF=ei5We9KEl^Lut&qZXD8T7l99(_f4e|BlIm^gc8rl+5w$d8RC404Jn z9Nc)$a-I5plHTJ+5nT7Vob%l3;I2JrcBx2?noI$8rk{JshVNLHH-H}fp5;-!tSqKT zZH-oYG7W(BOjBD=OIZ3~G5AAuWmQ!|3CqiSf-|2T#Zo5rf_v%yJdo7th&kZaM3V3Nz`NtJwodhrJ<(2*Rka zOb<=Y`PE*d(<;aTMa4o1(YV%@ks{u9Ck#rfX=NY}hWAwTx5~M@ z!?b2XGM(y)+^%d9H>1073YEW=B;YBQhRjql$Kna=Xc-N*c9X(?4WkSZ1sxO)&!YC$D5|K=zV@vB8O{ 
z#~etf_NZ?dr%{Es@ddi#hS-CDDxYwP56@e!C#8ER?c2`@&bexZ#b%Lc3-}J4E@de6 zr_>w}FrCg9N?C9%zMUxu{x!9j-?E(e$n(H(#e2e_J9nU&+sg9WdvURV!K}6Y_nxqM@|Ss{}Ka9wfGOReHBO zUGU6bEUcIL4EOTpEH>nZsNp<+A|~JZqUw`w3a*Oui8L_1ui`CV)0kY{j=$*7gEA7w zu)J`lW6_;WuPKPNV1la@s(w5z8!18Q-*LVkMtKYB6F5vV+A{+YgUe(>R<2Cl)G>*^ z-wg{@fbn81jR1Zu;N^Y+(~xhgzKM@`G%39K`S$fhkOjEVMNPVwoaJy~F;Rl2InqgS zsGWHkOJ+zS=KeG6RefL+2`ZhI%6J}C_bwUav_f&oyxn%!nMHNEO=BskS((zHs0l?j zF~jlA`m!Ch*GDtn2BZ&7%KXBR-ofT8GPI`kUqdQJ{m#QZIi0Q~l))(6e!b@MF%gUz zmP7iPN>7EwiU=u5GzyjEC_yNi`HG%tjIvKit}w9abEc_i!NxxU{uvXJS}#mi#MnlC zQt)|19*WMCfhy(~uN1@A+^6fV+e?Z*63YdgKLW}Yy7(!*h|f9x@?Y5v@MNFb(0Sw} zABNQfpjdBK+al^sr}oRMTkj&BZBFK|*jRU&8Bq*w(mM%4GS=^^guP$GQOQ1a5z(GwMMo~77s|_kGtt{QYSWMVH?b@t-+(8o2=(%W?2Vacwi^=>o=w_70;<) zuaHA;KuwtWV&A%lr# zPggV2u28HWxL0C_)Hl%JxF7_Dt1X1)zv zz#mt&KiK0|vdDmOJ3nm+PHLFq5|ty{^$3p>bk)7*UU)rNSmbM{i=rt94<-q3Wh0tR zeTqHGDmdx4IY!T5%S&a=5rB7Q4aO~w{w*M7Rhp%9$pB@A}t1m%X|s_eCx(Q z=aHs%a;8R7uuI>_2ihT>Pte3bPW_8=ch{)v7fU8ipm{!eKwl;l zD;Ba%`@d<{W}ACZRP{PM5>V2XQqzcf41PI?OOUK$tjI`P*#}5@e$Tw$QF;H!v7Xj{al8+puZ?Jy19K-6e#CtjK?D!|Vci*x&-1Cv;TU_6l-fgyC*9i2{qN?eHkWgjJg@A=vE-y+@#X)^tegApAC&58ngp*AMxidP`YC|Kj*rGSw5Twci znV8;(n}{>l9Hxw$(EWYeIbqf+V!6TlYCE7fR_*mZRhn zze&n4rGaO%4KXJCNav+`mHt-tJ1&LZG>Uj7=iUx6#hnbz$Lv-0>E``!Y!(Ro=wg%} zKca0zact!G`Vf{%59Q+|5cp|XVYO^G6C{z1f4BFb8_(Gvbu@p>p9@dG;!L7it{CgZ z2q+k?P7(NU8%8{-0eY5Jgwd7c`FP@R+pX}bROkeRkhAgcVAW%$+*28@cpF2kEuIe4 z=x4d5Ga^#NM1|d-iN&Yah6u{V)25S0an(1EpKiWBI=>ImwR!GA>vsAk0;Vw6aT(zq z+&r%%?97GSa%wnYYXq}(77C1b@R%(#mu1oWDD5X;U($WT5tPJpjehGRUPm@xAbI3Z z0%m+wsqxy9NzrF=bZ~<3N&M^`K7kw=k6bmpd0N9z$tW9O*1F(vxo@!RL4kOaD6NlXzvfN9>)bMI2e;U)|0Jk@tR@ceD(&X#cYMQ zXr!O(@N<~i41nx8Yc+b|-`R2w0ck3SC%v`r8tzZ-)9&I7o{&9DQ8{QPM_^R$qZp(G z9vT>g;!mL2fU_1hfj3U5Ca*Y2gc-+!Mij&F6L*nn>*`l15u2mJXHa@ps^gy9mK?FQks;IQq!zf$>nuXQ*w zB_C!&jUZYZoI@8dZo#2V;I!lV?0oUNIo@5UjcN5AU2bN^{GSs#gt^90B2q#Wt!?yKV7}5(ONFB-@Mge zh{+;CuIP4+6F3%iD{DN)u*-WmT6KJ&QG)~>MH3|-CjQ6Ux zJc^7PYF};dC8S-;`&pyiFMlzkx)@aBC1#px1U(AL`fcB`%}XD$*#4eRVO~J5Jo;=Y 
zKhdjMsR7yrN&7tw7xTwthZx}=U?hhJn{ca3NGq#Nc|_kh|F?h*8r^S}tUtq%Rs>+& zoUdxM5@)Fs1;KD=$2)(#mAwfB5$cUL9jT^*ZZn^N!B#Z#lQKNx0ei3)IA~QW>loyE&Mm^4T$1G(Y@_;8p+1QIp(A0=s-J z2Z`yH_vX=xG)jBV4o#algt{`+RdE83AWJoJ3-GtJ>aT~IDEf)GJ2({S3xLSLP9hqF z!Oi;?g(Gu>^0(-+yD3?CH-jyJm#aa#TF4CfGCxHYuPx+M<6^^TC`u?%961@73&7s^ z-UkD=u)?!km79EPCKVtQbt=uUtxbyyiz1c$m5UTMLX$j_9GY!#SUzXsRKG6A$xmN_ z7CR(6K=6JHvy8xLx?HQBG>g+k{4v+47CWW^BmLP0`|#;LlUz~QEbQx`&JkCNJ$IvS zYdV-zuM*=;E-mgeWqC7^`UYoRnRb{yMbjfG)`jh6F2yMR zx!U(}G~FZ?8)9BO>8kOw^zc4bl%-%n#_^-6xO|IAy_PgkdNlIq_N@Ep34n4{CG&9} zdlc4kzv4ke%`)@D z1)5YmYuWxUuqOLUg4H5X_N@?wtJNLVSl&_Cx(LoUKm88cUCa)(D;948!^Lpkaj4Y? z)m;b=SqBDqes~`O)Bm_hV5`JjDMD#a9C^zO7X%A*0e8gx3 z6l$$f{}X1(Z@HT$LBy7D@{D6U4tt%^Fly7Fg6e>sZU6zB9H0;c78rWG8mcfK#<3`5 zDgNd`0|N6{G(7eBZY%fnW#yep*2ba+elij>xaD@jAh-524&Y=88qD3T-lZRu9*Rk# zU9)iRv3={60Fg}gevm1ZI{hYbGaV#{ZL8$yGZsq4tX_x_@47ACAw&WmLB1nSdi7U0+e*Dm_OgCqY~QZ`s#kC!Od3!dEK@_ugBv}+P;>e`AqINU(Qyi-?Yus6?OQ&sc-VeH%gp;*jgxQ?avF(mm#WW^uVl^Kx2qD z^8oK^oUorcR_wNFAV0tS1qOM0$A!>>OwW!G`1Qws@GEmM118FQ*ZM+%c5-~|LmKoM zT$Vy3RP#n*Lz(x@SCrWb^_ply`W8nCgQ-3Wknkoil}U~&?;4m2g5Ch|fF7)q*6yc) zd3%#Slyv{0PN!6y7W?#-S{JYH?wP!P5m#=Z4mfsFIoz0bDSCDTYM3NF{2{2gN&ebED2W?lVC$@@bD#R!kj?HNcKBnc4cBK3w+{&z|qRDVi1 zw6ZG_STl8re>EY9sVW(X-z59S;s0@UO%|Q=0)sLEg)#*z$l2|EAHlNhxB3f=BD9A( z4IZ)sE8t#+^ERS?M{19897zddrX`JD)>4|V`6-LdFJ42eeK70E4&bNTA=-dN`%&Y6 zt7s!|Yqaf{9cVxaLiEzY!5HozrhlO;qH@&w)zRdRr_h>yt&C|GR!d+?Ea9&HZkZM; z`1m10Q2gNJ%DAiHyQMT3$+@o6i2l!DT!;~ruFn}%`i5%FPU=XQKQI0yu29Ye+)XVq zNtB5KhjjU%fVn@abmfQ!F6zN!|5Tb-Maa>QT}c8mU63RfE8w7F*+HAJWzBP)*{mIc zdsiN=Ff%tL;_@o8rJT^~f#RtZ&XEw}NeM({bmx8ft|Hi?jtgIFKo7WLhD@o}8aEMt zc04Lf8clTAElT~R3yw6DK36h7p5x{9Bo?_tv0dG-4EcOhSw@{*L#24*JL)!!zb z)6ED{E{8{c^93nijj(+-|Da+(q7i?a#*ScZ_0|u)&+8_a`=eSiDv$NyN-89UxqA%r z7^EhrD5rjkn`%bmEtW|hNGCf#YY3l#iOazjf(Q?M?)PxoAl0qRRiq0M#-p&F;tQU# zUIv>WW|&?Dv-QhPR!V!`ZaGNG072OQ`150eUC|Bco$+N6ql~V#x_*@OwUP<^%P@hn3 zB0`T3FL>$|ZM5pb;rB-=Xt1Fmz_(x#3Hh?)J~bJNq*N-y!43 z#Uwckx+7%{zC$)Djv#a`!QS1?xrv8E;DI^YEN8sou{I3#%@_r{-qmRRGC0i6JlMt9 
z_k`5z^cDF)%0P?ik19@M`U!gvoF#GEk3}&fM{n^ts*bW+Q=u|3$=}h3%*|?4)eAML z4he*WSc2umn4yTo%sW;rnA64*o`i)5p-Py>xFTcE7$wXUk;>&5qtVCWSI%ewvBUz3 zAWrT0Eks(*ab~F!l+do>g|E>|R56?Q;9(GW{S5uVN9FgwTPF60)0n92s?X0EC=bSygB(q#PuZVKT4iP; z(|W3^G}~iB@k2C85JU`1-Hwn`l&{Z7VO7kA3a|Hu@}S1m=B$cssFWCq+3Yl?MZ_6G zsQ~u+kScJ;Te{rkSTiflF@$Fl5%`qs6f$xCbA?j9A4<^g6mn0Aulc0nSixW$W!!hi z(x74=mYU^d=ZO4QFlz2B3I0I0eSmkcz(1fFOyJ*g7^A+XeeC?>bj!^6K zYrpk$fJiW=1JPka2B{66#UgER1LC(>YC+R-OKZOg>sGJmEKqa+5QfvgV4Ah?B#&gBDQ0B2ijn3SR&E_o{ zjv@x;f^yekAle@(Z`qurA>`em`{QdP&yk@RH~Ec~Ix4-6mbq*WM48F29cM%=0@_gr ztGC#5w^w_yOcVubkP2wz07i%az?b9|S5DI#R=m5{G=o8n=ghV4Ahf!b%|KpkaB~sg zlbJyZDn7DW?2C74v+zd*x86BS(8R)I?(aalH1(EI0%^v-%qohb&CR!o zke{`w1^unpo5Fzg-5}w8=8WUhM@9Q|hXYiVmm1H0O+YnIM-y9pP@z2=FOMZ=*(K(5 zlB<1zy4f$TpJ-XY4b*321WS3Zxvy$@MC3A9jpZ6nlRh>JYHz4wr{pmX3Jgh;fM#8KrpG(*?g{% z*R+iSl*3LFtU20@AgN^NIW6f!n1O$z^_+j`{TXJZOr|?DgnpNIUT?wBY;c)hi*-+v z=J`5^_-uLnJN@?PcDYGBz+rQ$;_Q6nw*~8N;d;sE50KD8AeBGQ&DNy?0RmbXOnL2b zZFjghm6L6QPC++QUz;ChX3O}fSSP&93rP`CTj z6(-$C1E9tC7Av_*Z@s7wmtlD}=rtXg;Acc?nVNEg{g7bYA9}^rSnm5%c?F-FT#!ch ziA1&Kw|N9g8M{f_Ya%qjniGigbH(uz$KAL`ysF$ zLJ94Ex zH@)IZ1r6>feHxP8M~O7Dp@LUtP|E$56dQk?17-R$=bBrpm{@7#QW#E;ZlECvvZ@7P zG{~5Te$=EdGZQ@@vpRHDc(>rR%AvUp2y|wf7KtAS8A0( zSos_My4g_YM1LTQ<~nN6P>-6swth3wb}U{qLFIac(X_bWEfH&RBn*@0;YRNVlR|d3 z3B+o*M^)bE%&`QhO<|C}X}9vX(dCHfA{xn3u|W$ngL{f|GV+3mm9~y_p%RKi&A1@! 
zq2#TV&`dXQRw<{K%$Px~N0~C0Tb2m(hQDo&h;;+wFP}S3u)Q9ZuHBCuBBg|kTwxv< z*qYV^T>Mou#N-#ZzcQ-SNll&PJYlukBZ&K<(di?92u6&m_KY^-Q$riPW&qWo=6Fgp zRZlZ>G{H*h&!H&Nc2jMCL~$GLM-p#$E%c}nV5N=>S3`*P3``SypDhum_d;8%b5sXd zkf)nbs5yo4vNeK%4lLX26BWxyKAgV+f9s<_KjRs@>KQ9q9)A@$>Tm?Cxtc=|LZyua z3NDF}`}K`#2CEUMrow1J2FDRzw3-dQNiE+|>*LaDG-vg=vsVu2jhxrwRHxO>c(B$f zlHxSV#guxTxkRc6jB2ZdLFq51s94g%F5?DbO;!D7E1skkBV!)n`U(p{u^Xjx0t2j8 zrpTOyX~eltKRnw3xis|T`Vn( zQxET+m1;{Ho&lKD1p8{e)~|#cZFJgW3ZhM+O;#(el%P5{17I&D zSP-V=tD5d5q@`&_HViYi0b`sj8juHTpbB|*K;49|L!w z-(hBR8*}fIl131^hi_uo2)DNQ-Vo|W4zOO=n5s~t9afv#RDBKgcIRzyS?(<`B*YwzMF$*uNw z*f#cPL7dS%z8X&!r3u2ytZ`#Xe`u4Ca zvQmK-cMt!?6KYVAAbARNRd?EQ)OCSLR|jKm2B%N4nyc$s&g)s=1TjIi=zF_c{rb=> z)Cy`8!Gb)Nf@yx=M^^|oKbkcXl#}T=b=fkVOXl;S6I+DGw}D?5nfk3PJ`wC&zXa`1 z${k`P6Uo0(uuUBjwx@v)dq!`Fx|jj{^F}dDYt=!+X9x6pYBxbmYQMSr=SSET%ILGI zHpfzq(x4dyRLz1MHHHFK?y}aK>NBC=p{Ftlovpocjmbl~N8SoeyD?qj6TSGI#OjNv zjKn!vl^SdcoRRlhes!b7>t--J(MRrM)+e`A#%Dnk+0=gUai0NoB@@2qSuaMY)+dzo zJ$EyV@uOW?%MtPjH>eFxv5M6R3UWYzfK*RnmO_EZALJZ|#~{wVH$oAh52?qrEmc~x zTLTx6S)qY@I$BvDoBfn%CjsqXwUirUoNf*93bT$koXipP{v-6nDYnFJ708rr6a0PL z1vsKyh__fyokD|YYe?J;v~)g??nnBCO&Z%^V@ln^c(LlI;D%ibzEWrA$H+fpo1^pYeA3T6$cDrCg=N?uhSPMSgl@N1q^{RjAC48eED}oV+v<-} zlgxS=A?noHYmRt#6=HD|MqVjef2{3R>v&3u$Aua~UFgC#e6_#tJBc4FbUL4&dte>C zi7~Ptq#5F?)$o>rItsLMA_E4M$OK$i=LD46u44`uT$Lt_zaX&5a#vBbEIJOPQw!lu zq-Tq(cmj1pJ>i57u(|&rv=~Qmb{44t@7d(n3s# z9T}^Y&}sWeTEEIr%2VsLwqPd zOT@lqh16;ifTTZTru!md_NA?1m{DgugQ}pgx9&=?(i+y9Pf|ANoD7r%_IQumX}8V3 zM}7)f36r2%wfQ_|-y8C@}iPUkhncqC&ZS%MYK}DqTiMrEpefud%ZnO(I9F6n+H=0h(DZ7=d0-Xm9>8w8q7X(w9!bYuiz*``K z)KC1KhrmTEXDsKzjO;bzg8D8~F9J3xjPi!1M~l9}sam8ihsq9I-_n;p7LpnC|PZum_{w9h{{aSjTT`(OIC* z>|(wFyI0LC79J{!w>Yq_hEn=0(tg`$XKG70Ht0yiVHDV)$2D&W@Ai+sZ+H8?o3_&W zLP6LD`M+@d3MHNX%^kp8_inz<@WDdcN*S@(@O5ib(|3zN zQzRy&a%CpB&y0PkpSLZbsME%Cm=m5x-pj|LPEQWNkR3IdSTbZ$l%&=U=X|IgQ!?ZG znbH)Twr{G~(c)mD%jY^frpFFYBmvY4`&UG)_hB<+U09{iK+6ektQXt(C5m0uHQqPs zdpm#EEcRUEb&p*x(J%x}VX@f(+n$8t`W&+%gu^f21 
zoj5q{L`UlHUzgU>6R0&U>~v}L>F$&HnQiN9b3tOmWvD-Bz$j5Bfs@omyCJHN%A^H- z9}Em7?caX^lqSYyBUv1F1h|I^-a+#+!f1R}8)%`q74{FyI}$GC84a7f`N}2yP#Y=$ z`$cN)MA~1oqDW+J_7cj3y+K|Dx0-%eo{xGlp0cB6{*81!)h2CuuVe}*ups>w&Bucn zA{5SCFeGi9=5Qjn)^LB5+70b1x+Cl&` zl}5LxJXO+CDPtuBQcz7K#SM(%z%9N}`B+Dxkk&!}trLqz(;S*hkkE1!!U)!aM8uvE zT*@ix#b2pFh!>{)xt~$B{YTxFLAK#?p9TvYQ437y^DCG-8LXKo(9taLG)3Llh@7g9 z4KSWU@qcPK^Khuz|BpvB6KVz}vJ)c0(;#FgMcD^qE&E`MrI0Df`j9QmV3K{JVeH0I zvTs8n`;4p&WiKLS`5yhA-@o^Do%^5rT<89r_wwSDTsyzjR#<9$nx_khPAqwXDsjlD zkB;E=;Uw-w<{uPuZ_ig3)JOWLg*A`&zeqmsBHy`k#s23LM2NAif>MfS1{Oa$rT|IR zvd{u_6)&bc?1Nu7#~1Iud6?QY_5{R_roK;oyaJo|Y2h4s#sC6x;%@A%)4VN{Th=$7 z9tG_B*tE0#`$bE2%1^Pq+o|wV7r7G5>xrFmuz`!f%`lC?bnwz!1lIN%c&K$AEBy!x@zgp_@N#YF>y{-vS)6(UNaV(zXcI zOQ7%NHj>uLawOWj+B>4`r73Z#8UtAcZmZL5eDZi=RnlQhk{}H=O46~N)bGB=6r-lp9Jm;i*T zQP0LFJ$Jq|b{#r<6j@8Fj`lXuF6%VH1VGGHk?_s(l0EflARt0%N@!6f!@}q=GW;cV zY|bySMIZtblwL{2%tGPdV%~g{6oubU8Bz>?S)3NCKV!o=Pe~G#&-n8$BxqezqbnZy z`8@y877SiOn|Sj*VrU-gRcw|JqJ@{K*RUj-&Xgq>Ru$eZ25pVjiFxQ7{26ok-b&w{ zAeXHvFPaqE!6n630n~-JEa=rzk9ZQK!go(9D0C|<`4^_hn1n|o3wCkoKDj1JC!vIy)E#FJ--}q%BFZj**!#%pDqFSpI z5oNYV+V-aly4qIhCEN2pz}UTKDUJTzF}U!%ma#Z0oXE?l=-B%M1{4?J7a2?O7>IzD z9>t2l%ksXZ{m*VbkRSHV=^MDsG2DOK*<~XP_==ef{df=aYIXYE&(T*b+~06|SfFNo zLS`bXaz?ENZHy2cyP-d`ZQU7K?y@U z3rO&(^z1Hu4Mt^Q$ylLhC!pYVrjHtB;fz{_#Bp?+MO165mvxspK^uX;4%oypZT23= zMX_o>)*zll!>$*~Iw8~spD3Ds#8$~nTHqS%dRv5_)tgZv53DPLoYsD@#cLNoDa+y$ zVos){T>jw0c|zJ@0K_)ahe{q^@8cT!q)4(Z=FaGD6Kwtk{YnkWeaJ48p`IyKWkLD~ zy#dZmv81q{36;|gx5XI`i7?Hq{`x*Y6f|~<{jhANQI9rR9dFk=QDvIHa|b)!jBzIz zwo;Dk@GR*hj#4&T#2`*t- zn6gcS&q)BZVo>>HcZNzd_{3(n4^@_6; z%MA{ObQTYxz!!A2oj0rKpbfS^A=Tmu1iQ)VM&=f*=U7n$%jwzAPQK!7jC)B|G8u%g zitgIG_{L52;2kps8>JBcHCYvk!`Ir2NBN$vE}t#G)w>L}!GN3t98qs}}@gjfebVY3F+SKs6s^+LrsnmTs+h{HB zR-iShZe{E_VAptc-kuRQTXn*mV6=+ee8!A=hSKCFG>#hy=1o*ON!oL`I>d(=T1<@v zi!h7r$X9xrimdLn2KV!iPyp)#+|;JbW;z^Fi~QL(DIlDM%@#A{u^`jNuNFK&njzv~ zM^5Dp$3620J1}?~A^;@CtI7RyWjc0|DN%xO*8wQHJljN2)HNl@E$Og z1jLBWfh^bbgM2JeNnuUDP$lVRVc~u0)dA48cOZuJl6q90T|+2m?ebvMdpQklv2fW% 
z*#usb-I-7DDei+IwKX>srl3kPlM>GiVvlzjh>b3@Te<`MKz!^=j%usvf&B9 zWRe-UI;kI%iTiTcuCJwDV@(%2rRaOvvGEez1(P5AMn%V}d?tD4E{VoP$w|kE7i~!2 zy;K^@p63O4iv3G}1k^BxB0YGXt)ziPCdXO8TQuXznNJfYZQ*`}z=7AmIx%nOb0sF{ zod5Q6EpH?rZ7j6Yq+KU@iGsr#RJTYFCU1CeltiUhHwRyx{axeQeKq|1#rQgCZjUy& z2`>(3L9&JRqe^vOXgAgv*7?j3KBxu_A>6iizDx!nS;aa2Tu=O zDa`2iuvl5fjcW?kydc92zTEQWlh>|+E{zt=a)z+`vYxM#**~TP742^1+KguYEO-^P zptGYu1H>+3zqG%seS=$H_{!c?XP-9KH@fz#6nT8Lw+%6H@*YCLnZA0ajmP?4Eof8; zJ;ye^YrQRR`>7FikLK`=FLXF|;%zIujmJ6eoMb-iR6isGxR+C_-XUF>BQihL1A7%ygE{TwZpOdtcuB(iaK54kw8OuB>WyIXLILxV0{Ak$F#09_ zubm{w=yN4kW;on&8LI&?e~Y`jcJo@e0S6*(_BrA$(hzy{;giG0MhEg*LnnAll}nU% z)R7|WZC@Kn_shP&JpKMf0LQxzTAGsfF8DOb{%>~MBG65zW3tXkOfe5=`>VlI!O!}S z{9Pd51BEgazLcITt_@JW7aX_2bFnkXOK)n7#O-4yIOY2OLxXKn>-OMLl3dM zhXMW5BFon1q#Wtz*9jAuF8cy*GWw2SjDrKMf(OdKKgUkzW#w8O99J@uPwFPEUB#=aU68Wg&bp( zaL5H_c_58^QCZmJq53z$eElK{-N0P4R?0#SEznthaFayi%`WCS zk{p_rjPL*~1MVnI08c|GHSJLc^dTYWY)qIHDHnCW)ga`Y`IB<%<`)k88vmrB_|Yo* zHjRKIVfu10;ms-YnV20-mY$RcsZqu?ug^+H<<~I@R%WO3bpH$T#Uay0HLdTDR&X|L&a>X2t5^RsV7Sr&0p9YHZ*J%~%joua3^w)i~&YfkH=K>zV%+4uPTZ z_>AX&vnJqLaO6b>R6~1D&fEKaiuku4%7%8nIEQzkohi#UWg%VY3E%sYh?Ja6c!;oe z|ALFOJNf@@S0G=d{VI&y^aOBA^9WvJLm`XWj0+V&aJNcoU92*XXq5F;LMlsEcJy@d z&8QzsoSrr`UP(Y6KE@@b3qzNgYt{~QyK-Dh#)n!9x(k_jAcE?7N*mLs2+P%^fj@j* z_P0EeFZqd(bFy$!*sc4>Rh9r_LN{jWZ>r{)Dz~WzK;y8%(@!^(0hvpD@0r6pAb3 0 { - cliDoc.Example = cmd.Example - } - - flags := cmd.NonInheritedFlags() - if flags.HasFlags() { - cliDoc.Options = genFlagResult(flags) - } - flags = cmd.InheritedFlags() - if flags.HasFlags() { - cliDoc.InheritedOptions = genFlagResult(flags) - } - - if hasSeeAlso(cmd) { - if cmd.HasParent() { - parent := cmd.Parent() - cliDoc.Pname = parent.CommandPath() - link := cliDoc.Pname + ".yaml" - cliDoc.Plink = strings.Replace(link, " ", "_", -1) - cmd.VisitParents(func(c *cobra.Command) { - if c.DisableAutoGenTag { - cmd.DisableAutoGenTag = c.DisableAutoGenTag - } - }) - } - - children := cmd.Commands() - sort.Sort(byName(children)) - - for _, child := 
range children { - if !child.IsAvailableCommand() || child.IsHelpCommand() { - continue - } - currentChild := cliDoc.Name + " " + child.Name() - cliDoc.Cname = append(cliDoc.Cname, cliDoc.Name+" "+child.Name()) - link := currentChild + ".yaml" - cliDoc.Clink = append(cliDoc.Clink, strings.Replace(link, " ", "_", -1)) - } - } - - final, err := yaml.Marshal(&cliDoc) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - if _, err := fmt.Fprintln(w, string(final)); err != nil { - return err - } - return nil -} - -func genFlagResult(flags *pflag.FlagSet) []cmdOption { - var result []cmdOption - - flags.VisitAll(func(flag *pflag.Flag) { - // Todo, when we mark a shorthand is deprecated, but specify an empty message. - // The flag.ShorthandDeprecated is empty as the shorthand is deprecated. - // Using len(flag.ShorthandDeprecated) > 0 can't handle this, others are ok. - if !(len(flag.ShorthandDeprecated) > 0) && len(flag.Shorthand) > 0 { - opt := cmdOption{ - Option: flag.Name, - Shorthand: flag.Shorthand, - DefaultValue: flag.DefValue, - Description: forceMultiLine(flag.Usage), - } - result = append(result, opt) - } else { - opt := cmdOption{ - Option: flag.Name, - DefaultValue: forceMultiLine(flag.DefValue), - Description: forceMultiLine(flag.Usage), - } - result = append(result, opt) - } - }) - - return result -} - -// Temporary workaround for yaml lib generating incorrect yaml with long strings -// that do not contain \n. 
-func forceMultiLine(s string) string { - if len(s) > 60 && !strings.Contains(s, "\n") { - s = s + "\n" - } - return s -} - -// Small duplication for cobra utils -func hasSeeAlso(cmd *cobra.Command) bool { - if cmd.HasParent() { - return true - } - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c.IsHelpCommand() { - continue - } - return true - } - return false -} - -func parseMDContent(mdString string) (description string, examples string) { - parsedContent := strings.Split(mdString, "\n## ") - for _, s := range parsedContent { - if strings.Index(s, "Description") == 0 { - description = strings.Trim(s, "Description\n") - } - if strings.Index(s, "Examples") == 0 { - examples = strings.Trim(s, "Examples\n") - } - } - return -} - -type byName []*cobra.Command - -func (s byName) Len() int { return len(s) } -func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() } diff --git a/fn/vendor/github.com/docker/docker/experimental/README.md b/fn/vendor/github.com/docker/docker/experimental/README.md deleted file mode 100644 index 196e67aa7..000000000 --- a/fn/vendor/github.com/docker/docker/experimental/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# Docker Experimental Features - -This page contains a list of features in the Docker engine which are -experimental. Experimental features are **not** ready for production. They are -provided for test and evaluation in your sandbox environments. - -The information below describes each feature and the GitHub pull requests and -issues associated with it. If necessary, links are provided to additional -documentation on an issue. As an active Docker user and community member, -please feel free to provide any feedback on these features you wish. - -## Use Docker experimental - -Experimental features are now included in the standard Docker binaries as of -version 1.13.0. 
-To enable experimental features, start the Docker daemon with the -`--experimental` flag or enable the daemon flag in the -`/etc/docker/daemon.json` configuration file: - -```json -{ - "experimental": true -} -``` - -You can check to see if experimental features are enabled on a running daemon -using the following command: - -```bash -$ docker version -f '{{.Server.Experimental}}' -true -``` - -## Current experimental features - -Docker service logs command to view logs for a Docker service. This is needed in Swarm mode. -Option to squash image layers to the base image after successful builds. -Checkpoint and restore support for Containers. -Metrics (Prometheus) output for basic container, image, and daemon operations. - - * The top-level [docker deploy](../docs/reference/commandline/deploy.md) command. The - `docker stack deploy` command is **not** experimental. - * [`--squash` option to `docker build` command](../docs/reference/commandline/build.md##squash-an-images-layers---squash-experimental-only) - * [External graphdriver plugins](../docs/extend/plugins_graphdriver.md) - * [Ipvlan Network Drivers](vlan-networks.md) - * [Distributed Application Bundles](docker-stacks-and-bundles.md) - * [Checkpoint & Restore](checkpoint-restore.md) - * [Docker build with --squash argument](../docs/reference/commandline/build.md##squash-an-images-layers---squash-experimental-only) - -## How to comment on an experimental feature - -Each feature's documentation includes a list of proposal pull requests or PRs associated with the feature. If you want to comment on or suggest a change to a feature, please add it to the existing feature PR. - -Issues or problems with a feature? Inquire for help on the `#docker` IRC channel or on the [Docker Google group](https://groups.google.com/forum/#!forum/docker-user). 
diff --git a/fn/vendor/github.com/docker/docker/experimental/checkpoint-restore.md b/fn/vendor/github.com/docker/docker/experimental/checkpoint-restore.md deleted file mode 100644 index 7e609b60e..000000000 --- a/fn/vendor/github.com/docker/docker/experimental/checkpoint-restore.md +++ /dev/null @@ -1,88 +0,0 @@ -# Docker Checkpoint & Restore - -Checkpoint & Restore is a new feature that allows you to freeze a running -container by checkpointing it, which turns its state into a collection of files -on disk. Later, the container can be restored from the point it was frozen. - -This is accomplished using a tool called [CRIU](http://criu.org), which is an -external dependency of this feature. A good overview of the history of -checkpoint and restore in Docker is available in this -[Kubernetes blog post](http://blog.kubernetes.io/2015/07/how-did-quake-demo-from-dockercon-work.html). - -## Installing CRIU - -If you use a Debian system, you can add the CRIU PPA and install with apt-get -[from the criu launchpad](https://launchpad.net/~criu/+archive/ubuntu/ppa). - -Alternatively, you can [build CRIU from source](http://criu.org/Installation). - -You need at least version 2.0 of CRIU to run checkpoint/restore in Docker. - -## Use cases for checkpoint & restore - -This feature is currently focused on single-host use cases for checkpoint and -restore. Here are a few: - -- Restarting the host machine without stopping/starting containers -- Speeding up the start time of slow start applications -- "Rewinding" processes to an earlier point in time -- "Forensic debugging" of running processes - -Another primary use case of checkpoint & restore outside of Docker is the live -migration of a server from one machine to another. This is possible with the -current implementation, but not currently a priority (and so the workflow is -not optimized for the task). 
- -## Using checkpoint & restore - -A new top level command `docker checkpoint` is introduced, with three subcommands: -- `create` (creates a new checkpoint) -- `ls` (lists existing checkpoints) -- `rm` (deletes an existing checkpoint) - -Additionally, a `--checkpoint` flag is added to the container start command. - -The options for checkpoint create: - - Usage: docker checkpoint create [OPTIONS] CONTAINER CHECKPOINT - - Create a checkpoint from a running container - - --leave-running=false Leave the container running after checkpoint - --checkpoint-dir Use a custom checkpoint storage directory - -And to restore a container: - - Usage: docker start --checkpoint CHECKPOINT_ID [OTHER OPTIONS] CONTAINER - - -A simple example of using checkpoint & restore on a container: - - $ docker run --security-opt=seccomp:unconfined --name cr -d busybox /bin/sh -c 'i=0; while true; do echo $i; i=$(expr $i + 1); sleep 1; done' - > abc0123 - - $ docker checkpoint create cr checkpoint1 - - # - $ docker start --checkpoint checkpoint1 cr - > abc0123 - -This process just logs an incrementing counter to stdout. If you `docker logs` -in between running/checkpoint/restoring you should see that the counter -increases while the process is running, stops while it's checkpointed, and -resumes from the point it left off once you restore. - -## Current limitation - -seccomp is only supported by CRIU in very up to date kernels. - -External terminal (i.e. `docker run -t ..`) is not supported at the moment. 
-If you try to create a checkpoint for a container with an external terminal, -it would fail: - - $ docker checkpoint create cr checkpoint1 - Error response from daemon: Cannot checkpoint container c1: rpc error: code = 2 desc = exit status 1: "criu failed: type NOTIFY errno 0\nlog file: /var/lib/docker/containers/eb62ebdbf237ce1a8736d2ae3c7d88601fc0a50235b0ba767b559a1f3c5a600b/checkpoints/checkpoint1/criu.work/dump.log\n" - - $ cat /var/lib/docker/containers/eb62ebdbf237ce1a8736d2ae3c7d88601fc0a50235b0ba767b559a1f3c5a600b/checkpoints/checkpoint1/criu.work/dump.log - Error (mount.c:740): mnt: 126:./dev/console doesn't have a proper root mount - diff --git a/fn/vendor/github.com/docker/docker/experimental/docker-stacks-and-bundles.md b/fn/vendor/github.com/docker/docker/experimental/docker-stacks-and-bundles.md deleted file mode 100644 index 8abfd7635..000000000 --- a/fn/vendor/github.com/docker/docker/experimental/docker-stacks-and-bundles.md +++ /dev/null @@ -1,205 +0,0 @@ -# Docker Stacks and Distributed Application Bundles - -## Overview - -Docker Stacks and Distributed Application Bundles are experimental features -introduced in Docker 1.12 and Docker Compose 1.8, alongside the concept of -swarm mode, and Nodes and Services in the Engine API. - -A Dockerfile can be built into an image, and containers can be created from -that image. Similarly, a docker-compose.yml can be built into a **distributed -application bundle**, and **stacks** can be created from that bundle. In that -sense, the bundle is a multi-services distributable image format. - -As of Docker 1.12 and Compose 1.8, the features are experimental. Neither -Docker Engine nor the Docker Registry supports distribution of bundles. - -## Producing a bundle - -The easiest way to produce a bundle is to generate it using `docker-compose` -from an existing `docker-compose.yml`. 
Of course, that's just *one* possible way -to proceed, in the same way that `docker build` isn't the only way to produce a -Docker image. - -From `docker-compose`: - -```bash -$ docker-compose bundle -WARNING: Unsupported key 'network_mode' in services.nsqd - ignoring -WARNING: Unsupported key 'links' in services.nsqd - ignoring -WARNING: Unsupported key 'volumes' in services.nsqd - ignoring -[...] -Wrote bundle to vossibility-stack.dab -``` - -## Creating a stack from a bundle - -A stack is created using the `docker deploy` command: - -```bash -$ docker deploy --help -Usage: docker deploy [OPTIONS] STACK - -Deploy a new stack or update an existing stack - -Aliases: - deploy, up - -Options: - --bundle-file string Path to a Distributed Application Bundle file - -c, --compose-file string Path to a Compose file - --help Print usage - --with-registry-auth Send registry authentication details to Swarm agents - -``` - -Let's deploy the stack created before: - -```bash -$ docker deploy --bundle-file vossibility-stack.dab vossibility-stack -Loading bundle from vossibility-stack.dab -Creating service vossibility-stack_elasticsearch -Creating service vossibility-stack_kibana -Creating service vossibility-stack_logstash -Creating service vossibility-stack_lookupd -Creating service vossibility-stack_nsqd -Creating service vossibility-stack_vossibility-collector -``` - -We can verify that services were correctly created: - -```bash -$ docker service ls -ID NAME MODE REPLICAS IMAGE -29bv0vnlm903 vossibility-stack_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 -4awt47624qwh vossibility-stack_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 -4tjx9biia6fs vossibility-stack_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa -7563uuzr9eys vossibility-stack_kibana replicated 1/1 
kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 -9gc5m4met4he vossibility-stack_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe -axqh55ipl40h vossibility-stack_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba -``` - -## Managing stacks - -Stacks are managed using the `docker stack` command: - -```bash -# docker stack --help - -Usage: docker stack COMMAND - -Manage Docker stacks - -Options: - --help Print usage - -Commands: - deploy Deploy a new stack or update an existing stack - ls List stacks - ps List the tasks in the stack - rm Remove the stack - services List the services in the stack - -Run 'docker stack COMMAND --help' for more information on a command. -``` - -## Bundle file format - -Distributed application bundles are described in a JSON format. When bundles -are persisted as files, the file extension is `.dab` (Docker 1.12RC2 tools use -`.dsb` for the file extension—this will be updated in the next release client). - -A bundle has two top-level fields: `version` and `services`. The version used -by Docker 1.12 and later tools is `0.1`. - -`services` in the bundle are the services that comprise the app. They -correspond to the new `Service` object introduced in the 1.12 Docker Engine API. - -A service has the following fields: - -
-
- Image (required) string -
-
- The image that the service will run. Docker images should be referenced - with full content hash to fully specify the deployment artifact for the - service. Example: - postgres@sha256:f76245b04ddbcebab5bb6c28e76947f49222c99fec4aadb0bb - 1c24821a 9e83ef -
-
- Command []string -
-
- Command to run in service containers. -
-
- Args []string -
-
- Arguments passed to the service containers. -
-
- Env []string -
-
- Environment variables. -
-
- Labels map[string]string -
-
- Labels used for setting meta data on services. -
-
- Ports []Port -
-
- Service ports (composed of Port (int) and - Protocol (string). A service description can - only specify the container port to be exposed. These ports can be - mapped on runtime hosts at the operator's discretion. -
- -
- WorkingDir string -
-
- Working directory inside the service containers. -
- -
- User string -
-
- Username or UID (format: <name|uid>[:<group|gid>]). -
- -
- Networks []string -
-
- Networks that the service containers should be connected to. An entity - deploying a bundle should create networks as needed. -
-
- -The following is an example of bundlefile with two services: - -```json -{ - "Version": "0.1", - "Services": { - "redis": { - "Image": "redis@sha256:4b24131101fa0117bcaa18ac37055fffd9176aa1a240392bb8ea85e0be50f2ce", - "Networks": ["default"] - }, - "web": { - "Image": "dockercloud/hello-world@sha256:fe79a2cfbd17eefc344fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d", - "Networks": ["default"], - "User": "web" - } - } -} -``` diff --git a/fn/vendor/github.com/docker/docker/experimental/images/ipvlan-l3.gliffy b/fn/vendor/github.com/docker/docker/experimental/images/ipvlan-l3.gliffy deleted file mode 100644 index bf0512af7..000000000 --- a/fn/vendor/github.com/docker/docker/experimental/images/ipvlan-l3.gliffy +++ /dev/null @@ -1 +0,0 @@ -{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":447,"height":422,"nodeIndex":326,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":{"uid":"com.gliffy.theme.beach_day","name":"Beach 
Day","shape":{"primary":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#AEE4F4","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#004257"}},"secondary":{"strokeWidth":2,"strokeColor":"#CDB25E","fillColor":"#EACF81","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#332D1A"}},"tertiary":{"strokeWidth":2,"strokeColor":"#FFBE00","fillColor":"#FFF1CB","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#000000"}},"highlight":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#00A4DA","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#ffffff"}}},"line":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"none","arrowType":2,"interpolationType":"quadratic","cornerRadius":0,"text":{"color":"#002248"}},"text":{"color":"#002248"},"stage":{"color":"#FFFFFF"}},"viewportType":"default","fitBB":{"min":{"x":9,"y":10.461511948529278},"max":{"x":447,"y":421.5}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":12.0,"y":200.0,"rotation":0.0,"id":276,"width":434.00000000000006,"height":197.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":275.0,"y":8.93295288085936,"rotation":0.0,"id":269,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":14,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":272,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":290,"py":1.0,"p
x":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[82.0,295.5670471191406],[-4.628896294384617,211.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":285.0,"y":18.93295288085936,"rotation":0.0,"id":268,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":15,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":316,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":290,"py":0.9999999999999996,"px":0.29289321881345254}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-204.0,285.5670471191406],[-100.37110370561533,201.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.0,"y":203.5,"rotation":0.0,"id":267,"width":116.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":16,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":10.0,"y":28.93295288085936,"rotation":0.0,"id":278,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":17,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":290,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":0.5,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[217.5,167.06704711914062],[219.11774189711457,53.02855906766992]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":57.51435447730654,"y":10.461511948529278,"rotation":0.0,"id":246,"width":343.20677483961606,"height":143.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":18,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#434343","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":106.0,"y":55.19999694824217,"rotation":0.0,"id":262,"width":262.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":22,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":nul
l,"html":"

Unless notified about the container networks, the physical network does not have a route to their subnets

Who has 10.16.20.0/24?

Who has 10.1.20.0/24?

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.0,"y":403.5,"rotation":0.0,"id":282,"width":442.0,"height":18.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Containers can be on different subnets and reach each other

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":106.0,"y":252.5,"rotation":0.0,"id":288,"width":238.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 Ipvlan L3 Mode

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":124.0,"y":172.0,"rotation":0.0,"id":290,"width":207.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":25,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":3.568965517241383,"y":0.0,"rotation":0.0,"id":291,"width":199.86206896551747,"height":42.0,"uid":null,"order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Eth0

192.168.50.10/24

Parent interface acts as a Router

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":29.0,"y":358.1999969482422,"rotation":0.0,"id":304,"width":390.99999999999994,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

All containers can ping each other without a router if

they share the same parent interface (example eth0)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":24.0,"y":276.0,"rotation":0.0,"id":320,"width":134.0,"height":77.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":48,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":316,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":44,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.279999999999999,"y":0.0,"rotation":0.0,"id":317,"width":109.44000000000001,"height":43.0,"uid":null,"order":47,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 

172.16.20.x/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":10.0,"y":10.0,"rotation":0.0,"id":318,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":20.0,"y":20.0,"rotation":0.0,"id":319,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":40,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":300.0,"y":276.0,"rotation":0.0,"id":321,"width":134.0,"height":77.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":49,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":272,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.279999999999999,"y":0.0,"rotation":0.0,"id":273,"width":109.44000000000001,"height":44.0,"uid":null,"orde
r":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.20.x/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":10.0,"y":10.0,"rotation":0.0,"id":310,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":20.0,"y":20.0,"rotation":0.0,"id":312,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":368.0,"y":85.93295288085938,"rotation":0.0,"id":322,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":50,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#434343","fillColor":"none","dashStyle":"4.0,4.0","startArrow":2,"endArrow":2,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-191.0,222.06704711914062],[-80.9272967534639,222.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":167.0,"y":25.499999999999986,"rotation":0.0,"id":323,"width":135.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":51,"lockAspec
tRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Physical Network

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":53}],"shapeStyles":{},"lineStyles":{"global":{"fill":"none","stroke":"#434343","strokeWidth":2,"dashStyle":"4.0,4.0","startArrow":2,"endArrow":2,"orthoMode":2}},"textStyles":{"global":{"face":"Arial","size":"13px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images","com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1458117032939,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/fn/vendor/github.com/docker/docker/experimental/images/ipvlan-l3.png b/fn/vendor/github.com/docker/docker/experimental/images/ipvlan-l3.png deleted file mode 100644 index 3227a83ca1541ec68e06b0aa105e22fdf5ae9e6f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 18260 zcmaI6Wl$YmumyT>5AG1$-Q67ydXV7m8r+1pok>`#RfTA-*)bij0K-0C)TQ9yW|^N>XfP5?S3O{d6!$!B8y2koK&0EoKg z14FAa8wP53UX5(L=%bDVL&q0#^aP6T-8%nWjl|N@0|HYEm_x>-zYG8X+2Wz0n@ zzI=J7-uZdIPqutrz|x1Ai?3;VhTlje(*-0XDKBVe#9*5bMa}EV0b?Ns0MG@0Ui2`_ zyA_7pwKNt+WmVFd);j7*KV7ZMKpvgIJVpRB zk%B`-b-EM-07kw-z$Bs2hOsF>sATd?h@Xx{3=r`1M{l@jXOaH_z4MhL$3kWlz_!oll8RWh!Q%ROqdsGnb z(TIUiKA{RJOZ(q_NwQH7TH06B6ls}!%=IAY1^)X7pr@=O1Q$j*e|F|%#TA_w-Y;kC z)_`zJ#`9NCCW{OdMugwWB?QLTkvyi0WxiQ_#T0~ZQXY15j6v3s1PYv2ei$9RP7duU 
zqHT!9#~0bi?!lGe=?X6%oIH~_-Z`^`G@SyH#Q}0r7>^JBu!Keq-(wCBenOz|2O&fP z%g(=xOm2N&(JGPhAZ&+bB(oa!l?)rZAHYn!%X6=Q?12*Z=-5I<(n;+L6OzT~%aEvO zgv8_ogMJaz{p`T;w{DzN-7~1pVqIlvWlRHg>_wsK z+@e+a#w-GRB!9D%P4X!uI$n1QEJo%7!P9sat=Yi+8?`ar93aXXi7MWN=hA*yn@Ikk z4&`q8r987}hlINhTR;^mdxMx-+>@>TP$>)3LgwW~Dq;mQ#%#p8{3(LFu9AB9%YwlX zX^Jt4C~j&qDj?O8cx?i0hwDmDi>9@%F0a|XWrlKxLu-fY0YY#dF;{(Xz#B^()XVp~ zA+#qOWF3Pblb9U2Xb05_fp~})4vZ# zE$5YiKsXBY*+b>07f3O)+SciM@ArFu`gqLyfO499UTM3S_#0RA)ci8kaC()t{CdH{ zUsheO{k`w`&G72YZ?(N1(zlZBufdwTu4o5M#we0nv6WmAo=8Wu(MTcf4Y?&QG)0oU znn9uhM0uu(8;#a5mXez3LPqk*Z#>J&vJ<;CvtdznTbY7$yAnJkFbEtY;(+bo<^Jk( zA>&D(D;*-QXyd%8x~=Fqj7_gcZx1elO`pe&sgB2$F=5#aF7~q%`{`%x&~7&!Hn|YE;!;Q?f*HvOpP-Tm=35u|Ah?>^rxk9yuVL1k zkP^ZRJesL>xg0b!OlMvo;5~)DutD19qHL%dXWPo&|3$Fk!83qR$Kc!-p}nmF?BkD*xxZZfVH1jrnX&-rzd4 zB9>wgQ1hSXV;Ch*6{l5Ws3&5^Z!QG)EfD;%2R=$vYzyZBIc4%=<%&9Uj*)QAJ~z^( zFcF@R^!oic!@aLO^ zn?UzwRe>!I+wm%NXK&$s=!@y=yA~n?K4GTE9nLwYxBrB5XVLJwnD6h{Z+ixERd5+Q0=_af_Z{pPmwPMNNVo= z^rA8x<0ts<5;8ZH)PoX%ne?R%?CbFx1>|Ft)?W5U-c9Y~dz5En8}m-QISYB@`4mY4 zW9t(O60{?YD}w}=snv7$F%1kM6P(L@8S?sxGEPWj|6uQpuv}y$PXwxizk|Yb$fH2j)Be<57<|=?) 
z7=toefVrv94SujOJ#Sg}yqaIWaY&Y??*6k$VyYbK8$AKc3jb%HTNw$Jb39d2mjxM@ z=d=|(v4Gd_E2cLLyx|}PT6r4s!8}pN-omkWg$xi>=)Dcr#<%ZPVn6f(SlFy~AHLY}=vShNZvs(>Pq`tkJX`n-95d-;rIln`$j| z`F``ctP$Lp%wj`vVKkAJffxJ%mR%E**F(o4QG1_%i2C!cB_M0?~vd%-A{K#TYrfuY6jxF_V7z1G^W^5 zTkPJ+SUdJ+k0#}#=jb4>-k?DVlkLplLG^rRCC{vgKNjI2DZP{(l3auvN}-9e*HX3E zY|ZkKA5d6X?|Deh9z~GQ@vQf=zMECF6tFx~Cf96uRL2Dyr0O3vRLGz;L)IUN?B zsq%We9EPv;iZn;9Zw!hiW zLd0HaeQ06T&$dp7q5FmKY2{BO06lC!TZ_Ri!ge{a&lmz7vGuv+$C zbi2UsYwN#=!F=TImu$daixwP`59?9biqDER2Aw-gPW8$CFsdmt!!B>PT5w#|C)r}_ z2;_M;_b&-|Y=^zpH(i*;wxxzVY?Or(o&kELz~rcAsz}r4i~dI~HKcqu_#{jl=wi@s6Il?Ufv3kf$F~w?T zXS0y$z?7NWvchoB$7{tI#Ro0uTOhEjvHzIfn}xQ_e1hz7neMVY0$xKoF~@7A9Z6Uv zjoS!uqN>pUm?DoL+82J7uGh=@Y{gT@)8l)~nTHM}BDEce8f#*F&?`G>1XN}0*gkzG zYab%K;%OL+gr5|jq|5GB`|;AA>;DplY!-@Wne1udl)5|VjAOAF3mu&HBp4$r3eWWu zGbk%;Co*CYpfqG1SCJ$%SuWA1I_p-mXg+p95oO_zl%Vn>hFagYp4ls4X0;sVKXL|A z{9GC$LuGOv_`^xGS2dPKM`&)S#B4DZIYEPdjfYgGbCPpwzemhxK>YV6e{jy>o1q zL2V0HFsggj|2)H%xFm=oCa1Iz;*$uC=POKh$r;s;^A6XJ@FF^4mVHxM=l$BmB7biWn!)HR#8Sen(!Q7)C~Ladc&k82OJh>_J?`mN<))eYFWR$j6;7-lu|#i}e*ftl`m>F$2P~R$ zHbl2-&`;>f%_-Duu(o5oF#7o8cgir@y3)Tbnrqf)bYz%&RCY36ECGEAl%NaqW=Z(< znTQ{!!A7)0jX3!%h`PeC$J1#=EG>{YX zf7soO!SffH(yI~zflPj>oQCo@t-lf&W_Z92tQ&ZvF0L$8@B-K(#Ts_P^WjPyv0c~- z^`vN@8u~lK9P`oGLASHezt;*GG*p)HXp+nMY@%4K|HK%qF7io#l|7}S_i&*S7nhUsDyo3PDMlyrL`O?WgU7i!5kUL%_&l&#LbkI zec7`)NhjISeE>!LAyZl@GiVcMQl9)C{q3fOo4?KQB5WK?^6s`oooI@N)o8jP^LKL3hoE+ zf2>c?7s=!J#GA#Bh7D}hhj=I)B!AA^aDSP$wKz9iHAqj~_0J=vviS8#3ET+ThUMYT zu+U#iGfvpVV2rZ+^kxq3EvtV|=*j&3jf|iG5|z7@YE1rCg~eLD;>eYV0^CU4A3-FcXaU4u!CqR>5Hl zd6qua$)>X~Mo4r8?m$#l&>lrw+J|9rK_+EaV|k5i+CJw&@{6QIb9UcoXT-@`LqPX= z@2LgNU;N_jmXq4$R9!QMHq>S?1x%Y5foxeE*GN+gB0nym~A+YzVc9 zU;Eua>iH+B215L3o(BG*_|J$>^?I<$Ih^ZSZyAk`Z(hG^$j>pG>YAN9X-AWrU2TtR zy?oms@-**ya<3-)vVM0v^AHg&4w5HV!?=1k$Qyxe-u6<-NFj9hq`v|6kt#j{|E{9_ z5mekudG@#+N6?+Y0vt8yEfdyhoD&9CN=ba8bfU>rH|CGFbFw$K|t&85e9*)7!M`NgD<&I4y=aaT$oI`wC|^b5mEAEkDqraetCI@ z-+g*2Xo<5lR;PBwMnSn1%pzOnWL-(ZL&Iw(wL0CAAk_YhgLL`<(V!oE+=6=Cq$Q8} 
zEmZ3n?!2)Zfrt;EIlv{tLhb<7ooMUl(oJEccYX#@?z4vb9Fl(L`x=o-4NV0TQlv)f z_fz9`q{Lo`NhyWhg{gMIl)3%f&fl6e49qG)NS)IJFh~T3otpcK(}T#uIuQPS51t-! z3wF8b-KGC1rEQC79hVisWmDd*0kueu3I~dvL7>}j>@O;qcE^#3$%N4j3VJzuZ$n;s z%ZjphD|29+=ZGDyJL3a?L$ek3D}8+cDS|6VooLE%{DIgSYfX3A?K=ZFltvHJTz~oD zG`L-6cY|6tTYckE3K)(x;F%tbyne&%i3pus>yR1YWX^;>1bc-AB_ESPd00(M2c#!h zQ?E1L?8xmUSjDM5IPe2HM<726F*gMwG8Cpx(*808RW#Hdz0fRYWP<)+7;AL_d=b@d zR>CkiFt3y5YSHER+rM5oUi+zEBt$$EFf_3^V&p5e{{1kZ^1)paM(iYxRPCknIPGuq zMVh#Ym|YQu<8SVpX-77=!cuiq7Q&L(v~EhF{tGM%Au_474&ZX`qAZqOO%j#sI#6crI0s6gy-PogU(m^H|vAak_JK;TY^q>7aM_&(BRmQCRV)If8U`d z(#A2~)7(WLxgH8sWhf&*$6UF%8e1J?M4emU)3>jx+4)9mDI(yRdj8!bzoeR^@}&&W z8{XG7N90j6?hg{g$l=u2#ZJW6C8)4)hT4P<7vM62l55*lO4Xwf+gJql&_Zow2(yrW zWXZ@x%QKs>ISd}n{-$$%X!Jktd{Zh)2hlLlbZgOH?E zBTgBxF*YAQ6uK;4Z)c8hb-)H%|2@s$Bde$U`^#=GHBGz zCI0QztR>93kKu{T-Xq#iY*_%N3aL7i5p0W`x3?zBe7W@e>AZ0m@{ftO<%^T3<3sxZ zH}`TDzmjQywsB6-R0+MQf~477*(IqvA@DkQ4)|zsR|RG4lCU62or*MyO^jX->0X$1T2hUkIC#y*XSfYWY%=xE zjuW;-ifb}QEQVGzFmGhmF`TVaiKE%G)T&KHVj%ohxT-9Bt0&u^O3Ej*luP(^yms7S zOlW>GUo&d)6`7&g(NG7lSN;LQ>-1?J9heLiODulF<0$vH2Ko!`w9$nZ4=$#O|eAS^G53K{9f1SzpXVlAG{i-8;oMhzcstd1R;i$4q{(g5JYjl;B-E>j5?8o_3$B@{1 z9Ghdc!YH9HjFl@-p35w5K-!5-2onQ;j8y_zS@m$uJA?m??<0ZPQkp~gBl^A5TDG?j z>`m11Dl_rq8sJN^*ik~>_F590nK<58GJl8t6r#GxLD3Kdbgo!5<6A>rU08s!-5mi+rT-1Lfs+3qYIZz-) zcm|_FsVog%T{Z9rP?%EyZ;qYAx9mh~XwX8t`)^^#NczBLsoWkv+A@FW)r~QmOz4k0 zqM-`k>IFKf*~FtfJ;Ql0!<3hRd7HzRMp?qd)03tDRo<+@63DwPP$yWrD}@S$~xxJ1zN+vzE@*%OxZUWp`mZKMoZiq^yniF^$sROaAF ztYBOQ5|TB~pOHCO$@^%13XwMcMp>b&PRk%T_JK;tLaJX7o0i>|ywKDkMc*;ZM<*H1 zmVBrLEaRm?0N;gc37I}}%kW6f;#;<{_DAj0I_bHHJ0#=&$2mVEPn#^?q!Q9ckfgX9 zP%3_Ur~@ifD|Mi#To~j{kJSLz*YfnV$c}jA#Ds2TB?CfxduQiO?diuVsJbd6F_5O{ zDVbIEmL2cxqCxdAp_T89V^8_gWXgj6X;(j%%sQVdXDd?&rdbwHK!s?wP{P+3vB=1w zo)2c)9&56^${ZZAUh?YU;Mm5&lAI)zv+MfCz%>VL5WGl><^OgrzsQX45!U>7m4B^U zr;mCfa+_~EI%T@<@@IN{`6B!zT%%-mJ{<6qlSt|Q=U-kmpvrVjv-N5ZKy8YhoSH4$ zZZo+v&13fqqkV06z#%gZXRMnIWcgy^!92-1k!~z2Zm6HOT1^AhO~CP;WQ~^xiB94Ur#J9jM?w@ilch`WKCRsASC>?DCfOaa`ep&u 
zN&XkzuJDr#I~N-Qe77>Ob?0Qx9avkA#8@Jsw4X~&kr3Jd2+civvp;#?43FNl zS9uG$1$mH#!-K`EWU{f&`wc*dDzcrjB=ktkE^|dimZ$K`6UB50%H80{eo0tsqLmEN zVAhq+mblRz+x520I3^1vI(e)c40TY9{Y!ODmq}-~m+~^{C@~j3`yP`XT}Lv*LrGMR z=FOzdYrIH;P^(>kh7>Ks>3+XGUreeR?9=*R1cn=Kg5t(3cqwX4F0mG998NYSJYNK~ zn!zPCoRaecBNzCtRzy8K-^sZR}uw}W|XmlCUlpf=V}11|M3J|&SfE` z*&G!j*JH{+vfw2?oAdT9(|YaOUYERd`gXohy>oi}$?I4Twz9=K@=OENOvUu+jtfs7rmT}Pfc`gbFw0>?M_G$M0*YhDffOcykB zSUeyro(Z$65FVG3E@<;H@a!vt7qH2B6hVoJ__2H<=3lf?<;0P2Q*ExWQ=C%dZ$FmO z@~j^uP4GIH>?F~Uz0p1|7v5S)rSqIg4&-itkqf`#ZdGNsZ~rk*p$MfSgoo{Mejn_19oQuxL+X}`WXEcb*=12X`_`VjL9XMyel0D{7 zU2&clVO>RzUkHoSrL;#GAnAdNu~H*ZU%~H^n&XFZpJLmpos&nv5$Ir?`ak;-z@v=W z11=VBE>@YanO6!+`;2-t;7omez=@^Rx&iLv8u(W4zd;>=rqZ)O;q}7tY#RowJqfzsqj+eb#%PvYZE* z@vCHaR4TT%LW)K${^f^GA{f6|vdnLIFD6Q@{;SIb{2JW{?nTNM_9~O^jW8ezEo~v- zq$ORU$H?>Gswm;)uhT*wJFL0U5brTnl4;poes2RxUQ*};>2o+swK-xXFGWH7|Yvj5j+X2zTGhw!g@4>Jt#ouOz@UE zZA;qQ2x6`(ZJ{R_=RhB9!5SE2|TG=Nssc!4(zW++6latl&==sNAlf znZp)cO|xFX&E++jiwoWu$E-B!@v_Pc^!dLSr_a3mvSzs0>t+_cDuXP^349L&*oG+_ zJx1Onro!7kZz7MdE4pcFqtjs+?VtDvB}*y<;Wxstw>x%}K7Y(yR5rRB&@;SN1eOy` z+o8s4q6>d&4ocSLT$QtVXjXrSBYQyT*C}5g-k&x{R}g)m0u)?-)POVAN?Pj*F-w;N zAGttpdZni+MLW(hq>_C4CvWRXTQ{B4IQZ};t zfCj7(Xt~%y8XVYU)JZDS?#t7JK=@^t_oIjpOq7f@w z;`p`C26J;3jE#E&7UUdR$$7tB83jcabI@LoQb~`h2GVOxeloh*Uh70KJZOXIt#Lsd zI}m>ETK!y7A{nYF)bd7VjkTqz=nlPo${!xY;d%~37Vem@V5w|a(c-lkrfA7IudAY( zo*8q*XzqbdOG67Z-QdmrO+FgYE+6(~5IRj9&IpNF;%Vo8f@JzD^SxT`%g-Fs`gkTh zutYedybNXt1dEgw01vV(#leHm=fo;8&#vHoz$YtPHo;Uf%p_*V5Z9dyg$JREiCNB5 zA zEO^oEVHdHWj%rt9em85-_mr>9sYRDkP5+LAM=L>>TlOCy!8dviR3$H@IkA%SqrL#4 z8vDx+B?7`b^L4S?8>SCWA6Gx3&(Fw~*VjrS@w?Z_*P354tSs&07D(zM z8aU4Z5@MaWMHDC_iNzTO4t#|f zl=2Iiop^~f*_>Buw0WVxpstXzI*OxmW(1Z436J9twion_%fJg(1_d5q*Yc#3d{nld z-I9UUN0XOkID$A?sM%`HC3X@!Sw`y8w&nA;ak)zInSarJ&Vje8IBPZYs>AHd2f`B= zzgX`{Yd@gzyU-?Nau}u8%*BLSlm}EDH)0J!7l1p4dcINO*vb@s`eqsBqg?7DNEt7> z-|kGbxW~A6t1fU%xyD^5y;|cHmcJ%ip?&$)kYP}u0JYJ{KV(A`DJ{pE3yt4 zM4ilIClq;rW6|wQE#coE#<94s;WMDS2+A^Ie~|u6gfpy#8Q{Vui5|Gf;a5yW;(sb@ 
zwJ`7nYOpMf6&8Hp5E6euxzALT1kdd?LTJ7IWO2o1ehJ{q>;*00~! z3yo>y5#01Qe-M!v&xyCJZ9(CfyG=xyPtJ`|YShOw){!epHxtNc$4`?y@Urm_H>1`z z^z2KF;odTo(a=dPUlNK}8CKQ*#viT#BTUVyDD4ETA%}MNa(NY9qcCRg2CW;%FzU9b z`Qf}OTbjCfLZN+%u^tu{=G3;{sQ86gE@*LW9yz1n91stI+B7??%F7(KXjah_0%M70w0uw~z4S2K#R-)Ts+r|rV(f=UjY4RAu;`m}7^e3b&LuKoUR{77?`+abD z(a%KcmnY=K_NEL{o^^kw&M~4H--%=mLBL8$yGMPB^4VqzN2Z|$y*ll<(b=jN;Ru9M zmT1ZMGzg!^QcR_#J85JW}m%S{w4tpau3Bh+2cu1 zyLR~C>qvZd-@Jw`!eB+Z!73nlPyb?=J~3Y&1TDbY=@ndKmcoIgmLr?LjVeuJGP>qp zs`eGyyeE0mw<*W*jMK4zfQ(S3AU1)>G>D&MB!ZN&-1BGhLmA~6uRkr)AdrRPXLs1- zb8xym-7@HLbi!xlaHq6#B~DfhD$Cax~r!r@4H&H%%O0YP-No3YXxj;+naW{ zJmtSD9W(Q7GuA6dspe?Pn*J#0wR0Jh7Fj=v_sW1%MOypm+r?<;jdLDqFs({EGc!~! z9N5)K>F)8ez&r)?JrSA#@&a2l^f(pyK$rGuny#z%6zK9)ewBy_?DBrd(?)hZNt%J= zz^^4WD)MkHtAoc~V;BT2i7sg9zOCD6Ez+gvUZTPklb56&U6qU*L(2-}ulpIZ!XS^rYQ83bmQzLzP!)w)4f*^(&}n7_KG z03c8?QdQc;z!FqMf2)MxO_M4in9ZT-L(0j;QeeL})>bzB38xLHt%at5)dlaG7e+s( z>AXiz&-FY7f0c19!qsLZwaC(>iL)VU#OtIAJNmRAO#SSwpM>!E5l>=t6>ezI*ML}s zZGOF;(bK&!1R%=ZOpd@P7(jOLI{NDtx`ENh<`35g$@+)bZq_-uOAoyhN`5}WikFcT zhnc0;p#I2~1wrr913&d&GE}WVc8(Sw>wqUSu4gG0|C(Yn;@*TI;9F*{pW!DvA%#|E z8Cz9z+zhlfA_}_0@7SnCg6xF!+qB3n$ypPypaL;e|{zyvFx*a$Kb>1g5n-WDa`yB;gL<7 z6StQ}cPUw*DSsrE=m*qW(7p)iGRJU+(GMRLN!Nwj_g)Zh?JV$VJ}p03ACgN*oj7J5 zBYojC@=|y}3_cvQoErfwactWc6*g^*giL3!&b)#Q!XhyCVA4^qeSq$t4I)^sKJ1;1 zC4N@1Xd?e*ME_1d`*#l+b`wtCo&gJ(q8M^^E|P=MH4n84Fb*u5ed}b6=0U(;EDbp) z^P`+cR+Zf{k*0uiHnPgHni7>G$q=^^rXT+~H#JT{Q~qC{TlwCzWuQgW-8S4M*NTTEr zaT`Y=a!jhHTVn1OGkuXyR_IUWRF(nnuiNskz$zj!Bs7!M(?6Q0`a}>Y!4N}@9hg@Q zWPxXd#T}1_x1s4b?k*Y`mTD=H)T3V;MdCg1a;MACU;odjaHt{IfJK<$H}$H|D8k}3 zA-`O%5$BH+y@R*kl1ZLwvdS17mdTd)IfZ0;dIg{m7p*hu4bPiL?@a;_>aSnxDn?pr z5|IVfBMZz(Pn_o!Z10Wgm_8SLU6S~ww0m>ET0TG}Di|*Es|LI)lR^JPh%+88%5lX? 
zCL3k%mf}<#N1R!bUGCpgo}M;Vi@yjb6qH%dLW712_cHCbSK19Cu>eI+erErHh-O-H zG^XT_)Pj|m=zE=gEsZy>Kp6#==aSJ3aFyH%jm-~nl@tkt*#ljYXjmw=k@SR04BgTU zy&ZggKYC^cIr-hxZ$_A|^OyBi>GAd2ThZXS$6yAwshKJEcH~NWWYl$}*RZEF%p!4O z6k|tsWrOX#mFZn4DbHooDZkrb00sQ%{f~wox^)j(PRYv0yB&kX3Fk0)l2z=?#x*m~FXEGfqXoN?@|qa^2Ng$%=Rhy1@K2>B#_6Sf~uG zv?Wpzy38r~U4*S;-jD#v$nbg~)Ds z-V1jS38MSa$3U!PWOv>AG!KB-3!SVD2~@J&Hm$GJ606ezHEt5R!^dP*Sue2R5LlI2 z1-LyCs3tiKBP~1~b7XB;$r2_FqN=cl0N%;V9|aaY?B|!eYOM0-ZbpIf8n<-@TIxp9 z^7c-33NeU<43x4ZFdk1!;pIA|A}N-~J>BmGIIg$RP_gtk#`@VJN5h?>C0I{^b3N~1 zL}T7Y_*AY`dvMB{*^gQSaJ{(MhhL$y<$292A*UFozBZM3~(;sZPVTPX` z;ztmP1_t8Lp6k=iJq^y+;yra(H3?$I`wt!P&D$Rqaw9m~zrI7yN>tnfofCm)94zL< z{8-Whkl46_vzx--yXbF#KUFaL+AsF%slVT0W>T-qm6{TrH%TU1R#Dz#&sIqI^kc9q zw;ri=0GpQBmuLj!F)rno96#NBMH7K1mIV&TBdM=`PwA8(&&;y*v!;cQ2^g}>|7IK& z57obBP%K%COCx4G=d{oLt^#C||MTFtmskAJG=zLftXu-gq5R2!)-dyNzsq;AhpJ*K ztAu{Ph1kAg!;d3$9)mK%N>LoaJ6GW~tMF$LmgWBjvKBQoRUp>PGL|Yg#ahkBU^EbI zCN=B-|3LY1ADW^>m0j6H>nhiv@Dh%uM4VmO^uIyUY7_l5?Qaz;Qx~5a&@|NyX?l; zPWS{aMqby)>DCcpg25VM8%gPd{ z`i70!YNM^ddIOqTtT0OkHEjp@<;h0ZO4uM;keT|rp-APLsivz)9fl6|zTY{|<3`r2 zp2t(8iifGA!(nH(*QDE|&z9Gt;4Rep%U|4mZ@RU#qedH3g)#NR|G8wRR*hT!yYb=O zXDa#S{z1Os6_P4%Hl<$e#142Zt zoR|#2%+LVsMg}7bni>hQ{pP8=FrVA45RMiq8$yvnSL*tL1$I(QfS%M(yfP@iS1AWP z2lIKKPNAM=3+>*#(-?K6gtfuQnxk1THy7?{9n%Sd!kIXmuYZMOhTWt37B?yU#B7r? 
z=G^ACYZRx!PSFSS6NzhE# z@p9cd3|MUYhbKhOR>}1xDRPICg~677VMF>KM{yOwwG8jd7oE(Rq;LFL?168HD!j6u z)d2J5o;$D6xnA0_@ZFBP_J%DqW3f8XN|uy@{jn%Mm#`VpCJ@E4D1=!QIe1d?Yy(Xc z^!Zww#2ur07i<<=5|fSP_Bw?qoU{lGBg;fMyhdql2A=R4Z6;84N?>%8R+{`C(sL!izbn-3vm$vn0B+f&fsN`3e9f{dyAlRtYQ)7R&;f zo*r*1v&TM6$SXV;|8_Its9kt?6T)TK)NxUS7*1tdO>{+`#NYijV*`DvbsCLSv@;ES zH;(!Dz+Rc`LkCBnZ4O6IIJ}Ij(-iK0oG7sxy3yt99q~GzzL(svlWZs2^>>VrPdQPH zpw8bvR!qC&;aIhf*u`*>;K?rINRW6-(G<<#iDN{W6j1z+D--bGVOxfI!Jd?*GG@HGRC=q%kZm^;YZa#SvWfyxZylM?9sygkg0xe-)JN$i~3j4=>q5^3lW_ zeKCy?#k?>FCGd`ycvAk$(@B1A?dg~tl1$5R1!P?();c;9SYAsr{G;+cQTl-icPs#uf4>R)!KaO&u-5@<9+Qyv2 zgSmcVgp&RhF-Lx9Fr;BoSG%pcxS#E*M)5{$Z{6oiIA)0^`2@ahw9T$?)zep6<8@sN znFvBk8>g@0XERfv;dVO*95Bl7SXK=9|5Ku%R?vlcUbP+!ABR6X`x9tPsM28o9NPb&|y<*Vh|BxTP&W`MwosXwnC$;YCxBRwUUIQMF@s0~s zhMr9cE`1N-j~Lsn*x)W&h%1}OK~Y+-(!IdT4LCYMTgSTKNuLwG#SnV9B^0$o^I-&d zEB_4%Tdej+$XUsazv3nMegSFWg}`zr!-Z`38hLMM_dx=fr$EHXl+Cl~p&_DwluAB{H~tZkx<-hN;&4CCl5*aJQc2qi7WNiE_EbCV{|Mw%gm zU7(B4SKd`!FkvTDi?}!5)_zC(qda#I8p9I+@`^e~n(dK-F%=;)_Ij+JIAnVYBo#AE zNAEvV=szD{AkJY6Lx)#1qa`-2sL7jZM=%W?9RDQ>&TH~rjkqF_X4+h>?2S;6q0mfD z^sL&_!jNx%GMnaAQ=w%3-sXl8aH5+3HF9b5pfq z`C~s?RAg5YSprv)0CvY*>~1o@y6xaU!E9r=hAU!;bt7L&#uJloz7o;bTLQ@hOT*7iQmj=<}S* z8JH^4bM=LI55#zrwlHaIVwK?_g})Rp;Gqjc}(SeJz3?Xs4&({VfvzRl&J?ISrJ82m~9yD z>~4NbSo$qEyF5}p|!AiOZ%uKV{J9q|X2bSTwfkBzp zq@9pkm!7bCcfIRk{sSE0^%U1zxsv*Ssptc7=_tObJsm;QyfBLU=(xs`m9BukN|8_5 zDia)iupL;TlV(JjUiPiS7yqG^P9VU>Z)u3~nGf zJfmNSh;oc(4-Dc>)wXKuuyG=r%n$~OT>B|)nB1$>&&LFm%^LaVf6XBk55vOp;n-8V z>~ICg?cJg8EbAoN-fZZs=`1NYH3}ehhPQ!Ln*2A@%HG&rjQc`)(xNoq7iQoiaZ+Si zGzvHwmCjc<&A1_3Yk-f;*XT~KcR)C3pG75LYWD6m0WWRX51~AhcsY-c zMZP`DQqzpvRmFOpWF~^ljzWxa*@(`&NY$)-{hv)RI(nzPk-Mo451`P@ZA#P|fP&@t z)DqqQR_11mtPm=ESM~X`jTKE3+#*^Gp#Y^+09xTH7FK_C;2m19OdMvPe^Yl;wp5*t!ch-YkD&-ho1 zoI$$7#aIZ&^zJ6!%H*Mli>CcaOPqULQHg^k@gW?ykRae@*v7Pgex5B&-!puyro;E5 zY&Jon-MYCOT?u`(BMS?~j|v=NWq~uFdkSaO%D=@dMP3g^v}IxWy7g)Ic^Y6xu13EJ zQe!HY>%j;7gWQ(FocLV?%oPxptEmq0?EvU$c^&wU+C?xIm$A~h7kXzIycIepNR_u_ 
zgWyon(Q5ODq@muYEd-&I^w)hdKjn+uAzpj8DyF@x(J~4M9iIpkp0n6rl&Y8Yx*8SSX#4b&t6MtY-+vi9=BG05!BwEc7Z~- zwyE}!zIs(w>9rhNTYsThyfMG|@#yNaw|o)$E^=ru~97`Wc|aHmuv!iaz7SWsgge3Egn&R07)Tuf1jRb-a#<{^6)!Iko7oP}F* z5qbk6nqWmGVs}aGMvZ0X*-MRl*X$|8@IltFJ|Pi=u_WzBTutdTTz@j6Px<> zRukMQ8@8y;1k?ixEt2%P0fzVT^6{HB+G;v)8DLlftHq^IZ5iWC8_#_|>+)Xd>fMlc z;(Cqf=lgW3+Cz`tctK_Z>NE{H>Jp|pMqk|xi@gdByYsc&=n}0Z+(%R2(W`~Lt3#2* zdO)S$L#m*I&Vt&*3g+r9qUoeTgFTe{V@Mw!^`IMhn)SsvQR~5k?4`*B+$kH@C`@oT z^?*W)WPNUc;oVkIw{?*Th9yufNTJ#;w8;{Et#hP)hb|j48~$`KLQ{uLKA1PNdX#k< z(+KCy2%UF{`eT9G$Q8o_{c?l4O<%FsCHO0FZD;3$pxX@V^{hd(@nW=ppxbSrX-xTP zf;x2L;6X(&575D<-fqX{Yio&&E(^7;I6r*q*2vQ=0a1JgYZq|GDH~b@6C7|2D6~lU z+yKKnr(q)l%wK&q;8Lj836t@i+I{)(hYKStSUg|)ZJ|`qi&y<|X*}8{jd!DS15KBh zB^$@01$&gwg+zmiwP3$wtvXkvvkQ}nV$easpaxc=p}#dNmRc{}{P3w;qjt?w3{is9 z47hpG_4^#1EeuBTuswLsYekDWh|x0=UyQqzEP?oci}16k0UxbHl3h z?pe+V!|JPnl*A{(m@!7^EVqp0py&{)@-kH#;Zb?Z(@DN2vu}y6{$!TAk(gyT^Og#B z`wr8ag5jhwiW#8?s6&y>o^3Kmu@@a=7StDkwX+j?01LIg5PA627^7Ep5=@%tiBpZNqnCfvqrQ)xzWwjG|=Qqn)&cF$z9^S{JzWk zygtOc>)O{ulgSEIT!PtXW3x-|6&0188+vC6-}sqN!(1=wP-M3dZT;KB^3g#8)WF)= zE*kxj=p)0_dYh7mPmPhMIW}#g^0gBeSEUQMQ#P~+CM2BtIR}Ln4WAoUop;YhMi^FK z4WuLrW2V5#Q9LPo<;s`*X(l$q#a?lxckao*-sZ`F`1gxXUL*vn|I-_`hu2&{L?`X- z;)?wF-w_kJIuuFtP2CKb;{Y8rKn<*o&lvkFyJPAOG06{~8Y5q`n1QJ19_!f*I$PXn z8(P$60_wLN6k0TVZdi5RErEhF!?60Q#idZKjM*dF-HnZnJ^Kk!-{DockerHostUnlessnotifiedaboutthecontainernetworks,thephysicalnetworkdoesnothavearoutetotheirsubnetsWhohas10.16.20.0/24?Whohas10.1.20.0/24?ContainerscanbeondifferentsubnetsandreacheachotherIpvlanL3ModeEth0192.168.50.10/24ParentinterfaceactsasaRouterAllcontainerscanpingeachotherarouterifwithouttheysharetheparentinterface (sameexampleeth0)Container(s)Eth010.1.20.x/24Container(s)Eth0172.16.20.x/24PhysicalNetwork \ No newline at end of file diff --git a/fn/vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.gliffy b/fn/vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.gliffy deleted file mode 100644 index 
41b0475df..000000000 --- a/fn/vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.gliffy +++ /dev/null @@ -1 +0,0 @@ -{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#ffffff","width":323,"height":292,"nodeIndex":211,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":16,"y":21.51999694824218},"max":{"x":323,"y":291.5}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":241.0,"y":36.0,"rotation":0.0,"id":199,"width":73.00000000000003,"height":40.150000000000006,"uid":"com.gliffy.shape.network.network_v4.business.router","order":41,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.router","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":85.0,"y":50.0,"rotation":0.0,"id":150,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[3.1159999999999997,6.359996948242184],[85.55799999999999,6.359996948242184],[85.55799999999999,62.0],[84.0,62.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":22.803646598905374,"y":21.51999694824218,"rotation":0.0,"id":134,"width":64.31235340109463,"height
":90.0,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":43,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":87.0,"y":24.199996948242188,"rotation":0.0,"id":187,"width":105.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":39,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 192.168.1.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":147.0,"y":50.0,"rotation":0.0,"id":196,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":40,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":199,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-82.00001598011289,6.075000000000003],[94.0,6.075000000000003]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":220.0,"y":79.19999694824219,"rotation":0.0,"id":207,"width":105.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network Router

192.168.1.1/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":27.38636363636374,"y":108.14285409109937,"rotation":0.0,"id":129,"width":262.0,"height":124.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":44,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#929292","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":33.0,"y":157.96785409109907,"rotation":0.0,"id":114,"width":150.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":16,"lockAspectRatio":false,"lockShape":false,"children":[{"x":44.0,"y":2.9951060358893704,"rotation":0.0,"id":95,"width":62.0,"height":36.17618270799329,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":4,"lockAspectRatio":false,"lockShape":false,"children":[{"x":29.139999999999997,"y":3.2300163132136848,"rotation":0.0,"id":96,"width":3.719999999999998,"height":29.7161500815659,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":13,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":99,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":99,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.8599999999999994,-1.2920065252854727],[1.8599999999999994,31.0081566068514]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x
":51.46,"y":3.2300163132136848,"rotation":0.0,"id":97,"width":1.2156862745098034,"height":31.008156606851365,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.4193795664340882,-1.292006525285804],[-1.4193795664340882,31.008156606851536]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.919999999999993,"y":1.5073409461663854,"rotation":0.0,"id":98,"width":1.239999999999999,"height":31.008156606851365,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[2.0393795664339223,0.4306688417619762],[2.0393795664339223,32.73083197389853]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.9380097879282103,"rotation":0.0,"id":99,"width":62.0,"height":32.300163132136866,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":38.326264274062034,"rotation":0.0,"id":112,"width":150.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text",
"order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container1

192.168.1.2/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":124.0,"y":157.96785409109907,"rotation":0.0,"id":115,"width":150.0,"height":58.99999999999999,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":33,"lockAspectRatio":false,"lockShape":false,"children":[{"x":44.0,"y":2.94518760195788,"rotation":0.0,"id":116,"width":62.0,"height":35.573246329526725,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":21,"lockAspectRatio":false,"lockShape":false,"children":[{"x":29.139999999999997,"y":3.1761827079934557,"rotation":0.0,"id":117,"width":3.719999999999998,"height":29.220880913539798,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":30,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":120,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":120,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.8600000000000136,-1.2704730831974018],[1.8600000000000136,30.49135399673719]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":51.46,"y":3.1761827079934557,"rotation":0.0,"id":118,"width":1.2156862745098034,"height":30.49135399673717,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.4193795664340
882,-1.2704730831977067],[-1.4193795664340882,30.491353996737335]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.919999999999993,"y":1.482218597063612,"rotation":0.0,"id":119,"width":1.239999999999999,"height":30.49135399673717,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[2.0393795664339223,0.42349102773260977],[2.0393795664339223,32.185318107666895]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.9057096247960732,"rotation":0.0,"id":120,"width":62.0,"height":31.76182707993458,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":19,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":36.36247960848299,"rotation":0.0,"id":121,"width":150.0,"height":30.183360522022674,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":32,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container2

192.168.1.3/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":102.0,"y":130.1999969482422,"rotation":0.0,"id":130,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

pub_net (eth0)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":93.0,"y":92.69999694824219,"rotation":0.0,"id":140,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"


","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":14.0,"y":114.19999694824219,"rotation":0.0,"id":142,"width":78.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":36,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":71.0,"y":235.5,"rotation":0.0,"id":184,"width":196.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker network create -d ipvlan \\

    --subnet=192.168.1.0/24 \\

    --gateway=192.168.1.1 \\

    -o parent=eth0 pub_net

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":45}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#999999","strokeWidth":6,"orthoMode":1}},"textStyles":{"global":{"bold":true,"face":"Arial","size":"12px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1457584497063,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/fn/vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.png b/fn/vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.png deleted file mode 100644 index e489a446ddd255ce9360445f0f895acad31ae214..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 20145 zcmcF}Wm_BX)-MEt1b26e7k77e*Wy;ZSn&jRC|X>LyHhNz@S ze8i0cV6p4QB~YT4VUhO25)Fd;Y4*1#ZpvVXQxpL@g=es)ECJoB_ zyXF_<{ZZ+Qd~qMX23VTO0}Ci$EJ^VH|K+bA>SiF`)Q`u6!a+qr=091`>~XaCToMYq z%#PZ)GO53SJ}8O9N}r9aMcoYQQM|NynYTCxUG1+$oqx-fFj)QU^!q?7qhN(_vcsMC~IKT z)i*HpeMViZn9ole+tgKap0V#v_U+Fc?`5d5fT!ue0mC!l_V&Quf=75Qy*au+MC7l- zeCh8?un+IInI*_z|7wZv5M{;bNI>`**5&%Eq!%q41!ycdM7^OH0GJf z#?QYSMbkM*`&<<-gdC^9X2vA8C(zKOe-NE(1VgZ#JZJHk3m!Y(6RbSS8hww*G5t`t zSz6qUA{66ryN~oW1Yk&=qIz5u1wRcy@S@AmY|BSPjXc*BJL9{2X~VRI07TaD;6-ut zO0EEWC8%$xX;5lmLA}upP)&ZthyU*FAaF+2&rpG)o9xT_1osxTGeRG+9>^thtqc?{ zB|`lf3d;R$sPp%6>)a0*^ngtwg*NIi3KA{J4B||mL4-1puMng1JxKL(fVHc34DX*n z$GN^|;Y&9sgHA+;==1OdA9bj879%e!v#`T}*Y}NN%|r=!#L|g;Fs26;L82C|Cajit 
zRyHwNo;V|V;drzYKZ6;1f-QgFA_$;vTvnDT1l_U}#f@x47rwW^oEo)$P5ogQUi1B(|g0`n?TBSX{|gNu|)%Tz)QCSI{btITmE5X zNJYEcoVGIdrxZV+3Q!)X3`=`{dEO=?rBf>?%E4V>a6Ko+8Q(Ehs-3|UsW2|LwY z+ud(#h@OLgnDwq)Eh9VIH_-+@=cg?)vhTqZfZ&O$WOrJU|rz+4cMZ+(EHf1j|RUrYkwMbbK@;q_D?O0qC2uIm({e-I z8Prm-->~Qq|HP9=?zdH`wQB(L%j2k&?KJRsCxEXwunnL_6PvKP~^+s z<&JOFVhBJhSqz^b7&WD#NSrR&MVv3O8~oHrJbjrXzJwAg*4_ET`TVUoJ^ikQjL{Xe z-=sBrf4LS2v3wi0yuHo6hj_#qMRXfTAOI ze{9uYtqWbo{thk+^c)|}@Po(6y8YSRB+y{)=V7TaS&R&4*F8cy_%1@N^=(e*U(-$? z3?!%Y1~uN?g)IV5b{jGs9sc|2dHu1YQm9%BCMlWo<$WExAKkf?I+RS&iUIm{3O#EK zEu`=PP8HK%HGMLXD;bI+(KWnv)qXt%-ta;fv$DFD+$YNxdAN<|(kIJaV_WS+13kS|OHuZHDuqsV&rZti zUC4?bAHu=keS>z6VY;2p2m&XfvjA*E`;9KVeQxFmagN6l(U{T^Yo%hId!GuUa<5a( zKPX0X@MV%47lw9uad*W|r@m-`{|G#P9W26`izdRAr`AHA*v3j1r5OCgxr`LJ4MT|k zj4m`@Dz8*9m+owX19@VE>8dLS5daXSA%mqb#i2xcg3%&X(;4rn(=4juVbn(M`Ac!4 zW)IBabKS1NDvIV>*k8^Mm%!{8AWDL|=?UO6&BN0l?&sKhk3f!D>DvWlFBf*=xOF)* z2@cX7J^$vdnUS?XYe$2n7AXREUHE*A?#iK>f((Gr7@K)8Sa#|VW-K}qpd=1~C)knR z`5gjXOlRNJ|z{xF1@TC!z z3@>;Tnqyqf8cqhVntIkzbKfy$56 zU2=7{wMW01Y&g5H*3ZL$kUfu!2AeS&)CguhTh@1&?Bh!oabyfQL*4o2(_MPuQ{LV= zJPhY!>4H5oQ8E(!#5FgBH^4ubK>RqsF8^#&HvQwhRnft(#0Mv?9n|xKCgI1SFH602 z5uNdVl_7?Ly6rHeSt40E+YcE$OtSthv8%?maN&iQ9@Jo=T&gN!Tuh)STa6r4xSm(v zZwEiOaD42XeYh*Evi6&DpJfo4A7BL@0T{k;5#&_y&{jrgt$F@PXUQait4|7=NGqWr zZZb|OWde0>UmILF8dqDNsuvVWNPRXVCc0TaU;NbAh~}GnIeY5Pm@JOyWbvbu6|C7u z?lbg9IfZbu@3RA+h8ZhJ9mrw-S1aGe3|9e!it+SxhjfY>U-eqB(J*|H>+GfIU@R@oDe;$GFfr7B9f*qPUV;w0wJ!K2l4 zL+-mP_AThKY}3b--xlc+*k)zhw$WQgEU5uUKjyQ1Kd}8;K^)zRv3)kIxS;OP3b?tx zmQV6<2m^^#S5{VbbdZQOdVj0z=?Rka5sm=h@i_5k-)(<~3m55nG1GA}LcpxobsQ|> za=8E0Q%HQQ|2TNJTadKBAU#bP&g8J1SeO;zrDx*YJ)4dJgkt$qL?98@-V`fywjh{w zBwX&kEHmfuE3U7qa~8&n!Qh%Dw9+K)S+A)6o+yBMp@6rW_$A!C8>W#jL;TU+zPzjq zDNOQFuPizy=Hf_ENojn1{6dMDnK_}xcwu3|>HUU{)_VC3;;j8w%gGO1z?Y{$0l~&_ z#K1HFOsEf1jJpA7`&imyiA@azrS|;gHabV5SV#S}5guvgs~@}DeY>p~I%QtZvP@~z zi)m?8s8Ptx&5e*evkY{V;IT4Y0BDvKB3O|dP>Na#r>>iw`EKA9f-_l0%=^;e(eRZ) zyg?Ci*ApjlmrH&U^!c&w?r6>iQBg?awNMRPA-k6>6!2H^gFr}v6~yYXSp(yqS&SKLpp4OR~pXGlE_ 
zU`GQP#!S18EqK(-@A5PADxELAUL}%+Ly0!))v7)6QHks2X5SMl8<)Bszh0%V2Z z;BC0huqcb&C-Py48><`jO2tEQn5Ua}^p!TfrkKnOs_;ngt!<5jejKx-+QQapMFJ&1 zbNSZ2gRsD?gvy>Ql>J8@#)FoJed*f4y>@Q!s?b`DvIz*}Dkqsuv^8*I@9ZB2T= zAb$NWBEB-#D-oLX-%JMi{+Ddbu9&1GbyJlxx1 z$YD4LfWX=M*$k>D`@s>q};wwm3lwVnpv|ublV+h!=f&V0w?V~cLakqc_$8RwGp;IL=-RF=K z!NAqiqcbuLEf^&a{i{2vkeOq*Tsd*rP{OB)7@odm@@}%w+9)9I;;hmAplO7ENj{QW z;6DvW0!WT7QknJoaG~6=x;TPafUk#sm3y(?mwQpmWd+_C{^5nDJy*yzUjU%>2h)(~ z*Kh1S4EZoM90XwM3vL0|ou>rmGG?ET zgX|!bKwsE4S&$YCY^UATu&W*qP#s6+)a0KT9yMSfnmV}r-lwEFoDCP!Z$M-IYo~=U z8fGZ+!k>F{4*iN;KK`^LfH#Yw>cd~Zy$f7X`qwhvij1sXj7}3@wyyA|S`|&7K`b(& zxAVXZb;x}yc&3k7FG;=SFKn+b;GX#iHlR!Mb<(d(w-ltOf#99%8z26EXvGBb8t~te zygyMBv7J)+z54NBrDqA_kBxI6*606e^AGNCRoDNjuX_hk9FXNMe?+Fo!ue-Ph)X2E zh!^W$FZWnU@53-4&O08qkbfO{`_TCI@d20869L$?f+|MATFsrI{`cXjhzaymVS8D> zr9WQze(CSI$CrOrIYph}=KjS0&F}hc)Wd&F|4b;%_`M>l6DPhEn6E|+L zN@t|Jz;X@%ErOVseoV8I`e*Dn*5Q!p8 zJ}36X%CP>2Xpcsc(?w4UtS4ze{6Lp&_jA$X=p*BYb6AMOQU|(lEMwJj5W}%TiQWHT z)w6&M6iN6=UEIVDJhoVNQr4`|*Ono`Q|R9bo+#k=cDuuv<;9zyrvI#+tR3E0BVWvH z%dT_lGJg}S_qDA=@k76|Vxh1AlPrJ6k}s)%M!ohCM)Xxh$@ekabu{OR)!KRk!?|jt z8aRxF$41X5TPih=n1_GaY=6yYtgV{Uf&V+{4P*ZI+TU$9oO4#;nUeP3?Sxtmf3{{@ z&>^7e$7gI)CsI1ZXAGjWKuKk!@!D4h^kH{|9$q&HBt8hm4n3u<4s?ohe_=H6bc_aB zevAb4dM@=pylfrh(sNV)BeXtThE|Z)=32VDF7_BMbkHy=`fqU{W6ZM8`*BNW4!i_u zEk(LhK5d=t_F`;eH!$EVUjqLG4Cvm>)cKRw8CBuM!RG6)k>ks|p=<(zC;xdvu^_DW zFL{cSf!ffrn0taGV9##G73eaHR%qiSS!=UeO#fv0am}Et7aB{;bJ6UrA(8LLd(&{9 zK)aP7WLT`L#vLA7F>>d8D^XAW4`{)^YEOA+A;(%7$$dS zZQJwMI!iaVB6Xy>zC)JB<6`?HsA2Y~38QJ)3&WmTE#+anjCS2E;_+ImC^n_=1)h4ZpPV`CxkusY!ON})B=qUH=k+r!BXxp}C> zrkyKz1oukDrpyGd6%NBCxAI18pR-9}K5?$_P!6q`wGhf%0;7$cS2eK&6d-4e!Eu-A zk>n(XBmw7KLK$m~@|0Z5EVN^Hzv0Jhdcl2WQZ#HAuhuF)t(RQgpL}N_&+fb^^ceT??2-7<1H*5jSQ~#&KD4C5{$G= zOvfmmYmL*=Y>G$@dW&XRe%X(Tr?V(4`Qg12wS$S+vr-bXx7c*%JW?U`Jy=W)9*;w{7$2 zT;rd3&aqgp?X-P$=Hu2h=prcY9F27Z$kbDFP6{8=3Xgw6ZPwDYv?Wdp|3$FmOU|hc z7KdHdy7#w0^v-MFpaCDpf-4#S>Aq*ki#VP0-GQ74fxD-O6(_kQ?&m;UXrn(o 
zCUE&Xn;CJ*ik(a@iUuW8UHwf83FWmFLGDS{;@0r8HB$82C8)HuIFb3Hp*5;5Ik96T10b` zb2&BzJo;l%NOnCeh|qw5RW-jZLfMOLE5n~P)zv2@(LMaF94+WNVeJ-69Cu$Ev{;fh zsYjfCnowV@Ob`*1GM33*IJSTbjW6Pf04`SaNSqrmuIG4Qj%znm3R{))PhSWRQ_ZF4 zvll{b)YoSQg+jscrQn^()>581-UU_{sp&XlK+s=l*}3^(=7nqC6B}u)tmng`;*<+n z_-N~_&oYlR!J0pt>G7gloP0Z;;p4($=a=hAQkK7dh_zJ;EYX;jGTn#dOV`^;*Ad|o zlG4L_umIG2wCu;izsB%VF~;yLXM=qEBV%Pz`S z=g59bi!L;7w<5z}mclTkS%e4q@14v}eCr4WJK+i0akU^DT{@0)XP9(IxN<3T>7UNm zCav!keM_a&3b_KN>5;cu*@}Q*VCCpemqNjby1Ke0B_(~h`v(UCK`)(Bg#qv+rBIh! zmFEqphtk2#bDo~Y`r}zVRnNlpSdCIME|f!hm<~-+Jk@a}H#-|u%k1uSjWAY_wop9c zgbn}*FWp6kZ8kJC{O)m}`4!N>k`Nas@b)IX#`?{(7QI+rs~!@pry&1wNq15zuVllrgDT(P*7rGViL$jB*pruuz<_K z0PmisOiXnin)?MkQ=)JVKE8+zao#MM@pRgQ=es|zrKP7O&F*`utB)7M6bZ4hvGMU1 z(FOV}oI|8PyxwjBwPL@nqLr-N_2{~YF1F?To;5-*FMPZ^Q&Uz(MMceT_7<}(aCf^u zH+Plz<$29W`s20$=60K<*0H*2@)2gg)wILwWP3E(@Amj0(5)|a>tbtoRtqMI^oMRK zOgT;H#Zwpx0Xn(xV%JTR_u218+v(rk0V{8l%V*Vec%M+FG{cQB(|!JOOuvYTUU9eM%@NtStWi59a9L*_U#y(wTk= zVq(S8xvs7*LTCA;>xpSHQqoYj0k$`+Vaj#P4^BtE&{R(yp8e^eK*LA|nhn3*wK}hT zx6Y)q!Q@>;rx5k$=i-8s1*5O`aVaxwelRq(vjxou)@8&f3Us}LQ%+q?Hs^iGs4$aa zVB>L$+tF$m%J#zgFhvn?t>HZfU}U$iJrYbq&T)~}j~&>${OcbSB<4#r`52X|B>j%R znD-k@g32sVE93hq_#;OIMGZd{5$Q@5O>&HQ zXQFmLg(XiaKLu8j&pb1pW^Y^>oPnX>7>XNYI3T02dH0|NM?Sd-7WgsOxfo|BUP*a^ zBjOuV5ALp`!Ny5+`21R^*U7Ty@gfJe@JdUoAr#w};~Whx`IIiy8a>vR^vCOx(Bq$y z8m{=gS_9L$yY!RRCs9T*CoOt|PAw_iIks}Q+})fbPp%f`ZgY>^pZH7ng!fK- zx=Ycjz*gKXS8iBjCOwU*c#>3bDk1qFgH@_;KP9EZ35O;9(Pj%JXVL?GA(r9tVM0;R za5aZTmlt$vV!%hIB#j$3``A&QGfgwAPj;bS90*qxaa$h6{_rDg(4R>)v+QheLY_E+Ka_ z-6snacjq5DjabrSK6mQE+iU+BGsdcuSEa>H!iGIf&| zVpmg-+ohv(!SUjtG4T-JPa>kb;aXDTE?=FK*M56aZV48ALB@%Uex+TWH0*Niu~4oW zE9M!ll(ArCUrzjT4aU+Ke@$hKOVS9xSyollOVI&!)3Kp}@#rpo+={8+wHGCPvT7{f ziWU+YzG;?Z(hFv^GF$VL`>1wgMP}+kl^z$5IwQ9gd6lq4ElVh!3!=YW?uqWIR1P-e( zb)gatU4<;f>yivtJ8oxX$nXPGIl-Fnt)@fk_Qh*&>x?BCZmc6*e&L zvn(^Zw1BqSet$}g zt*LS4?z@3gcXqX;1IOalflV2z{VA@zV0q$huVbnVb3w-5V-+>SX7 zoSflMeMmetKwEl!)5L+0Uw9fFH*gq}`JeJlSUgB{;#lZV8^|0&-ME)EW5O55YC`yU 
zrT54aHU)3xU|Sx-BO>tpAk~+R|ZI!X-W= z^ms^J!H)EUq!h%=GYApE@S_IU>r7JN_&V#k?>iffLNkL3Qld5wx)zHgw}(@Hbm6M* zDt_}n**q&ANsPL^mDbEmOCf^9>OfZTN#QJkhD2pcVHQT{6y&#iQJ)~87{ozoMaFAV zDE$e*T#Vpt^WCi6TpJLG0->Nb(My1MopnZt3l@|4`X&sK`dK8OEm+Cp#2Uah`AMW- z{ltLZAOZo=dE5A!7b)fBj3N>(JhYbHH-_xEj zN6TjP7-^jmqS7?CGg%HVtNL^4P#VCnlWzn-^Lk=l$A6z=4j^45p4T~3+I`9+`Lv6&q~9~b_$*mQ(C}9|#gX4wiJwgPo*+dTayt5UI(TBJ$+Cs~|9X15|%tnViju(nQj?B<662bBTB=}y08IzTJz>OZ{ zC2({rJG8g2!VH6@1lRRL{kJaJyEa|%z5}q}ps~2gz`_rCF&+8P`=vpnj02dRf{-f* zAWVPv@u#MWFrGI$auVGqhsaj*Afsk&;mFCk6Nfp7bhmxgU7k?f&5-h(shpAJRBUPF z!n{VBk#T)rNc%mQ(xBrD7wNRNJNos{Prguh@{Q4h_1+$J<#t_hlkH{WBK+BnqYm&F ztEmeGwHO?Y~_7q$H%TR(T>v#Z!!=cDEr~*dAhej`#KPXbG+U7ce)1F0Wpk% z#MJL%Q--q<1%?f5iW6es)zSf}Wc1!wrul=8Lt7kgc+y$wF+9F*%lM?_pYL=&$l<@(Jjj(eKT3F9X?J*6ewX-py#=Jj=R zRoB3|4!1=*$7g3!bfmToCHH8EU|zR)vuYp-O^6A?sSp9)7cDfN(vaXFh(TAr=K>pG z{un#VO8zO%8-Y%j)7#Pg!%UYhIL=%3F)QhJ(Cw1#wz1)+86ED&F7?L0*Fa)iBbY#Q z0=mBEg7f!CVF*dp#GQ&%1IA*lR-(}sfmkjlC<2sCoiK8hmNQ|x;8Ee;{vbmNR!LF()|Q{2H#_Z2_Yr%IC!-mQeT5^&)h1kVxk$D14MjjG%5 zD|JkP)&V1EJ|Mu^csd)~N*cUel8v1quPe6$HF_Lg*%Sbm4(!10x@%zbM>?LKuxOPG za)`l>#Paa$?h;7oZOWg&(fO>i2tUhm)o;R&{!sb07Hc?m$@ikH|kfCWHU*tE$) z(Z1gw@{OZ>Ru@9Zu?3nHT*uP7qkbm|#ULDl)YJh&Q3;b>){z&-;?C6d^kMJm(ON?x zFB=^7+wl;15wcx2{hf36edl#1i#3G!WKuX}-{n&T*uGCz9@o%;WD+yrgmFGxHO$@O z+}}Z++mB!OP2kERCjPqZ>BABZxB;jrr<26pWzK7ZUR$fJ52_FOUhN&+QqGGrz)U|m z?AVyaV=+Ts$=HLWM0Zxh(JX<@Rf3-nGFnT6UZy4vrF@be8V=Q8GQ4dPbwp7Lkvth> zJm)5+G>giI&LJ^k^;6HsYFbHo^lYEcf2dV6(jGTbMyp3?YPafBW#b#Q!nW9*7Msi1 zd&Mt9L74E0uLO`yL8xZkwvhWSsulJVhA#Jm(^(Kzvgz@t=*8vzTrFvk^B+=$03WZj zRV^d>MBBTm2c}5s`2yU9Xh(=4T7frJ6VfLy$Ci+BtQdTU3 zd5l&taN}ZRG2w%1x92AITl1^?!BSp|fRA%u_!5E+EB|Lp%ni+E!0%0xi;}t+RGiD7 zzM`~SCP}>2?eKYF%NCiZ#2`R}{&q)wSIp{Asy>C$kZO}yMLs^^;#GzKbS7(k36!uf06Z#|e zx*tAU%1811d|yg?vlD08PR~m&Vb{?O`WG9`)JFNb>-`xHoJ$#1dh#AmT5pVvS0`e8 z>ITWWXd_kNEvsRCPku)X1vCz>><^yk41wTBFa|~A9uTMi zGU*kw3ArXIqqp_#5x){d+Yk1qFh0@VSj$}sC+k}c&tCnz(%Zyl+>H!4t&;l-v^QbC zuFL~^61eSn)YEj;n4T$AMMx<)t19B@yDtn=DE 
z%d$cD$+cFO%Ok5&N3t2i|DZg6WO(;4Z1R8zUarsw+Gl|DCZ^F!6Ku917=nRUrDaQC;((|5bk3#EuDg(C8dM8zi#N?!^DD*J zl8^t#L-=KQk)_=!qeF097**xENI4nB-Hrk&-+2EcV;|mD$>718qhD&d!sVi^%Z;`e zG->sKZb)9jCZ&4z41z82_sbDph9hBScaTc3ZkzSzw6`LH{Bs@7xCLDQO%m)8o1RsJ zb5OG+`3JFuX&5Ys8_5YA#4=lqhulntMDjTMpJWudYSN1Q&zLa5VGdCzO(VnNW zSaxrcLSi)I@azIDV_5_V5eU<Vxka##r?>5(^|}#>ROKt{QnCU z#DwD6gpTc4zz+kj!7D{9vADZx**ku@&1Dh^vau4`aBsh2fM1-u+iJnet#27HXt0=yjrZf_2bdD{3u0wb1 ztk`VS+>C=nKm@mGQN$~|q%BT9IpW2_ma-laOSD1cW5eAtMxF~NX`&b=y?39O5!V)yk*YUvpXP;-ot zp3XYt^|@o@^!JC(;w8$X7uU!_Z=qk~1gPZ_p6SY63< z-Z5cBjpCHm^%bGrd+%^>m%+Ey)TNbp+o(t3zs8x?f#xPncu37YNd~SIgFY{F$8W22 zn^@u9pNUw1c~;Dzdg*pQQ$76{{X5R@-U$9#HuGkriW#`A~*vshU#7Z zbX?V($VC4>nR04uLJ&LfcFwx^-=S1yC`I^&Pp_{2#NRj@w>_V9;u9fSNUga6heP+mrf=TB-R7JO=w;_ zzhdVV@Tl{l`|E@v3Cb6ExgFu)!Jyi|OIFapuhNF#6S5o(-b!p*-wyr3Tyio({~$l} zCz;NR(U(s$EppiT?MsvYrl-}?CGz-ZDaMC3b5zLNM5r=nx8v2iA;wK^*>I+KRKe() zWhSc+azN zyPPrUXx~AJsk34(T885e&2hgYseccc`*+-?2Fs#S0gLlUqdpWCf|boccW!7GQ1_U z1hr32NThsRGG|^}I9g3ZV`!FqLnZ&=MGDWXc*bqPGU>~RG@&tG{`9m5j5X}-$;xnR zmnNj+^|Q9sFA?5$3zKMy`n*BVzZ5X97##Ra_#bP$Y3}qPuP6F=Sq?)Vt-(CIC#KN5 z(x3Za=uloqyq%ZFXC%?{ssH{aK~3Y@`6R#R zc2?ovyuPvGP5Yu|rRO4gj#eo#R1@Hs^j3a%b`{^LD8YSpL5a1YQWfnB1zrI5^1aRgrX1`-0#0 z@?^eE6Xy1xNP27H6A|AoYiVf-R{@9*dAE)G7C(U(6IfqN9@gP6?&3Tx)8$M{_t#kc z_+1c1>W}{0E_+58F~{^6s>XFT*GEU1VgdYWvmGYU{#7ak&gr7u8~kCQpQaqsE#~pGnDU-zx%p2coe&^WxpC^{J8yg>iUcB z%7KQjsT?Bqdlr`&9xqW?QAb=F-~Fo>p?tUcAn#FZ%zHfHa75l%zM}?u=B`eQLBl``W8T#~K}l2*i);+X6tjk1od`E<Y=KV3#P-k7eqGT4vqwhBlo65 ztTaME%Y#Jh8FkdQJSCx#Hr{%JIXeBPR7JPYBK2Jl#-%6PY5G~ph}q1Wt?czkwS`X{ zyc6%}J&qC-5yAmc{s#k1o`37J4Xe|z&0j{^Wbsn}T(zBQ-+Ay@YOPaH42kKAZQ&~o zB;#$xe&4N1i)V@I25-gNut0?(nZyyoHsN*cK3%6vsLeQdDv^v`n3=ihh{qwe2X-y+ z|FzN;nV|12mLS?6gb7Nm;H%};AHZnUJYw6~Ak#~YsV!^wU)p9i(c;2-41MTa>HF@U zb|^cRS!3_pl~O{)sndA!Gs>b5d+xKweeuF;Mp*FzY9WX{^~bLnFzN&Eyo??MJ1CIW z)+_Lb4_dgOBTwXzsqGPs-Rn1hlBQSq#x(L?H}XiEh<^&NuH^7bVU_2$c%Lf=rdmp3Ika3ZNCpvIcWJ>I;p2pp4KJ*Y$bb5iMrlIp-YpUQ-XPY< 
zVsD;u#DWo`$DmhnSi+;|o5sRgY-d$gVu&8I6~imjCdd2~-T-1k59YaGA&iaHIi)8SWJp6&U?3zxQ1yobk<|N-nQ1@dcLG>1g#7Lk$$dV7a4riS zs>4Lzu_K-6tmmAPPXBEKlk0x^yd4W-isCyB$!GCO@vaYg+0aDT@fI)NvHW!XA7(0O zL!G1qYjm@LGk1*sHw$`UO3h00EEth@TUG(p5`PLNevikg7>4$TVq+^&&`#BYx^9O}}Dq{44aJB916HfW>L&MNk!s)l$m9J4V|-Yjd2Usct1c*rDSyF z8J#b$#-xF7lKcFs$@;E6d{7&ZWkLp|c*1^iFZew;q;1E8r39VGtvpq1!O;q59g-EM_5M5wJ&jy95n3e11Bjc+k%xM~pQm4C;D9XpYL z^zkQhvUO1=+S^a~@*MMXU(6^8QQ_}bWszEHHs2mlU?DB$hnTbZNvl_Y^{5tmf_kW7 z7>PD>P|{RpgiOT=f`4xi$L>O{N~$sp?L&)Phg>H%Xj+-b>4(QIgl*oh9FvxZmlx=I z?Uki9PNg&Gs%X*JS7fAzIUEVc6U@EnG7n{XI+SSSojGM>U>dA9yO$6a4*Qq)&8XB> z$9LT%ZlUkovhbG!Uf2Z$$|El>$JtN>f#bCMWUTop5^VCCKN_#;98u-Qzf(q>r4UR1 z)mHJyRTTa0HPtjfHZt6Qf|2~O>#xC)uqyq*lkW7O&$F*$B8HKnQsSnoiO(MJPDNmW zXXV$3TkYN3qkZ^lu&f#%#gN|Is@cV|tbrAx*TBWamFItnJT*?UKUf#?q0kh{_H=*W zMvb5p%Y}uYRt_-5e|rc5DAlW+XY=FgYW5nV z>+tU`KO&)l9xAHYuw(I(rp4zVZ}y6}XE1c$hJEF$M<2&Q_O+Oso6}t`2_5js+?WD; z2r)vtP-X%_iA*!^>!RCJD-O_vypBKbr0Nk$?P00TaXSz~|=(@*#;%gqky z)Kv8hX8FEZk=v#vYx1k_F;t1kd@(keVNT3UEMf2P#U zF6jI`E?gig`BmUU)wh~r5DN}_3Up*M3WuI>+1#B_7woHJL1}Jnk*es@^USNzTNT6h z=N*c+xTK_H1voS`R2vo<8Mz#Qjg74dq@_*6QQmr8>aZ{|@;n*4lisAb2Xuq!(0CZ_ z8L6>kG~}feh>PX5aj|@XUnFyL^RNvxjD?G7i1(r%X=}EP+FUn+7js!$&YIhfY64CL z+x_xRsL06Jpg37^hzPr8pZ)Bn0j;c*=7V>`l&+-kJR5&>hr>I(3VUj_csveGTCSez z+>Hf?&@i54Kzz=u6QP4Ri(O_@9zL!|>6zhQ!8OaJDk%W&G^rh0qe!RwFTGZb6|Z){|6HO0lno z_-RWgnH#>kQU5)Ji7Uw7c6qY3>+@5cQRFwJ2K0Dw!cyeL>YX(Nr5|aggseJoPnQuW zDDtcLf^<)D=Pg+$MNK0Xi^&eIAH{5QF@yBZM-9N{0YuD#P3x*TOs;~3nh6m6$*cK& z&9K`5=9>)tP3wb6L&t9E<%yNzpm3W|7i+tg0*{Z^^801*SFx1uj}8YFwj1h(xR|yq zON-+tRS6c7#x|2wb)VLnsd>Q@*h%W$Bn^|rEM_dJI@MY?Rurq&YY3?@QR>cztxq`qR!X0xEi4w z1ZH~?=0G?)ieKqxuZV*f+I}y}$6xV0Je30Pr)gnefX97=q&%OuV%=mp1MKOt=g&RO z2u>v>)?;mT%BC)(P;@fXhB)GgTsF(7&G@ieJ^uJETKAkNks!L=(bSMeNNPk>kb7-q zeae=F{aU2IYSGMu2pM6I_Z&kq&C!k?3|$J<*npK@v>=4TXEw4 zKNc2K^j-Zy!zaxrc~Aw{e9_h3<#nD-;w&7&qRXGLZTf3mxkdLSoNw1N`tL1@3c8#oY zg3t3_PY>%}g;8+#FC)t02H^-fe#REeOti3|FnBq;+rDZ{R?qhT1-F_jwfOlv2xTZ+-Q5bkc}5Rj=YJ(kfT^WMiLmwaxlXw1X~SQaIquj6l%# 
z$*_!JtuSpLwk-+y{UxD^G+S|wX#FH@m=|OA?iNC_?F#$Skc56=*8ShUUFGT6h_%h3 zDuW+X(^+hcD`-^2oBB zED1w>$or-H8^9bPN(hqe@J4%M4Nws{27yY|*jxlrE)++tg3=2+JbzZ2Ht-15TzK2( zEq@uGva_@>V@*r=f#R=^#tFA#S;+~JCLezy#kdhPvTm=@ZAard>^n4z6qqcAIr~g_ z#zlSVwb9|mvC)lKK8gH<45w_q2$fRdOU>ZG%?fwY1k)=DIMr@iid2LJV%i*cvHJxC zeK-iiNwsjoCc9#5IWhHip{oDWoEzKd$WoG{m^-&B@+J*vHuEXhEED z?|T86vw&w*or*ZAt83}swy@B_|5L@4heN@2@iB+lyphifqZgWQiDiWG7T8 z`#xoC*(Qx8WNT!X$Yh%g4GmdJV;}n(2H8S=qxX;R`|h9TxxahQIrp63xzBT+=WO=Q zvv&xkTVy$y2nDO7>hr(8kj?NYZD5}*E(}&iqqr>BZQ{l-<4a|ZLzRzN`e|QDzCgw6 zfaMiSn!lqWoAY!Rog+XkrjUS?%ah~rWqZN{^AZWtEs-h$=E$*ep^NvYb}u!eHgLjl z{IRL(gGx$XK1gG!i_vv3rw?jw(0%yz+=9LHr{&*^N*uX82v}xIFilM3EqE#Ex_W4P z42NiqPMkynkB`8mg*LM~Cd9L8X8-v#;clUNVMJsUFLrmG$>g@t-Q(RLtt>j0 zz!E`aE|&d;%2fEwG`Y7S27mk{%Ej!}8I$ADwDz)&CvepPARixJ0j*klw1HqLiaLz* zu$|mTqo7W7AJ})o+B`9L53V0O$*IzmZ68%Dbp~8kk>pQmK-R>Fo7=$|wW=)mcnNEX z>TF#uy8Y$~Vt(ohZd9CJ)b6o**~|?SuF4B4ul}C80!~_3Ujr2*?{(!fjSjobZm&5~ zJC1vPed|ZaULhXuN^`*yK0CYn=B#1w6>oKqS+kdSOOWb9dlLjuw&-)R@h#9y)D{gU zW0NLSpF6K|)Xljy0ay}!EaWCFnNqC{*3O+N1Jeq-gekxBj6F!KmdH&0^>9H?hFgTG z!;ODkzV#q=_^Q11$Kr{;nJ>lfyp7DnmQWJ=N#oqo2t;QslVI5Ji2?o}na>QCp@T#V zE69-}5-}tT9hPod_#wgw& zDk*mX;;kBl4Sl)0bO;`w!1Fxbnt752v~PYN%)Z7$>I2P~==T<39(;w5N9(v3WIkmW zd59mlO}#Lz&mZF5o_1Saa?i;A;~6E|(~rvlZV)Bw-t^v_H)E4eu@!t>dDZS+`FE+t z_OhJ-jR9Z>_-<A32giG6D81%G&( zRCAgWi$9i+g-%x_`00^VV7b7zsW-YV`Mf#*1REgaT+!4Ryu^By*bsYkwdcq|pf!iV z7am3P(0ku(1#X-A4OHIQ%8M^b*rV=_67D2WU1vD4y8B3Z;o;8#LIj7kG+5-x&K+O3 zH8!NdfWP}KlT;&Xo@HjyAn=hiLrmu_sm$A_6N5@p#^a@7n11aWHs$hT3#-@?FCB(x zj8Fn3swk#Zk7YotnXl<$P~Is{!cRoT1jX-4l}(eC$BWIS?URCyWfF|LZ0+a#fAJ;i6MTs z2Q&}r{8_BXkVcz<19BRgENIV zybHBb4)gL)Qb>;N-i`i#D{k_u!9|eNiNu5H{mf|@1K-jqn0m z_1xEkjfz@j{&L|h$U?(v-m&(p9$rsHu892_Yc2@g($tEoe`6EoQ80uiFcnrRsHFuy z`1ekO+)nk8FdibDWB0x45%^w@{$!WtkT?5q2BTnh$-mH!+%}X~bkijH$PS0M#oC{z z==v~Ms~TwhA(`apZZqTa^2DIJDLIAx$V$KB_v~)yQX5);1Eyok`VkvmFiBqhFRpy% zQ+4Z)?GGtpYFvk_8>Ug;x0HbJex+umjl!_4SRf0HzQDKBpa9(RJ*FuoYoE3(inCc# 
z+%Knz9vYVG|FBj$8gf7a-ZB?oA0gaoJD9=T1sXeV`z^%Z8i9Q9pKNqgU9p`Ja$QY+ z2kj`#6k|MRtzno5!Xe0|Y8Wd}EsBX|!aA1; zo+O^W9QW8a#8Il?W+-QS8wfADdFkCioB-boKcPGHGDjrlkY(!|=G#L47oLntB&ad8 zdJfh%-H!`F)_%Rya`|T9Acr2w}sk15+(1H z=E^P#LrFjL-*YYN|3Fhy>)7EA|UOxZ=LwLZUJ;8NyHp6N@5M+40)g1xpq)1b#0CaR41Yb znbX*_ismgYzu4FFOfY%r63M%p>N!iTh`6uuiW@4^gZJk#F1v$8X!JDx;5x(B1aw^B ziv!@%#~}@6r?BJH8Am7P9M|}3rsl54YzxNO@cn5>3Ln`d!;SwRHE21Tk^1x~Pgs*EnrW>_iT99xEcEPiiYM8oqJ9GBrt| zbUs@Lx-}L7tKmRYcontainer1192.168.1.2/24container2192.168.1.3/24pub_net (eth0)DockerHostdockernetworkcreate -dipvlan \--subnet=192.168.1.0/24 \--gateway=192.168.1.1 \-oparent=eth0pub_neteth0192.168.1.0/24NetworkRouter192.168.1.1/24 \ No newline at end of file diff --git a/fn/vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.gliffy b/fn/vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.gliffy deleted file mode 100644 index eceec778b..000000000 --- a/fn/vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.gliffy +++ /dev/null @@ -1 +0,0 @@ -{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":541,"height":352,"nodeIndex":290,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":{"uid":"com.gliffy.theme.beach_day","name":"Beach 
Day","shape":{"primary":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#AEE4F4","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#004257"}},"secondary":{"strokeWidth":2,"strokeColor":"#CDB25E","fillColor":"#EACF81","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#332D1A"}},"tertiary":{"strokeWidth":2,"strokeColor":"#FFBE00","fillColor":"#FFF1CB","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#000000"}},"highlight":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#00A4DA","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#ffffff"}}},"line":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"none","arrowType":2,"interpolationType":"quadratic","cornerRadius":0,"text":{"color":"#002248"}},"text":{"color":"#002248"},"stage":{"color":"#FFFFFF"}},"viewportType":"default","fitBB":{"min":{"x":2,"y":6.5},"max":{"x":541,"y":334.5}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":2.0,"y":6.5,"rotation":0.0,"id":288,"width":541.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Macvlan Bridge Mode & Ipvlan L2 Mode

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.0,"y":177.0,"rotation":0.0,"id":234,"width":252.0,"height":129.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":16.0,"y":240.0,"rotation":0.0,"id":225,"width":111.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.73,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.2199999999999993,"y":0.0,"rotation":0.0,"id":235,"width":106.56,"height":45.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container #1

eth0

172.16.1.10/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":138.0,"y":240.0,"rotation":0.0,"id":237,"width":111.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":4,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.73,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.2199999999999993,"y":0.0,"rotation":0.0,"id":238,"width":106.56,"height":44.0,"uid":null,"order":6,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container #2

eth0 172.16.1.11/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":40.0,"y":-26.067047119140625,"rotation":0.0,"id":258,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":7,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":237,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":241,"py":1.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[153.5,266.0670471191406],[117.36753236814712,224.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":50.0,"y":-16.067047119140625,"rotation":0.0,"id":259,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":8,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":225,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":241,"py":0.9999999999999996,"px":0.29289321881345254}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[21.5,256.0670471191406],[62.632467631852876,214.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":60.0,"y":-6.067047119140625,"rotation":0.0,"id":260,"width":100.0,"height":100.
0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":9,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":241,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":0.5,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[75.0,180.06704711914062],[215.32345076546227,90.06897143333742]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":3.0,"y":184.5,"rotation":0.0,"id":261,"width":79.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker

Host #1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":283.0,"y":177.0,"rotation":0.0,"id":276,"width":252.0,"height":129.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":291.0,"y":240.0,"rotation":0.0,"id":274,"width":111.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.73,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.2199999999999993,"y":0.0,"rotation":0.0,"id":275,"width":106.56,"height":45.0,"uid":null,"order":14,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container #3

eth0

172.16.1.12/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":413.0,"y":240.0,"rotation":0.0,"id":272,"width":111.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.73,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.2199999999999993,"y":0.0,"rotation":0.0,"id":273,"width":106.56,"height":44.0,"uid":null,"order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container #4

eth0 172.16.1.13/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":315.0,"y":-26.067047119140625,"rotation":0.0,"id":269,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":18,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":272,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":270,"py":1.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[153.5,266.0670471191406],[117.36753236814712,224.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":325.0,"y":-16.067047119140625,"rotation":0.0,"id":268,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":19,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":274,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":270,"py":0.9999999999999996,"px":0.29289321881345254}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[21.5,256.0670471191406],[62.632467631852876,214.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":278.0,"y":184.5,"rotation":0.0,"id":267,"width":79.0,"height":32.0,"uid":"c
om.gliffy.shape.basic.basic_v1.default.text","order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker

Host #2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":70.0,"y":3.932952880859375,"rotation":0.0,"id":278,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":21,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":270,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":0.5,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[340.0,170.06704711914062],[205.32345076546227,80.06897143333742]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":167.32131882292583,"y":39.0019243141968,"rotation":0.0,"id":246,"width":216.0042638850729,"height":90.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":22,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#434343","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":356.0,"y":150.0,"rotation":0.0,"id":270,"width":108.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[
{"x":1.8620689655172418,"y":0.0,"rotation":0.0,"id":271,"width":104.27586206896557,"height":42.0,"uid":null,"order":25,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

(Host) eth0

172.16.1.253/24

(IP Optional)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":81.0,"y":150.0,"rotation":0.0,"id":241,"width":108.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":26,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":1.8620689655172415,"y":0.0,"rotation":0.0,"id":242,"width":104.27586206896555,"height":42.0,"uid":null,"order":28,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

(Host) eth0

172.16.1.254/24

(IP Optional)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":224.0,"y":64.19999694824219,"rotation":0.0,"id":262,"width":120.00000000000001,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network Gateway

172.16.1.1/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":307.5,"rotation":0.0,"id":282,"width":541.0,"height":36.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Containers Attached Directly to Parent Interface. No Bridge Used (Docker0)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":32}],"shapeStyles":{},"lineStyles":{"global":{"fill":"none","stroke":"#000000","strokeWidth":1,"orthoMode":2}},"textStyles":{"global":{"italic":true,"face":"Arial","size":"20px","color":"#000000","bold":false}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images","com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1458124258706,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/fn/vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.png b/fn/vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.png deleted file mode 100644 index 13aa4f212d9db346f307dfbe111fd657406bb943..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 14527 zcmZ8|1yEek(&pf9gS!px9wfL7?(XjHfe_r?o!}4%?(Xgc5AMNT154iS`)haSR-Kye z`MUe-K7G$Qb?Z)ql7bWpA^{=*06>uji>m+tkZb?|gb6&FFcuiA2}u9| zCuqei$M4-5nwr`%i3Np4vgR^5xmi?p#UH=*mV3XyA4*G0x3#x92gWff1cLwoYwwVe zXz=#-mVViE$?s==*J21!?}^c7DO)2a0KlX5gh}1OzpT$YG814}Y}0%tmN+Aryb)z3 zsT@@gAoG`$lpO2IFpREFRt13lD?iI9=-GG+2moqFC%hy9I;nj{27uocPT1D10075u zO@yRFl7errMnFEDh#5#PPuh?-x?z&dII=d^&poRym>Ga5A8o_}h!g_|nSE(VHt%mQ zPqw6czuO|90az5wa0^O8kUNShYVM8{SpWbzzUn>s&g!NPQdUl24Wmw9x|Dc7tCD5i 
z`~h`8htZ7%KLEg;77+f;xPQ6_tOjVPO#d7)KeD-x!mCj40sbB># zBGK_J+`o&7iG6fe=KSsDW0j8+4tQ zhH*-^spHNug`oyE=L)?pv{pax=*n0-F0Ov#MtGq;M)d`C_0|)mHCU--2WMp&h)U&P zcQv?6T)kMcBJ6c$fMY%x)hr|An5WbgL#Op*Pvno^B*6%cGQulpIp?=Ppr)&ez$$6D z*}MDmX)#sm4Bby&%JvaxP;wzHM8DFF^FHjHwaXuDtd)8S?sxf2d3U>RT&%lJ$$CKm zXh*C8HSbv=U%Mg`XBgyBD%Rl}U!)etqPnI`2p{fA6T+dIgA+kT$S=YKz6gfU{%qvb z7e`jpjWyGn%7K-x9`5=yt$bko+n7@#7IU)OAp}+uF5#D0@BlvI_$(!08p3V=l>$Z3 zlwFz4CPzXyYS1KhRVa3_pd_SxNF|)OT`e>lX_!!Pr15$;eD}>yYE?otTcpveS87|& ztkT$vjWL34w}3#Tgw7O^gpwUU2|(|iPy<+_ncLzhsmGAV&thM_)Q|PylCK^T0QjcK zo)J4y05vSxJZF_EhzlM^A^oT4R?8Dmr zwcQDogw_q@WZCF0mOTbPNpjBRXO@O@nrbl2Vm8jTFDVV77`yjlsDjtincO zM(x$`)?EVqN$)d!m{}ZTC)2AeEw+vI>c6fF??;Ax1Z*Js`7TrxV1fJvHlbX1Hp{=wWxaUkp%r;hrW+LO`2E=(bg%#N+RzoYBK2xv zWVjjX{L3{G28PiMMhX<0`Xnn1r`@t})K`Pv)@qv4v=R5^9oiqikp-o-UU|L*Ja?lK z^~t|nwjE>6J~DcyXdjyY;MzpznqdRM$Q^AYD;i&eSwO;!ow^Zq%U0@9iokAYuN1=0 zgjyk@V*PUkjr8X{l@-*_=Ov5&jCwY{A}H~ahq|QDLtcg)p;~3h3no~JY%&`*vC&7^ z>eEyEgAHMBKM!|f zS^386lj~YFezTObA2o}(5mchyM-Ly|toT6%F z7)_9n2@5gyTM>li#2#N+?(o9x8uvAM9D;G%>ym)K>fPrXn@mRJ#**k+lXkp*hum|c zq-1Zrm(C;xkw(JwzZP&ka;|2lw+qcoTme=rp;XgC*aTN#3s+cEC11-J3o1v9za4Xx~HF`t_^?MsU8C-VhCW<=qT-s%dTMDFKtR?Shvc)nrj6n%d3#Y_z&7J zrn*Ns22|<_(Wc?fCoX+yTbklb3JVSk{~bMgl-KmFC;*#;@%qQDYeOJCLe=MOv&Hw+ zO<(DH8U2Dk^4Basq;cIAD+E;=;f42~rYU3FLb!|ErqsXR$WU_m&VGuc)+^xQrq)=I)A(> z3eq>1S5EZYnb3qc&FEHe!-=MDnrX#aNvg?bf)@Gl85B?bP<6YHD_N=&cfdll0*f672)(R19dd$J@@N{w_z&pMbh5kL5@`Ex& zq&SW}KA<5D|7<)1kw7ZCF3_b&b?Vo#7D!g(=aw2`run#=*WBbN>sO)(Bc-=a0u}hBb$_t3wwW`DS%BKn1z*%{>8}Z zL^-J*dwD-znMzHcwswYp;Xpdcs)spLD?%v%r%!Wm!3~J%y9`ry3lbe=yhWi&noL(O{~pRZ zhF-BaAUy|z^c;yWqTyz(=2#Mc0q>nyb_n^oYH%fyF}#B$-F>b^teXvT)0d6kkIl7z zQ=IXU)dXF)oEt1lrWK3f;E~veRry0233TD7pUIQMkKk268myK!minxp;_1w?wPwNBgq62c=52H1bC>GzD#Q>{GypZ?aN*$R=IJf2|D>8XPCX4HG|$i585Vy$kj zUPg%fSbO5#DUq8|zNayBaW7VF15SxZv&K*w+A#k{w8(fy)U>_#_1o@Auj^W&Une}; z!^!7CPCY#K0Ror^@yBz@1Feq3mD}7}P^iKOFdu5vpq6F2Z#Bit=d8$Z`euV38G8rO zB&WfNKOFBuhr`6eZ{NF{ZqA;<;Qf)La@{W*9qQfT7^7`OhJW}>P`~cZ 
z@Aq?>q)ssj-FYN)y!}~!-kvXw;uyXs3MKs5u$hG1m-zLw?*(o`Qm(S*-J+QMkN4Jg z6;Hd1JBg|2SF7D)zDmEt{alPVl1Q!U=7!5sAuK9pu3&{O8%QegU0`;e=}Zw*tcuQK zl*FhPZvLrri8@W>1H{>pXNY6T@o_2e=8Q~!I2tu175a>nKu5raW~Tk-FR|1mS`}&asn<@r2JsIP(WHGM-CCu+3#EB7cL+5tY`ZAC|;K@PjgQ^CrJS zoSZig&6L68RPw{CZ-?UHBgAtxF`Dskz;p21CwWf^(x7qD@U#L(@t4Nxgj>hvbDw9k zt3Q+;dfN^3`G1g+vFLwE2QT@C-=ed<i+abt)eg^w7Ps@X*{}DCI9dEmxnBdP?5w=jgEL$wSKvEO?o+=B zAu-?zTrBEeJhd2}5X_u+Z^)DiK6uoSR3ioj=sln8YafL*F2>HC{uYyJ79u`EIP<&e z6`zxVV?9zZ_D562P*iO}qr8B)=!DgeJn`m3Mvj%H!uzVap&K>BWMMSDnb5@_=F9ig zXe8{!h78pn3x>z|{vI#MP{epUyBej~V@|07?X~qOiZGKRjrdTPIE=(W@XENha4pF4 z;5*zkK$WHn@-RQ|+-rFI2QL4H9+Yc!J%Z6{l-`Lz!__pVRA15BgkdZe+(Vbr^E)of zX0dK&SY)66l>%;Q$HT#tZ<{Bhb-`WMe7z+ziQeg_WX|EWqwZuk-F6Ip^JEv~#OK$n z&Of6syLejdEuP3#uXY$)Q@OQMCH&*2te)CT)M0!+LukO&^`p@=1stEB_#-plM$TQg zcY1eJTr<{l?u{B)rf8(+UpF2ee5yT3R+n#*g6dD!6;9um24)Q?LJ8Fn(6FBcZ}jow zhEs)_aHuQS@2l*sGA}zWHt!U0lEesoHXiTw+Sf8X6>tuHBJr}iexg64;e+~ag-l91 zY|rjCS*AZ@vBB+dsp^VPWSn$cc}9_%Jz%l<`5EOwNkTB`eE^SyJj{+e9{&6uDnZ~A z%D9hp#9&k1*!F1tdf`3>as2E$d3<3Yn<$uFObQLoyccw((!rC9 zYeaSDtk!>Zm_wn!lsf#f$`*oV?BsMVi}*D-9)_AODWr@>A&1j1%`d`A( zs0rEAdW(%IiXg0?EGB5N*?tcR*z;&JvOLELo7%Tf}W@Ybq7Kz%Rq z6nkUtzzR)UX1h`NaO7jeGrov$#v-IAnB0bRQIUX2`0{Gwog!&=c7_y;QuU!-7!Ma+P9i0)hk)ZK+qvNG=QPW?#5)*a1&e&L2}m$S6=*ltishAM!VI?EpdD zywb>>t45Z2STvPvvBq^@y>_qPLM>lG$1Q)z=~BH$?j={S3{dmK_k9>;)L%{{TUDPa zE45wz^Ur#>q$P>ggIiADPbU12Kd8c;L%!=RG6bg{4s+Kn^8Q?HEtswg1) zcd|>y>)-E_XP#=c8J%U6Mz3hgppg$K7FNBVL*7C3ZLPGG(L78oB3e=SoW~I)9C!J} z=->~^wYhKN1m7HTUJPfDs5-T|sAGBiV0lU==$dyfsppC(Yt4*mlB{^~$I2bqo?c2h z7Uy^>Ozjw)MUGvGG3>2KX)$l~kH`2CgG>>@pDPPgK_P+&UkAC=s8#6Mer(h0wH!Cr zXIs=*Xl|3i;LCGp)sL?*BhihRWcFsRo2k?jVq|mAS;N|TNUNI_&B=Ymnn&JH;jx-z z4rVsW6wwp8XtM~jh7F0H(XJ<4;N+m^U@ZC-cy04xxXx)x7%#ZTVPZMykUT$O^K1=U z`!h2KO}~dT=oL{PR?OA|zsK65?L!@!%(97#Pg)mX9?Ut9yhG&INaWTnLpqPl9JHc6 zi8_z$=O(}O=?J0(|Mf@*mvz!;a%!6_D>=UuUk}!`TVO^1AI3zVf64vdmz|x=oRnXu z9;%rlRn+Q1u=vrEN-3PU+yUK*(JI03^_FnCF=oz0J%ih_OeWAje;9v(zR1jM``$Ba 
z^JzWMh09%K_V*oG%mQuOdeLX#u-sW9^`r#mcpU;!xQ^1&e4cJZeLghq{aecTN%ZM2 zul|C=+zHK{r@seq7ySZB(zH!28AGO2ovnCCOz;LYBwS~;V8c;x%i0CI* zAIx_N?q-;{IEV-{?MRVi?C?G}Uk?*mh=?@tE?;65a`46D4_8Doa~rRC+Q_+WYG^dy2hDRGv7A*swCIzA1X;dlV0jQk+;#o~)8RkzCo)X2~5 zT9#uaH)daTwGqG_phR;`H%P-NoLRzGtf)bX(n^vzjgcOfm*bN1?o58bbj^z3>%oj4>6w27DAOwo z02PHPSmM5cwh+L3+s1800uZBzH=G>8aRz8_t)s>*Ql841o0)|z?FY^lIyX1f54?!) zSEJ0x?T$+%xe*ljKw>O&8jKsxwqyu`Q8O0@4_=9d+u5X@mOQk)pPc$!aUA`8mYX!) zfOz=0Pe7i``NPIA%L4GKntifOVp6#Ga{b?`&a?aP(PQ{VC=Jft50aYd65m)4CqNYH zFQO5WL}#Y!$2Y6}9Lk`W&49O5W}b+2IEJH(ORn}*XNA?3otDk+EOL&+T*7HBP8kTw z-pKN+-If1XXoE%!lgRl$@Ka4JIr5DokH0Qv$K4*JXSsNCYj>0v*|}_$nkG!{?Wc~{ zK3FA!{bjjS>H%h}{LbUu45)Q~FcN0!6nHgm4Iw#L(FB;$W#-{%?d5cv=Rz6?8jU=fJ2Qjy8M>%cca~=!gFPIR3H1L z_^kb54*k6U;_t?~&xMb3BF{Vy^(UC_Hu+A07ek~*CfaMEeSsqsf%?c{zi*}SM#iYX zuZ>ZStoiJKIJUXr!fGdZyCwhHAaTQ>)Xb!gZ(iZ`7WiuO%OV98e>)4(I^w*-HBT4t znmyx>Mp>FLg03DLCGD4RQf*>-#a6}N@5wOq93=F7qkT2#B7kI>yt>8# zB!@iOF+roqrTO2_M4HX;ay4qd>c+!+N;+#$9|8QI2dF6)F2~T*Q>qQ+6G+Wi5e$0g~XyMHq)YIIKnBwI!+uG~KC!al$ zks=8q28*I7c?28|K?bGtEl0&8EPigeE~7#&S`nNQ0NI>1hYu(TEN7K*g-vpg-lS>I z8Fzw^YL5vOjr0mMOtY8Oeh<%kvWP778^aj(!bsne5r&3^r~X~@7Z?1NVTvI}_q`rP zJs9~!`s?6@pNL_A6Wl%O@06BA6o=rbO~b%EjxmP}hCoqqR-|_ zr>cCM`X2a5baf8vb8~Gz+QCN>E2);k_?SB;s~qb@$rpC%z$rxBnRK(1_y9n#A zCz*VzEuc_Pbo|-eFjz5ZOh{_9V2=KMCPCnapii}m8g-Y^jJ7x}GxVx=?3b1o4E3I< znrH>vhCjpkE(~-a-Iu}K>D8Je3s{jxJwyDN{0Y%Sbtz;XB+MXesZtt|rJAd~C*6Ho z-NxKAMv0X0hy%-#Q}2DgA#u>N7wQZJ(s6j9-wDL+6ONCSH(B4sSRaB$I~?x1wts~) zkGDi;{s#ee3-nW3LK_f-LvX8}d*pL?6#8Gvu5-Z>gI*$sWRITZq+c%QL?tw5aP0X0 z!&BqKR}Q|Obd1fdhZ;duMAoFsV#^N!ekgs-vcabBzi##5iK zL}9GCcTG!`Slc4pn{RVmw`r_p@kTEcU}j&x@??KYs$Je!sot|G#jD!Qfbg0S?KA#k z$9Go1HR*HKnb_xU6BQ|!jRH2zv!;kcbtwYw$ ze$+TSB-*0A3vR_+zu4=qkTM6`5Cgc?Vu4jsKI-$#d0fxCO^MXy@cG1fxjuF4>m z6!S<({3=7jvd@pfmrfbgf@r*_Vwqke#ardA%Vy0VjD~J6DDnd{S1ULdnZ$*wLO+JZ>1-Ix#p1Jr2he#-T!q!N3^$ zUJe2fRJsZJR%AeN9$%uSZZbx`7^72XtZY(SWFi%XQyIEIKN%Qsx)xwbs7wUsL^Q1v zpi=nbP&y8NOudX>ROIJ!0ah(!D0s!ZSeMMS$qlLOCx=LL06gZbb0uRF1WW25G(}^> 
zp5Y2t`;BSP(%bntPe-X95^n^4`m!1}hsigEO&hgmK6 z&`Ro8_ubx4-3;UYhrLOaB`QTAP{ay-2@C1p*-g?;RoR#ByCwE8gs|GS7V_$ODB`DL z+|;!3FLDB2iIJC!Qjoy5fu2D>A&ko&wOPyem{;z}XHV_JB=i>-zEv)FJRZ-R-56uW z545gHmb)K_2oktDAcsALslbZZr-(B!J9t@+PBoSO@>7S13s?L&iWaLF*x!h7q_;4f zJIbR{d$}b|Lc$xpp5+b?w!*wwz0_dZhAt!v+sW`?JeV4MkczII8dJQwj`eE3>2p4O zpn{rLFnDQ6x3#Qj)$+Oo&EqZwOw?2A(0@ zcAhkIXw+!OGU$IZk1gW1{6=SmEzf|TPGB;b|D zF41;e6fMG{rgD#DzG@F!8KK=N(bAOvGdX;FN>2>FH9!az41IQb-um`tM077kN8F(Z zL^MAVhzx$C2D*U`d}xuVE@^?!1A9MKLNRuHM?q`s2*5%xx&SAilY5#F$Nv_@`IKmp zG(aC_f^4l8ULVp&BB3LNU|LKlzHLn*2a-J+0vd2Mg~+ASk#PR$K4I&OHx`1oN#faz z`*$VoB;y;}k0$bu9nuB~mcwGn1P*+M6cfLGr=ea+f=B+E4^1K=nuXV%Mr;cl_Wbiv z&d6YDw(po{zw6X55sMY#@iKd6lEf!Wn`;7kab*PJD0*o^4d8t3s+9=Hum8DTX3AhA z_06{v%|{N9N{FW#${m&MQoIVOvkV(51eXC#0~LM=5@7u^C6|fz2Jj`kAb(dX_#i27 zGEv_#;O3JaQdc>gh$j(vn7?HF`{f&^J)W!y7D6suwU{C8hfj3S(C<4pMw~-}gYQZ^ zURcu65fH=!M|V~;sq2(F2!jgGl?^q5$N z7RC!Nm?U>bV1RRqKu&_$8-rrXrZm|_MKv;~G$XEkc4xNA^ttN|8m?xZu!mjyVxc!} zeEZo2C;h>CP4_3hH>32dNgKl`5N)ha8C6OTv|X=mWKjUgajgaXYZ3~Q6Y2@f^Wgd? 
zo|&AZAJdrhG?%A-m_@j-8L$d?81^xCLAXzhm`b}v9Ruf)+QT|IyzF_=5?y z=N&=@r0yCij?ilEV0y>W(28Vxp_K_!2fGHC84)1GYU7z*pkc+#Wbs({D6E*B^RMaB zexKss0$w^QwC2&O)%_t`WD3vA^Z(eIPFu(1Q6;bEh-g1RPhxnF(2%5k#Q-qljI=o>a_qYk z<+tmM$tR|f`10+gfIs{}jaq$k*|Il21CKzP=t4;Ug1OtfC`aWe2brsB_3TbweLorr ztC)1^ju>mN!AVKCR33|V`0*$Cf#=e&n1;ik&?eY{Iib4IR*48kTaZU(rD;c4C5ZZZ zTZacIC&w&1!I3h+DER3s?c8J>|1zm)LEsn<9N=p+l9EWL{x$GL2vjN&Jtmfpg^_WC zznrtLo9+vK9RiSWAjF}fAOzhsdyo&*1-zlqvjD$l2mL_54}2p43AD%9T0<@W) z#UGCezvUT+2p;$x+Au|{RMutiUZ^LR*TQ|M2YOo;G1)^t(#LWOG0^E#41e8Q+dG>KHf{j z(&ex6wRRV1VDqAchNU>gyAzRq;Ye{F)Gx+?^uT9PZ~EyKpFABJVY?1~m7Y`FONk%Z zl~znQs~}Jk5XpLW=Abgc3Ayk`w#meW`fdtUx|CiqyMjK|F{!8{MlrSG0paNqRKi`_CEB$gv3l})i{pa#+EtuMxyrJoV?qH2gx z#Fd_6mkmxV8s*S_MZZD@s%PIo3qaXatLN-fb_CzRW_PQ}Bfs}J!A$m2zZhxTn)->L zsHp1|;{Iv~hBL$T_+mfdw%Pmi$=!fhu_{5$@dp_LS=MV9vSib zdk0r2#aFq9@9e|%yU2B;v;OK{vtfc6D(Q!X*&}JkT=|)Jx5g^YUpA<_0!==F_YvSC zhZ%NyH3ZW}@8L-_3ecQ+0ChqO>wO{tMrX}@Oc_WrLO{O~2hb@^f>c~K-oqS}7Uk$U z#EWGQ0p)gVzS0KVCqjoLM3#cwVLwe_&*K)I;XSmQjIjz3wE*OOr~?$4bIkI>JZk$b z6je`e(bCX7P|~cg2yDo@F8}K%dYBogh?({;m>I$d&Q?;i>DP!Ubtu}faaLPUh{@lo zO~FNJiQi&XH7p@CJ1F{6jB|fsew*aSCe+XbNO1tqd6+?Hd3f?cS<%`nO>aNJawh-x z6`ORk;Roh>l!i&FrC&=GDPUpi7&OdcwROM+!wFRzu`8Ibv4d}^hZxA;2rt>O640n% zlAQm8q+_d$nbvu=Hj4SWs)51*^Itv9hhE6`%CcgQ>a3(q|G$oD*g9eqNpmV|yLAv2 z*)E1Ec4w>YbYUZM2QIkwIRDLS$}*9b$_IQoNjK6{Bo&DWQ>0Ab)aUQY7r{{mM1LH$Oj%c$f3R z|39dgc~fFAC-6?OORoi4qN*Vxgc4=h zH5DTuM7ktaxzc)R>$M8qXJV?G1a+ zB0sjDRYae&;f{b-tR)B$3{U+^S`a#x424s2LzRp>Y+x*nj3w*qM=mb>`u}X+kJLyx z)QW~mNo)*JUdX@{$MIRqy0^FYUwdh6^eq1jx)NmbM&~ayj>5J79RX;+p5!qn4bQ6y z#r#Ju`&w_MKdygy`?s2L;XOu?mmp50&ezvidk6PGkk_7=KhAoF*CD6tJtsIb_IFm=?WH7%Ma=0B*ym!dx2?0>78N{O28Bpwzl(6Glg@L?$@6e1h{vf2(&> zfLS^tEIwlVOG7tY<8a%n{Z3KZF>@TXv7NlIMbljec{@?&Bo-$^DNWqOxjhpUS^H@@ z4~%>te!18=rad+@AYT96it|I^Ex+efi{^ZPFS7B~RG6aW)Ji7rr?f&oG=fO{n3qPZ zJv3x%eRMgpR*zLYI&>SK<=mr69(iSvYK8s$FUc& z6hi1M83a5IC^#T^TAP^6h=5HP4u0wx%(?|A6cNP(kA&fQ=a>_bZ&ju#hYMJ2MZ+Ta z=kQrrME4>eT(IOvyw(;>pJN)zo|}mYD5da8dZsG9 
zhsxe`z)PX3Dk)wERe&Df1#Dvq>_(Qe9on_(!M%gOwL&UXyt$D<_+0*s zR0pSzf!inHI`x6m`Um5rWqYpFJ`m-s2()EC7#vw-NwO%%S zqfilAIDIR9t^nG%P|jIDc5=#|-76N`S`|er9~;XV5WDkWLq#b@8BEg1x9W!Sqs_5K zX=`iM*e0y_ic(O_&Eee(Dm;qKW)Q?Qixu)48y9AIwbq4^a70RI@f5HpOXRsEZzH~q zP4Qmj`1q(;u?U!6@IUY$zMOj>=02=i{E(8H&GJKWGG?H5adxAHJ(QUrgcta-;lTtM zRR~k6@>)#hIl~zkk}LO8-6Qg_uG^zszJi&r%8?N$HjaCN5lo3!SHyfQ70=+b# zOxL=>nj&y8X|N(~L?;`##Rn^iN-ptZJQS?c!>8lTGs_+X=dT^5P?n}n+2L#a zUs*jURphV4i|TREJ~Ms1k++gtIk9ajG5$^+t|HIsv>u^5gLUT|p9R>y*ryP_WYu`q zsp}F9+Tpr3HKN1g!RJMZ@nudS7n?38t-ZXAV)`Ri>Pt(nY$Y(+1mgc;$+fvjetR+W zMP_IqI=4Lavlg&LAiN2pvudu*{US2{=Bi6HmI46##PG(ApBA{FhNvPuzzr+Ye(5Bp z_L0@k`;Yg{el>`=KIhtMLE;{o1~K56jI1OovZXti9jb(_;x^TuT@~5;G*9l$MryM11#KG35vk zS8d*DH9pjk%@Q=rEmkhnIX!58nvVm>iBqK#W-5xaN}dn!AJJiCf~9axN;nYqhG=gP zc%}Fgm!~%_zRmj|_PVORyi;Ff7R6!haSbw9Q^TIq73@~(Y=O+?2yufyU?8Il$*X^P z%{^)ywABSH4}GUzm5wTcegzBYo^7LUWglP(O6Mz=DG+FkrqjN6t>xZ*H8HJ;Mbd>l z_-q&4!O&wW=v&8d0^`{Q{SDM<+^xkd{x~@yDC{3F@6tvTq#VWJ2$%LmG6ylERpe^D zxisET>(V3OLq)Qq!iv{wX`|bOLs~iEWOo8xGIWz#$AwT~IUmWw z)peI#IXY*+DdppYxvBrhG!re;d5F?{J^%nW60d_0Bh^mdN)&F7JSW31pqcCsH&8!& zhOZ=6#S6o7;VsAF(zRMX(268A_qxfBd|kM;ZXd1^_X{!Y%^Ms>Ih(>S=7in-eAb@# z^ELrCZ{4$hnY!yL34+;iqoXxkT4DUy1vzvxg?Phb`KE$j_QKC`c#~; zKCpMB@o`1e%Vvw^sZTg56izHBnX~4j{|6UAmAOm|U7#y=nIW@Legy1i7_SJatl08a zk$?1K_l5n*Hy%$Gl81IhM}+Drny|$ilDH)itON+p4vlUR_poRp$>~_kyk@FjDPF@j zICF)Ej7P?xD~sn>@;4fB6bq(qKpxFmLu)QHDAIrE2xXqYr8C=Y(n6K|jhZdlEK$8D zOuxtb+)##;ij4SwWJvpQF5c#X6X^EvXwn1#}Ki0-R-0D zC;$_4aqjQqjG$>p&mOzWK`xa1m|+sJSf4K|<(f3PWta)P%=9SbQwF;u2rq7 zAbx+Q{KKP}X*IO99wIEIE0cnMwgpk`cCS)A#t&-5n9^mOSK{^5kEPU+pwUorG+n8i zEDr*KIbiMJ*C4d5hy53%nNhC^W#}H)y#ZVR)M9ai2J8JDlfO1)6Pwa%Na=l>jPR~D zWN{*vLsbbD4dTsfEGmV3M#B#Le1IOYXL(x2!24ah0wi4l$dMH)e}51&`xQ%(_tfh& z=hfPOrYt*i3XBZ6$8E7dd2HSIomjl@)@s~qgc~P_p1m6jVG|?bj=5*%X#IL67V*O9 zVZsCxlu+AjnRg8nO%no#Y7K9lPK%Xmaq_I2LwBS-nZ{XNYIb$~7-}Zi$F3v0Xk&Za z8?p}5XBw;Fe@wwB7%CY=6C~Kzv%|p9#5S{9OQ9irqHWy`h{?pnMC{x_6Dn8lKCU)o 
z2^89UU)i2LnMV3tE_qNa)4O&Gf1c}K`=M)9HK?SCq!?=iR%cUcsuf2ijz`^(uDkDNi?s-lBIv@iG01iz zm7=VyiKc`~djzZ<)D!kSKu&JVd23;0IaEf88fYH$&<6rML5|+!-z4p>1w&UPVQ;|k zs@J0zQ=a9#Mg7q;>{{txEtygSrzContainer #1eth0172.16.1.10/24Container #2eth0172.16.1.11/24DockerHost #1Container #3eth0172.16.1.12/24Container #4eth0172.16.1.13/24DockerHost #2(Host)eth0172.16.1.253/24(IPOptional)(Host)eth0172.16.1.254/24(IPOptional)NetworkGateway172.16.1.1/24ContainersAttachedDirectlytoParentInterface.NoBridgeUsed (Docker0)MacvlanBridgeMode &IpvlanL2Mode \ No newline at end of file diff --git a/fn/vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.gliffy b/fn/vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.gliffy deleted file mode 100644 index 40eed1727..000000000 --- a/fn/vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.gliffy +++ /dev/null @@ -1 +0,0 @@ -{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#ffffff","width":389,"height":213,"nodeIndex":276,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":5,"y":6.6999969482421875},"max":{"x":389,"y":212.14285409109937}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":64.0,"y":36.0,"rotation":0.0,"id":216,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":5.0,"strokeColor":"#e69138","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-12.0,33.0],[84.0,33.0],[84.0,86.0],[120.0,86.0]],"lo
ckSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":190.0,"y":32.0,"rotation":0.0,"id":254,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":5.0,"strokeColor":"#f1c232","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-142.0,16.0],[54.0,16.0],[54.0,115.0],[87.0,115.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":133.38636363636374,"y":108.14285409109937,"rotation":0.0,"id":226,"width":123.00000000000001,"height":104.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":15.147567221510933,"y":139.96785409109907,"rotation":0.0,"id":115,"width":107.40845070422536,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":29,"lockAspectRatio":false,"lockShape":false,"children":[{"x":31.506478873239438,"y":2.4460032626429853,"rotation":0.0,"id":116,"width":44.395492957746484,"height":29.54388254486117,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":17,"lockAspectRatio":false,"lockShape":false,"children":[{"x":20.86588169014084,"y":2.637846655791175,"rotation":0.0,"id":117,"width":2.663729577464789,"height":24.268189233278818,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":26,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints
":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":120,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":120,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3318647887324033,-1.055138662316466],[1.3318647887324033,25.3233278955953]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.84825915492961,"y":2.637846655791175,"rotation":0.0,"id":118,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.8875219090985048,-1.0551386623167391],[-0.8875219090985048,25.323327895595412]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.103278873239435,"y":1.230995106035881,"rotation":0.0,"id":119,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.2752008616871728,0.3517128874389471],[1.2752008616871728,26.73017944535047]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.5827079934747048,"rotat
ion":0.0,"id":120,"width":44.395492957746484,"height":26.378466557911768,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":37.199347471451986,"rotation":0.0,"id":121,"width":107.40845070422536,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":28,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container1 - vlan10

192.168.1.2/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":68.0,"y":82.69999694824219,"rotation":0.0,"id":140,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"


","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":71.0,"y":4.1999969482421875,"rotation":0.0,"id":187,"width":108.99999999999999,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 - 802.1q trunk

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":282.0,"y":8.0,"rotation":0.0,"id":199,"width":73.00000000000003,"height":40.150000000000006,"uid":"com.gliffy.shape.network.network_v4.business.router","order":32,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.router","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":62.0,"y":55.0,"rotation":0.0,"id":210,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":5.0,"strokeColor":"#e06666","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-8.0,11.0],[-8.0,34.0],[26.0,34.0],[26.0,57.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":12.805718530101615,"y":11.940280333547719,"rotation":0.0,"id":134,"width":59.31028146989837,"height":83.0,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":35,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":64.0,"y":73.19999694824219,"rotation":0.0,"id":211,"width":60.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":36,"lockAspectRatio":false,"lockShape":false,"graphic
":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0.10

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":65.0,"y":52.19999694824219,"rotation":0.0,"id":212,"width":60.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0.20

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.386363636363733,"y":108.14285409109937,"rotation":0.0,"id":219,"width":123.00000000000001,"height":104.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":139.1475672215109,"y":139.96785409109907,"rotation":0.0,"id":227,"width":107.40845070422536,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":55,"lockAspectRatio":false,"lockShape":false,"children":[{"x":31.506478873239438,"y":2.4460032626429853,"rotation":0.0,"id":228,"width":44.395492957746484,"height":29.54388254486117,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":43,"lockAspectRatio":false,"lockShape":false,"children":[{"x":20.86588169014084,"y":2.637846655791175,"rotation":0.0,"id":229,"width":2.663729577464789,"height":24.268189233278818,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":52,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":232,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":232,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3318647887323891,-1.055138662316466],[1.3318647887323891,25.3233278955953]],"lockS
egments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.84825915492961,"y":2.637846655791175,"rotation":0.0,"id":230,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":49,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.8875219090985048,-1.0551386623167391],[-0.8875219090985048,25.323327895595412]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.103278873239435,"y":1.230995106035881,"rotation":0.0,"id":231,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":46,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.2752008616871728,0.3517128874389471],[1.2752008616871728,26.73017944535047]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.5827079934747048,"rotation":0.0,"id":232,"width":44.395492957746484,"height":26.378466557911768,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":41,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":37.19934
7471451986,"rotation":0.0,"id":233,"width":107.40845070422536,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":54,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container2 - vlan20

172.16.1.2/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":259.38636363636374,"y":108.14285409109937,"rotation":0.0,"id":248,"width":123.00000000000001,"height":104.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":56,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":265.14756722151094,"y":139.96785409109907,"rotation":0.0,"id":241,"width":107.40845070422536,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":73,"lockAspectRatio":false,"lockShape":false,"children":[{"x":31.506478873239438,"y":2.4460032626429853,"rotation":0.0,"id":242,"width":44.395492957746484,"height":29.54388254486117,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":61,"lockAspectRatio":false,"lockShape":false,"children":[{"x":20.86588169014084,"y":2.637846655791175,"rotation":0.0,"id":243,"width":2.663729577464789,"height":24.268189233278818,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":70,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":246,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3318647887323891,-1.055138662316466],[
1.3318647887323891,25.3233278955953]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.84825915492961,"y":2.637846655791175,"rotation":0.0,"id":244,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":67,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.8875219090985048,-1.0551386623167391],[-0.8875219090985048,25.323327895595412]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.103278873239435,"y":1.230995106035881,"rotation":0.0,"id":245,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.2752008616871728,0.3517128874389471],[1.2752008616871728,26.73017944535047]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.5827079934747048,"rotation":0.0,"id":246,"width":44.395492957746484,"height":26.378466557911768,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":59,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"la
yerId":"9wom3rMkTrb3"},{"x":0.0,"y":37.199347471451986,"rotation":0.0,"id":247,"width":107.40845070422536,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":72,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container3 - vlan30

10.1.1.2/16

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":65.0,"y":31.199996948242188,"rotation":0.0,"id":253,"width":60.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":74,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0.30

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":44.49612211422149,"y":17.874999999999943,"rotation":0.0,"id":266,"width":275.00609168449375,"height":15.70000000000006,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":75,"lockAspectRatio":false,"lockShape":false,"children":[{"x":68.50387788577851,"y":43.12500000000006,"rotation":0.0,"id":258,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-31.924999999999997],[197.00221379871527,-31.925000000000153]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":68.50387788577851,"y":38.55333333333314,"rotation":0.0,"id":262,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":70.50387788577851,"y":40.7533333333331,"rotation":0.0,"id":261,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":5,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#e06666","fillColor":"none","dashStyle":null,"startArrow":0,"end
Arrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":70.50387788577851,"y":42.88666666666643,"rotation":0.0,"id":260,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#e69138","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":73.50387788577851,"y":43.95333333333309,"rotation":0.0,"id":259,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#ffe599","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":248.0,"y":51.19999694824219,"rotation":0.0,"id":207,"width":143.0,"height":70.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"ou
terPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network Router (gateway)

vlan10 - 192.168.1.1/24

vlan20 - 172.16.1.1/24

vlan30 - 10.1.1.1/16

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":3.0,"y":88.19999694824219,"rotation":0.0,"id":272,"width":77.99999999999999,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":76,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":80}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#e06666","strokeWidth":2,"orthoMode":1}},"textStyles":{"global":{"bold":true,"face":"Arial","size":"12px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1457586821719,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/fn/vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.png b/fn/vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.png deleted file mode 100644 index a38633cdbc23014364bfc611d650b2a17dc72ae0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 17879 zcmYhg1yEbx7cE?9f#O>wA2N=k^5TU; zo{GGTp7-KW?!7UgA^9LY@*H}Cd+%K1(B{(ASh93{t=X3Dy466wQ~@&Pj=v%}BW_Tv zbkUAB&>_O7chr-9U4T!T_5s~WI_wo`Hxeo-?Y#yLmK@UoPhtsg``T`mL=N9B!-bsW zgx|Q&gpY0y13~m9EY?WzZE_R{@hs0lpRCMaMHVgp79Xn2Ku{Ma|No98>dQ?kYv>Nv zsWm8Z-;KC6HhR6Lr!VeNd|tr%VOY^m zkwVRr&<8Req~^;ok(4~qc8%?}ztG$_^;BLz`tzB2Oc({;nbWga)0iI=_!KOa4M-C_ ztTazF`Hr*|1=0#(;FjKu?@{<00=iad-;Dj0b?Kb28_=j{!D5U#_8Zd^0xL57Wr;kV zW7Yogi>A0O_xXi?B{b%JsyPNb<9T4@0^Ua9m{`f@Ovrp&86MOYtf-^It+zxU^_2T1@9mNroUAc=~sP<&MwscFUeQ~U<|7u8-^w-37_ z?-JEMYVoHK&{lO$cTS!yzsMU2rqz<^>|956zA~~i8KsrX0scsyr~V=kQBHaJzO|cU zTzmPi`1-h;toXlFvEO)Dhttdt&?@q>Kd)2Vquk7t1{Yd_oSwgUmj1whTCDfD33ck9 
z8RJU_(H9rX_0l_CkUw=EALt!#nWmXQAB_t;pO30$=Q^sZZaP`|Lj;nj;8ja2!7oV8 z60r=%w|IV$d3b~n<=QD^cSzQ7Vpm-Q+IaizNe1w?1stu&`ijkMd>wcd20ZSXmE!1I zI8Z;Z4{&eE;orfX6?=uD-Zc^jXFe|_NhE{gSt~z1m@cv{#2Ot7Dt%%N{W=MM*IAB6 zO*h~mm)cFI9OyBP>x`Y6Vxo^NBYlu5a$oT%!`QXsCSLEni?ZE5>9IAgThC|y@b=K) zup@H_BIEF_%5Ob&C2gXbHMgR~wd38|>Y>X)pg^&S(#@Qp{`5}_L)TAjPNKc3{R_Im zSTx*!+wnGYZWnBY3_>`pJ=Dj!#FUC~K^{-G#oe3eQeR|JSmIRTf|KTmOp)u~kKwNy ztr$AvQVd!r-YN-dJzN1!UqQ7M4I_mGlQj}eH&TSJxxDcg!oN~+fq?KYq*czb)8De< z@bw6WQp{wZ*NveGzgX|D9yD6qy=^=-BQVoy8O4?}f=3qpAOvaoMw*+Mnf-ufYlTmy zHG6OWpo!J2Z1iIa~O*@BFdJ2=tR+=&$w0&?W~!n&Tou+fz?XbS5x z?)E3!mMk5G5K4sx%>E`#WAZ0sl3d^Q$J@mmV$t>0g4BG4_>bM64d8oB1YJ65SI|}XnodMBgWbaKzxJQI_?a(SC6hSaSC6PjS3Sys<~Kr(di<~axaTe) za%Hw;r*#TBwCP%3Uo~;Q*)Af+HL~`&f5kcw9!*fzA8$9hki&vnbf2gx1HBGmr|LM$ z`@(Q`VlT9|BD@v0Osq%F*-}u*{5BzxKyc2M9V4oaBegp$`47kKyy)DwQ~J0LHDgA< zWb{EidaHNwHT|vMa~t-2Gd1GWO{fiz5JM=PAwnfkh0g9Ip9Y|WhQKM@UhRN&zPk@N8r}r7J%{fLmcR1XAlPz_PNfc zN|&SLZ2SxtKn_ed>cZM?j3(z@Qm?d4)VR4vJw<`8@OM1x#Ov9;B4#0E z4CVXs=_V=k1>gaGm2iPJZer)vq&w7Rk~oroQIV1gDlmHf{dclgvQ{qtxXoV)uLOSo zX z`CSXah0|4bZk|}UxVTK^cZ*IjP{%t|mFVyBDa8>G@=YnA?} zgD-#oF`fg9bI3JQN;mGX!GD)EonafCDf15VB z)D+V|i~tOxT6z3HYWbn_=c%>V)#cXDDJOGk&}$O{_XS^I7lH zZEVp1(PsD0r`Rw_kove#YX1(JL{n_7*`@jA$u>0>>W$>ND>KqKq}Ht`T13M%;lh2>dyc6VNPYTEIH1cr}Y|Ey8a zfu>QWO3t&dD+t;#Tx*{x3V3SpLH znu1t~d;FK-b@fQC*lQb(x=k21XO9pwH19Lwo9W8IV#p? 
z)@G&1;m!p8%7LJ(odTd%R+6>fYmz}chCuq(_Lj@nv-j4-x4~*4?9!r|4BSn}f|#r} z757FHa_Ywi5}df-E^S<|xZt8dV3W^5h}cR~{!AaF(d@Kv{RC)ZGv(ZLcp z>sU1pq%7n3Sj+l@fW>QXStG|drHMNll$Zv(q8sCI+I7Mt_aIJ2_zUE0RA#$Sksm8* zLS{KD)hPk)JS<899bTtfq+w`e_-!V{Oi_1@LNp>r$i|;qZ}Bt(@>wcUfsK!QZ^9x9 z_e;2Q>_Fo&8N^H-8 zA7?b&6>{I6&U#vRSdcO|)ETV+wGWAZjIbLuHj?RRL+P;Y)3ybw&ihi0u9rr6AKMkd zba9PkWo4O&L+UclrXF?+$x&-~LB=;Ky?qNcSMUjAX|ZzJY{J+s)~5OZUeu}bcM`9>7?7d0=Zy6~Y5~94 zHqIZ;oY`_Syah{NYlV+)xxLbXQR{atN zIkMpwHgL#z4=J50!Dc?&wXv>)7$HMBt7CZu2jv^_Kczn7(1g|>M9{YLw$Z^$q18?$l*f4Wg2Z`&Fwa|Jt$vzDtSl(R}P5tzI;DyAEYm=!kN>vXVM zxLuUebn3b{#i>s;%+;=b+%mbjhkDqg&Ov{*dEWQmfOm%?U3FWGbGg9R6i{sE`9>gklS7l4J#-RNg@sMFv zFyxqk_j_}|yMn6^V6`9&O%fuBY+4vDglCVl03$43_eIrxG5UqsL_NYGr@W}du2sAA zJIPSItk%GGA%bI~tWpaAC_?AG>x01TfDfNEe%T!jTL)_W>F}(oo@^*Zr>gIt#{R1VDuD*$^ zS|^}(ASNZ!>3VV@mM#QKDN4F}YVQpcARb++%x$%xZut*r`?qmc*7{NQu*dPp?ZHdC zGK#U56sz=K8-&)azkSK-oU`JM1VP{p1EX(}lUClsuKt+=C$BH(#vn=zgRmJ9#-%GE7dn}3|Q3kb|bt{!lG9d=icF zdhScP!jO%;3j8_&aZ$v*7%YL#;q=;{czRtFkMoHc2TCv-1?mGp})8NX4wIBz#d+Vuzl-*M#%(@81e)fgR>XUs^{(gVPM6UW*HV=EHlZO`8neF-+F z(hW;?u>OfdAFfaBd=)8hlv6)C$zs8c*(;jvPyJ+{nks%9>~hZ{=w=++HL-^rG8)8P>9tK$gml8CJrcltW;q!$snXiI{NYh0I8LFyWkilaW; z@n(xXY**z11v*RJ#^7q5{uOjWhH>_QU#F&-eDbtO;{!f6^+>2y56M>*gcftib0;aqY@-M!r#9+Q~ykQjU9PcrZfBmixm0dA| zhBYEv-$N4$8B4%5A5C%1tN>`RDQ5z++>P}43Hn@7$|t>C^=!&3$le`FvShp=u8k#7 zN{MeDNG_)7ywzuBI{@ej5#4bW-sm=1J7#Xb2>~6(o2GPgJ9sxse2D6Aj52kP@q9j* z@+i(ceOU6>SH_J~3H_eldKBhBK9+Tm{KgRYTmH-MquX>Ta8?Xf#F{aE;)lhj z5!JBC9m4*o$!R`{J7JY@D{}dsTJ`%eJTtIh&n1BGbQ*d@tGTHdjH%$z>l+@Sj*-d` zymFLDSD)iZNZ@Bf+=#r`t+hilrAJDneHKuurwiYSXq=pOt$0OS#H6qBhAhQK{u_xbl*gJZZGdYFlkn>Ys&Qn( zYVG5UZw}feXdn&wr!XSdi8}R6Tw|RQ&9Ym6-uHE*0$l}4C*OkIBu??oL7hwdL)IoSBB*R8Npahnzhc4b!tgvLS2o2 zTBl>?8SUah1Zwob3#$ik~fqlk!&e$)ydh zHnA*+)`U5eU8uTZPe!Brw&=-=HbvV(@BX#&|0+KYgLt&BlwC_xy0skCzX znZLO9skp03F}sKHN+&)nN#%<2>#Mvd_*H#E+^iZv0DQ~xx=X$91zn~-pRjz7ygbO< z@;e~+QgB-%6w|9zCs>}dzzLO@ROrm{h(z|W}+I28RQh#bHKL>C$FP( zysKRD(zYLRc|*-ixJb>w_BK*rh{@oK(-X?@sUctdRn{kr-XU2h2^J)?hId$t!WyO> 
zkMc9?-*Rb*8+%?u0eP$@UB)8jSZ2YU-3L|$(|n7;y!wvKFW0i--cL^O>lgj7YbrWb z42H0h71M5^Gv$0#*&=2<>CG;7au&oY_jAvE{5hMchsalPrjTvNNmS+#DMY^ataq+u z3rh$K|HZ)8_jZGcj5@$k@_U8PMk%!8B&=#ZhPQySrAbD#o`r@v!d!yVh6f+hE*Q^d zI*5Hfp8Q*dpT~zCS+a+aeg}pQuvOqft9ANcf;zSFSI{u;Kd4vJZkfm_%vo&75ekrq z8C{%Xw`uX^w@f-Jh6Ney?6W-R=wQxrwrmlkqBft+F-p4~MF(Jw0uUxr|3$#GLfkXd z^SCZBT{q~*+D1`bAPO`LvG0K0Lh;%i+&qAj9I~?fSOA5Dwo+$2xa9|i+5vB8uRlCP zI-J}J#@zBkU2d*+PdctPS7)>Do0@R0rM`#1f@+2vJud)l$+T@PD*v9}e{SPgYKcW- zdM;)b+%>wc?Cyk|eY!m0Gu}DPsXxA(d$QOq;oitTot7=Gi^IO=6@UhT@?39)uPx-1J`Os4e9@z2rmQrg zpVTmE^QR4wVm04w=`l`?oI})yow?Z+nMs(KTZDBIVxrWM(TB-70AfEliWk0edWuNq z_uDOmr36-nH!}F9_7QLpa)<@`J!fhipPE#LgRRNjnI@;9douW>DUQ!fr5uM@a(H4G zK2~B?@qIt5Skh2=qiPEWWOmA@M9yMzDaXgasOsE%6N50@Iz>@nbiA%E{RMOnWo)$e z5~j|PX&ZuHp`C1p5(1^&=o=e`OTUHQB1m@wMWfeb9}O`1bzVGtEy32|Cgd=#b&f{Oy+ z6*O<2DB!v2`F=S2`Res_Pl3Zi|3a=|UUfKXeVj(wImSB)Iui?yL*B?2;5va-VS|iq z939WEAZONQ1po}Iz<7-Jdp0f$4-8xRD7?_AwqIY5<>L@CGj4tkx~4$NWc)%sPt&N~qvk=b27kYF#3R=f3R6 zjhe(b)T6Z)GfZq-;9;tOnuk{Lp&c%8)(+7g^HwInMm-Q+&W52CNV53x6^5E`lg_B7PDr0APkyIFh-025+IhOK zYF&SH_mRT$qg zLSFMAkW+789n3H3{Ef@PhS>F}E5GQ@`t6lu(_iaVfcU`p(Nentf zA)7yK1krNUjpj0rN!h4!U z-re*S;|zDfG~im$Vcird>%{i*a2RwKRao_B z>B|cbi7kId^Ck5$qXpDyd~)snpTS>Wb||4{PrjTCM%T%6?i}}#Zk;XD*SlI~FyJ-w zGo-~ChiAPO89IyC9CnpbE%gC?RBQ`~8rguj5h$5edOsCl;4b(M@?wze*wp(B21On@kQuRuP$T zcaphyj@XJnKaL+G6ptvpT1FCA@qE~5g&_1)^5Knek^z zK+{*rErU0tCTxq|A?-;pFu;j5Ftg^x<7WW`G0L{>LLYFBcqMn;;eh z1^`+c@er_h3gHM<(|+o-T9#*bNSnd+L zQcN&90+Bz!+c@fD0ARjHi=Jb#m)9RKg4+w&iQ!!)kgIhDg1em?gld-MPfY@D{wURG zGO4*N*h%?q^@QU#HX`bu*1$VSGAiRJ-T;6!$o>U^F!hhUR9QqYnry0u2Gv-`SHK|j5pYZb;ubgZ3QGOtNlN&%6+S$qx&Zl25mP znzw_lw_}Ifntm^_qAaj1&60F^f2p$oa{2>i*Hi0=dGq5wn(+25p4ZKS`M@&0gkY9q zeZL!S^N0lhz41uR$Ka(gx1jru8vpG@>qMC;NNm0F+I=2kgMUp$ew;w<(L#Dn$?4kT z-zn~Ex0OZs2JQv7|HhF0i#Rt7Fgh4L*BmS%d^;MRFLUu>`l*)nwkyn|PxnDuY zGpO}ul99p=s+#_DQ*_U0xa_;JpA|~y!=yR5YJeW#L7)!K#S$Ke*a{m*tlh$Kx*4J} zOOjz$Gn?T@4R~cVH&YKYTU$!5FjVGmcU(u@x#WPr`pK=`D(^U?Oi#*VInO7J6-;$| 
z=M8R^$6tnW=Ke5VdzPGloQtjD|J#ZA)~0~NvxAfmcih(CQCX5K_F|t2_{ibWB^~-G zI4%f_nIrD>g?+q$~yTO{oSoY&%qqirY5sjACfNXvo8zz

b*{Hq>az{aEwl^vuL-ETW!AsvC0U0rmZ$_<6OOxU4rH z6u*cCCrj%CbZzu?LCU+fGt1_K0=ic0aiH8Tv(d}mzpAB)%@%6-6`Lyn zd;=aEyurlO2oEI|CPfQU*p&b-f0^7f+aySv?;0IYmhE8@&azM@s(W+tZ9c5e4!{sX zfiaPRB-x>31b(-@*OX3SfPO={;5>XR!!5^M3})_?`eW3RYLFL4dn&gCRlddq!UZp3 zAO=>+?ALa2%k{;xrD-8Kl++6_#b$F=O!pn7J1$t#B@!$C5)BfdlU5HN(yJrQ%j!dq}rfM(a$o8fv zTp~yzd4j8p6OX6R;`GAwKclI1Fc=V2d+jG|XGUoRj0-i(!+O4aQ6eJQWJVRWoER@>i z1n>F0IMxw9Tw>{kK1i)xicm&1#D5gu{o>fIFIqAt;KD8L+_HrIBbg;wPdL84HblQ)C+~sF>=()-`U*es#QL*>k3l!ZIIv@K;?LQ#Ty`{4Un&{&BHwE!4CHY8s zb9=V6y1JT|m)G4blMWT&;|nz-WC)pfM6;rDh2LXBA-h@)ttSbpP#BK2EA@3v!))k_I4*8zu-(cSeUL5Bkr5o$S)*>_9hh!9tbP63Ei-@-I*%W`U5q{cz|E*9v&Qg5%&dt z8!AqBkj06l=i^H)WY~-BjUhDa&H65JxDC=MS2hEuZ%EK2A?7il=<>wG#%6A#1e`7} zo5c3NGd9MIpFdt|q97+9fkL^>J9E;~(sFV}If_$KQp(F&<9&f+WOeBp)6DcfK(mM& ze>?Fg>6w+b&jHPHL|?21Nq6=$)R_j15nrFPD&Eh{H+?w}3v;%(RKEj$S!Mmq@NT8ky{N_Eq*cDRbAP`cP^8&pSqLb}@=d)^ojf1TGW= zuDwy*%gkJ?v!NjzhLx@$bKNNM;+5*d$YJ{zRg=C4TI5VM%m4OPr0(G0(9qICd@hXL z6;g5pN&P^^55gwB`9sbgRWKHhJBqdJyXfNrVPqfmezre`PZKHj!qWT@RALZRSPem- z5^p>^3k%wi;p4v4Z=Si@6@b5-guorPcC*_oI4o%WW^tQ)ge-JewU zoM=ex0>Ge^j5r+xnPav+yv)pKieCIzX%!U}w>)7m)}-_ukaFHzgi{wn7|G+5d1`x{ zr0MDDe;+en1TZl%d6)d(P0|sYq|2Usd$A8zE|HZaJf;mTq1MY{WzY(4=jWC?_Z)ui z@>{sQ6=vuSQ-CWdD5!~KaGABg4jtowUSD0oe{hF#+VlZ6_-BZIr3cSO`wR8M!;{5P z&L`WkhATD9p3M{ij5184NkCuY_wR=axPKtim(fa;f|ateva@{@{tu8YWZ2ay=;maF z@I8Y{2+_`OGnrC+00CB2FlwY`K?fuLs|<{990C|AWwK^5>2yB&!-s3o^|_v60eW*D zIjtPRgt>3DR2cA-q_psKZU(LJb|M7&gXFocbeWR(9K=jXv^Lt>+AmBjhJYXLEZjwN z+nG?T)i-~9YWJt}VS|-;lcg9Ywm)@XEfNnH|K<+Qy|wV4V^-iM73T;2p2BISVy&?R z+1aH=A{%|~ffA*ZDM)@p;7A)n;Gi}8<&9~TNoV)C((aRbW?*bgQDa%hL7|zz%%BzP zuZ%##mOR&z{*e+*!tvud{eI$Xe0+SIGM?-7tqIz>evMhj2x3da=m*aUR&H)TZ=~T$ zd9w*K#Vqo7GbmxG5D`KfMypV%*cRFuFp1Th#7&c_utafC6s0V@hnSEh6l=z zyt|W)LV62R*F7WzlgwF660sK-1E$zj*x_n>RjMHUIn zT8DC98iDbqIji4HQ0M-xXPE&3r(Lq+XgHPm=VWNyYhSJuNN)-bXyYYA4KX}?A-WZN zb>rEFp@zUGz?aO1V9cg zUQ<9y0QWk|6g3&X#u-3#Q6+3YbxzWQ4Ypf`Hc&YieLT8y*|r}!5T0qtpq1Oc+OD4C ziyrZ5{q-B}`y>Zb62pg))goLh&hEpb72S9zFOnb6KRi8D{r<8C1+?27}e0s3aI$1Q*m6Z`| 
z$9mVvQ^)P17IbJq=TXIentF%@*UeafK^jUZM zY%xzfxFw$W;#g%ScC@RLJViv{{DLp_=ziV&AZ?_bDw%!_85R^+K&$c+cC`b7;gf^! zdkf+K++yzA!7U(VlTN^zS(Dh!w$apvnxHq?H_9PLvkrmBv6taxYGR@0E&bY$Jxn`^ z=%9sJCCM_P>1ycq+bfLYH{4p6!&4Tkb*@^gP_cSk(^iEz7E+LhakO54^8zbDujJO_*>BO_v$z zj4LH+Ds`DR|4;;TMfm!oSDjZ-&`LOgx+g|WEv+_0b z`B9yb=ef5Ik~F%XVxI9y;y5N!Nll=ggAuL{8xrbj76_0td5R|R>l;2Cfzn=3!d;Tg zH0#m>fYa6$rzGjzU3HJ%hU1Bn!%yO6*k-7#&+0A@tKgsc{ck9_A!uFh7yJE-kH^XH zthZn0>y9|4+em&Hl=vTnhm_+q(-x^$JEaDWqgv>riK>>G`411%0U!7QNTNZop)^vXVh`0e*yCO;tD%bx59ow#0oplpi`6f zY~gW~ldgiAr6oZJSpww3S40G^TrRSUENHG~msvj@=;JJ-b1j`ZSji?Ee zLTl2nAyhWMHZ3iUzksZ{!}?{2zVfqu5>Y~5@~9WF(^_a;b~E&z=QvCjw|5WNejE?; z{%reMvCHi`dto8d?R}82^=I3&WL#s(LB292YEg%0BERP|DVnt?s>gftiWB$s=Nbvd z=SUbyI<#ttj~x{S{u2T!Q2RFLG*^wk1i9**7zqh7h@5Gl>9xo#|6N~n$3bb;6VgFN zUzJ8^$nWgB*Ikyg?flq8)>h{1cvk?-rpolPY!0(GM!SOzXP)Y2)R$YiF3cd026Zgt zC$-2(vYvx-rJwev4#xy9N*bz{jirO3CfZV~CgeytlXU)Ef;V1Vt6J?s=q7 z=djU<1Y;vHrhH(A>JE~{)8;^F*eLe7O7b%gL#p|Ep;tpwO&~;P>{6;M#4_XfS--bT zEYZIdr}5mCRMMM&O}DlQ6Ni^>a7I$R%&KRyH7)Pr_G{OY?5)>8+e3}RjDg&&Hhxi12ACsFKf`f z7oYv-$yT{+T$4%p1=MQOd3No39a3SnDEYVOLS7{rc{wd@iSWE>-p@m$%r#o02ziO% zxSH&+4z!y~N?Q zoNX(k9_6Cxns2M!L>{r^m_I8Tcf zb}|n5Q3iXZAGJYRPf|ojKYn^}?zDp~9{^7qdg++dm&hou72(25PwkVg{v+vvH1|(V zZTY~N<{za>gyBEcYJXDDYhUqd5=9N`TIFpYkmjC}Wv0L1QF^n3MhFlZVwMZM8>jvG zc8j=_#^DgaDeevn1;A16ClmhZUAZw+BJO|7GvJ0_=j9@hPU0u26PbDX zQwosiGyh(|WsVhbh7B7VSM#^2+bq^7=g9m=hABYDPgecGAu$(OENNcD;k)pY6W93} z{o=)u5!DB6WD$7xgnuu7CS28A_MgzhT{@DDctf8{@&8fwUBv4B?w2@UQwv)(U58s+ z>L-e-FMaZbnb5scP*iU^!5_+F|`*or!y+O+GPRrl6W9d9hq!*i@F(^6WPi+`|1oe#XHM#V8H?3VEHJp-9MA2BWM&*oupsHc!G$ zIl7T+C&KU#W><18`npZ12wP^0?we&E%f*<#SDY9E*M>ei&)MOq5yfCnX@Y`f~9Nw$HcbW(91%)zevdoylqjmA@%P2At{~42)X&EiW?+*+-4v4Ycp!@dWvw zDgBqLm`+2=~Fdb&CFawyt!AknMLEA8XK%Aupv%7KBaJv3dUU~F zDnhkLRK0fs$mi3UOTj*Aqy7ON6&X^UKXq(73j#;3Um*(6a7w=Eb&`O@KbAb|FG5l5 z@1^u!`!0@ondd;Z;zY&k>0abQ{OZ~A%;vBnGk7d#l4-^k?+a$ESC=ES;2A|LIw+sG zov|H??43x6y?*(tHMoJ0&o49co4MN#Rz_Y6vX6e 
z-(5!8HguEgP@N24RWa6d`e#;0fe(FSLq%Bx^b6%W3e2L;B^-)861&F4|)pMSa)fA5d%70C71M6))P z@6i^+GmhDfM`)-_;NQtQtm#tp>BTTUxJM5ND88t#K~=eB4*1M?e{-tjjU*7!-i@fI z1OK@F9K%`!K$E_XgkzH6ncXhYx+T$6AO;}gM<%e&Bg)$WXxf1q8@o*ynQp zYOfb;D0%56T7))UhDk!_V{bZDXuC|Hatfm5`Sk|heaUkQ4+GhwK!Ady$P(q>(9pkb zXk=u>^>o|eHA?VUb+Q?^S-X9b&=9V~`K1fl4;eWigu`zNPn6y5&tIe;*}Ov(ut@TM zQG6T}*(ppat0YdXMG1(TCjEyY5rKi}HS#1o;&8mf4~Y}wxFj4D1pp`?RzP^Q*`u1^ z+7CH=y)dE^(W%v>rJTa3etdjF^$1sc`vS!2@rb?P}6)hsEi*@mn$*rsZ z*JfPQrK6anuKW4(vrA?lGMd2=a|$k6h-LJAAtTMlC75cSiZj&6P7!sI%c0aCFCn&_ zaw0M}v%sR}WI`hFLD>DRdboJN4l-IZz@+>2#LXSmU)?JFXaW!aQqmm<)w~WAnW$&8 zO06QprL)YG)PHgvYRjT^Cx2nrbs!rfMl1Wtp(E@8wGf-nc%bO#NzDYpej0D?x4Vl5 z>m)%{(S?aP5eP~xF_48ah}6gc{lcH2I-?jlkMGVt+md@3Yx}V)kSbG6R&v++l4&6SN}?GZ zRdZTx5P5hf?(j7jkpe_LZf8E*#8~F#!JL+s`~$ggf;}>kXtlZ;G5`Fg%3Njq=b<&l zU>uvr=7(hE?<{hSeL-8xBbhI9>Yytcel^R1t$?rf&cX~r`L^i=o=O^IOom_Dd{qUJ zail2X-MQHDJkJBL`vp2MOTG)`p?pgp>l(&y%MI_{_LSQNCk`w3YSyDK0DjbJ@_kjH z;~sf4NKJJ26B_xmzpX--4KG(+W{l;!bSMkmne469Ugk|SEIl8^SpJSU@$a*kq(bN30OYWo%wjL35S$m2|v-vCEV z9;fQiEDv%U61L=z%FBOS5sN7^W_wf@xm}`e6~Qx(V3xYgj5KlI_KNU67`+1Y1GMbU zJCM?69K(%tleG#o%!G9113xSRR8o@6+FDyr)PDtYxdf^ND7r|>3+m2t;eOlHAbwrs zbw8)tp{`?#N>Hy>Vw#I7(z02B0x^N^}%SwYLlWp@+i|) zatj!bQnjjCdTV?pg>rnxS44WT0OEQfm>^wgO2=?0i6_}Y_~e$TTEI%|SPD1@R>bq< zg2`!ar%7_C%Kl<)qLZQSw{jsf^Jc+~V#S-%V{$ng&!1vV3q-tD?feCew$g(`diU5! 
zW=HzyUb}&!#!sNpq;x+tBf*c`0eX-_G$KL-Wq=;r!YaE6?I+d zJP4v zYwZ20rc)i-xvV)D@&Z76bEK3vf9%rU&<65}(H%_@)zH<-)#%-2Dn595eqv$TAnBkM z^?WTC_!ofY$wQ{V_+6uP$VBR2BBM`Sg;4ED@K2Etf1mSu{D-HS(;C?LO{??a92!_V z^fEQ&{q6@vX}el^h4NG4kf=6kDcv)sPjqb5d5K=xaT0#lv=c#($wC>ad^E@~Mm@Gg z*#Dobs?9oLE`6UK;Mo!1-gn+^VhULP-@Na-+)QVz4a;9j(o@sY!kuLImJ|A44rgn` zIu*tSH};M!Wk)1otS&LGhX;^ipQFMHRsVIpn(Nv(l(f-B@=!QB7Ef`Ftz2%qq+RF( z?YerZIHz%hxh0M{&AUy=+ar(f#0%E`)axho@(%&|?>IU;FY$xcE`Odt=|Q8V#;qCF^rc~|!; z$03QC1fa!sN2;rnk3x_e@|#I(4czUYba3DZ(ZWR-w~yzV%!jnS zI=|~Z2r@9^K6cOV9SQpw{wF37N%lAT=kQO~$#=au6eB#p?KtpWTcCmd!iM34ElUM` zT%NWTfA6=o639Q!#xt%X%4G|l@4IemhMEa^QItX%L_qY$Ma<64;qt#U2v=WU9$EjA z9y*uPxeXDO#I3#AAZpSPWa_)nnPJ12t%$=BrW6EHBz%c#iB`PC#H;UpUmx|IqEa}R z7rrqO^$_=QQo2H5f_61(&4qg)TW+IrnFGPhZ{~x4OK4gEcN(vBVXo1T=ck8fCUp6g%z?@m#P*^>KMXZd7=E>JyreSU^X zm&dee)0)ofFnxNm&F9I#?R$;Wa-}&AZ>SGmSY7Xaa?bw=^DkUntS0V%@Z1ESqs_;8 zB%Kt!IXVxTp73@|usNa7{M#zezDcJxbX|GuADdvInV)}7{OoD<@}Bi=_79OW>$m?g z`@eA2+2Y#lhUZV79p>-|Dl~EXVb}it@|S0quN(C$Xw0z|zW3|pzC7=e*S+%Ht15O} z@PGbzli_EFo6LXr8F5J6UioSE_0JD~Z_+CHU^$2X`F*{(@yrtv}nS5&!vBR_42NAzV-A%y8Q(dUenH`-^_q-7HNxz5MeY)8FOa zr*FOXrR@Ixx_i}SR>BV~r~Y)K1J1J{=R9QKWpYm{_9*M9=UPf z#iNJjFFzo6UDuJ@{{7O=r)w`>J9m0ZzP{XlyB||7|J(VuaDhQ-V3hUo-W%6agW2V_ zWJdk46ezQe-oO8ud;g7vb(?Rxn7;qL{e#Wx+}mI`=qZ5}=ncW#H{L8@?Uj_5D>q|nKA5%jk{U=wX7;6)2FF{BBqTqj@vwYd~H*~w%fp*k;|sH>+@1|w*I$?a-V1P zd+48%X}y>+<)EMxODP*S$b{vqQy*sV>MiBYSk->?!91O=^h+_kcTZn8ykpo_c_!IT zX|G7RN!Ox^y1HxWv*+ip+-container1 -vlan10192.168.1.2/24eth0 -802.1qtrunkNetworkRouter (gateway)vlan10 -192.168.1.1/24vlan20172.16.1.1/24vlan3010.1.1.1/16eth0.10eth0.20container2 -vlan20172.16.1.2/24container3 -vlan3010.1.1.2/16eth0.30DockerHost \ No newline at end of file diff --git a/fn/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.gliffy b/fn/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.gliffy deleted file mode 100644 index 4d9f2761c..000000000 --- 
a/fn/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.gliffy +++ /dev/null @@ -1 +0,0 @@ -{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":566,"height":581,"nodeIndex":500,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":{"uid":"com.gliffy.theme.beach_day","name":"Beach Day","shape":{"primary":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#AEE4F4","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#004257"}},"secondary":{"strokeWidth":2,"strokeColor":"#CDB25E","fillColor":"#EACF81","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#332D1A"}},"tertiary":{"strokeWidth":2,"strokeColor":"#FFBE00","fillColor":"#FFF1CB","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#000000"}},"highlight":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#00A4DA","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#ffffff"}}},"line":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"none","arrowType":2,"interpolationType":"quadratic","cornerRadius":0,"text":{"color":"#002248"}},"text":{"color":"#002248"},"stage":{"color":"#FFFFFF"}},"viewportType":"default","fitBB":{"min":{"x":-3,"y":-1.0100878848684474},"max":{"x":566,"y":581}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":-5.0,"y":-1.0100878848684474,"rotation":0.0,"id":499,"width":569.0,"height":582.0100878848684,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":103,"lockAspectRatio":false,"lockShape":false,"children":[{"x":374.0,"y":44.510087884868476,"rotation":0.0,"id":497,"width":145.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":101,"lockAspectRatio":false,"lockShape":false,"graphic":{"type
":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network & other

Docker Hosts

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":157.40277777777783,"y":108.18042331083174,"rotation":0.0,"id":492,"width":121.19444444444446,"height":256.03113588084784,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":99,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-126.13675213675185,"y":31.971494223140525,"rotation":180.0,"id":453,"width":11.1452323717951,"height":61.19357171974171,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":57,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#38761d","fillColor":"#38761d","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-121.4915197649562,-156.36606993796556],[-121.49151976495622,-99.52846483047983],[-229.68596420939843,-99.52846483047591],[-229.68596420939843,-34.22088765589871]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":289.82598824786317,"y":137.23816896148608,"rotation":180.0,"id":454,"width":11.1452323717951,"height":61.19357171974171,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":55,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#38761d","fillColor":"#38761d","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[291.05455395299924,191.93174068122784],[291.05455395299924,106.06051735724502],[186.27677617521402,106.06051735724502],[186.27677617521402,69.78655839914467]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":372.0,"y":332.0100878848684,"rotation":0.0,"id":490,"width":144.0,"height":60.0,"uid"
:"com.gliffy.shape.basic.basic_v1.default.group","order":97,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":9.5,"rotation":0.0,"id":365,"width":141.0,"height":40.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":98,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 Parent: eth0.30

VLAN: 30

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":0.0,"rotation":0.0,"id":342,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":96,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#eb6c6c","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":52.0,"y":332.0100878848684,"rotation":0.0,"id":489,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":92,"lockAspectRatio":false,"lockShape":false,"children":[{"x":1.0,"y":10.5,"rotation":0.0,"id":367,"width":138.0,"height":40.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":93,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Parent: eth0.10

VLAN ID: 10

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":0.0,"rotation":0.0,"id":340,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":91,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#5fcc5a","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":289.40277777777794,"y":126.43727235088903,"rotation":0.0,"id":486,"width":121.19444444444446,"height":250.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":88,"lockAspectRatio":false,"lockShape":false,"children":[{"x":236.18596420940128,"y":158.89044937932732,"rotation":0.0,"id":449,"width":11.1452323717951,"height":59.50782702798556,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":53,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#cc0000","fillColor":"#cc0000","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-121.49151976495682,-152.05853787273531],[-121.49151976495682,-81.64750068755309],[-229.68596420940125,-81.64750068755139],[-229.68596420940125,-33.27817949077674]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":-179.77677617521388,"y":56.523633779319084,"rotation":0.0,"id":450,"width":11.1452323717951,"height":59.50782702798556,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":51,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#cc0000","fillColor":"#cc0000","dashStyle":"1.0,
1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[291.0545539529992,186.6444547140887],[291.0545539529992,117.79470574474337],[186.276776175214,117.79470574474337],[186.276776175214,67.8640963321146]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":447.0,"y":150.01008788486848,"rotation":0.0,"id":472,"width":46.99999999999994,"height":27.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":87,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":473,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":86,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":5.485490196078445,"y":5.153846153846132,"rotation":0.0,"id":474,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":84,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.901960784313701,"y":9.0,"rotation":0.0,"id":475,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":82,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.
basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":368.0,"y":101.71008483311067,"rotation":0.0,"id":477,"width":140.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":80,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Gateway 10.1.30.1

  and other containers on the same VLAN/subnet

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":350.51767083236393,"y":87.47159983339776,"rotation":0.0,"id":478,"width":175.20345848455912,"height":73.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":79,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#cc0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":94.0,"y":155.01008788486848,"rotation":0.0,"id":463,"width":46.99999999999994,"height":27.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":78,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":464,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":77,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":5.485490196078445,"y":5.153846153846132,"rotation":0.0,"id":465,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":75,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.901960784313701,"y":9.0,"rotation":0.0,"id":466,"widt
h":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":73,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":80.0,"y":109.71008483311067,"rotation":0.0,"id":468,"width":140.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":71,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Gateway 10.1.10.1

  and other containers on the same VLAN/subnet

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":62.51767083236396,"y":95.47159983339776,"rotation":0.0,"id":469,"width":175.20345848455912,"height":73.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":70,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#38761d","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":341.0,"y":40.010087884868476,"rotation":0.0,"id":460,"width":46.99999999999994,"height":27.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":69,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":417,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":68,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":5.485490196078445,"y":5.153846153846132,"rotation":0.0,"id":418,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":66,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.901960784313701,"y":9.0,"rotation":0.0,"id":419,"widt
h":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":198.51767083236396,"y":41.471599833397754,"rotation":0.0,"id":459,"width":175.20345848455912,"height":79.73848499971291,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":62,"lockAspectRatio":false,"lockShape":false,"children":[{"x":17.482329167636067,"y":14.23848499971291,"rotation":0.0,"id":458,"width":140.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":61,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Gateway 10.1.20.1

  and other containers on the same VLAN/subnet

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":0.0,"rotation":0.0,"id":330,"width":175.20345848455912,"height":73.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":59,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ff9900","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":279.0,"y":129.01008788486848,"rotation":0.0,"id":440,"width":5.0,"height":227.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":49,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#ff9900","fillColor":"#ff9900","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[4.000000000000057,-25.08952732449731],[4.000000000000114,176.01117206537933]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":56.0,"y":503.0913886978766,"rotation":0.0,"id":386,"width":135.0,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":48,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Frontend

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":62.0,"y":420.0100878848684,"rotation":0.0,"id":381,"width":120.0,"height":74.18803418803415,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":41,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":382,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":44,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0417910447761187,"y":0.0,"rotation":0.0,"id":383,"width":98.00597014925374,"height":44.0,"uid":null,"order":47,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.10.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.955223880597016,"y":9.634809634809635,"rotation":0.0,"id":384,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":17.910447761194032,"y":19.26961926961927,"rotation":0.0,"id":385,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":40,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":382.0,"y":420.0100878848684,"rotation":0.0,"id":376,"width":120.0,"height":74.18803418803415,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":31,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":377,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,
"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0417910447761187,"y":0.0,"rotation":0.0,"id":378,"width":98.00597014925374,"height":44.0,"uid":null,"order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.30.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.955223880597016,"y":9.634809634809635,"rotation":0.0,"id":379,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":32,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":17.910447761194032,"y":19.26961926961927,"rotation":0.0,"id":380,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":214.0,"y":503.0100878848685,"rotation":0.0,"id":374,"width":135.0,"height":20.162601626016258,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Backend

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":376.0,"y":502.0100878848684,"rotation":0.0,"id":373,"width":135.0,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":26,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Credit Cards

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":627.0,"y":99.94304076572786,"rotation":0.0,"id":364,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":25,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":363,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":342,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-183.0,310.0670471191406],[-183.0,292.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":372.0,"y":410.0100878848684,"rotation":0.0,"id":363,"width":144.0,"height":117.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#eb6c6c","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":218.0,"y":341.5100878848684,"rotation":0.0,"id":366,"width":132.0,"height":40.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Parent: eth0.20

VLAN ID: 20

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":297.0,"y":89.94304076572786,"rotation":0.0,"id":356,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":22,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":353,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":343,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-13.0,320.0670471191406],[-13.0,302.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":222.0,"y":420.0100878848684,"rotation":0.0,"id":348,"width":120.0,"height":74.18803418803415,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":21,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":349,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0417910447761187,"y":0.0,"rotation":0.0,"id":350,"width":98.00597014925374,"height":44.0,"uid":null,"order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6
,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.20.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.955223880597016,"y":9.634809634809635,"rotation":0.0,"id":351,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":17.910447761194032,"y":19.26961926961927,"rotation":0.0,"id":352,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":13,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":212.0,"y":410.0100878848684,"rotation":0.0,"id":353,"width":144.0,"height":119.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#fca13f","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":212.0,"y":332.0100878848684,"rotation":0.0,"id":343,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rect
angle","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#fca13f","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":203.0,"y":307.5100878848684,"rotation":0.0,"id":333,"width":160.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 Interface

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":303.0,"y":240.51008788486845,"rotation":0.0,"id":323,"width":261.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":8,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

802.1Q Trunk - can be a single Ethernet link or Multiple Bonded Ethernet links

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.0,"y":291.0100878848684,"rotation":0.0,"id":290,"width":497.0,"height":80.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":543.5100878848684,"rotation":0.0,"id":282,"width":569.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":6,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host: Frontend, Backend & Credit Card App Tiers are Isolated but can still communicate inside parent interface or any other Docker hosts using the VLAN ID

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":-33.0,"y":79.94304076572786,"rotation":0.0,"id":269,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":5,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":345,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":340,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[157.0,330.0670471191406],[157.0,312.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":52.0,"y":410.0100878848684,"rotation":0.0,"id":345,"width":144.0,"height":119.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":4,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#5fcc5a","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":20.0,"y":323.0100878848684,"rotation":0.0,"id":276,"width":531.0,"height":259.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"l
ayerId":"9wom3rMkTrb3"},{"x":19.609892022503004,"y":20.27621073737908,"rotation":355.62347411485274,"id":246,"width":540.0106597126834,"height":225.00000000000003,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":2,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":1.0,"y":99.94304076572786,"rotation":0.0,"id":394,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":3.0,"strokeColor":"#666666","fillColor":"#999999","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[261.0,233.5670471191406],[261.0,108.05111187584177]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":44.0,"y":90.94304076572786,"rotation":0.0,"id":481,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":3.0,"strokeColor":"#666666","fillColor":"#999999","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[261.0,233.56704711914062],[261.0,108.05111187584174]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 
0","active":true,"locked":false,"visible":true,"nodeIndex":104}],"shapeStyles":{},"lineStyles":{"global":{"fill":"#999999","stroke":"#38761d","strokeWidth":3,"dashStyle":"1.0,1.0","orthoMode":2}},"textStyles":{"global":{"bold":true,"face":"Arial","size":"14px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images","com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1458117295143,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/fn/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.png b/fn/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.png deleted file mode 100644 index 32d95f600e1d0f028e5a354584d7b3eac1639e35..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 38837 zcmV(?K-a&CP)`~Uy{|NsBV<^9ae%>V!XU&~tip+HNj^66}A$mRQ)nVITrYtHNc(eD3Esqa*> z`kbAd%gf76to6~-(lK|cWX@pB=>GHa@o3Xzg_^4Ws!jc*N5S0jld8K^uJzT`)$ek1 zjEs!W&(7fC;Fgw^`k+76^8b5_oPvUbLqS5dw6u?pkKFqIl9H1A{QJh>@4UUdh>3~o zZf_bI8wCUeRju;(_xAs-RR8+*@o{lbulell>t$t)3OtT%C;^muqGbEMhb*uuoa zthc;2H8U@1m?bADOrYYNL_*Bw_Ge{V*N8X=7YhCG-9?x^NsPS9$HfQ|DjO?231S5M zlL5N}0aL3>H+?Qtsp(QsPhQMaGY|=~Zwi2)wv~;4X@!Z;naVZ8!1Fgf;UAc;zXuVure|>tOWE~Z8pARn$o3h1ObAbNj!%u^* z_5J^CTT#Z>-^Nl<{-!W<+HSAO-u~F8IZIryPE6BPOvND_pf4@y_4@7P*??SStj416 zXk>tnp?7p^5>${_Fg7T04AS7|-0SlB;o#|%K{jY`-gbA9fN4Oe^_8!W{kmp&I5V+7}GvKNhH!eJfrZ;)8=# zpwqakn3Tfhgs;~cM<7#nn}SJ8E;MGj>e#%;Zg8JfSz4 
zvp_CT_#;99000DZQchF9>dx@GGiCe$0F;4AL_t(|USeQi8bn|eFfcF!*t2>a$5|-4 zqi+-{DqATiEeu5j0zpGVL+6shegeP$D|}pn2ahA~{O#-}nH(wBHO4^h(YZg#^A+KN z|9(9^qXjyfdTZ6k`BKNgy1XfuI#}PWcKuvVT6W&stlrk;s=3{#zPlN!uhga2huH31 zr`8S1DKDn%eDlluV%M`h{dz*^`D*Y05VF|C*xY@XF0?m}R`!QiLc3?IW1l)P+Mkbp zJBq5g|83EGj)%%QDk1j+RInXOmzz<@tvi4Bp-w>P4brX&x2%xY?$pi^;{7l%Hg?BD zJ55Fi5fg+k(=;FYt{WK}yeAHAS9uWfbzKxoYd3T=I6sC4b7@Z&KojH&aBl%8i9-}- z?#F>Rt(WBXR7!D<-vZ}KCSI)MO^Bvvo}kYOP2n|+wT0-Tfup(p2GARf>`j9s%Aww&IJlPEu;7F=CZlaivP(gV@h;xvwx6ru^ zBi^&_U_fL%zq?{Zo@g+g&zw9$)^C)i6B$@iaQqn!=86J%h^FnBcY0rpJJ~^##7rU? z7Lv=t9CnGVe@zLhxET65jbUYDjyRLi6sJMZk+@VyPOp6Q@%}}Vp)&zXOA9BNYAhHk zPJdsifqsfY+%N|TiD4C?WD`w0`Wlr`!dH43t^GJfZmHyz6Tx(urgNIZIt%KOUYC`PTk=3+Npo2+fSsJMp3%#W~E`ov*Wx}10c*&ublvv zD(EoR5fj}jA^9-=5MTTQ=@$bRPJd^w>3lD^m1R$=xB^95O1JfLE$*m=hd7#awnFfI zD#V+HQuQK@#*E(m9aYYO(j{^!UfN2$gQhf?l2RVc(U;ow-jcc=gjz}}MQNDWSs4iZ z4QaYAlx(tTHvtlGHaP&VI~l3qsOa7lf(Ee)RZ5I*lP$iT6L-Wka{_sTCNl%-ybxkS z{t@#Gc&aLK(%N8{yDNnZLN0Vz8e5P4MVCgHiRyGxA)0AEbR!!|!|8N_D?jwJu5Tw3 zv9+~D;>yZq8tz6Ry2T}RC@Mi5{|t`x9rOAf6MZ*DzETR8mK$;(g!pI|I=C40hySKR ze=5<`MRq%#)fm!_O{IO!P!_lD_6gA)x8?gNGkhtFBlKPW4et}=M9$5q~;`woA^t;vy%i+#cf9f8d^Z zV~m=iGPKnzl@IPxpFdq)BZwWkg2s*;a^6+|hOh!Q3PzYLXneSv>ksi2fn>g0Y58;cav6&vtV5QuTJq=C zm1`|!Ie<(`5OwqQ=`#o6@>%<_hB!pK!*EQz0`XHN1fd_*ht{k7BmmmSpz(ib^o7go zvDk1~Mz#4W4|l7#>`DgLB;>vop=0fFoTu5Qd+vVz2o=M3%K1b(Ft6j^Pl02_e zl{{XD%7=@n;Xj!N;e0IiK{D(!jjy)Rf4VBDuw4Ei7W+k#G9GTKLCEPlP#(xND$UOIFwfcCXm6$eA6lTk6? 
zgGW%3QCeQ%P;l6+V8SQoKg7*AoY$P$a9HtgvB@6pmUVF)iW8rrDph5@wFx{db^Oj>2+;FE)9P6>}*CMg!%4=uCb!zbdotnVpiV-AAKL4T%m(quY4lYl}fqj z2MGl5-=OqBRY*~y>1&w@%HY6K^{g9{oQ;E)2eSb|VAO+d9(K znF`IM3iN7;?&#}(NjHAfHg&sFm`Ne$)6im&zB&c~;R)vU7ZJP{zPm)O(CJ(%G|@`_ zYbg-lD8(BKw=a@_!JTqz%I8m>5Tr`O>har@O6%@vI;zdK2%d;FL}KuoSH)^6Q{Cwz zL-j6;_Y`3)_Q3L3mo>wUjwvgG(9OdT(niiOZK0W{lV05nlJh~)DN5H%*s^Si;B15< zb9*}hfEJ^Zx!~<505ZJ{Z|IME+o*|tSZNK!C6T^fkj{e&tM03~0G+r(C@$o(xb?gN z9;Wc97`{T7&#BxS%6_vYVn(y4fATDF0B?+~=n+AAl-Crqe~{ z%h1errt?WDcztsr+A$OTr_5A5ajcMwusFR&?*O2!=%Ym@`UoH#U-HaG6TPjIaZHGa z6;kMYSdgCRjpDMY0pcm{q!q$N1cx$CSu|xRGV#0YBu`L>?AU16ZaLOnhAI#o7)=aj z*ny!Q*IH?{Ckx#8?cCfif43i(22Da1Q|Z9$bb3Bcw|wt+R8#?q{fS=7nJA%xwpdbZ}wzdF4J{#GU!eEC=lwfh8mJTOLdHt~K- zHWP0m{A=mbCxP~KAzShbll6&=hXSgXxtf`{H@Xc->AY(IzQ1oT#&b)((IJ3&iN;%? zd_HdHHy8Hd(uFeyA;%AoI%c{3ETEH72v_g-x*g{95SNuZ(jpk72qal`BHVKY+3UFC zr=8n{(Y2Av$e)UC@rkYcOL?J z*((j{Z@c{L^uaI)LAwnP0#sFp($DS>gN=~hjOmRhtX|cKIJgraL`@Jl%=Ew8A0gSJ zJI``ViTrjG(m&)|9KBg^^eu5Nd%Q8%*3er_`9Dg}J4VL69b2yeW96zs$Wj6Te3%db zR1_cn;f{QF5&$w9AqcqfTZ0GT&S1q<2m*p6Nw%u3d0sbLhVFSDfFmjMRE=Ip+6>1R zz3XMs2vEqFFoR8q#X}P6=_4#hK$;2JLx`_uhawioSFZnOgZI~uPEM^6v(71TRqp91 z-rE$frUB#MUv4hET3Mnv76RfL#iJ>@LjC^NCHhK$gH1yrl+|t@|I4+5aA_AI^O7Wq zkH7@#9Y{bGIx+WKVj4^YyiX4JTHfI>2zlqr5eSz-hubDJ7%MA|1fNoMidEZm1SRJ{ zPF$Pa`J=?;#``XQbuY2iPdKk6#Wn2vi(hCB^qlRbrC)8vCn4bMcr%@yprvU+qASE~ z8*GYZTlwzIJv$I{XH7!rN1g>7OV|ZZ8c;#-P`QDGryUAHM;aA8E==z?&jD6~X(}R2 zCLrJvCfS$FI5#Eio!x*5IU$8mR*P3xhg;C+P90&vj#+gY9?r{q?#g&vS;z?W)iMR!cGlMMK%z#b1VvDG{q_PqdDP0Ka#v|GIS{o}hO^0wJtA*LL?QxW_ssJ;I+wf z`Air1{-P!F#py`Xm=^(ZM(w!#Z^5U_#LgA==@nyFINs3sqCuwj7k+88Xw)l+k89MS zyhb6ahh8S9z_G}kt6x3f=>ve+ck>4!AX!ox2KU7d9;mP=vT~9uJS1sI@{4NgG6;Uz z>S{vAoM+B#B0voGfpBC14_dhrCSH6Od)fk0SHimvtF0&bZKx`|K`eq%SA1l* zm(nS;S@qBn;CQ4T;X%K{3h}DF(zGD+4gyA)A5}-JEG?d~>li9VJ`{5Nt!d;KdirW4FckZTRs>=Hm0@mtB#lrUBEul8{DW9ntGP zDs{q|a@M0l4vtBy0HC@D{ zf$-UEBUHqIbs(hnf{5Civf7${gufuC%paVxAtKV+2qejwkeYh=$D~fo2nZ}%tRBZD z559WEmwai?b<<&b;c)bE5xcb-fS|+S9(!R;s6)+xq}$L!yO49rMH~)!+U_{LHEMUK 
zFZLuIo=r{Tmcu@9Dd-4Q#I1Ix-ti;enAp{=X}9`$KI8J>s|OI22!-ir{3swvqW@C_ zI!My^H9-hN2=R|#@k<8Z69gl?k_6_+#dxrUv|f?+k;Ky3**J^e6tYYoCtP=)=hi$x&zZ z?mcCw=lh#6tNmuuI-)rX(4F3s8vU#~wVJ=kKl$oYc<9>hqYo3g#R{#3B!yA%_yK{H zqWP#%$W-9C1me}azw|ig^tcu2Pj)wSn_%|>06TpkQFW>>q2tm$lTl~%MCA7XCjJ4) zL=^DnQYP38;7vh)>tZB<~al}qRiJy>Jv z>X@LX&d+81F;87JQ4v4~Gj(-r$`cJIHDvshvoYzBd1O6XI$GoBJ$16wG>zT_O@=9t z#}W_oHD0)PyK}!mE9J)%ov0vEnS$1+B#spjwkC^*js(Xl@YjNqRa)Fel0sWaAXd7P z40=W3BM=ZwZV6$$_&gp)0*V3zA#|n0V<od@6h(0fX;{e?9Ujm;lYfGUv#SqC z01wv$6H?sx+}zqClDWC@agKtAz9kq{rA3Hv=OEd)PT|O076j&84h02{U-(-^%n1ZR zm2#xatavm4#trFC3{TMK2fW=6;`QE`Cuh00E)<5-q|ZZe+~E&#S8>6$rMq|kbzxy} zup=jfgM$kT|9W>~ZJZSEry=et6L*1f;lN)z>f|rA&c8`PJ21clz&0 zi~7!h(V&_3(s3h>l?P3&1f?;K1vMtQuGLFxHW&cqFjn zRAro|D=9|4aalIn;mCn^MG=fpf*JRCg+C^E~a7 z`LTNlOK*_Hg^rGerSVd{^7;R@2@J~JHS%!XmsQ3rO|Cdaejp$!MU5D)f3L(^2uV_? z@`mqpQt)lNH+i=@SqHvv^+c7%vw)Vzr@nqs8}}p{`Ay2wGP%)a3Re{%MMuJhQGBZ$%XcR;LO^~XMUw}0$QDtU2-j%mR-pT=zEm)+{O+UA`wuVhM zO@q0Gj=`ll@w^1WKAoB2_~KoJ+}hlnFqfjqcXiby_A2)*9|(v>FB6Dlc$8dUl2DmZ zN1fsO^E3ba@83N8=Gq_U=kE{e+A7Tq-0#k$k!h!;(5+_^{8Qg+x6E+~X-JnZ>e`Gn zgj)Jnsl-Uat+*M$mIEcFZCj(!4eMFJjmm1noG+UBxqr$y+UL7D>gKI=qkX%}aLDPq z;FykI`K{z=OLQh2pIiLfpdi>Th;qK;?p*mYYw$q zml=a*B8H;hgf6@X{ioSj2syquaXxz~?@p{Qu1_q8(43z;!a^{XY`gy1|D7M6RhpSxR|-OH(F~>)#l>-M0$sj^yG!FVO_SmhM3U$^wYUThFQO{tCXcV*9hA&ZiBKk~ zOyPY2fuK!*Rnc}pANArFvop5thcmP9V1(J(`}Swu4G-s4d2_`|gsV^Zy?2sU4Jii= z7Vib|8S$~;^5hQX{*z{6+%b#AJsRv-Sf8UmtT|o5rd1Sf4FNs4c))6tWjqvS0N+ct>0r z*A_a?uaAQqZb*)&M~7(ZA9Zv*BKKcCv{Fgsdjdik1wfD?g2knklzDd8D66jveKr%S z)5?sd+2QNm*JsNF$#NH3b^lrKGmmHHvl&ml%s4!YqXs;N~z zM-}1GA&_bpvbdf^8b|`Lj_=^I?T_IHXy@W!$NIh}7%@_!(2?&Ah^z<@{K(wDZtL!T z=oucKHJe$6q8QeU)Mfbo%rH2L5DbAEf2#0Y&%^Fs>mO$U?7#ErRk2aVEhMkR=lw_j z@to3RysV8{?C4m61?M7e_w8(f#X;F#b*k5r zKk|(3&__>C9qAUC0D_u;}crW0v1pNWW21LB?uw@ zLOah~h}}@za6L}Yhf5uJ^}ndK=>}^TF2J?fiU5J!V8_zFcTlC6K&NGnno26UYGQ(s zTrx~Ez1{O>2+G!FmZ#r~=TfsX-QD-eoUg#~JV!$)+G4~%!hQT9c+iI-Hs-3JVPd`e 
zE7So+9KMJ*8ZeKn-@11R5OOx6zR`-3usmvqdFoQ4iy1oMQxEvGWMmnDCW0$1wDWA5l*{xgACOWJO=7HbH((6Jx|5lVA@u}yxzR8lfqd9t_Y4L|Jt=h*`z z|L{O({h*<+z}S=XKOsa)=HtsNn)@Ok@j1y+-h-CpQSSED-(fk4`qZEvLEN78UhlZe z6&S?rh&LFDR%*m>O}GQ-WNJS^?gIFZ6-No+k(Vr(uVdLo56+`onEC)$r>x9AlMpEkES*F*`bpeM zpv>xptk5mNUhLZkz7&wHflFN6XWZF{w3sVW?o-Cb*yyHH-Ln^ z(9B3PivD2sO-Mjw2T>xJ1kOLpJ}riKxV?K8>?f$<#;3SphG1Aw3ly=@11AYD^fY^L z5)C;i+KbZg?fBZ=Vs3<7S)JY<(2tJ!mSa=KfiY(b^&iXI4WFc&Rin?hF5bq|QQ_y- z4Qqoxb1ndOFUr#f-fDds3fXL3LG_OyQ3wbG;|s{B)1XwKNDazxF#M<*0tuDHSf7nQ z%EYu`^RogxAYfLkcI#~b(a98gkUoXni>MLK?@sT2`9&ub3}O@?On&)iZn9xD0I)&q zD~^ZFch3}qBvG^dPY^2DFzm_ItVjn?8#8)8b)@Ik7XSeyeDJuszi0m+d72-Y&0=OFGggn7F=%-7upZOcV~tO>N%;`n}m zCN7WEYr8DaFQe)wox)VUSRt3ZuOg9HW>O#<|Z>64|W9s#{lRZS{S6jD}`npzG zfU5`qm^iZqbgpLTJ`T#*`t}!R5bbJ59#X6MtanDd-q?wfQ+RF#{Tld z8(SCvP7{jan7QTYkN^8?=4mH@!ML>qkXZqca+x4Jf}>yLXhB}6M76ii763v= z`asGNjnza3#!~)KgLBmHtZ-;OOi~fr)K%>W7yI=p2ILEY0FOig5_ti0^c{BvMvYz z-ni@LM!yw6$b2{U_zy3>3ny7Xx}vlt47e|!zxmd5qc3Oyysa1z^EIS~Sjq$PYaSd{ z#s>xG>R_%aovRORQ@x{SOl(>hoWc*~g}RGBv_RMjX&-VZKq#fuTvJh_QL<$2g!w*V zH;}z8T(ewm=VQalF#|HOAD6);>Z;kAx~c$|LAos@dHcDi(V>vO2gE!h z0bZ*J13KergegxoP%7$rBCCOzUSHWZmaIo)O3ifGOLNTt%I=QEr2X%+$8Cu77!9tsdbSD&jt*Xmd@ zf47F{5{v3cOchLwc)@!KsnT8VF1Kz$8VNisAdBabf|N*5W?XeDKw6jR1Q9>rCQpNb z#*}}^-yb<)+ZejpW&lX3-vY+j2{NB2T0vSfO~z#r0|KOnPn&OyIH%CAa;kYF3}9jt zZ1sNa-7Ns*Z$+#xqZL!CP8^DUR~hv=I5bTDnc4R4d8Rn^62gL<&!QL)6gMUL@w>tY z@o(=)p`(f&?Diuu$K?UxN;M?W#0)uW!HzDmOu77ko^0f-t(iu{*p}H@#CCLK=e7yxhF5qCH}(2M{H`)Y?aFd!iA&^t0d#Q>YefHY69)JCjz z7?8-=PSoQJUD|R~7=8v|%U3&zEXejUcD1CHvS7(#D+K$jw;Wif4kS5|D*=%e*a--T zYx`}TX_W3ksN?}an-jI@G^bR7U3W%nW-&qm$e2}lkP5)q7$8}>D(Jfnn6_}Rm1Kn+ zky8w8@ilflaA!c z{^0|d&;A#-xEo)?#M$3yX+FY3FwvODDytz(O}&UmWl4?;HG_78C4#HW>e82jMx`6< zR#4#02uRkNTarfacJ+$E81e5QHQrhUWZdB8&S{+9U85Tr7$em z=LNxOFHp*XYHwp0WAle+$!sy~(sBOoA}KjKsw^Ig((o;wAZbW`!ccv&G93s=e?w1V z3e|{v%uhRgH@#rj+WJ_JmL%k}e7@6c8Ddv((BNsOJ>+^G+H70#Zfheg+usF8U4P!H z3uKnJ2ikT*BUE}Lg;+=WadUbq)W=)L3RcSN@6wQd^wS|vIB;m8UUm#3E{n_S1{vB& 
zwBV9BmKEt#3Xm!m;HI1`fu7=}d&l4f>ClTtJbdR2N?vWa4PCV$^OjI~Ndi*d^ag`N z39J_oDw2+Lf~;T$enPrKD7h)yg8W@=`b9vb5~&)A@+w-gO0Nr@(i%;sFs}2CvkeudabsMCH#XP%yghe~sF-GH!RmL}1)Vf#G6SPpV9x|ghDo??fLMwC{ zbLyv)e$Eug%WIiA*r;Qnd7kGWIJ7`&QOv>Jv$CAk?X;=?51COnRRh3B9OuT;YPAvB zJ-gK%NUJf25M2E&AG-bjdN3vGaaMukja6@?icFh4Q_D#imHBzs=0Ibu>e`svQOo?t zfH#jDcu518`!Ug6M=QONIYiN?`QXJ3;#>9FexQ=D@JU`i7TkMI7Vc0WnKwUgsB~g) zqTotEaEyZUa8be(l)97Y(wn?G=Bvusevj;3%PCf)jCdkHZm$OGTW?H9ngakj8snpZ zKVd*Jj@W7-ZrXX9HWJZG+nmpM=0&gcBl6qPDjoTY3{xL2ep8O4HwpkUa(!HVMa1u%gcl%hzO;S%?Ao*L>%-K$eM%gPI%*9O zSj$T}0j{XCnzQ;{ElGO-Y#EqU!(UE^)SgRk{=n5%kNPv1`r)2hFVTPB)J>BnLJuhzaL$2kZdo0~7VZYSzk`sCe$Z}k*@f352DqIK% zu~WICG$&i)F`mt`WPYtHGacK6(!xXHlcewx!tB$S<22wR5pFqx`Hz90x1 zwSEL^Yfx5iG}WD^jrI6n=;{PX6hXvaF@ED_fOcIOo16T7f7C3~vTf*}z}xkT-}1(q zk-(OABe30YTH2(xKQkXQ{Vq8_dG0TS`SE{JvWL=9QD9+2Qo02t>nA>uMlE#+?t@en zVm@5269`CC83n;I``N>vdV7bZ&Qb%KhMP|}l2((o2f!E2&W)rY<48AoE*c!4<7&;{ z0+2Cy)5~`dAb{REJJd)e9LurEPhM+XBcW+$53lCbH+}!;w?)%=q_B{UUE(Rp{<;@#kGqUiI!4(Mk(`bjDILi<5 zbzwdf?*j1^hs+mcy9$C-^JV)k-+G@V3qT<^eee8sA$7YR4p4Os6_LL zRMQFB>HdrAV3bWNeSb*z^t*Y3Kj}!0(D4k=NA1<7W-A5uevd~8`=*)}Hdes@kqu$2yNf}N3=qr!0(N7@u!>8}&k zP$vQ;xw-4s4}@__pt=X_S`DEMM-Ko#?fugr;ZAi1AJ?w*C7M^%oX_BUz7?f1T34;T zg;O`?*^qWQ_=p09dbaaRi*xjzaUkLNOH9#!_^;d0YpBY7iI&15S(_j`q)``H6MY*5 zoCOR6f*QQ?(``D6Y{OJG$x;*xVw$iR`J(`3K5@dV5?Up)6rj-%Kwwv; z?Gy`DZ6~}O3keES-B3QZx2>G-zJ`h3k0~gF5pjPVFCR@zEUmAtElo^Jpw|=d9Un`X zW5InT#z+-ksFd^(E;G#Um3l$|qz3naFAqqrL`o|M@V}Y69?wRyI1b@~MW@@A0eX^z zlE_G%%#De)Bgx)e2wgX+UO9J{BhF^AdLfn^t~R%VJDlFpVJW2=iUu-V=(!vrAt_qe zN}zRE!2JXJUS=kn?R#&2bjI3GTZHK}olm~s&wJnZ{b8q8a;r8}IbRDiNq5rsJpjK7 z;Gq+GaM2$8cyP2g*T-bBV$W4TwZ59KsUS|@bjfVNTrPm0$|^K zW>%5oAWi}JW)0P>d1;;m%%Azo-XE%?l~82BUetd>qV5eqIMiGHpvr%NAH#ahiCJlm z6Oub7LhSC3U=x(S-UX#U>IkT=F3OpGQP=~c1=OHi4 zi+LVGZAl-E-|;+OeHhA#OAsRZMwq=?_&1$S{{e9|nsf-Q@If?Y*0BLQLf%z%E;}Lw z@M@)AKkKO}z^*+b9!XEE$q8uFn8#vLqnpT<**u42UzeNZRvHbP^{{9=( 
zriMr~FjE_%4hR7OAjK9jd$IzjL@KZx@JVm0OGbN5a40jkSB^k|A%O!`e_ZL%y!yK`Y3vk=>T2=$?flmB-c~?nPq5%tTM%yjfVwV5>g_FYY6>-s^Dm+jyJxNA5#cf z`0sJ`x#S=$A@^$a;c$3StJP4s`_rbvdwtU03Bl`1QL#K)jJ-LbT8JnXi(>mHT1MJN z%-HHVA|z}a7lvKF|1gb^a$|ux*#zW}!(8jm-bKBFh8V}1O;uB5mUG1h8UP{yUkN$t z_xl4tZZn5z2u4UIcS?Etd?b$TL#Tv+0B>Z|MFYSuLdwj8ott|Fjw4wK>S)Gl%_rYL zyHu}`6X-j)kToF;g^<~l$++xfrw+`*=SVz79eL!Qkn?J{{YC55 z?s~JSs!$`8DMA|R(Ddr_%31Bx;de+c=I3`3HNtZm5FK`HV)11!LmuHePYB+m1Fd?^ zSQZG_k35?BtYCt0{hbLZX39@d7n2%>Ivl!j)>6Zsl-VTYLu$WsZw`0i9ZzZFqwB`#1K`F&wqRV1As{&;1RxOC%s#-lxBvSP z3vf5Dp=OYfrCNw$oXCFSVt$AmF*2NfsY>eKEw*#qZjZW%x1I2 z>@8f<2w|n5;eK2YVmhsOJnW|MObJWXa{{)PL;@kQs;V0A-vbml!DkD(LN3etp7OGC zZ{<8oK$${F!aPxTH9v$)=*4WYh#YW(#7TxBKqOg+Rw|YLl6`xA-=bzGg8!m!d)Y7T zrDG!6q-Cn9&k4@m$Rl1dvWFre-bFO&=EZ|;63WE}PRPMQ2}=_G=$5_qMz`^Mb%ltB zI3cqHheg)y$fML^a5hmD38u5eq{7QbJH)}*m%4?#W$*e~BS*4$B3zM@tEK~mLQ(~d zgoKpT8I%;efjBTIkn$TEe4Mkva~o~6-{6>;J8*Cad<@KGPR<7t3|wFsv*Qo2c(M)e z7?=-m@2RR(wB_ryT4n6r^)GO>)uo2+pI*H`Rqwr<64E^YA-0k2-*IEk+-IlTu} z2{NKHqNl_e+uwcto3BTJYyZAMNON&^cHvnj#Cqt&s%PWX%6wgr>4YF7Otn;C^x^NiTkA^)(eL$ddv)Gr--;B{frJf;(Z9*jO1+?W3m zg0f(>`mWtdNVy1a$0EA_m4n=aA7YaaS6RF}_A$(S5hSiD%9Nyxxr{}R+n=z%A7Y6Q z)t@53Dgjd^#H7LrnR{iB((UrL87|Ou(8+j`2SMMEn;T|?P7LgYPKc9fLbms2F;S7N zC~L)sC`b2CX$rl-QnS}H7iRM4zPU4;qvm-l6*uo(0t787VR~Q zymqlKq9i1@M2LMq1W%njPVl`XQ8}KTjrQH7um7#i2-_p%k@_L_QPn|ea&G4nqAo5A zt4mgRbaeK%v`NUHEE9s;^qDgoWt^zcV3B*yTtdcM!Is(=5-!g%5GyeZ+*ALpz>;}T9YJ@u?y3tb^GV6Ep^Cd7%-RC}6sK%hqQ z#}miN-X0wvVe0z{jF@q9etdj{ajxj>`1p^N<9HWG$Ilu$@mNhfKIxsA=HAXRp28L` z7GKygaVl%-B+zsuZRt4H2_dbDjm6v=AKf7ma&dTwqw^L<%sn`P@fi>jq9qoAUOa{i zKmN;jvP&Z$uZdF~;eZ!$gl|}v5Jbfi zV^7ZqiB8U5M7I!f-f2R*?g$BacGz{HB&0(^`iTU)Wpqf$rXNDyy?>3>0HCc6Xl3~S zJ&f;xkWZkcKL7#OFk+4AcV7Xn`zi~4t+z~1yn^d6k{yCBS?Skq5DabXX=`-3eL|c@ zn{dKX?AGf`bTlXD{Yg0-8v+Etk;X7X6(RTC`8m8%!nX}!tkew+jCuDh=#))D$feT8 z2kIOTc_V*c=Mu(OJs|)fK3onVZ2vW^P zkk*Yi>&xAR5GPcX(9%Lg#EN-o2m8MVsD5}?C>dCz;mws&4Odb%z<8;22t1^i)AsoRH# zjeL7sjB^e=G+~>^f`VM&W|2#^;z(#7ndZLMC1f1gPn7|Y=B4w%$WER;J31d--B?o0 
zh!~H~?c)dyV|5(od0f|mGRD$02?AWCOrIOvTotiz(%I}w$QXQ`228D6Zd=+Z9Z>Nd z34vl--=6eKmh7cNGICG)Y6)r~NkC^KQUC+E8VuM$y|IoA8}~0ECZ3#VN*I>upAA1) z>_~`mO|()+J%HKa*1;D+>fQvU6sZ6%ii8OugJ%{M7PNSNsu%!hCCFwWQn@(Nugyr zxIk%Auy|Q ziB{l(2E)S2ih_yUFFj2Pxs!j(%Vua(vflkWnF3~)m^*9F=q0iX7gN4hm60t4Ip$${ zbK>z;-o#7>Zj?MHDXm+-%(j+Y8{RAZyk@2!Uy$>;$ABEGNuNYEMN%++-7SM^0#s}JwMYxa3B|<=g+|1!Ux7gX-y1ovmbpM53PTM8BQc-uhg`gc2)uom zMPPPl!oGxTy-Il0*26DrO!6S1b-ebQCW@-6EIrRF%c_c^rdhVJ0Kp1|TUeUPu@Fr* zNkWVtasVLKBm_r%*qAQT9*l!`_9X-{M?wsEwX9u~4dxDzW+B=meOQ4C(F+jTD4X6b zkV$Ev<1u-;iyvaTBjEA2nSO{;Zeg7cEd!HShlw7)gcwr7TqH%$3UxK`6$7NkwZ&^@ zfh&@_jWWJY^mb<|qjF@DglzgD#>yhQC64l@6{%8=6ok31wD3HB3E7ZwuL@&|@f?tJ z=57T`gCbxQjphDU-z|oCjHG0O4zc2gXf`}QmvMBeqywvh+k<__aQlOh@v>RxwQvKZ zB~g;0kphw=!S`sL51xw*S^|rTsx*$WDOKQ9531RINaj_3jK(4xmeaJx!0peD5W|W7 zW@Y5><#A0}Dv@Y%sEG4+ZyA-wqZ*|@up+3#lIvf5!eMKIe#q7S9f>3Z6P%Y38XF0S zEi+a;U01p)(ZlEv%bOc=`>KM$ga5U2I|lbfp`CxwMIv8D-c)|TC}UaMtALM9^|ES~ zYmi_;85$yqOh=+xG%$`{ah&-JB4oTikULPwLlKx&gQWoyB;xh0j;67)2nxoweMQc_ z5?NTZ$bvM=b?S?DYt_|M#LH4Ex1S#&)`f~MYwgk)hbF*KS>1*h)r)#Rlk*I~&km0c zPh_{~Kkvf@4=N#_%2hIoqQ>`^;U1Du3peefhMJ--*N8Lppe+IMnn|}#Gzr23^)fc9Gp9Gi;3Uik`2F^>0RguzK z#(PFwJP{%}Y1-Hg05?1uhLE;1APHV{U3ZoS|M|m@KYsHZ{{IV@y!DWpQ1ja zNNAle{HWwxmb@oIluP61XgHfRN9t~ZvYrs?eun*!zkd(!zeGY9-PmLW3~msTQ1s>$ z6#|%OP&W-!kS9WZ+NG(QMIH+bl_8`*V;=pp;}g0eI;plsbNiYn>7!4Sba_ zD(X7U+jZzim6uH@xAjEGW;i3`=xMffEdcn`vmQaQ(twJAs}<_(=g*%f3@3;lbrW|%b z_T^Yav{?cuN07Zj;f!9HpQ}%%oNu zp*r?Iuy?X$$1?3Ux``z7p&j3V=Gu|BCkU~>XSi)JrNr-GK zTsg{wE-L^;T#_kfw?nZmX5m{?Ve6Ypp7tvTd2K7VpX2h3D0!wz+0^_uXx2vQmoKPy z=G!joqqpAr4RkKXj{!Ns)T!EgDcikP*?tLtT_0yWr%pL3lnvt2!0_C7~IVu^iTmYbJjK9*;jK#N91VXKn_87t<<)>29b2Tb0 zCUh~Dk@KTEHBHR{Kt1U4^|{OTaEWn5t6V%5cPI@hIY`BpZj4Zl%#?opP7Y94xGZ#h z*Ura}AAioZ04ZDx2A!qgTwQX4u*x_A0YTJ~oOA^MtztkBaMF)KknZ%b(grPVl&-mo z!TWWFk+J|FSbB1RnjW=eDxKi-+X2QEsLTNbq9!7KS4tRaLCDXf7#9Gynk0q`@x)q~ z@Qg;b*{H&eEG`LU_$qt`qe;ghjG^LNCRwhdp5od0Nn{l z`y&Soh_LWAOKq&TEJRq9Tzjon4GV!@Z7q2ShzTF7Kvhd2bvF2*&e%g331~#IP+0vN 
z(EK_TlQwrMuyf}sfL2jqAt=eR7oVi&JrskFY$_6R7GzRXXYVYpK7z)?giR7DISc^J zZI$~G2wQ2X-d<6FP+}}M=yLL;G>{Y;tliq=VvLwL~!>IRg-BFuI`t2)(^I49KRC zE-yX0opCnbg`xw?8!M_V%DYfLi%)75D6imS=^}hQdHQDKiqeRYtESYw{Iupd6Nn`M zIJrvjJdqBk#Lt9GHa`Ky#X#u$^imi#^O4WE`NVos=^`HT2@dMVzdA8o)=tzOahZwVFpbbC^j>GaYhps6 z=?F43;VV&EGXBWp7w8&5j$iO?W5ZhdIY1ETGP#&*Wyen*b1#`HfDk#W?3F*!s^A}O z70AwZ`;SFge6wR5RDAa!8wq3jh-11Ws`?@sjPSb6$`-^|kZm|? zbd)i%b>c4;c=Wl;_WEO5GAY8_hHodn9@v8nOYEc15 zMNLqC?ek0m0R5!@;`y)SH_p6Qg<7?c;YFo*uHaE;KZq>`m`kKs1=wL0$A_vyyb5En zg|jezt2n9rxuUI|BmEl@FEl340KG)@Psb9oOQ&?{(d=E1hb&hDq6_jjmEv||C{dW4 zo>vgm7ZMp?d>2jsXh)>Z!n07qn*DZ{1uq{U9SaA*doHV;p0|)+-YPkEVv1hLMji;V3qV+i$ySTBvw496q093R9c)8 zBK}Mj@*>Ux?1)KOoXj;m%W)^?A6=K@F?Re3lwzuuysA@aYI!xL0+&Dc<>hq;l)OSb z`$u_B_NF(~ZUcli_QQ7{S-`B5hY&1FumrG#K`jVF%K1p3dDpMF#XiV5RaIn0p22wBvtw5@=Zp(Zx%5sRIL5rA@7=XMper@B(;`mRU0<^S{ z;lY7{F!U*nt4(5Lb})biD1n@XjO2q|FAFD3S*3tkf)xXp17jn==7@EGAaECK5(9@3 zM`CNqfdLBw2!XK)2HKy3N*lKbrNK1+K;Nu3bridm?6BC7uYvE(?3>lFAC2D33aydx zP{aab>{#dQPXidlVgRb-D3Qan8>9v@k{{B|x$j2~gd~Q-xKbE;FCe1efI3wOLLmT9 z72wbT|JWHoM+Y36d|1)c*na)x%YXgiFb=JI1f+?+{74MoAgP-lHiV%7xF{qB-UG-$ z;+kOL2%WJ+xvU}bG0Mzi)SxNnh_N!sK{S<{6p$c~Au<)HF>*f;%t>1Gep5!qUedDn zOAi@h^Tv&|2Tn7cZhDBmx5%HR)9DuVY@}bL8yvCq;zhdYAbLTdcNo}gwpe)Lpt-Su z(*qg0q=f>6GYZo{1M=y12lP8QfED^* zJkznafH3g3iylDh2C0SMDQ_S++vH6Gk|r-Y2u<>P9zsKT3)(ZQ0o0v*8&Qog93d-Q zGd@1`k3NS!jX?-QLm(jL*YsSBG9s&x*BOCOPXTZc66BL60hy$NMnI;?%K)rGu7K33 zLxocZ0;mBaAV0N%kUX+OAon_fpaPQa075zh5(FZu5r!lfyrd1EOMnAoiju^p=>!A7LZRhaUvk3kY@3teIu)8Xr$b%e`E;bxQ70{&KzPAI5exPy>u(f@!1z`cXpz)LDkZ^%OjNa<%N+V#fXhKzJ z*E_tU0Xq&b60$##LweWwTh2QI-gSJ|{@~&v0U2}77Z6c?NJ=1kZyh_dqpRAWaI}8B zqf=C=-Gk78nCA)zqDmHDJIERDys(`7*akn62Q|Oc5pWbwu;)B}Q}}Kn=o~odO=@8u z1Q1d?2jIylG7u1yg>hKKw_;d_Kv2|a#KB7!)%N5=0fI^=>mf`I+Le937eM0Lr48Y? 
z$F9WScN}I%AEi%bc;9ck?Y!;Tk3>@6Xo3Jej*Ia~H30ASDeyXeo`%3UAH%f(ey-MgKO zKah{q6cM}fwLxtl5S*=p2>9WMWRY6tTdEtXDL4%S6$w2K5RiQ^IYcWb%CVk!VBWIl z$6;N{+)92l5%1+FIDCL<7+a$ zx+$N`0{4N`q;zfhvoVv&_2*?WYwOo*8L8iTAal!m(#~4)ATlG9>zOC)e|pKyls3A( zFb{6MgaU-0$7Qd7PpStX;N8|o@veMLpHj;rFLO6*JMv~;#v>g-ejcfAS*z8ZgMZK6 zS{kXzzj^vUP}Fi$Bb)B*aCN6t)k{l}@1qy=JCTuBnTvvT1#<3|y&nhVp$lOE>2Zc| zu_vV!)#akIm79&^Woq^DT{*h;4iJ68sIDg?ltZd|&ay0-tm#w2*vmaPa4&1wnS-j` zXCR2pjBQ!Ae17IYv{UWW2h@XIhQk2D>eJJcW%8~-Hl=jkYzOAw~H+d>*nH^IYdvV zJrR(h*kIhXEGue52uYorj@ipup_bA2am8IAAbsKukWHD~pOGm*)&a!ZI(R%+S{kn2 z?GJ$b`1l8XW5gpMdTKTDO3&+eMus<|3(&(p#PdAP!VniL6S^L?-1wk5&@+(Xl?kh~ z*iUj90XeIkA>vbMyHbvNTT=Gpp)^)!SsE+St$7|UI3VS4A*InGmyHR)9sAVq# z)Us2CmyJf_EAb6nAs;t9GFn8R^*&_z;_TI{mp}jP`mN|tZyUoEefIKafBEQ>Pd@s4 ze;7baP1DTYMlU!Afhafw68b2GO6U0I$|0aKMvfG!!c|q(sk_630BUDkfPXe2df?%- z8jY~Nq56$hGgd(u2Ey=S&ZSU?<3PuQ3?fA^&|Pp)+#IYvfC$dco%LxP+kbn3&NFX#hFE6nW!+5$#H}w;u2V{R6iwZvw>;Qm5UwaXJ*{71_t4d>T zGzTpJcw1F#7q*8_&|uyn*bU)8qDYLd`%#lX4#&kH5+(kGAWCWvv;HDo5Id>_1Q%__ zOvD;uixESR*OIBWgv`L6N!u!rt#j7>0~0t%hD`1Lg35@%h2@)>dYnogU71P#5=cBxRg(!xp0t%cYui8=$2Ujfh!%@O zAasK+hYv!G!M()@GHCKvgh5?Q10hokp_G_XC>Sb@#Z#f1HbV!00%xeb^%HpM6#7AW zzB|3)sG*JSC%-)YqX)$E^CJl%$G0pP_p(?ObkOfG6h z2#k{b^jpF7D~JDXgXunrAZZqI((q7!UI>iGv)hH?yckzfO`OHzVIEbCz&J~;!= zU2Pu2iV;H2*UK$nK>_9jt-!!`x$Z;K{)P~e-Sa6Ldf=9B=XNxtNYWp|x)3Tb zo1sr6MF*`_)qrv#{fk`4Yb=KYgQuL~dmnu*~2Hg&+31JvdCOD8YR*0gQJ}Z2vC18wq1tMd-oyizukzo!{ zLI%AXV(Sp%3jU(TagGxw?{Gw~+z?U8pcK-p2|?)Uz^YjZq7>D(hsU^#{lg3Hsv`hEs+^Z_M zzdHBabE@cs;w2VMQy3M`G)Ici7R|P)B+%ew6Cem#7lCN8WU*LG4pa>jJ{!7)4r16e zMc>Drs@Bq4$pMVu(n=OvI1y<@Xk!Dw5ReZaViaJv?f3n6Q0i<$R_3z65Z$GL#VLc>9_UBNyeRtQh(Rvj;^EA z3mEGF0LXM0XC-~H1B0nt2L!XMzT~(X=S3A;)n!dM3~qbc5JQDU?FFZ}_XQ127-F@Q zQRYX6==2$@x0bP~Dnlz2jh=%Bh;9YsU*8r;$lNfra(P*D3nLvjyQe2S&m8Hv%>l=Q_ata|h4b@&t6RTTq&{B^&KttO8_~^T5gW5$(YwQ z+jVo+MkQytmX*y~?7+5c@))ShN@p2kc7|CNbMsl2se~2*`McYNQpvK7N*A8;10K?D zZ#o^cXQp#JVuiNJJ7;Zn=%Fcb7!URR^#q2RY^fdaHVC1Q^-IhFkh6s2o+*oW6kXqB 
z1sy;XpTlsYhZKu7R_{5OQC@t>>cz5i1a*k{J#Fw-)fuYQl!l>YULO`%vB8{S*=gO5_ zSP9oEb7@2jDW9`W(pD25kj2W>gqNJy0J3dmY5wzINkE#F49ori5L(qcrfzN?mjHyf z5g_)CkDY?&w0Aw{Y|%N^6BI<0byhg;v%L}T?0WEm5nqacSTz3$eVy^m36QCW(8ZpL zpgxWvoChs6Ssga`=}TTT&RJua;zw0}YE(IH_)|lrjny)+4Ffi$_yI5YzURPwra+df z0m4b}RzM&>5r`YQ-4j{LHoI0%?lvoPuG{UpZXPfyYhlG*L$JU|1PKd6LaA%zD>V5T zb`gkt6OdnBi6JJcT79Qb-~z}#D?koS=S;`p2_Sa_5R2@H9{}P2*3w8US%^P~_WOM4 zcvlCE5kkpzF(f{YA%lTx^cr-v(=w+2TwHBfUD}C&6qmsxMnINe9|I9k20oT7UQi7~ z>kSN}z>)y+3oThPq-!-dLIEW2h8r6jSqVVoSs?DlMl*W>h;3v3U8ph~NFo~zk(IO@J(thXB-WyN?yEa0Vy>NQbpId0*czDQ(UU+nb|4J8pAF zPu%DsLj(0$_5a!F_dNnKFusFoG>TOx0s=&_R^%Z^MzwKTMo=$DKw3U8QYb%9#eF^; zIP4T2lwu7v{&VfJ-n$-1ZVuBYed&g@WDyW7TN0M6xm-5_5=wcoWPO>l?6ljIb52yj33LMzv2D}sS(sQtRPCyXUJ6?j=G0u70 zb6A14z33S2!@!P}fSG+poyR6;Fz9z?fXoDSQq=EwP;+QKDjMH9yy0^IX#q&0%jD{8w6zMoGwv}JI+oG8fzHm_~gYO+x9}b=%%tlu=;4LJZG$o2Z++ z%(XJI1rLFjbXhnK>bAuY<*`OFd)qoeD|9P^AqO&mV3d?00|sBeWF;0$ZIK`<`$14D zZF+qV$z3T3*7fIoPw{Y^`|bTcxd^@mL0ebWgT7Zo%2Df&bo^=)mcF;%C-Hd9Jz)+{v{1hB_zeySH#jb1Iaqn(o_ zR%`62&{8`pHA7WQEsHjesGtBH*?Tu@O8izuyVHhGUjH}R94Qiae8q^czA4Mr0!^sc5CKbz&C=SR`@0(Pl~~*8_Q%G6gbQTpSmwv`Kk4(&Lv3y=2nJ zFc}%?kqS*48=(J;K;HX>ii6IyO=!O?+~O$@wk= zdH557pdNlL5a~@bF-p?ZE=?>F$<^v3+20-r6=|bJC_>ehv3=T$ghVHq)*{DiR6-bk z6bL@1-VmczhnBCXdKE2G%hX`0!Y4kWv6@)lq6W4pkOi?!LPJ@LC|! 
zc($h#D?uiog2=^p8fJJ%<8|%rf$WzMw^2Am+8)iTVjg-5?D&LvpWt|TOgE&=y(o}%1CGI4L%hK6{;O7i9%ne+KF?BG``U9`^d>7 zr%~29ci7(UK4*-#m76@|S|H1)BpPw!J3egGMWE1@eWV$2RCUpzQW}qljd5j%JLu?f z#`zIU)APhG1Hnu_{N$U3`P=WZ9^8BVm|m8pSC=jVk>=bSO}VvgTVo*G%)&&c8M86n zmH^`BWZ4aE8+9*Cz8EH@yG-OCZjObr^tK+d=Yw|-sKjDYVn(86TA;0 zm1X9>y2QIahoFTW~dAmZiY|K6Hkoxkmp1-bkF%18f!ckdoPeLAybA(z1P zgT~*0AtKatDJQWo`4w9L0So|XR;U=6$>s+-87CkV8v31)vxfqEp<|ut7lFL-q`t^sJa3nZC!E7~IwL|k1@ije?mrY4`}fCJ zJOqMloaP;Acmza4+j8m#JfwjJnS5$#K=xQ`km7ZaD;oaN>2mD)CF}mDzj=zcBkylL zT75GA0`q&jS{8`>?$yfIAARt_C!aKDUlBoqC%bNC>>;x4<~K0YSr^IqED+Li;f5Ol z3A3&MVpG}oGLSExZGHNIkUx1dE52g#XmOS(*jX{`lF}2cOWjKKxU1nY=>CR1uYwb6zAMumQx;_b5*pJs&wds_n&CG79P# z#2NrHTm}?6)9k4PkdL2z@%Yn^zFt{*wYvJ`$@>p(?I9rY+ZT@>eFbSsGqcxqrN3F( z36L<9;0J`lW_Fr@%#DGNhDxwBi6LVk-K#+UvbBXvk$gd~Wlbfa6gqm)(E&uc10egT z?K!A#^$RFA9s(9y4ItufqLy5ZiTuU)YUPN z(Tu@3R;0KLC$gZF{G3_wIwccQ3stC)>FH|V zm{^%Seh455eV>hENC`l=!&%|%j^9y4^&X>{_mSl2v?gYl|oB*=6h6DY%X1=nGj`7cORAkJ# zYgnl z7zo2trAW0DkBO3|8Bqo{Z;`q%gr!qf!~>Ltr7K-Jbm)SF#D>HR@Bl2(g-75WcnZ$G z94F$afs$kBFU7XbiK^yHe2(>d%wom}BpktyAqprC0>V;p8wh`tTR}}9;Z2QQaF8vl zxf-uYf)yNHK-?1$5)f8Kt0(N8gDRsQtTO7PkQF(Hx`5dKi9pCv%?3udK4SfDHK>Fv z;wHnTX&T0q_&S){^%+=WF=V#>Rw9sT>!u!-)O{en{BR50o~RGRK0^q^^Q^lPMG=fO zCr~$s=ww^;0%7-idwq}*F)gmR$wprg#d?5rs(;8*aV%`M_JPQVa|i-a2&7wVw&sY} zAb=F}#6Z+QZ^+RAVf|#t`jz_~i~k(5HgCfFLxN>WK=i5f{*cM8_yUq%*c`BTqeojv z0fkMLf2R3(8VU$Y=Z~^LlErMEB9M_jTf-bljt9($FRKcHz4jrv3cF5^FIRd zc&KoCzDOcL9>KE62}m42JUh3Jgxu_F2hS|#5H-@peQjS63Ax3tyIEchnA9;9DSh(- zeuP9uQM9_+rdM3UL}GU2;g|Nm4Vm>1~y>j+F5KREP)hbv4j0_ z=wV@Dt9Bxs2uWdc+<_q6D-6DYwJ%_6ZR-n|!($h(n!xU_U;f4BgCq?R*uPy?Y4!S& zj)6H&4&?Tx=SWD$W~e1gb+1+N{q}ksmv^U3gy6cjN_X`3^l>qY`>*E)31MNjqfM9V z&*|!H43DetO^m>AV*~t<&b~qBMu?YnBgR~zC4^9z{<_A-7+aY*pUq<{6uE*;%&Iw%mqS_!0M{XjQO3Iab6mO`UW9Gin(82%viNy zAO^#e=<{w4u28|_BPyRw#_D5x81+L#^ME{l*fhU=XhS_k=*uUSA<6ncp{cP=xwOUS{XID0*xVWbv{e#=4MrLehV~Ljs9U zUV*4wf$+Yc7KrfhM}e08m_eR>WXr<%Kp;sHX0$-?r#d6?MO+0!17^#5yE%~Y{AVC? 
zUm*MpY3(-5kKr>!0)&m9AwS9kurg(5h{(kZ0XsvkBX+-O%MuV;LdWgsvm0s?`lsVPh?G3rC6t(_!5^`R3Ufk_8|vwE}x64Ip}nv(EgWzYsG z+5tJMLKA*Tcy5gE4>9G&v6=)%AukWYu>=JNFb(Oq=i#S%r7oaVO6xgw&kooOg@9iz zd}W&kMF7_#2aS!uDLiOW>xeZqg`E;d>3l7=J%*;-*R?>F;VQByUzbJEOs)jZ1$AX5 zEPdwNMUb8Dd?QsWSJ$ zJJZMocMfvHHhsc3w!_zDdDH_fqs_`ii3R{IXUy6*5XJGasWRQcA%U^zqo2X>C) zsEb_+#fx~*42BHm3LRV!QK59umeFIuWUw|vhd72V!6anPs2+baB~CKo_nKhyj1KOR}u+uP`0v`Hba8K7vA+E+5KFrEc)eQlhrTxZ6 zR#N}zyd3~yZ85|^gr8pce*;qS_GZf0Pf!JwW>~S3uNszq{ib2je#35_J@by z9GDG9m<-M;fjk$%Cl@>3CP12p>Ao!x1_3XZ3rz-GElQq?0Qr90d=yA=Wa%sKj}F7q zJihYFqAF&;mc-Ec^5*&K9;xYmf3hJEZzq+I@h}a@?Y6UGBXe{T#6W;h69ym*1Kohy z3;-s#kWbPX(^E^N3Z!&o`Fexc)vKuMGStCLR%G&o03Jqk1Jx!#ylF!T(c3#qqynUT zWbNJexSfTO>k^uhBN(ylVOFhVYX9SGpOn7wK_q??`#P?4w&03XisE*H*qa{a4pcJtn z8Uy1#0VKhTx z2w7K=>YXv&Q^zw466DoTzpB@Iez(0t8rSiDQPn03bg8z1_l*E3q$^f zuJtl72p9oTAs_@)nUER=2`CC2kkILpnpKWvGIBL+&kH8_BzL{)-vm#Mn8SuH)BgOsx9J8FSKnpb*HVkvZ>aS$m`Zx zUs}Bqy36cst+nCNiulf@`-iT-_zrIPFe3ymVf*MdJREx^=0jTDSlh&{wrpJl8kiZt zS`lkwDqXD17+YbA`t~26gOQmFcJ2wGam#oWh}^dQ(CThsQ5QzL z-zHBQ{{R!e|I0US^t-Pi7hkY5FNC;#x^=?!ihGFHRz%ZU$C}l_b#mPeMw{3+FO2G> z?yoH`txUL@h_`DXM7ELs^s}GLq%4S#gAkNj9#Vd7S#4CyY;tX6Aln9PGu^DUEXFpM z8rOwPkhHfiX(-lF`wX(y84-dK0}@T0Kkt=D9WqGvXb#v3Mbfejz@lacXV|`Z$z!IqJ zCkt_ZV+Ftx%PR{o)epg-P1h)$!GpEUbW)GpvMRS$RMTA{0F%Ff>#QHIhQI-PBm^SF z(c>bE+bOk7n`jm4u{LckJEnDCP04%twSgTITV4mMxK$IqD_%z5J3;_fC4Yt~`o{tno_9=|xPZQH?g-IACfLXBf|KJKa^}0eKR8(kv@M=U zZHyo0@IxhGAYz@FRm6g93^Orkor%n=>_(YY&vlQygAfh-D0G)MoRqTmsmJIIuZ>}G z&$DLGxyhqOWeLj0!I-?tSZKhB=}9^vd{;;&c=QA&h_ey0r%@Ie-EisZL`G#_MLG1W zk$F#&K8%bJ!3bR%qE(KR!T6pK#?x=ylzrD8cVg&->;nWEB0eIR_>@qL2{h!!?+DgW zSi;lrWIplG=dl&!6(J~qvDWEA9E>82e2fV3C?OR2jzAoYhOi+NBV}MMs_cloSA<9y zYyE7ls2^YyReKo4*ufluAO|DFcU&Z`#{p(~fZZ3uKK_rp?c#BSJX#IeZ@})@z`Ngf z4VL?d+l5FxirqZKdsT@1?vu~nIVEM?haG6gFgFjum?aqB7s5FE_6dBw!n}|J48IPx z-;glbfFI}|enklJKmO`{xG3x7QP%l)ayS-p+z8?bxuXg14lzXT@5W;xFP=Y#%jD*Z z`s-i>VB~+n2rTXkLE@L+K7-r*&DRj`xM75LFuH$)@6agrj)dHN{q>h{f4~b;*5yKo 
zD$BCn2&NJQkK)#DkayG$Mcs#=nOQ>}d4Ld-v_MO$M)lNX{ML@-bucGHAfW;FTgY0o z5pvY%;9z~&G_G3EN|!wtKWYadsH*ck`Uqx-kC?LNh1539b6>6Mj1yFq(T%})Mu<|P z{`EUYHRLE|9W}JFd7d*l)hk43sBL6@G9yF%37Gi3@<4s%0On`_s~iji3={7}A&|38q}?Vd-wb!0}!%sgZV z+r0|jGtnNlTVP6n=AI!lLS||Rz6T?BVPp?;Yj#V7PZjd0H3W0nV^Rf~o04&s`54v_ zHButd0Amluc|zcMrgBDb7>wkS2;p(ykxFF(UIkT|fcPVF2MAyTh{yv>!laRTLotyF zeuI$NQ5F(`AVlU29MCXe7+~Apgxd|+F^Ov8ydfCL9^)AyXO6O#c(svBx7`GX>7u6|jY|hei>38Q=$I%hfeGuXtIo&g*N_WO z13DH$kELQxwNY?h2*F{rKTopQqi*YCyhCe()F#LUqs~3}6 zjwl3}6QXVPo*4?W2H)4q^UGeRE!tP=+(ymUfF5W)#pAOs_bBTtn&tSIemaA;j$wgb|m^7D4p=tz$bO{@O+4zae4Wbc{Z{sAHbK z6oPmKLR`<}2M~S1LCD&0SsGDx>#m`1H9}rn)IrE_W$JEZ8=is?Hj=6^aEt~wRBDP? zvp?=qmSAWAAr(`aJ@1D1v?@j>9aG%o!&S+KvRp2vHwZbJ7|suS9~dDXDlO*qqF7Z$ z&$H!*uR0Heph_=_BrY14>H`~>izRf=9c}pBl=ablAzT#;TeXI(#mH6XYN|nQ1BR}T zfH`#RgKeP?BTMlmLQ+@Mp+@?xMQqvgxJ%)P>fHvIs$8YZA|LFi0k$H%?)+0z*5ghL zNz5<_rSw86h-f^*8XPtPlS(zTl9;ImF;+E=l(&>HLC&oqKe{6Xvj*cZmOfCaXnX)d zlMFzOA2!vN*@Q8`1i%=%WMYT`fJv+bj~n7Q0{(|WH3WLAU;_j&-5Im2X&g+91kVV0 z+*3&p6GOV2BjMdNcKeu;m+Poo5WJs0kq6AwH;0KK-odD&Y=Ou%x`!Q@TNpimV)(~{ zkS)NYcVN5TY+Y>+S(YS@f2L{P0tT+*V)Ga9HQ_LfqN!?qusrX(X*r5dtS$ zD|9goskbZyzBVK$mu@zSBX!Cs3ob`^io;7`45Be(^FrWq1VJu^RZ;qug}|km2_nS9 z4~-5=(GcY-n~xBev;H0s5>fw;k1R0td%A}*5!9TEjHxNzhG#gHk4F7+NdB$EZ13&ja^DP#;8x|Kqq z(Em`s%Tbk(_}V+{XCnkgJX=rC_wIC`E@px&4?zNMP@#nCjVNTpJkiL!<~T*A+aXZo zTDQ4383P$7Aae->nKA}R zGRJ5y&`bMQJD<)1$w-qVYjp@bvU@vJ#1q2>NE;;^vC>XK;F0s~vmi3)0rejf62LN+#(>R&cVij^XA*hZv8HL|%HM%_YMOKwh~8fk!^_ybUBvsMb!0K$ZU1B!sM$ zQDVZc>I7MB6)s?kqZ^bEQ$Hk$xxK~7KL~;j`)NL4-eeSHG02vtEL-rDrg!Cl29Y6Y z3$>=f#cmZtp3^u4?1T%1U#B4Gj=cX$2O)H{HjTTaOB!|iO0V`yHi$z|m>8K`K~`Jz z*?DAlUa7k9MXfX!Cj8Qef^2rNw^QU=2a#3xWDRo(g3I~)9akp-WWwk{8s-)x{GNJ; z7jO4I+;V?ZNQgt3P`5*{EaAboo5KLI`=rerg4hRPq|?tKWLhXihAoSz-!ve1?0w83 zSfgn4S`c3-gc9?uizOYyPLW?R%vnhGE%bam8jqg29fD^y={AZpZ?Ys83q78iY;EfS?M|BRfGh@G_UC1g zb??mX*$>Uef=cyUd9hOwi>+zg%0lFk>Mu#GP<7-%*>V85yq+h!)3?l=)M zUnEu4drySu%X^c*jvXBTIP!X7I)kV8>bcx*y|Ul$=j9or{J6B^l3}BoBh?N1 
zt9^qm(qrJcEB#KFEHc=9ulg(xWDiAsoK|E$eo}pH_IQTB>@C?yk ze9&ac!W?gALf5vD^?Dgt@O%Ew=Kj>_ zO>74y+j#Hz!c+H`KF#cV5Td?V%ZixE7NgmS^KdhwG~BWUQijH6nvbY8W>vycdX1ph zqq9^x{>eEVpNXKHjnHqIUz#Dnqi0Ber3HcRSysQ>agi-SfShq(23J;p)s+TM7~J6K zr7IfkU=0Bwm%GV(+OyX*40&evc*y6wFxzpybGR_oI80V%uicPZg@hE()JY=_&xzFR z)QY1K-IDa=JU)qrxbDeVW(b19u_=Q_JUUOU^9DZ|BB?e*nlETh(lNBZ;3b&lm#nr6 zyj%rVNB7+BAn0EN?u95#fxFV&7lQ!|RtE;J?TWMs^uq~x5>iSn>tk*0Pi=lVv3gS{ z$-;Fq-__>trqg)7LFgNA?*mgcNlk%Wj~ zHj9R%go2QqE$Ei9Fkd7maWsSX7bB$G?_$EO1(r8JJsJ$IK7fe25Bp;fvYgt}HQj|LYoFf|;(X-udAOS-bH4{g;eWM_`x8=CbO^mhZDNg> zUW1TE68^w8`37Bn=U-00Pr8H#cB%uRhc6>QOVRGm)%qH}mElQSux#K+$qDs~Q8X{_z zHI7qgcNm)NhL3a3&s#K3>6ma3LboDC2!`?4ibph$sdX&~X`~3Updn!%^6SPIY6wsc zA)05k2ikX1wN*j{uG^NPz7i!1e*J^nx2)CSgcx;BIj3P}gcQ;}-`!Aj4BngeEZ-3> ze`kRZKDXBvME$^n2j`JaDZQ1L%ML?ZdZNY)G5TW^X zqjd(6Z;j(TV&aSl8dB0s9Z*Xi6+i8j%~3nhG_BojN_gRdrG7`F!ljv6*KP=*?s&k!OrWbXkZ`r@c2-+M7| z0HTow*;ft~rx-T3Z%cWWr5}|EP5YD~PygoF1y@o6Ewua29N5a8**Mogqg8ITpQ_4ax5aWzDqeeq4nORplYL5Ny2IGsriREA`R@A?7bn)*U!R^HfBQT2&7NOQ zga(uVsXNZA+Cu z*T<*LW3?V8zFG6qcP6EzNZoOkul{h_#5Gd?j)c|QzkT~vIrtxKcWA>f5JLeJnVsZP zOP20v3$s9FDd3lQh3*iAhX&sQ9U;SYfmTtXD1mMK;E_+hb?zm0wD|oPu>RF>-_(tOV={cR5ofFR&WGK(>9ql*aQ&^`|7krmK=~9 zzE}Jwn4&B+(lrB(`22bx$yqI=jt&87kcSmd$Y@N~mjiHg$=)uz!j18PPf7EDL4FL7 z?;xH-^6|!i#LM30#2`6+Bk}KmKteEw_@^w|CFKYJI6)D9UYZlwJYy%K&XXIijOzhz zuDcdc;6^~GvPZ7S(JpCprWz~1X=$!4u+*?BR&Lk@+3pI8i2Ceg9#Je(dpyIo6x7dyNFsWU2^M(!qFa4!F$RI8R4F81K5n#3W(33H$Gc0l@`T)6@VFgfUA5jFD05 zBa8^65s)0M^&&c_MTlb*!Hl=+Qw|Y8T~bxO8{)A%C}L+;h>ek?#p3(tA)v#H>5k<^ zn2fC8W>uk>AfPxDx0rtMb%c1-PFr0B6UPghW%U`8nPU9AUq?SFsel4+o$*Kvki>As zT$dwkJ|A#wJ^>F}cPvX)hjulh7{{KNF?Nk~F{VroHOt>IB3=Z77u64xO^Yf6jFPnw zYhH*8)ldWiKH_YEApi+9XvoN6?AYjr#X;^_ZYv}=3kY3&x<4FH4kOmKv@W#; zgVBQP%0xUg!xx*`2pg^~QvwYxJnizt6$rk2Isn>`T^ZSmfMr<`=u#)bjL68Y&11us zPr8WbIeOo7+vP4bfJea({^G6Qbg`_+*kF1X+3uVbB98u4orgP*i~5$E)BTKVa~Z|O zxb4npVcjlwF~mymyED2Ej%08I5Nx>wH>{&csFatY;%teBubvcMj z42_vIO7LE$RBu-RtNZ?Y+}r$-g7d_0!-y6dW@Mqf&xM{>GqUQ)t|^Qw1138Cr7nBt<`c&eqU 
zs^vqzn3rGdZ)JBw)Y2YP;JGzR>xCcJ^dWOIpd6j+-Rog`l+{k%lBBzwsaHRa(;nOpJM5>qO^kx&{z@%+d3;;QbF;HfWwmQjb5f!b4r^WwdO^Z|G9D zotCuhcN7=0roL;Gy5c=+&5AH`(%=}N>p@cod7hx=Ys)$~$__6obGP5`-#j3aPn**1 zZL@yg402|$>2>b;8c?4ZQZZ+ga?{ZYxvn8B04_rMdS%zYZ_75CN}Fc_cQM zgSO=VuE{f2^ilUIYUzWb#!NrphOrgy||^s6Hazo4yjHXFQO zQ`J*{Q>B!H3<=~$Hu`7=KQ4I}nq;M({4fv^B^)j7Lz2_EB_YH#Ow+1sJtocQJbOplT{AlSKTSMw!3Ob=ev4#e|xi0NF` zvmoeJ^y8hkJ^Z5l7YGPGJB?_wb5VYCygYmxiOrgIKnQX`*8*LDa9r(ULQ6Y*PSYP7 zr4{k}A0F!u3tC$0aKH7$bk@_Al;Pi-5Lrtiy%JUL*}y4 z*;?N@^zPrgkXG71bd8KmNYfjLn0}=P;$fG5Om_z2$yUJ%f8TZPblYXWwFe8>YV7j@ z;qkf^wF-N|-@49W^}jx}(zE%&F9+vUx40~vd=S(9brrAcAtH`CcyVF|6v1A`8|7CI zcJN(2XfN0D{DPiISC(#5LKv@^e%0tXOTDHSyeM_WhDz}lTHjswf-^n3tG#*4gG!*TFn)sLmbB==mQtw^)rZX{Ni%R{ij)U-IOdc~AEnor^t> zO0Q7*w0?XnJ$AIy5TQlWf1(AAZfkkyK^XCG8+g5^?K-8$PgbJu%MnKHmu*P4I77aB zf8#v_FY1ycJw^+fBn`th5IrN^Z%g}SNm>bODu+06S&GDl6tzl{WCh!LN2)a6Nmq?7 zMFrKTJX+Kun$z~Rt+X$e2awYR?O3Z6)$pzwZd&{1Rp}Y#(TwDIQWbsbE$Q7EwcLA@ z4nL}LIbY5<(|#*mY3oC+#}N+Cck;T<9=>&L1)h_mJZ+Y|@?|@n*HkC1YtBrXONm4EyG^)X(dz2moFPz*g471s}aJt z)R($f=ta$PK7Gj%yzoP1QPTAupOYlB`$?DYz{2FIW`0`@6$d;j<#QVlhH1J-nu`$zj4#AERtcrQE(WyQ*|C=Cshxz z;$qu%QYitu3SssX_7~!el5(k0Tva%bmhFg;jLqJ(9h)P>o>hn~7>Zz|d}Lqw*s)PY z;XvGW(g6mkdcCgjG_$&@$i_uQ$k@bCcxN`n!wvLs`;p?Zw|3*=%56gryv(i&)Rqy- ztX2dH{K&BR_G$<|6CT*HUBTrMLlqIjju1y&q4c;|&y@QY_4qhLzygt7>^!RQvW#%% zQANfa8S7#Ofh|NGT(C)FdsQKhLNX}@R-(NcQ5Zl?Kpznfrd+QG*eF(P*-6QWL+#l* zm|V5pjLQQ&!bU*CuFJCQI_}L+>X=dtxZ%cL*;tF=gX12;$$K>cF#>mrp~J*6MIaTA zvKAT`W>j(@VFba9&CELigpFxa3Wg|+1yI6AFvj#^n>cpIw3+b{CNzcx4URR)gqMgS zjwcEH*Z^+nfk@kCB;r^yu@LK{v9XC89Mj1x(8MSuD1nY)hz(;H7^~rtV+2U}M4dqc z5jG8;Ppp9L__}#S6BFg*zaF}|+DW|f2~IK$>KHr#8H2~d8j$>@>2sinm8hz60QGx_7jT8T@< zWKd#xN+ul+k+>uUIycOa>13x@GBQFt;>bK1OT`b|2$Ud@u>#4+EEvLcehKG~hU2sX zaUiGGSQppbM0wi$p+ChofsPOnfW8(rcG8p)y{2q1pi`15OlhV;t_3)8FakJ<0K(VZ zu;~&c$RxwZ=>-l5EC(VP0Z2oD4=fG^-pvC-SPmv%9V~}&l9d=}Rv?IT)tL41ND5)Q zf?^ep#}@%*=?vo-+kFm;h2s$6Nor5NY?$F#4hC`aWrTUu?(YKV)BM>2|AXElX-UBI%fzz(P&?7DWH+{zAk0^+=^q=csanP&hp 
zwqk25VJ4N55wcg~mj{7`#R1cXttgNQFl1u}PO!nIOE?60!dB_6{Pnj*7h!oUN*`FV z(=dGshA_^g>h!@bwqugQrtFf|a<^y`%qC5pi#oq3^%;{8&aegK3gDDDDdMon8O6FS z6JnGsp*&!27?YsF4?VHHcG;3%5eUy+DSL@qrRLgmYFZQ^spgy z%)yt|8@*D57t@ZE$9F&`LU>Vyk$gB`L4zdY_(1x^z&^N@a6l^#GBk7KrKj@`^SndMF2GD|415J;%g(X9f+ca~j=ESFPuWv`SV8N9C5Z0b!(@ z9Wc#~x&f5q2tNBE#SV~*PoL2n2LvzZQFB#E^y5zb-3LU7aj#xX56)m6^7EH19fG=Uh0S!ZG^EQ9U%U=#p3<<|dO$cjrxMiK z>b@R`KW*H%`1WvJg|*X$c6qmGMF=vaFE*b4zCzAA6%F2zoqX`gN<9M36Fm^iCF|>F;I~fLlr}g*nkU_M_PN-w%3f(45Iuqn z+MqZ6cj`ROSyX&c>x(bub_U|T1M%qD*`LYGkj`A5?L=Sg_&Dd)f0-4}m+gedN-{rcU8mvc7XRC+xSh?HccMd8{Jpo% zYFiMHLN{D|jZZ$DOJ247^>-Wuqgd8n-yQI1Vv5lLqH(g-uMVT(ft>%S96*g6yKnqzZG-kLnxIz2b*^wAi%5C z%)D2P5c+aYl@4%I`i@d@ea$Lod$iOmtYs$|5{U)#8NKNq!s^LzSUr3b7S#O%dJjqv z&(2n-=$*MYm05X~>_b;-^mT>3}DA@$=$t9FT9%J->Os^yg4a4`$40 z5i+DtYbD)pl`5FN-o?2cMe{L_&(;Rb@}Te;fBbATo(^zajcIDZ(ux7KG02cSS}3)3 z5T$dLklD1Ny@z56G9+BA&JJ#MWUWsh+=Hw_Tx%HuvhjP5oOdce2_H?RI>-%L+ACHh z`_R&sw~V8=7kTdI@5IgJ^24SUXGn*Z15vBgRJ`bgh|osXWOdoMwqlpeAO*H&-h2ZUgz$7-dKKcP z4z6_RQWOh&TMZR`DX9>Ug|$@iDGU#8^b_tn1SEciwr!sMVni|m64=KOpv$O?Eepx0 zz++hvsI15WZ@2;@P<#%F6|e)^29tLMf?URfO+AnWjAU1kc|j%^cpR2NMkcb$6Ov(W z44GtH0RRBOtYPLLwvLV1499FDLynL7g6)pLH5)^YjWFmS3GfIw%P20#MA;!h23H6% zn8;ujfGacW&|`B>U}wfZM`1w7n4oH~IRGQZ25_ARDQ6D?0yMZmBR8rBoO!;>>&k9p~*R9^rlY`hu8=1$p;28iH8sYDPUmM!e{(rC5pL;cFYDn z*1#TNMU)XE6R}7l8vEnO*iB-MFcHGV;$&imv;dDI<-lNT@UeyG1X$%)cCYFo!pe9``VxW|5_GAWXZHCAqhIxjC$9KOUX4J zOgdv?8k7+^k{byTOh$uap^4ei-*{;1o12J_`BOc(rXmPW&?L&&VNVzAI?Sm-)=p?{ z!u&tAvx!PvmAGytZl;!!YvOa-Pe_8OBIW*9EbMrTEGUr5^Hzux(A1|L7zLUFaV};$ zmMr*l2n?v@nKjw3*P-+n5Cc@D^DJU}z~t zJ1`0?4FT-iAr1f_2td)o#{F+>V$PyTF}xS4e~d(w-H&0(j3&$tSqIkk0iiCo-SqvCT7M#14aQe6999SUnert*{%Qp N002ovPDHLkV1f$s#LfT! 
diff --git a/fn/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.svg b/fn/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.svg deleted file mode 100644 index 96cd21d52..000000000 --- a/fn/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.svg +++ /dev/null @@ -1 +0,0 @@ -DockerHost:Frontend,Backend &CreditCardAppTiersareIsolatedbutcanstillcommunicateinsideinterfaceoranyotherDockerhostsusingtheparentVLANID802.1QTrunk -canbeasingleEthernetlinkorMultipleBondedEthernetlinksInterfaceeth0Container(s)Eth010.1.20.0/24Parent:eth0.20VLANID:20CreditCardsBackendContainer(s)Eth010.1.30.0/24Container(s)Eth010.1.10.0/24FrontendGateway10.1.20.1andothercontainersonthesameVLAN/subnetGateway10.1.10.1andothercontainersonthesameVLAN/subnetGateway10.1.30.1andothercontainersonthesameVLAN/subnet:Parenteth0.10VLANID:10Parent:eth0.30VLAN:30NetworkotherDockerHosts \ No newline at end of file diff --git a/fn/vendor/github.com/docker/docker/experimental/vlan-networks.md b/fn/vendor/github.com/docker/docker/experimental/vlan-networks.md deleted file mode 100644 index 13eb5981d..000000000 --- a/fn/vendor/github.com/docker/docker/experimental/vlan-networks.md +++ /dev/null @@ -1,475 +0,0 @@ -# Ipvlan Network Driver - -### Getting Started - -The Ipvlan driver is currently in experimental mode in order to incubate Docker users use cases and vet the implementation to ensure a hardened, production ready driver in a future release. Libnetwork now gives users total control over both IPv4 and IPv6 addressing. The VLAN driver builds on top of that in giving operators complete control of layer 2 VLAN tagging and even Ipvlan L3 routing for users interested in underlay network integration. For overlay deployments that abstract away physical constraints see the [multi-host overlay ](https://docs.docker.com/engine/userguide/networking/get-started-overlay/) driver. - -Ipvlan is a new twist on the tried and true network virtualization technique. 
The Linux implementations are extremely lightweight because rather than using the traditional Linux bridge for isolation, they are simply associated to a Linux Ethernet interface or sub-interface to enforce separation between networks and connectivity to the physical network. - -Ipvlan offers a number of unique features and plenty of room for further innovations with the various modes. Two high level advantages of these approaches are, the positive performance implications of bypassing the Linux bridge and the simplicity of having fewer moving parts. Removing the bridge that traditionally resides in between the Docker host NIC and container interface leaves a simple setup consisting of container interfaces, attached directly to the Docker host interface. This result is easy access for external facing services as there is no need for port mappings in these scenarios. - -### Pre-Requisites - -- The examples on this page are all single host and setup using Docker experimental builds that can be installed with the following instructions: [Install Docker experimental](https://github.com/docker/docker/tree/master/experimental) - -- All of the examples can be performed on a single host running Docker. Any examples using a sub-interface like `eth0.10` can be replaced with `eth0` or any other valid parent interface on the Docker host. Sub-interfaces with a `.` are created on the fly. `-o parent` interfaces can also be left out of the `docker network create` all together and the driver will create a `dummy` interface that will enable local host connectivity to perform the examples. - -- Kernel requirements: - - - To check your current kernel version, use `uname -r` to display your kernel version - - Ipvlan Linux kernel v4.2+ (support for earlier kernels exists but is buggy) - -### Ipvlan L2 Mode Example Usage - -An example of the ipvlan `L2` mode topology is shown in the following image. The driver is specified with `-d driver_name` option. In this case `-d ipvlan`. 
- -![Simple Ipvlan L2 Mode Example](images/ipvlan_l2_simple.png) - -The parent interface in the next example `-o parent=eth0` is configured as follows: - -``` -$ ip addr show eth0 -3: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 - inet 192.168.1.250/24 brd 192.168.1.255 scope global eth0 -``` - -Use the network from the host's interface as the `--subnet` in the `docker network create`. The container will be attached to the same network as the host interface as set via the `-o parent=` option. - -Create the ipvlan network and run a container attaching to it: - -``` -# Ipvlan (-o ipvlan_mode= Defaults to L2 mode if not specified) -$ docker network create -d ipvlan \ - --subnet=192.168.1.0/24 \ - --gateway=192.168.1.1 \ - -o ipvlan_mode=l2 \ - -o parent=eth0 db_net - -# Start a container on the db_net network -$ docker run --net=db_net -it --rm alpine /bin/sh - -# NOTE: the containers can NOT ping the underlying host interfaces as -# they are intentionally filtered by Linux for additional isolation. -``` - -The default mode for Ipvlan is `l2`. If `-o ipvlan_mode=` is left unspecified, the default mode will be used. Similarly, if the `--gateway` is left empty, the first usable address on the network will be set as the gateway. For example, if the subnet provided in the network create is `--subnet=192.168.1.0/24` then the gateway the container receives is `192.168.1.1`. - -To help understand how this mode interacts with other hosts, the following figure shows the same layer 2 segment between two Docker hosts that applies to both Macvlan Bridge mode and Ipvlan L2 mode. - -![Multiple Ipvlan Hosts](images/macvlan-bridge-ipvlan-l2.png) - -The following will create the exact same network as the network `db_net` created prior, with the driver defaults for `--gateway=192.168.1.1` and `-o ipvlan_mode=l2`. 
- -``` -# Ipvlan (-o ipvlan_mode= Defaults to L2 mode if not specified) -$ docker network create -d ipvlan \ - --subnet=192.168.1.0/24 \ - -o parent=eth0 db_net_ipv - -# Start a container with an explicit name in daemon mode -$ docker run --net=db_net_ipv --name=ipv1 -itd alpine /bin/sh - -# Start a second container and ping using the container name -# to see the docker included name resolution functionality -$ docker run --net=db_net_ipv --name=ipv2 -it --rm alpine /bin/sh -$ ping -c 4 ipv1 - -# NOTE: the containers can NOT ping the underlying host interfaces as -# they are intentionally filtered by Linux for additional isolation. -``` - -The drivers also support the `--internal` flag that will completely isolate containers on a network from any communications external to that network. Since network isolation is tightly coupled to the network's parent interface the result of leaving the `-o parent=` option off of a `docker network create` is the exact same as the `--internal` option. If the parent interface is not specified or the `--internal` flag is used, a netlink type `dummy` parent interface is created for the user and used as the parent interface effectively isolating the network completely. 
- -The following two `docker network create` examples result in identical networks that you can attach container to: - -``` -# Empty '-o parent=' creates an isolated network -$ docker network create -d ipvlan \ - --subnet=192.168.10.0/24 isolated1 - -# Explicit '--internal' flag is the same: -$ docker network create -d ipvlan \ - --subnet=192.168.11.0/24 --internal isolated2 - -# Even the '--subnet=' can be left empty and the default -# IPAM subnet of 172.18.0.0/16 will be assigned -$ docker network create -d ipvlan isolated3 - -$ docker run --net=isolated1 --name=cid1 -it --rm alpine /bin/sh -$ docker run --net=isolated2 --name=cid2 -it --rm alpine /bin/sh -$ docker run --net=isolated3 --name=cid3 -it --rm alpine /bin/sh - -# To attach to any use `docker exec` and start a shell -$ docker exec -it cid1 /bin/sh -$ docker exec -it cid2 /bin/sh -$ docker exec -it cid3 /bin/sh -``` - -### Ipvlan 802.1q Trunk L2 Mode Example Usage - -Architecturally, Ipvlan L2 mode trunking is the same as Macvlan with regard to gateways and L2 path isolation. There are nuances that can be advantageous for CAM table pressure in ToR switches, one MAC per port and MAC exhaustion on a host's parent NIC to name a few. The 802.1q trunk scenario looks the same. Both modes adhere to tagging standards and have seamless integration with the physical network for underlay integration and hardware vendor plugin integrations. - -Hosts on the same VLAN are typically on the same subnet and almost always are grouped together based on their security policy. In most scenarios, a multi-tier application is tiered into different subnets because the security profile of each process requires some form of isolation. For example, hosting your credit card processing on the same virtual network as the frontend webserver would be a regulatory compliance issue, along with circumventing the long standing best practice of layered defense in depth architectures. 
VLANs or the equivalent VNI (Virtual Network Identifier) when using the Overlay driver, are the first step in isolating tenant traffic. - -![Docker VLANs in Depth](images/vlans-deeper-look.png) - -The Linux sub-interface tagged with a vlan can either already exist or will be created when you call a `docker network create`. `docker network rm` will delete the sub-interface. Parent interfaces such as `eth0` are not deleted, only sub-interfaces with a netlink parent index > 0. - -For the driver to add/delete the vlan sub-interfaces the format needs to be `interface_name.vlan_tag`. Other sub-interface naming can be used as the specified parent, but the link will not be deleted automatically when `docker network rm` is invoked. - -The option to use either existing parent vlan sub-interfaces or let Docker manage them enables the user to either completely manage the Linux interfaces and networking or let Docker create and delete the Vlan parent sub-interfaces (netlink `ip link`) with no effort from the user. - -For example: use `eth0.10` to denote a sub-interface of `eth0` tagged with the vlan id of `10`. The equivalent `ip link` command would be `ip link add link eth0 name eth0.10 type vlan id 10`. - -The example creates the vlan tagged networks and then starts two containers to test connectivity between containers. Different Vlans cannot ping one another without a router routing between the two networks. The default namespace is not reachable per ipvlan design in order to isolate container namespaces from the underlying host. - -**Vlan ID 20** - -In the first network tagged and isolated by the Docker host, `eth0.20` is the parent interface tagged with vlan id `20` specified with `-o parent=eth0.20`. Other naming formats can be used, but the links need to be added and deleted manually using `ip link` or Linux configuration files. As long as the `-o parent` exists anything can be used if compliant with Linux netlink. 
- -``` -# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged -$ docker network create -d ipvlan \ - --subnet=192.168.20.0/24 \ - --gateway=192.168.20.1 \ - -o parent=eth0.20 ipvlan20 - -# in two separate terminals, start a Docker container and the containers can now ping one another. -$ docker run --net=ipvlan20 -it --name ivlan_test1 --rm alpine /bin/sh -$ docker run --net=ipvlan20 -it --name ivlan_test2 --rm alpine /bin/sh -``` - -**Vlan ID 30** - -In the second network, tagged and isolated by the Docker host, `eth0.30` is the parent interface tagged with vlan id `30` specified with `-o parent=eth0.30`. The `ipvlan_mode=` defaults to l2 mode `ipvlan_mode=l2`. It can also be explicitly set with the same result as shown in the next example. - -``` -# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged. -$ docker network create -d ipvlan \ - --subnet=192.168.30.0/24 \ - --gateway=192.168.30.1 \ - -o parent=eth0.30 \ - -o ipvlan_mode=l2 ipvlan30 - -# in two separate terminals, start a Docker container and the containers can now ping one another. -$ docker run --net=ipvlan30 -it --name ivlan_test3 --rm alpine /bin/sh -$ docker run --net=ipvlan30 -it --name ivlan_test4 --rm alpine /bin/sh -``` - -The gateway is set inside of the container as the default gateway. That gateway would typically be an external router on the network. - -``` -$$ ip route - default via 192.168.30.1 dev eth0 - 192.168.30.0/24 dev eth0 src 192.168.30.2 -``` - -Example: Multi-Subnet Ipvlan L2 Mode starting two containers on the same subnet and pinging one another. In order for the `192.168.114.0/24` to reach `192.168.116.0/24` it requires an external router in L2 mode. L3 mode can route between subnets that share a common `-o parent=`. 
- -Secondary addresses on network routers are common as an address space becomes exhausted to add another secondary to an L3 vlan interface or commonly referred to as a "switched virtual interface" (SVI). - -``` -$ docker network create -d ipvlan \ - --subnet=192.168.114.0/24 --subnet=192.168.116.0/24 \ - --gateway=192.168.114.254 --gateway=192.168.116.254 \ - -o parent=eth0.114 \ - -o ipvlan_mode=l2 ipvlan114 - -$ docker run --net=ipvlan114 --ip=192.168.114.10 -it --rm alpine /bin/sh -$ docker run --net=ipvlan114 --ip=192.168.114.11 -it --rm alpine /bin/sh -``` - -A key takeaway is, operators have the ability to map their physical network into their virtual network for integrating containers into their environment with no operational overhauls required. NetOps simply drops an 802.1q trunk into the Docker host. That virtual link would be the `-o parent=` passed in the network creation. For untagged (non-VLAN) links, it is as simple as `-o parent=eth0` or for 802.1q trunks with VLAN IDs each network gets mapped to the corresponding VLAN/Subnet from the network. - -An example being, NetOps provides VLAN ID and the associated subnets for VLANs being passed on the Ethernet link to the Docker host server. Those values are simply plugged into the `docker network create` commands when provisioning the Docker networks. These are persistent configurations that are applied every time the Docker engine starts which alleviates having to manage often complex configuration files. The network interfaces can also be managed manually by being pre-created and docker networking will never modify them, simply use them as parent interfaces. 
Example mappings from NetOps to Docker network commands are as follows: - -- VLAN: 10, Subnet: 172.16.80.0/24, Gateway: 172.16.80.1 - - - `--subnet=172.16.80.0/24 --gateway=172.16.80.1 -o parent=eth0.10` - -- VLAN: 20, IP subnet: 172.16.50.0/22, Gateway: 172.16.50.1 - - - `--subnet=172.16.50.0/22 --gateway=172.16.50.1 -o parent=eth0.20 ` - -- VLAN: 30, Subnet: 10.1.100.0/16, Gateway: 10.1.100.1 - - - `--subnet=10.1.100.0/16 --gateway=10.1.100.1 -o parent=eth0.30` - -### IPVlan L3 Mode Example - -IPVlan will require routes to be distributed to each endpoint. The driver only builds the Ipvlan L3 mode port and attaches the container to the interface. Route distribution throughout a cluster is beyond the initial implementation of this single host scoped driver. In L3 mode, the Docker host is very similar to a router starting new networks in the container. They are on networks that the upstream network will not know about without route distribution. For those curious how Ipvlan L3 will fit into container networking see the following examples. - -![Docker Ipvlan L2 Mode](images/ipvlan-l3.png) - -Ipvlan L3 mode drops all broadcast and multicast traffic. This reason alone makes Ipvlan L3 mode a prime candidate for those looking for massive scale and predictable network integrations. It is predictable and in turn will lead to greater uptimes because there is no bridging involved. Bridging loops have been responsible for high profile outages that can be hard to pinpoint depending on the size of the failure domain. This is due to the cascading nature of BPDUs (Bridge Port Data Units) that are flooded throughout a broadcast domain (VLAN) to find and block topology loops. Eliminating bridging domains, or at the least, keeping them isolated to a pair of ToRs (top of rack switches) will reduce hard to troubleshoot bridging instabilities. Ipvlan L2 modes is well suited for isolated VLANs only trunked into a pair of ToRs that can provide a loop-free non-blocking fabric. 
The next step further is to route at the edge via Ipvlan L3 mode that reduces a failure domain to a local host only. - -- L3 mode needs to be on a separate subnet than the default namespace since it requires a netlink route in the default namespace pointing to the Ipvlan parent interface. - -- The parent interface used in this example is `eth0` and it is on the subnet `192.168.1.0/24`. Notice the `docker network` is **not** on the same subnet as `eth0`. - -- Unlike ipvlan l2 modes, different subnets/networks can ping one another as long as they share the same parent interface `-o parent=`. - -``` -$$ ip a show eth0 -3: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 - link/ether 00:50:56:39:45:2e brd ff:ff:ff:ff:ff:ff - inet 192.168.1.250/24 brd 192.168.1.255 scope global eth0 -``` - -- A traditional gateway doesn't mean much to an L3 mode Ipvlan interface since there is no broadcast traffic allowed. Because of that, the container default gateway simply points to the container's `eth0` device. See below for CLI output of `ip route` or `ip -6 route` from inside an L3 container for details. - -The mode ` -o ipvlan_mode=l3` must be explicitly specified since the default ipvlan mode is `l2`. - -The following example does not specify a parent interface. The network drivers will create a dummy type link for the user rather than rejecting the network creation and isolating containers from only communicating with one another. 
- -``` -# Create the Ipvlan L3 network -$ docker network create -d ipvlan \ - --subnet=192.168.214.0/24 \ - --subnet=10.1.214.0/24 \ - -o ipvlan_mode=l3 ipnet210 - -# Test 192.168.214.0/24 connectivity -$ docker run --net=ipnet210 --ip=192.168.214.10 -itd alpine /bin/sh -$ docker run --net=ipnet210 --ip=10.1.214.10 -itd alpine /bin/sh - -# Test L3 connectivity from 10.1.214.0/24 to 192.168.212.0/24 -$ docker run --net=ipnet210 --ip=192.168.214.9 -it --rm alpine ping -c 2 10.1.214.10 - -# Test L3 connectivity from 192.168.212.0/24 to 10.1.214.0/24 -$ docker run --net=ipnet210 --ip=10.1.214.9 -it --rm alpine ping -c 2 192.168.214.10 - -``` - -Notice there is no `--gateway=` option in the network create. The field is ignored if one is specified `l3` mode. Take a look at the container routing table from inside of the container: - -``` -# Inside an L3 mode container -$$ ip route - default dev eth0 - 192.168.214.0/24 dev eth0 src 192.168.214.10 -``` - -In order to ping the containers from a remote Docker host or the container be able to ping a remote host, the remote host or the physical network in between need to have a route pointing to the host IP address of the container's Docker host eth interface. More on this as we evolve the Ipvlan `L3` story. - -### Dual Stack IPv4 IPv6 Ipvlan L2 Mode - -- Not only does Libnetwork give you complete control over IPv4 addressing, but it also gives you total control over IPv6 addressing as well as feature parity between the two address families. - -- The next example will start with IPv6 only. Start two containers on the same VLAN `139` and ping one another. Since the IPv4 subnet is not specified, the default IPAM will provision a default IPv4 subnet. That subnet is isolated unless the upstream network is explicitly routing it on VLAN `139`. 
- -``` -# Create a v6 network -$ docker network create -d ipvlan \ - --subnet=2001:db8:abc2::/64 --gateway=2001:db8:abc2::22 \ - -o parent=eth0.139 v6ipvlan139 - -# Start a container on the network -$ docker run --net=v6ipvlan139 -it --rm alpine /bin/sh - -``` - -View the container eth0 interface and v6 routing table: - -``` -# Inside the IPv6 container -$$ ip a show eth0 -75: eth0@if55: mtu 1500 qdisc noqueue state UNKNOWN group default - link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff - inet 172.18.0.2/16 scope global eth0 - valid_lft forever preferred_lft forever - inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link - valid_lft forever preferred_lft forever - inet6 2001:db8:abc2::1/64 scope link nodad - valid_lft forever preferred_lft forever - -$$ ip -6 route -2001:db8:abc4::/64 dev eth0 proto kernel metric 256 -2001:db8:abc2::/64 dev eth0 proto kernel metric 256 -default via 2001:db8:abc2::22 dev eth0 metric 1024 -``` - -Start a second container and ping the first container's v6 address. 
- -``` -# Test L2 connectivity over IPv6 -$ docker run --net=v6ipvlan139 -it --rm alpine /bin/sh - -# Inside the second IPv6 container -$$ ip a show eth0 -75: eth0@if55: mtu 1500 qdisc noqueue state UNKNOWN group default - link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff - inet 172.18.0.3/16 scope global eth0 - valid_lft forever preferred_lft forever - inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link tentative dadfailed - valid_lft forever preferred_lft forever - inet6 2001:db8:abc2::2/64 scope link nodad - valid_lft forever preferred_lft forever - -$$ ping6 2001:db8:abc2::1 -PING 2001:db8:abc2::1 (2001:db8:abc2::1): 56 data bytes -64 bytes from 2001:db8:abc2::1%eth0: icmp_seq=0 ttl=64 time=0.044 ms -64 bytes from 2001:db8:abc2::1%eth0: icmp_seq=1 ttl=64 time=0.058 ms - -2 packets transmitted, 2 packets received, 0% packet loss -round-trip min/avg/max/stddev = 0.044/0.051/0.058/0.000 ms -``` - -The next example will set up a dual stack IPv4/IPv6 network with an example VLAN ID of `140`. 
- -Next create a network with two IPv4 subnets and one IPv6 subnets, all of which have explicit gateways: - -``` -$ docker network create -d ipvlan \ - --subnet=192.168.140.0/24 --subnet=192.168.142.0/24 \ - --gateway=192.168.140.1 --gateway=192.168.142.1 \ - --subnet=2001:db8:abc9::/64 --gateway=2001:db8:abc9::22 \ - -o parent=eth0.140 \ - -o ipvlan_mode=l2 ipvlan140 -``` - -Start a container and view eth0 and both v4 & v6 routing tables: - -``` -$ docker run --net=ipvlan140 --ip6=2001:db8:abc2::51 -it --rm alpine /bin/sh - -$ ip a show eth0 -78: eth0@if77: mtu 1500 qdisc noqueue state UNKNOWN group default - link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff - inet 192.168.140.2/24 scope global eth0 - valid_lft forever preferred_lft forever - inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link - valid_lft forever preferred_lft forever - inet6 2001:db8:abc9::1/64 scope link nodad - valid_lft forever preferred_lft forever - -$$ ip route -default via 192.168.140.1 dev eth0 -192.168.140.0/24 dev eth0 proto kernel scope link src 192.168.140.2 - -$$ ip -6 route -2001:db8:abc4::/64 dev eth0 proto kernel metric 256 -2001:db8:abc9::/64 dev eth0 proto kernel metric 256 -default via 2001:db8:abc9::22 dev eth0 metric 1024 -``` - -Start a second container with a specific `--ip4` address and ping the first host using IPv4 packets: - -``` -$ docker run --net=ipvlan140 --ip=192.168.140.10 -it --rm alpine /bin/sh -``` - -**Note**: Different subnets on the same parent interface in Ipvlan `L2` mode cannot ping one another. That requires a router to proxy-arp the requests with a secondary subnet. However, Ipvlan `L3` will route the unicast traffic between disparate subnets as long as they share the same `-o parent` parent link. - -### Dual Stack IPv4 IPv6 Ipvlan L3 Mode - -**Example:** IpVlan L3 Mode Dual Stack IPv4/IPv6, Multi-Subnet w/ 802.1q Vlan Tag:118 - -As in all of the examples, a tagged VLAN interface does not have to be used. 
The sub-interfaces can be swapped with `eth0`, `eth1`, `bond0` or any other valid interface on the host other then the `lo` loopback. - -The primary difference you will see is that L3 mode does not create a default route with a next-hop but rather sets a default route pointing to `dev eth` only since ARP/Broadcasts/Multicast are all filtered by Linux as per the design. Since the parent interface is essentially acting as a router, the parent interface IP and subnet needs to be different from the container networks. That is the opposite of bridge and L2 modes, which need to be on the same subnet (broadcast domain) in order to forward broadcast and multicast packets. - -``` -# Create an IPv6+IPv4 Dual Stack Ipvlan L3 network -# Gateways for both v4 and v6 are set to a dev e.g. 'default dev eth0' -$ docker network create -d ipvlan \ - --subnet=192.168.110.0/24 \ - --subnet=192.168.112.0/24 \ - --subnet=2001:db8:abc6::/64 \ - -o parent=eth0 \ - -o ipvlan_mode=l3 ipnet110 - - -# Start a few of containers on the network (ipnet110) -# in separate terminals and check connectivity -$ docker run --net=ipnet110 -it --rm alpine /bin/sh -# Start a second container specifying the v6 address -$ docker run --net=ipnet110 --ip6=2001:db8:abc6::10 -it --rm alpine /bin/sh -# Start a third specifying the IPv4 address -$ docker run --net=ipnet110 --ip=192.168.112.30 -it --rm alpine /bin/sh -# Start a 4th specifying both the IPv4 and IPv6 addresses -$ docker run --net=ipnet110 --ip6=2001:db8:abc6::50 --ip=192.168.112.50 -it --rm alpine /bin/sh -``` - -Interface and routing table outputs are as follows: - -``` -$$ ip a show eth0 -63: eth0@if59: mtu 1500 qdisc noqueue state UNKNOWN group default - link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff - inet 192.168.112.2/24 scope global eth0 - valid_lft forever preferred_lft forever - inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link - valid_lft forever preferred_lft forever - inet6 2001:db8:abc6::10/64 scope link nodad - valid_lft forever 
preferred_lft forever - -# Note the default route is simply the eth device because ARPs are filtered. -$$ ip route - default dev eth0 scope link - 192.168.112.0/24 dev eth0 proto kernel scope link src 192.168.112.2 - -$$ ip -6 route -2001:db8:abc4::/64 dev eth0 proto kernel metric 256 -2001:db8:abc6::/64 dev eth0 proto kernel metric 256 -default dev eth0 metric 1024 -``` - -*Note:* There may be a bug when specifying `--ip6=` addresses when you delete a container with a specified v6 address and then start a new container with the same v6 address it throws the following like the address isn't properly being released to the v6 pool. It will fail to unmount the container and be left dead. - -``` -docker: Error response from daemon: Address already in use. -``` - -### Manually Creating 802.1q Links - -**Vlan ID 40** - -If a user does not want the driver to create the vlan sub-interface it simply needs to exist prior to the `docker network create`. If you have sub-interface naming that is not `interface.vlan_id` it is honored in the `-o parent=` option again as long as the interface exists and is up. - -Links, when manually created, can be named anything as long as they exist when the network is created. Manually created links do not get deleted regardless of the name when the network is deleted with `docker network rm`. - -``` -# create a new sub-interface tied to dot1q vlan 40 -$ ip link add link eth0 name eth0.40 type vlan id 40 - -# enable the new sub-interface -$ ip link set eth0.40 up - -# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged -$ docker network create -d ipvlan \ - --subnet=192.168.40.0/24 \ - --gateway=192.168.40.1 \ - -o parent=eth0.40 ipvlan40 - -# in two separate terminals, start a Docker container and the containers can now ping one another. 
-$ docker run --net=ipvlan40 -it --name ivlan_test5 --rm alpine /bin/sh -$ docker run --net=ipvlan40 -it --name ivlan_test6 --rm alpine /bin/sh -``` - -**Example:** Vlan sub-interface manually created with any name: - -``` -# create a new sub interface tied to dot1q vlan 40 -$ ip link add link eth0 name foo type vlan id 40 - -# enable the new sub-interface -$ ip link set foo up - -# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged -$ docker network create -d ipvlan \ - --subnet=192.168.40.0/24 --gateway=192.168.40.1 \ - -o parent=foo ipvlan40 - -# in two separate terminals, start a Docker container and the containers can now ping one another. -$ docker run --net=ipvlan40 -it --name ivlan_test5 --rm alpine /bin/sh -$ docker run --net=ipvlan40 -it --name ivlan_test6 --rm alpine /bin/sh -``` - -Manually created links can be cleaned up with: - -``` -$ ip link del foo -``` - -As with all of the Libnetwork drivers, they can be mixed and matched, even as far as running 3rd party ecosystem drivers in parallel for maximum flexibility to the Docker user. diff --git a/fn/vendor/github.com/docker/docker/hack/Jenkins/W2L/setup.sh b/fn/vendor/github.com/docker/docker/hack/Jenkins/W2L/setup.sh index 3d211be6f..a3d86b857 100644 --- a/fn/vendor/github.com/docker/docker/hack/Jenkins/W2L/setup.sh +++ b/fn/vendor/github.com/docker/docker/hack/Jenkins/W2L/setup.sh @@ -13,7 +13,7 @@ SCRIPT_VER="Wed Apr 20 18:30:19 UTC 2016" # - Error if running 32-bit posix tools. Probably can take from bash --version and check contains "x86_64" # - Warn if the CI directory cannot be deleted afterwards. Otherwise turdlets are left behind # - Use %systemdrive% ($SYSTEMDRIVE) rather than hard code to c: for TEMP -# - Consider cross builing the Windows binary and copy across. That's a bit of a heavy lift. Only reason +# - Consider cross building the Windows binary and copy across. That's a bit of a heavy lift. 
Only reason # for doing that is that it mirrors the actual release process for docker.exe which is cross-built. # However, should absolutely not be a problem if built natively, so nit-picking. # - Tidy up of images and containers. Either here, or in the teardown script. @@ -116,7 +116,7 @@ fi # Get the commit has and verify we have something if [ $ec -eq 0 ]; then export COMMITHASH=$(git rev-parse --short HEAD) - echo INFO: Commmit hash is $COMMITHASH + echo INFO: Commit hash is $COMMITHASH if [ -z $COMMITHASH ]; then echo "ERROR: Failed to get commit hash. Are you sure this is a docker repository?" ec=1 diff --git a/fn/vendor/github.com/docker/docker/hack/README.md b/fn/vendor/github.com/docker/docker/hack/README.md new file mode 100644 index 000000000..326e35e16 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/hack/README.md @@ -0,0 +1,68 @@ +## About + +This directory contains a collection of scripts used to build and manage this +repository. If there are any issues regarding the intention of a particular +script (or even part of a certain script), please reach out to us. +It may help us either refine our current scripts, or add on new ones +that are appropriate for a given use case. + +## DinD (dind.sh) + +DinD is a wrapper script which allows Docker to be run inside a Docker +container. DinD requires the container to +be run with privileged mode enabled. + +## Generate Authors (generate-authors.sh) + +Generates AUTHORS; a file with all the names and corresponding emails of +individual contributors. AUTHORS can be found in the home directory of +this repository. + +## Install (install.sh) + +Executable install script for installing Docker. If updates to this are +desired, please use hack/release.sh during a normal release. The following +one-liner may be used for script hotfixes: + +- `aws s3 cp --acl public-read hack/install.sh s3://get.docker.com/index` + +## Make + +There are two make files, each with different extensions. 
Neither are supposed +to be called directly; only invoke `make`. Both scripts run inside a Docker +container. + +### make.ps1 + +- The Windows native build script that uses PowerShell semantics; it is limited +unlike `hack\make.sh` since it does not provide support for the full set of +operations provided by the Linux counterpart, `make.sh`. However, `make.ps1` +does provide support for local Windows development and Windows to Windows CI. +More information is found within `make.ps1` by the author, @jhowardmsft + +### make.sh + +- Referenced via `make test` when running tests on a local machine, +or directly referenced when running tests inside a Docker development container. +- When running on a local machine, `make test` to run all tests found in +`test`, `test-unit`, `test-integration-cli`, and `test-docker-py` on +your local machine. The default timeout is set in `make.sh` to 60 minutes +(`${TIMEOUT:=60m}`), since it currently takes up to an hour to run +all of the tests. +- When running inside a Docker development container, `hack/make.sh` does +not have a single target that runs all the tests. You need to provide a +single command line with multiple targets that performs the same thing. +An example referenced from [Run targets inside a development container](https://docs.docker.com/opensource/project/test-and-docs/#run-targets-inside-a-development-container): `root@5f8630b873fe:/go/src/github.com/moby/moby# hack/make.sh dynbinary binary cross test-unit test-integration-cli test-docker-py` +- For more information related to testing outside the scope of this README, +refer to +[Run tests and test documentation](https://docs.docker.com/opensource/project/test-and-docs/) + +## Release (release.sh) + +Releases any bundles built by `make` on a public AWS S3 bucket. +For information regarding configuration, please view `release.sh`. + +## Vendor (vendor.sh) + +A shell script that is a wrapper around Vndr. 
For information on how to use +this, please refer to [vndr's README](https://github.com/LK4D4/vndr/blob/master/README.md) diff --git a/fn/vendor/github.com/docker/docker/hack/dockerfile/binaries-commits b/fn/vendor/github.com/docker/docker/hack/dockerfile/binaries-commits index 4f85d9173..98344bc2f 100644 --- a/fn/vendor/github.com/docker/docker/hack/dockerfile/binaries-commits +++ b/fn/vendor/github.com/docker/docker/hack/dockerfile/binaries-commits @@ -3,9 +3,12 @@ TOMLV_COMMIT=9baf8a8a9f2ed20a8e54160840c492f937eeaf9a # When updating RUNC_COMMIT, also update runc in vendor.conf accordingly -RUNC_COMMIT=9c2d8d184e5da67c95d601382adf14862e4f2228 -CONTAINERD_COMMIT=9048e5e50717ea4497b757314bad98ea3763c145 +RUNC_COMMIT=2d41c047c83e09a6d61d464906feb2a2f3c52aa4 +CONTAINERD_COMMIT=3addd840653146c90a254301d6c3a663c7fd6429 TINI_COMMIT=949e6facb77383876aeff8a6944dde66b3089574 LIBNETWORK_COMMIT=7b2b1feb1de4817d522cc372af149ff48d25028e -VNDR_COMMIT=19220953c4a45310a4c404b7905154e29120249e -BINDATA_COMMIT=a0ff2567cfb70903282db057e799fd826784d41d +VNDR_COMMIT=c56e082291115e369f77601f9c071dd0b87c7120 + +# CLI +DOCKERCLI_REPO=https://github.com/docker/cli +DOCKERCLI_COMMIT=3dfb8343b139d6342acfd9975d7f1068b5b1c3d3 diff --git a/fn/vendor/github.com/docker/docker/hack/dockerfile/install-binaries.sh b/fn/vendor/github.com/docker/docker/hack/dockerfile/install-binaries.sh index 64f2b57da..370ec7ce4 100755 --- a/fn/vendor/github.com/docker/docker/hack/dockerfile/install-binaries.sh +++ b/fn/vendor/github.com/docker/docker/hack/dockerfile/install-binaries.sh @@ -29,8 +29,8 @@ install_runc() { install_containerd() { echo "Install containerd version $CONTAINERD_COMMIT" - git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" - cd "$GOPATH/src/github.com/docker/containerd" + git clone https://github.com/containerd/containerd.git "$GOPATH/src/github.com/containerd/containerd" + cd "$GOPATH/src/github.com/containerd/containerd" git checkout -q 
"$CONTAINERD_COMMIT" make $1 cp bin/containerd /usr/local/bin/docker-containerd @@ -46,12 +46,12 @@ install_proxy() { go build -ldflags="$PROXY_LDFLAGS" -o /usr/local/bin/docker-proxy github.com/docker/libnetwork/cmd/proxy } -install_bindata() { - echo "Install go-bindata version $BINDATA_COMMIT" - git clone https://github.com/jteeuwen/go-bindata "$GOPATH/src/github.com/jteeuwen/go-bindata" - cd $GOPATH/src/github.com/jteeuwen/go-bindata - git checkout -q "$BINDATA_COMMIT" - go build -o /usr/local/bin/go-bindata github.com/jteeuwen/go-bindata/go-bindata +install_dockercli() { + echo "Install docker/cli version $DOCKERCLI_COMMIT" + git clone "$DOCKERCLI_REPO" "$GOPATH/src/github.com/docker/cli" + cd "$GOPATH/src/github.com/docker/cli" + git checkout -q "$DOCKERCLI_COMMIT" + go build -o /usr/local/bin/docker github.com/docker/cli/cmd/docker } for prog in "$@" @@ -91,8 +91,10 @@ do ;; proxy) - export CGO_ENABLED=0 - install_proxy + ( + export CGO_ENABLED=0 + install_proxy + ) ;; proxy-dynamic) @@ -107,12 +109,12 @@ do go build -v -o /usr/local/bin/vndr . ;; - bindata) - install_bindata - ;; + dockercli) + install_dockercli + ;; *) - echo echo "Usage: $0 [tomlv|runc|containerd|tini|proxy]" + echo echo "Usage: $0 [tomlv|runc|runc-dynamic|containerd|containerd-dynamic|tini|proxy|proxy-dynamic|vndr|dockercli]" exit 1 esac diff --git a/fn/vendor/github.com/docker/docker/hack/install.sh b/fn/vendor/github.com/docker/docker/hack/install.sh index 1a5165d40..12a9b3e93 100644 --- a/fn/vendor/github.com/docker/docker/hack/install.sh +++ b/fn/vendor/github.com/docker/docker/hack/install.sh @@ -442,7 +442,7 @@ do_install() { else echo >&2 'Warning: current kernel is not supported by the linux-image-extra-virtual' echo >&2 ' package. We have no AUFS support. Consider installing the packages' - echo >&2 ' linux-image-virtual kernel and linux-image-extra-virtual for AUFS support.' + echo >&2 ' "linux-image-virtual" and "linux-image-extra-virtual" for AUFS support.' 
( set -x; sleep 10 ) fi fi diff --git a/fn/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md b/fn/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md index 7366f7233..1cea52526 100644 --- a/fn/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md +++ b/fn/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md @@ -38,6 +38,8 @@ Following environment variables are known to work in this step: - `BUILDFLAGS` - `DOCKER_INCREMENTAL_BINARY` +Note: during the transition into Moby Project, you might need to create a symbolic link `$GOPATH/src/github.com/docker/docker` to `$GOPATH/src/github.com/moby/moby`. + ### Step 2: Execute tests $ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest diff --git a/fn/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker/executor.go b/fn/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker/executor.go index 442428ac8..3442b0940 100644 --- a/fn/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker/executor.go +++ b/fn/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker/executor.go @@ -83,11 +83,13 @@ func privilegedTestChunkExecutor(autoRemove bool) testChunkExecutor { } var b bytes.Buffer teeContainerStream(&b, os.Stdout, os.Stderr, stream) - rc, err := cli.ContainerWait(context.Background(), id) - if err != nil { + resultC, errC := cli.ContainerWait(context.Background(), id, "") + select { + case err := <-errC: return 0, "", err + case result := <-resultC: + return result.StatusCode, b.String(), nil } - return rc, b.String(), nil } } diff --git a/fn/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/enumerate.go b/fn/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/enumerate.go index 08e5ac7f1..56c03e38d 100644 --- 
a/fn/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/enumerate.go +++ b/fn/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/enumerate.go @@ -24,7 +24,7 @@ func enumerateTestsForBytes(b []byte) ([]string, error) { return tests, nil } -// enumareteTests enumerates valid `-check.f` strings for all the test functions. +// enumerateTests enumerates valid `-check.f` strings for all the test functions. // Note that we use regexp rather than parsing Go files for performance reason. // (Try `TESTFLAGS=-check.list make test-integration-cli` to see the slowness of parsing) // The files needs to be `gofmt`-ed diff --git a/fn/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/host.go b/fn/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/host.go index 40f7a1a57..6823a7668 100644 --- a/fn/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/host.go +++ b/fn/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/host.go @@ -36,10 +36,10 @@ func xmain() (int, error) { // Should we use cobra maybe? replicas := flag.Int("replicas", 1, "Number of worker service replica") chunks := flag.Int("chunks", 0, "Number of test chunks executed in batch (0 == replicas)") - pushWorkerImage := flag.String("push-worker-image", "", "Push the worker image to the registry. Required for distribuetd execution. (empty == not to push)") + pushWorkerImage := flag.String("push-worker-image", "", "Push the worker image to the registry. Required for distributed execution. 
(empty == not to push)") shuffle := flag.Bool("shuffle", false, "Shuffle the input so as to mitigate makespan nonuniformity") // flags below are rarely used - randSeed := flag.Int64("rand-seed", int64(0), "Random seed used for shuffling (0 == curent time)") + randSeed := flag.Int64("rand-seed", int64(0), "Random seed used for shuffling (0 == current time)") filtersFile := flag.String("filters-file", "", "Path to optional file composed of `-check.f` filter strings") dryRun := flag.Bool("dry-run", false, "Dry run") keepExecutor := flag.Bool("keep-executor", false, "Do not auto-remove executor containers, which is used for running privileged programs on Swarm") @@ -188,5 +188,11 @@ func waitForContainerCompletion(cli *client.Client, stdout, stderr io.Writer, co } stdcopy.StdCopy(stdout, stderr, stream) stream.Close() - return cli.ContainerWait(context.Background(), containerID) + resultC, errC := cli.ContainerWait(context.Background(), containerID, "") + select { + case err := <-errC: + return 1, err + case result := <-resultC: + return result.StatusCode, nil + } } diff --git a/fn/vendor/github.com/docker/docker/hack/make.ps1 b/fn/vendor/github.com/docker/docker/hack/make.ps1 index 1a1b7c5db..ac3e36904 100644 --- a/fn/vendor/github.com/docker/docker/hack/make.ps1 +++ b/fn/vendor/github.com/docker/docker/hack/make.ps1 @@ -17,11 +17,12 @@ development and Windows to Windows CI. 
Usage Examples (run from repo root): - "hack\make.ps1 -Binary" to build the binaries - "hack\make.ps1 -Client" to build just the client 64-bit binary + "hack\make.ps1 -Client" to build docker.exe client 64-bit binary (remote repo) "hack\make.ps1 -TestUnit" to run unit tests - "hack\make.ps1 -Binary -TestUnit" to build the binaries and run unit tests + "hack\make.ps1 -Daemon -TestUnit" to build the daemon and run unit tests "hack\make.ps1 -All" to run everything this script knows about that can run in a container + "hack\make.ps1" to build the daemon binary (same as -Daemon) + "hack\make.ps1 -Binary" shortcut to -Client and -Daemon .PARAMETER Client Builds the client binaries. @@ -30,7 +31,7 @@ Builds the daemon binary. .PARAMETER Binary - Builds the client binaries and the daemon binary. A convenient shortcut to `make.ps1 -Client -Daemon`. + Builds the client and daemon binaries. A convenient shortcut to `make.ps1 -Client -Daemon`. .PARAMETER Race Use -race in go build and go test. @@ -174,7 +175,7 @@ Function Execute-Build($type, $additionalBuildTags, $directory) { if ($Race) { Write-Warning "Using race detector"; $raceParm=" -race"} if ($ForceBuildAll) { $allParm=" -a" } if ($NoOpt) { $optParm=" -gcflags "+""""+"-N -l"+"""" } - if ($addtionalBuildTags -ne "") { $buildTags += $(" " + $additionalBuildTags) } + if ($additionalBuildTags -ne "") { $buildTags += $(" " + $additionalBuildTags) } # Do the go build in the appropriate directory # Note -linkmode=internal is required to be able to debug on Windows. @@ -279,7 +280,7 @@ Function Validate-GoFormat($headCommit, $upstreamCommit) { # Get a list of all go source-code files which have changed. 
Ignore exit code on next call - always process regardless $files=@(); $files = Invoke-Expression "git diff $upstreamCommit...$headCommit --diff-filter=ACMR --name-only -- `'*.go`'" - $files = $files | Select-String -NotMatch "^vendor/" | Select-String -NotMatch "^cli/compose/schema/bindata.go" + $files = $files | Select-String -NotMatch "^vendor/" $badFiles=@(); $files | %{ # Deliberately ignore error on next line - treat as failed $content=Invoke-Expression "git show $headCommit`:$_" @@ -340,8 +341,8 @@ Try { # Handle the "-Binary" shortcut to build both client and daemon. if ($Binary) { $Client = $True; $Daemon = $True } - # Default to building the binaries if not asked for anything explicitly. - if (-not($Client) -and -not($Daemon) -and -not($DCO) -and -not($PkgImports) -and -not($GoFormat) -and -not($TestUnit)) { $Client=$True; $Daemon=$True } + # Default to building the daemon if not asked for anything explicitly. + if (-not($Client) -and -not($Daemon) -and -not($DCO) -and -not($PkgImports) -and -not($GoFormat) -and -not($TestUnit)) { $Daemon=$True } # Verify git is installed if ($(Get-Command git -ErrorAction SilentlyContinue) -eq $nil) { Throw "Git does not appear to be installed" } @@ -396,7 +397,42 @@ Try { # Perform the actual build if ($Daemon) { Execute-Build "daemon" "daemon" "dockerd" } - if ($Client) { Execute-Build "client" "" "docker" } + if ($Client) { + # Get the repo and commit of the client to build. + "hack\dockerfile\binaries-commits" | ForEach-Object { + $dockerCliRepo = ((Get-Content $_ | Select-String "DOCKERCLI_REPO") -split "=")[1] + $dockerCliCommit = ((Get-Content $_ | Select-String "DOCKERCLI_COMMIT") -split "=")[1] + } + + # Build from a temporary directory. + $tempLocation = "$env:TEMP\$(New-Guid)" + New-Item -ItemType Directory $tempLocation | Out-Null + + # Temporarily override GOPATH, then clone, checkout, and build. 
+ $saveGOPATH = $env:GOPATH + Try { + $env:GOPATH = $tempLocation + $dockerCliRoot = "$env:GOPATH\src\github.com\docker\cli" + Write-Host "INFO: Cloning client repository..." + Invoke-Expression "git clone -q $dockerCliRepo $dockerCliRoot" + if ($LASTEXITCODE -ne 0) { Throw "Failed to clone client repository $dockerCliRepo" } + Invoke-Expression "git -C $dockerCliRoot checkout -q $dockerCliCommit" + if ($LASTEXITCODE -ne 0) { Throw "Failed to checkout client commit $dockerCliCommit" } + Write-Host "INFO: Building client..." + Push-Location "$dockerCliRoot\cmd\docker"; $global:pushed=$True + Invoke-Expression "go build -o $root\bundles\docker.exe" + if ($LASTEXITCODE -ne 0) { Throw "Failed to compile client" } + Pop-Location; $global:pushed=$False + } + Catch [Exception] { + Throw $_ + } + Finally { + # Always restore GOPATH and remove the temporary directory. + $env:GOPATH = $saveGOPATH + Remove-Item -Force -Recurse $tempLocation + } + } } # Run unit tests diff --git a/fn/vendor/github.com/docker/docker/hack/make.sh b/fn/vendor/github.com/docker/docker/hack/make.sh index 42b969382..b7d59ba94 100755 --- a/fn/vendor/github.com/docker/docker/hack/make.sh +++ b/fn/vendor/github.com/docker/docker/hack/make.sh @@ -56,7 +56,6 @@ echo # List of bundles to create when no argument is passed DEFAULT_BUNDLES=( - binary-client binary-daemon dynbinary @@ -102,7 +101,7 @@ if [ "$AUTO_GOPATH" ]; then if [ "$(go env GOOS)" = 'solaris' ]; then # sys/unix is installed outside the standard library on solaris # TODO need to allow for version change, need to get version from go - export GO_VERSION=${GO_VERSION:-"1.7.1"} + export GO_VERSION=${GO_VERSION:-"1.8.1"} export GOPATH="${GOPATH}:/usr/lib/gocode/${GO_VERSION}" fi fi @@ -113,7 +112,6 @@ if [ ! 
"$GOPATH" ]; then exit 1 fi -DOCKER_BUILDTAGS+=" daemon" if ${PKG_CONFIG} 'libsystemd >= 209' 2> /dev/null ; then DOCKER_BUILDTAGS+=" journald" elif ${PKG_CONFIG} 'libsystemd-journal' 2> /dev/null ; then @@ -140,7 +138,6 @@ fi # Use these flags when compiling the tests and final binary IAMSTATIC='true' -source "$SCRIPTDIR/make/.go-autogen" if [ -z "$DOCKER_DEBUG" ]; then LDFLAGS='-w' fi diff --git a/fn/vendor/github.com/docker/docker/hack/make/.binary b/fn/vendor/github.com/docker/docker/hack/make/.binary index 01028c27e..8d4265cb6 100644 --- a/fn/vendor/github.com/docker/docker/hack/make/.binary +++ b/fn/vendor/github.com/docker/docker/hack/make/.binary @@ -1,6 +1,8 @@ #!/usr/bin/env bash set -e +GO_PACKAGE='github.com/docker/docker/cmd/dockerd' +BINARY_SHORT_NAME='dockerd' BINARY_NAME="$BINARY_SHORT_NAME-$VERSION" BINARY_EXTENSION="$(binary_extension)" BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" @@ -20,17 +22,6 @@ if [ "$(go env GOOS)/$(go env GOARCH)" != "$(go env GOHOSTOS)/$(go env GOHOSTARC esac fi -if [ "$IAMSTATIC" == "true" ] && [ "$(go env GOHOSTOS)" == "linux" ]; then - if [ "${GOOS}/${GOARCH}" == "darwin/amd64" ]; then - export CGO_ENABLED=1 - export CC=o64-clang - export LDFLAGS='-linkmode external -s' - export LDFLAGS_STATIC_DOCKER='-extld='${CC} - else - export BUILDFLAGS=( "${BUILDFLAGS[@]/pkcs11 /}" ) # we cannot dlopen in pkcs11 in a static binary - fi -fi - echo "Building: $DEST/$BINARY_FULLNAME" go build \ -o "$DEST/$BINARY_FULLNAME" \ diff --git a/fn/vendor/github.com/docker/docker/hack/make/.binary-setup b/fn/vendor/github.com/docker/docker/hack/make/.binary-setup index b4bd66872..15de89fe1 100644 --- a/fn/vendor/github.com/docker/docker/hack/make/.binary-setup +++ b/fn/vendor/github.com/docker/docker/hack/make/.binary-setup @@ -1,6 +1,5 @@ #!/usr/bin/env bash -DOCKER_CLIENT_BINARY_NAME='docker' DOCKER_DAEMON_BINARY_NAME='dockerd' DOCKER_RUNC_BINARY_NAME='docker-runc' DOCKER_CONTAINERD_BINARY_NAME='docker-containerd' diff --git 
a/fn/vendor/github.com/docker/docker/hack/make/.build-deb/rules b/fn/vendor/github.com/docker/docker/hack/make/.build-deb/rules index 6522103e5..19557ed50 100755 --- a/fn/vendor/github.com/docker/docker/hack/make/.build-deb/rules +++ b/fn/vendor/github.com/docker/docker/hack/make/.build-deb/rules @@ -15,14 +15,12 @@ override_dh_auto_build: override_dh_auto_test: ./bundles/$(VERSION)/dynbinary-daemon/dockerd -v - ./bundles/$(VERSION)/dynbinary-client/docker -v override_dh_strip: # Go has lots of problems with stripping, so just don't override_dh_auto_install: mkdir -p debian/docker-engine/usr/bin - cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary-client/docker)" debian/docker-engine/usr/bin/docker cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary-daemon/dockerd)" debian/docker-engine/usr/bin/dockerd cp -aT /usr/local/bin/docker-proxy debian/docker-engine/usr/bin/docker-proxy cp -aT /usr/local/bin/docker-containerd debian/docker-engine/usr/bin/docker-containerd diff --git a/fn/vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine.spec b/fn/vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine.spec index a31c1bd7f..6225bb74f 100644 --- a/fn/vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine.spec +++ b/fn/vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine.spec @@ -74,28 +74,22 @@ Requires: device-mapper >= 1.02.90-2 # start if with_selinux %if 0%{?with_selinux} -# Version of SELinux we were using -%if 0%{?fedora} == 20 -%global selinux_policyver 3.12.1-197 -%endif # fedora 20 -%if 0%{?fedora} == 21 -%global selinux_policyver 3.13.1-105 -%endif # fedora 21 -%if 0%{?fedora} >= 22 -%global selinux_policyver 3.13.1-128 -%endif # fedora 22 -%if 0%{?centos} >= 7 || 0%{?rhel} >= 7 -%global selinux_policyver 3.13.1-23 -%endif # centos,rhel 7 + +%if 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?fedora} >= 25 +Requires: container-selinux >= 2.9 +%endif# centos 7, rhel 7, fedora 25 + %if 0%{?oraclelinux} >= 7 %global 
selinux_policyver 3.13.1-102.0.3.el7_3.15 %endif # oraclelinux 7 -%endif # with_selinux - -# RE: rhbz#1195804 - ensure min NVR for selinux-policy -%if 0%{?with_selinux} +%if 0%{?fedora} == 24 +%global selinux_policyver 3.13.1-191 +%endif # fedora 24 -- container-selinux on fedora24 does not properly set dockerd, for now just carry docker-engine-selinux for it +%if 0%{?oraclelinux} >= 7 || 0%{?fedora} == 24 Requires: selinux-policy >= %{selinux_policyver} Requires(pre): %{name}-selinux >= %{version}-%{release} +%endif # selinux-policy for oraclelinux-7, fedora-24 + %endif # with_selinux # conflicting packages @@ -127,13 +121,11 @@ export DOCKER_GITCOMMIT=%{_gitcommit} # ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here %check -./bundles/%{_origversion}/dynbinary-client/docker -v ./bundles/%{_origversion}/dynbinary-daemon/dockerd -v %install # install binary install -d $RPM_BUILD_ROOT/%{_bindir} -install -p -m 755 bundles/%{_origversion}/dynbinary-client/docker-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/docker install -p -m 755 bundles/%{_origversion}/dynbinary-daemon/dockerd-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/dockerd # install proxy diff --git a/fn/vendor/github.com/docker/docker/hack/make/.integration-daemon-setup b/fn/vendor/github.com/docker/docker/hack/make/.integration-daemon-setup index d0ec03e9e..5134e4c2d 100644 --- a/fn/vendor/github.com/docker/docker/hack/make/.integration-daemon-setup +++ b/fn/vendor/github.com/docker/docker/hack/make/.integration-daemon-setup @@ -2,6 +2,6 @@ set -e bundle .detect-daemon-osarch -if [ $DOCKER_ENGINE_GOOS != "windows" ]; then +if [ "$DOCKER_ENGINE_GOOS" != "windows" ]; then bundle .ensure-emptyfs fi diff --git a/fn/vendor/github.com/docker/docker/hack/make/.integration-daemon-start b/fn/vendor/github.com/docker/docker/hack/make/.integration-daemon-start index 664aaa300..dafd0533d 100644 --- a/fn/vendor/github.com/docker/docker/hack/make/.integration-daemon-start +++ 
b/fn/vendor/github.com/docker/docker/hack/make/.integration-daemon-start @@ -3,13 +3,26 @@ # see test-integration-cli for example usage of this script base="$ABS_DEST/.." -export PATH="$base/binary-client:$base/binary-daemon:$base/dynbinary-client:$base/dynbinary-daemon:$PATH" +export PATH="$base/binary-daemon:$base/dynbinary-daemon:$PATH" -if ! command -v docker &> /dev/null; then - echo >&2 'error: binary-client or dynbinary-client must be run before .integration-daemon-start' +export TEST_CLIENT_BINARY=docker + +# Do not bump this version! Integration tests should no longer rely on the docker cli, they should be +# API tests instead. For the existing tests the scripts will use a frozen version of the docker cli +# with a DOCKER_API_VERSION frozen to 1.30, which should ensure that the CI remains green at all times. +export DOCKER_API_VERSION=1.30 +if [ -n "$DOCKER_CLI_PATH" ]; then + export TEST_CLIENT_BINARY=/usr/local/cli/$(basename "$DOCKER_CLI_PATH") +fi + +echo "Using test binary $TEST_CLIENT_BINARY" +if ! command -v "$TEST_CLIENT_BINARY" &> /dev/null; then + echo >&2 'error: missing test client $TEST_CLIENT_BINARY' false fi +export DOCKER_CLI_VERSION=$(${TEST_CLIENT_BINARY} --version | awk '{ gsub(",", " "); print $3 }') + # This is a temporary hack for split-binary mode. It can be removed once # https://github.com/docker/docker/pull/22134 is merged into docker master if [ "$(go env GOOS)" = 'windows' ]; then @@ -88,7 +101,7 @@ fi # give it a little time to come up so it's "ready" tries=60 echo "INFO: Waiting for daemon to start..." -while ! docker version &> /dev/null; do +while ! $TEST_CLIENT_BINARY version &> /dev/null; do (( tries-- )) if [ $tries -le 0 ]; then printf "\n" @@ -96,8 +109,8 @@ while ! 
docker version &> /dev/null; do echo >&2 "error: daemon failed to start" echo >&2 " check $DEST/docker.log for details" else - echo >&2 "error: daemon at $DOCKER_HOST fails to 'docker version':" - docker version >&2 || true + echo >&2 "error: daemon at $DOCKER_HOST fails to '$TEST_CLIENT_BINARY version':" + $TEST_CLIENT_BINARY version >&2 || true # Additional Windows CI debugging as this is a common error as of # January 2016 if [ "$(go env GOOS)" = 'windows' ]; then diff --git a/fn/vendor/github.com/docker/docker/hack/make/.integration-test-helpers b/fn/vendor/github.com/docker/docker/hack/make/.integration-test-helpers index 7a086d5fe..4ff9677c7 100644 --- a/fn/vendor/github.com/docker/docker/hack/make/.integration-test-helpers +++ b/fn/vendor/github.com/docker/docker/hack/make/.integration-test-helpers @@ -60,6 +60,8 @@ test_env() { # use "env -i" to tightly control the environment variables that bleed into the tests env -i \ DEST="$DEST" \ + DOCKER_CLI_VERSION="$DOCKER_CLI_VERSION" \ + DOCKER_API_VERSION="$DOCKER_API_VERSION" \ DOCKER_INTEGRATION_DAEMON_DEST="$DOCKER_INTEGRATION_DAEMON_DEST" \ DOCKER_TLS_VERIFY="$DOCKER_TEST_TLS_VERIFY" \ DOCKER_CERT_PATH="$DOCKER_TEST_CERT_PATH" \ @@ -76,6 +78,7 @@ test_env() { PATH="$PATH" \ TEMP="$TEMP" \ TEST_IMAGE_NAMESPACE="$TEST_IMAGE_NAMESPACE" \ + TEST_CLIENT_BINARY="$TEST_CLIENT_BINARY" \ "$@" ) } diff --git a/fn/vendor/github.com/docker/docker/hack/make/binary b/fn/vendor/github.com/docker/docker/hack/make/binary index 9b89dbe66..eab69bb06 100644 --- a/fn/vendor/github.com/docker/docker/hack/make/binary +++ b/fn/vendor/github.com/docker/docker/hack/make/binary @@ -3,11 +3,6 @@ set -e rm -rf "$DEST" # This script exists as backwards compatibility for CI -( - DEST="${DEST}-client" - ABS_DEST="${ABS_DEST}-client" - . 
hack/make/binary-client -) ( DEST="${DEST}-daemon" ABS_DEST="${ABS_DEST}-daemon" diff --git a/fn/vendor/github.com/docker/docker/hack/make/binary-client b/fn/vendor/github.com/docker/docker/hack/make/binary-client deleted file mode 100644 index 59c1bc0e0..000000000 --- a/fn/vendor/github.com/docker/docker/hack/make/binary-client +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash -set -e - -[ -z "$KEEPDEST" ] && \ - rm -rf "$DEST" - -( - source "${MAKEDIR}/.binary-setup" - export BINARY_SHORT_NAME="$DOCKER_CLIENT_BINARY_NAME" - export GO_PACKAGE='github.com/docker/docker/cmd/docker' - source "${MAKEDIR}/.binary" -) diff --git a/fn/vendor/github.com/docker/docker/hack/make/binary-daemon b/fn/vendor/github.com/docker/docker/hack/make/binary-daemon index a6b322b55..736f308a8 100644 --- a/fn/vendor/github.com/docker/docker/hack/make/binary-daemon +++ b/fn/vendor/github.com/docker/docker/hack/make/binary-daemon @@ -1,13 +1,9 @@ #!/usr/bin/env bash set -e -[ -z "$KEEPDEST" ] && \ - rm -rf "$DEST" +[ -z "$KEEPDEST" ] && rm -rf "$DEST" ( - source "${MAKEDIR}/.binary-setup" - export BINARY_SHORT_NAME="$DOCKER_DAEMON_BINARY_NAME" - export GO_PACKAGE='github.com/docker/docker/cmd/dockerd' source "${MAKEDIR}/.binary" copy_binaries "$DEST" 'hash' ) diff --git a/fn/vendor/github.com/docker/docker/hack/make/build-integration-test-binary b/fn/vendor/github.com/docker/docker/hack/make/build-integration-test-binary index 29da577f6..a842e8cce 100644 --- a/fn/vendor/github.com/docker/docker/hack/make/build-integration-test-binary +++ b/fn/vendor/github.com/docker/docker/hack/make/build-integration-test-binary @@ -4,6 +4,8 @@ set -e rm -rf "$DEST" DEST="$ABS_DEST/../test-integration-cli" +source "$SCRIPTDIR/make/.go-autogen" + if [ -z $DOCKER_INTEGRATION_TESTS_VERIFIED ]; then source ${MAKEDIR}/.integration-test-helpers ensure_test_dir integration-cli "$DEST/test.main" diff --git a/fn/vendor/github.com/docker/docker/hack/make/build-rpm 
b/fn/vendor/github.com/docker/docker/hack/make/build-rpm index db0bcf1c1..1e89a78d5 100644 --- a/fn/vendor/github.com/docker/docker/hack/make/build-rpm +++ b/fn/vendor/github.com/docker/docker/hack/make/build-rpm @@ -125,19 +125,18 @@ set -e # selinux policy referencing systemd things won't work on non-systemd versions # of centos or rhel, which we don't support anyways if [ "${suite%.*}" -gt 6 ] && [[ "$version" != opensuse* ]]; then - selinuxDir="selinux" if [ -d "./contrib/selinux-$version" ]; then selinuxDir="selinux-${version}" + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + RUN tar -cz -C /usr/src/${rpmName}/contrib/${selinuxDir} -f /root/rpmbuild/SOURCES/${rpmName}-selinux.tar.gz ${rpmName}-selinux + RUN rpmbuild -ba \ + --define '_gitcommit $DOCKER_GITCOMMIT' \ + --define '_release $rpmRelease' \ + --define '_version $rpmVersion' \ + --define '_origversion $VERSION' \ + ${rpmName}-selinux.spec + EOF fi - cat >> "$DEST/$version/Dockerfile.build" <<-EOF - RUN tar -cz -C /usr/src/${rpmName}/contrib/${selinuxDir} -f /root/rpmbuild/SOURCES/${rpmName}-selinux.tar.gz ${rpmName}-selinux - RUN rpmbuild -ba \ - --define '_gitcommit $DOCKER_GITCOMMIT' \ - --define '_release $rpmRelease' \ - --define '_version $rpmVersion' \ - --define '_origversion $VERSION' \ - ${rpmName}-selinux.spec - EOF fi tempImage="docker-temp/build-rpm:$version" ( set -x && docker build ${DOCKER_BUILD_ARGS} -t "$tempImage" -f $DEST/$version/Dockerfile.build . 
) diff --git a/fn/vendor/github.com/docker/docker/hack/make/cross b/fn/vendor/github.com/docker/docker/hack/make/cross index 94d2ddf67..85dd3c637 100644 --- a/fn/vendor/github.com/docker/docker/hack/make/cross +++ b/fn/vendor/github.com/docker/docker/hack/make/cross @@ -1,51 +1,29 @@ #!/usr/bin/env bash set -e -# explicit list of os/arch combos that support being a daemon -declare -A daemonSupporting -daemonSupporting=( - [linux/amd64]=1 - [windows/amd64]=1 -) - # if we have our linux/amd64 version compiled, let's symlink it in if [ -x "$DEST/../binary-daemon/dockerd-$VERSION" ]; then arch=$(go env GOHOSTARCH) mkdir -p "$DEST/linux/${arch}" ( cd "$DEST/linux/${arch}" - ln -s ../../../binary-daemon/* ./ - ln -s ../../../binary-client/* ./ + ln -sf ../../../binary-daemon/* ./ ) echo "Created symlinks:" "$DEST/linux/${arch}/"* fi +DOCKER_CROSSPLATFORMS=${DOCKER_CROSSPLATFORMS:-"linux/amd64 windows/amd64"} + for platform in $DOCKER_CROSSPLATFORMS; do ( export KEEPDEST=1 export DEST="$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION - mkdir -p "$DEST" - ABS_DEST="$(cd "$DEST" && pwd -P)" export GOOS=${platform%/*} export GOARCH=${platform##*/} - if [ "$GOOS" != "solaris" ]; then - # TODO. Solaris cannot be cross build because of CGO calls. - - # go install docker/docker/pkg packages to ensure that - # they build cross platform. - go install github.com/docker/docker/pkg/... 
- - if [ -z "${daemonSupporting[$platform]}" ]; then - # we just need a simple client for these platforms - export LDFLAGS_STATIC_DOCKER="" - # remove the "daemon" build tag from platforms that aren't supported - export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) - source "${MAKEDIR}/binary-client" - else - source "${MAKEDIR}/binary-client" - source "${MAKEDIR}/binary-daemon" - fi - fi + echo "Cross building: $DEST" + mkdir -p "$DEST" + ABS_DEST="$(cd "$DEST" && pwd -P)" + source "${MAKEDIR}/binary-daemon" ) done diff --git a/fn/vendor/github.com/docker/docker/hack/make/dynbinary b/fn/vendor/github.com/docker/docker/hack/make/dynbinary index 998f92acf..981e505e9 100644 --- a/fn/vendor/github.com/docker/docker/hack/make/dynbinary +++ b/fn/vendor/github.com/docker/docker/hack/make/dynbinary @@ -2,11 +2,6 @@ set -e # This script exists as backwards compatibility for CI -( - DEST="${DEST}-client" - ABS_DEST="${ABS_DEST}-client" - . hack/make/dynbinary-client -) ( DEST="${DEST}-daemon" diff --git a/fn/vendor/github.com/docker/docker/hack/make/dynbinary-client b/fn/vendor/github.com/docker/docker/hack/make/dynbinary-client deleted file mode 100644 index 61fb28ede..000000000 --- a/fn/vendor/github.com/docker/docker/hack/make/dynbinary-client +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash -set -e - -( - export BINARY_SHORT_NAME='docker' - export GO_PACKAGE='github.com/docker/docker/cmd/docker' - export IAMSTATIC='false' - export LDFLAGS_STATIC_DOCKER='' - export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary - export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here - source "${MAKEDIR}/.binary" -) diff --git a/fn/vendor/github.com/docker/docker/hack/make/dynbinary-daemon b/fn/vendor/github.com/docker/docker/hack/make/dynbinary-daemon index 1b7dbb45a..d1c0070e6 100644 --- a/fn/vendor/github.com/docker/docker/hack/make/dynbinary-daemon +++ 
b/fn/vendor/github.com/docker/docker/hack/make/dynbinary-daemon @@ -2,8 +2,6 @@ set -e ( - export BINARY_SHORT_NAME='dockerd' - export GO_PACKAGE='github.com/docker/docker/cmd/dockerd' export IAMSTATIC='false' export LDFLAGS_STATIC_DOCKER='' export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary diff --git a/fn/vendor/github.com/docker/docker/hack/make/generate-index-listing b/fn/vendor/github.com/docker/docker/hack/make/generate-index-listing index 7094ad7db..9f1208403 100755 --- a/fn/vendor/github.com/docker/docker/hack/make/generate-index-listing +++ b/fn/vendor/github.com/docker/docker/hack/make/generate-index-listing @@ -40,7 +40,7 @@ create_index() { # change IFS locally within subshell so the for loop saves line correctly to L var IFS=$'\n'; - # pretty sweet, will mimick the normal apache output. skipping "index" and hidden files + # pretty sweet, will mimic the normal apache output. skipping "index" and hidden files for L in $(find -L . -mount -depth -maxdepth 1 -type f ! -name 'index' ! 
-name '.*' -prune -printf "%f|@_@%Td-%Tb-%TY %Tk:%TM @%f@\n"|sort|column -t -s '|' | sed 's,\([\ ]\+\)@_@,\1,g'); do # file diff --git a/fn/vendor/github.com/docker/docker/hack/make/install-binary b/fn/vendor/github.com/docker/docker/hack/make/install-binary old mode 100644 new mode 100755 index 4051c9cab..57aa1a28c --- a/fn/vendor/github.com/docker/docker/hack/make/install-binary +++ b/fn/vendor/github.com/docker/docker/hack/make/install-binary @@ -3,10 +3,6 @@ set -e rm -rf "$DEST" -( - source "${MAKEDIR}/install-binary-client" -) - ( source "${MAKEDIR}/install-binary-daemon" ) diff --git a/fn/vendor/github.com/docker/docker/hack/make/install-binary-client b/fn/vendor/github.com/docker/docker/hack/make/install-binary-client deleted file mode 100644 index 57938eb30..000000000 --- a/fn/vendor/github.com/docker/docker/hack/make/install-binary-client +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash - -set -e -rm -rf "$DEST" - -( - DEST="$(dirname $DEST)/binary-client" - source "${MAKEDIR}/.binary-setup" - install_binary "${DEST}/${DOCKER_CLIENT_BINARY_NAME}" -) diff --git a/fn/vendor/github.com/docker/docker/hack/make/test-unit b/fn/vendor/github.com/docker/docker/hack/make/test-unit index 1c2ba3ff3..85eef5b5b 100644 --- a/fn/vendor/github.com/docker/docker/hack/make/test-unit +++ b/fn/vendor/github.com/docker/docker/hack/make/test-unit @@ -26,6 +26,8 @@ bundle_test_unit() { TEST_PATH=./${TESTDIRS} fi + source "${MAKEDIR}/.go-autogen" + if [ "$(go env GOHOSTOS)" = 'solaris' ]; then pkg_list=$(go list -e \ -f '{{if ne .Name "github.com/docker/docker"}} @@ -50,6 +52,7 @@ bundle_test_unit() { fi go test -cover -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS $pkg_list + go test -cover -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS github.com/docker/docker/pkg/term -test.root } bundle_test_unit 2>&1 | tee -a "$DEST/test.log" diff --git a/fn/vendor/github.com/docker/docker/hack/make/tgz b/fn/vendor/github.com/docker/docker/hack/make/tgz index 4af2e77c9..1fd37b6b5 
100644 --- a/fn/vendor/github.com/docker/docker/hack/make/tgz +++ b/fn/vendor/github.com/docker/docker/hack/make/tgz @@ -1,92 +1,2 @@ #!/usr/bin/env bash - -CROSS="$DEST/../cross" - -set -e - -arch=$(go env GOHOSTARCH) -if [ ! -d "$CROSS/linux/${arch}" ]; then - echo >&2 'error: binary and cross must be run before tgz' - false -fi - -( -for d in "$CROSS/"*/*; do - export GOARCH="$(basename "$d")" - export GOOS="$(basename "$(dirname "$d")")" - - source "${MAKEDIR}/.binary-setup" - - BINARY_NAME="${DOCKER_CLIENT_BINARY_NAME}-$VERSION" - DAEMON_BINARY_NAME="${DOCKER_DAEMON_BINARY_NAME}-$VERSION" - PROXY_BINARY_NAME="${DOCKER_PROXY_BINARY_NAME}-$VERSION" - BINARY_EXTENSION="$(export GOOS && binary_extension)" - if [ "$GOOS" = 'windows' ]; then - # if windows use a zip, not tgz - BUNDLE_EXTENSION=".zip" - IS_TAR="false" - elif [ "$GOOS" == "solaris" ]; then - # Solaris bypasses cross due to CGO issues. - continue - else - BUNDLE_EXTENSION=".tgz" - IS_TAR="true" - fi - BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" - DAEMON_BINARY_FULLNAME="$DAEMON_BINARY_NAME$BINARY_EXTENSION" - PROXY_BINARY_FULLNAME="$PROXY_BINARY_NAME$BINARY_EXTENSION" - mkdir -p "$DEST/$GOOS/$GOARCH" - TGZ="$DEST/$GOOS/$GOARCH/$BINARY_NAME$BUNDLE_EXTENSION" - - # The staging directory for the files in the tgz - BUILD_PATH="$DEST/build" - - # The directory that is at the root of the tar file - TAR_BASE_DIRECTORY="docker" - - # $DEST/build/docker - TAR_PATH="$BUILD_PATH/$TAR_BASE_DIRECTORY" - - # Copy the correct docker binary - mkdir -p $TAR_PATH - cp -L "$d/$BINARY_FULLNAME" "$TAR_PATH/${DOCKER_CLIENT_BINARY_NAME}${BINARY_EXTENSION}" - if [ -f "$d/$DAEMON_BINARY_FULLNAME" ]; then - cp -L "$d/$DAEMON_BINARY_FULLNAME" "$TAR_PATH/${DOCKER_DAEMON_BINARY_NAME}${BINARY_EXTENSION}" - fi - if [ -f "$d/$PROXY_BINARY_FULLNAME" ]; then - cp -L "$d/$PROXY_BINARY_FULLNAME" "$TAR_PATH/${DOCKER_PROXY_BINARY_NAME}${BINARY_EXTENSION}" - fi - - # copy over all the extra binaries - copy_binaries $TAR_PATH - - # add 
completions - for s in bash fish zsh; do - mkdir -p $TAR_PATH/completion/$s - cp -L contrib/completion/$s/*docker* $TAR_PATH/completion/$s/ - done - - if [ "$IS_TAR" == "true" ]; then - echo "Creating tgz from $BUILD_PATH and naming it $TGZ" - tar --numeric-owner --owner 0 -C "$BUILD_PATH" -czf "$TGZ" $TAR_BASE_DIRECTORY - else - # ZIP needs to full absolute dir path, not the absolute path - ZIP=`pwd`"/$TGZ" - # keep track of where we are, for later. - pushd . - # go into the BUILD_PATH since zip does not have a -C equivalent. - cd $BUILD_PATH - echo "Creating zip from $BUILD_PATH and naming it $ZIP" - zip -q -r $ZIP $TAR_BASE_DIRECTORY - # go back to where we started - popd - fi - - hash_files "$TGZ" - - # cleanup after ourselves - rm -rf "$BUILD_PATH" - - echo "Created tgz: $TGZ" -done -) +echo "tgz is deprecated" diff --git a/fn/vendor/github.com/docker/docker/hack/make/yaml-docs-generator b/fn/vendor/github.com/docker/docker/hack/make/yaml-docs-generator deleted file mode 100644 index 8548deebb..000000000 --- a/fn/vendor/github.com/docker/docker/hack/make/yaml-docs-generator +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash -set -e - -[ -z "$KEEPDEST" ] && \ - rm -rf "$DEST" - -( - source "${MAKEDIR}/.binary-setup" - export BINARY_SHORT_NAME="yaml-docs-generator" - export GO_PACKAGE='github.com/docker/docker/docs/yaml' - source "${MAKEDIR}/.binary" -) diff --git a/fn/vendor/github.com/docker/docker/hack/validate/compose-bindata b/fn/vendor/github.com/docker/docker/hack/validate/compose-bindata deleted file mode 100755 index a565da460..000000000 --- a/fn/vendor/github.com/docker/docker/hack/validate/compose-bindata +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash - -export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -source "${SCRIPTDIR}/.validate" - -IFS=$'\n' -files=( $(validate_diff --diff-filter=ACMR --name-only -- 'cli/compose/schema/data' || true) ) -unset IFS - -if [ ${#files[@]} -gt 0 ]; then - go generate 
github.com/docker/docker/cli/compose/schema 2> /dev/null - # Let see if the working directory is clean - diffs="$(git status --porcelain -- cli/compose/schema 2>/dev/null)" - if [ "$diffs" ]; then - { - echo 'The result of `go generate github.com/docker/docker/cli/compose/schema` differs' - echo - echo "$diffs" - echo - echo 'Please run `go generate github.com/docker/docker/cli/compose/schema`' - } >&2 - false - else - echo 'Congratulations! cli/compose/schema/bindata.go is up-to-date.' - fi -else - echo 'No cli/compose/schema/data changes in diff.' -fi diff --git a/fn/vendor/github.com/docker/docker/hack/validate/default b/fn/vendor/github.com/docker/docker/hack/validate/default index 1dce96350..e243f4383 100755 --- a/fn/vendor/github.com/docker/docker/hack/validate/default +++ b/fn/vendor/github.com/docker/docker/hack/validate/default @@ -16,4 +16,3 @@ export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" . $SCRIPTDIR/vet . $SCRIPTDIR/changelog-well-formed . $SCRIPTDIR/changelog-date-descending -. 
$SCRIPTDIR/compose-bindata diff --git a/fn/vendor/github.com/docker/docker/hack/validate/gofmt b/fn/vendor/github.com/docker/docker/hack/validate/gofmt index 42ba052b7..38027a9f7 100755 --- a/fn/vendor/github.com/docker/docker/hack/validate/gofmt +++ b/fn/vendor/github.com/docker/docker/hack/validate/gofmt @@ -5,8 +5,8 @@ source "${SCRIPTDIR}/.validate" IFS=$'\n' files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | - grep -v '^vendor/' | - grep -v '^cli/compose/schema/bindata.go' || true) ) + grep -v '^vendor/' | + grep -v '\.pb\.go$' || true) ) unset IFS badFiles=() diff --git a/fn/vendor/github.com/docker/docker/hack/validate/lint b/fn/vendor/github.com/docker/docker/hack/validate/lint index d362f4624..341490a04 100755 --- a/fn/vendor/github.com/docker/docker/hack/validate/lint +++ b/fn/vendor/github.com/docker/docker/hack/validate/lint @@ -4,7 +4,7 @@ export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" source "${SCRIPTDIR}/.validate" IFS=$'\n' -files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' | grep -v '^api/types/container/' | grep -v '^cli/compose/schema/bindata.go' | grep -v '^api/types/plugins/logdriver/entry.pb.go' || true) ) +files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' | grep -v '^api/types/container/' | grep -v '\.pb\.go$' || true) ) unset IFS errors=() diff --git a/fn/vendor/github.com/docker/docker/hooks/post_build b/fn/vendor/github.com/docker/docker/hooks/post_build deleted file mode 100755 index 528170712..000000000 --- a/fn/vendor/github.com/docker/docker/hooks/post_build +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -if [ -n "${BUILD_DOCS}" ]; then - set -e - DOCS_IMAGE=${DOCS_IMAGE:-${IMAGE_NAME}-docs} - docker run \ - --entrypoint '' \ - --privileged \ - -e DOCKER_GITCOMMIT=$(git rev-parse --short HEAD) \ - -v $(pwd)/docs/yaml/docs:/docs \ - "${IMAGE_NAME}" \ - sh -c 'hack/make.sh yaml-docs-generator && 
bundles/latest/yaml-docs-generator/yaml-docs-generator --target /docs' - - ( - cd docs/yaml - docker build -t ${DOCS_IMAGE} . - docker push ${DOCS_IMAGE} - ) -fi diff --git a/fn/vendor/github.com/docker/docker/image/cache/compare.go b/fn/vendor/github.com/docker/docker/image/cache/compare.go index 6abbdcd8f..923793246 100644 --- a/fn/vendor/github.com/docker/docker/image/cache/compare.go +++ b/fn/vendor/github.com/docker/docker/image/cache/compare.go @@ -1,6 +1,8 @@ package cache -import "github.com/docker/docker/api/types/container" +import ( + "github.com/docker/docker/api/types/container" +) // compare two Config struct. Do not compare the "Image" nor "Hostname" fields // If OpenStdin is set, then it differs diff --git a/fn/vendor/github.com/docker/docker/image/cache/compare_test.go b/fn/vendor/github.com/docker/docker/image/cache/compare_test.go index 7cc058933..10e464b43 100644 --- a/fn/vendor/github.com/docker/docker/image/cache/compare_test.go +++ b/fn/vendor/github.com/docker/docker/image/cache/compare_test.go @@ -46,9 +46,9 @@ func TestCompare(t *testing.T) { sameConfigs := map[*container.Config]*container.Config{ // Empty config - &container.Config{}: {}, + {}: {}, // Does not compare hostname, domainname & image - &container.Config{ + { Hostname: "host1", Domainname: "domain1", Image: "image1", @@ -60,23 +60,23 @@ func TestCompare(t *testing.T) { User: "user", }, // only OpenStdin - &container.Config{OpenStdin: false}: {OpenStdin: false}, + {OpenStdin: false}: {OpenStdin: false}, // only env - &container.Config{Env: envs1}: {Env: envs1}, + {Env: envs1}: {Env: envs1}, // only cmd - &container.Config{Cmd: cmd1}: {Cmd: cmd1}, + {Cmd: cmd1}: {Cmd: cmd1}, // only labels - &container.Config{Labels: labels1}: {Labels: labels1}, + {Labels: labels1}: {Labels: labels1}, // only exposedPorts - &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports1}, + {ExposedPorts: ports1}: {ExposedPorts: ports1}, // only entrypoints - &container.Config{Entrypoint: 
entrypoint1}: {Entrypoint: entrypoint1}, + {Entrypoint: entrypoint1}: {Entrypoint: entrypoint1}, // only volumes - &container.Config{Volumes: volumes1}: {Volumes: volumes1}, + {Volumes: volumes1}: {Volumes: volumes1}, } differentConfigs := map[*container.Config]*container.Config{ nil: nil, - &container.Config{ + { Hostname: "host1", Domainname: "domain1", Image: "image1", @@ -88,30 +88,30 @@ func TestCompare(t *testing.T) { User: "user2", }, // only OpenStdin - &container.Config{OpenStdin: false}: {OpenStdin: true}, - &container.Config{OpenStdin: true}: {OpenStdin: false}, + {OpenStdin: false}: {OpenStdin: true}, + {OpenStdin: true}: {OpenStdin: false}, // only env - &container.Config{Env: envs1}: {Env: envs2}, + {Env: envs1}: {Env: envs2}, // only cmd - &container.Config{Cmd: cmd1}: {Cmd: cmd2}, + {Cmd: cmd1}: {Cmd: cmd2}, // not the same number of parts - &container.Config{Cmd: cmd1}: {Cmd: cmd3}, + {Cmd: cmd1}: {Cmd: cmd3}, // only labels - &container.Config{Labels: labels1}: {Labels: labels2}, + {Labels: labels1}: {Labels: labels2}, // not the same number of labels - &container.Config{Labels: labels1}: {Labels: labels3}, + {Labels: labels1}: {Labels: labels3}, // only exposedPorts - &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports2}, + {ExposedPorts: ports1}: {ExposedPorts: ports2}, // not the same number of ports - &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports3}, + {ExposedPorts: ports1}: {ExposedPorts: ports3}, // only entrypoints - &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint2}, + {Entrypoint: entrypoint1}: {Entrypoint: entrypoint2}, // not the same number of parts - &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint3}, + {Entrypoint: entrypoint1}: {Entrypoint: entrypoint3}, // only volumes - &container.Config{Volumes: volumes1}: {Volumes: volumes2}, + {Volumes: volumes1}: {Volumes: volumes2}, // not the same number of labels - &container.Config{Volumes: volumes1}: {Volumes: volumes3}, + 
{Volumes: volumes1}: {Volumes: volumes3}, } for config1, config2 := range sameConfigs { if !compare(config1, config2) { diff --git a/fn/vendor/github.com/docker/docker/image/fs_test.go b/fn/vendor/github.com/docker/docker/image/fs_test.go index 76abbdcc4..5f2437cad 100644 --- a/fn/vendor/github.com/docker/docker/image/fs_test.go +++ b/fn/vendor/github.com/docker/docker/image/fs_test.go @@ -11,16 +11,17 @@ import ( "path/filepath" "testing" - "github.com/docker/docker/pkg/testutil/assert" + "github.com/docker/docker/pkg/testutil" "github.com/opencontainers/go-digest" + "github.com/stretchr/testify/assert" ) func defaultFSStoreBackend(t *testing.T) (StoreBackend, func()) { tmpdir, err := ioutil.TempDir("", "images-fs-store") - assert.NilError(t, err) + assert.NoError(t, err) fsBackend, err := NewFSStoreBackend(tmpdir) - assert.NilError(t, err) + assert.NoError(t, err) return fsBackend, func() { os.RemoveAll(tmpdir) } } @@ -30,15 +31,15 @@ func TestFSGetInvalidData(t *testing.T) { defer cleanup() id, err := store.Set([]byte("foobar")) - assert.NilError(t, err) + assert.NoError(t, err) dgst := digest.Digest(id) err = ioutil.WriteFile(filepath.Join(store.(*fs).root, contentDirName, string(dgst.Algorithm()), dgst.Hex()), []byte("foobar2"), 0600) - assert.NilError(t, err) + assert.NoError(t, err) _, err = store.Get(id) - assert.Error(t, err, "failed to verify") + testutil.ErrorContains(t, err, "failed to verify") } func TestFSInvalidSet(t *testing.T) { @@ -47,15 +48,15 @@ func TestFSInvalidSet(t *testing.T) { id := digest.FromBytes([]byte("foobar")) err := os.Mkdir(filepath.Join(store.(*fs).root, contentDirName, string(id.Algorithm()), id.Hex()), 0700) - assert.NilError(t, err) + assert.NoError(t, err) _, err = store.Set([]byte("foobar")) - assert.Error(t, err, "failed to write digest data") + testutil.ErrorContains(t, err, "failed to write digest data") } func TestFSInvalidRoot(t *testing.T) { tmpdir, err := ioutil.TempDir("", "images-fs-store") - assert.NilError(t, err) 
+ assert.NoError(t, err) defer os.RemoveAll(tmpdir) tcases := []struct { @@ -70,14 +71,14 @@ func TestFSInvalidRoot(t *testing.T) { root := filepath.Join(tmpdir, tc.root) filePath := filepath.Join(tmpdir, tc.invalidFile) err := os.MkdirAll(filepath.Dir(filePath), 0700) - assert.NilError(t, err) + assert.NoError(t, err) f, err := os.Create(filePath) - assert.NilError(t, err) + assert.NoError(t, err) f.Close() _, err = NewFSStoreBackend(root) - assert.Error(t, err, "failed to create storage backend") + testutil.ErrorContains(t, err, "failed to create storage backend") os.RemoveAll(root) } @@ -89,10 +90,10 @@ func TestFSMetadataGetSet(t *testing.T) { defer cleanup() id, err := store.Set([]byte("foo")) - assert.NilError(t, err) + assert.NoError(t, err) id2, err := store.Set([]byte("bar")) - assert.NilError(t, err) + assert.NoError(t, err) tcases := []struct { id digest.Digest @@ -106,10 +107,10 @@ func TestFSMetadataGetSet(t *testing.T) { for _, tc := range tcases { err = store.SetMetadata(tc.id, tc.key, tc.value) - assert.NilError(t, err) + assert.NoError(t, err) actual, err := store.GetMetadata(tc.id, tc.key) - assert.NilError(t, err) + assert.NoError(t, err) if bytes.Compare(actual, tc.value) != 0 { t.Fatalf("Metadata expected %q, got %q", tc.value, actual) @@ -117,14 +118,14 @@ func TestFSMetadataGetSet(t *testing.T) { } _, err = store.GetMetadata(id2, "tkey2") - assert.Error(t, err, "failed to read metadata") + testutil.ErrorContains(t, err, "failed to read metadata") id3 := digest.FromBytes([]byte("baz")) err = store.SetMetadata(id3, "tkey", []byte("tval")) - assert.Error(t, err, "failed to get digest") + testutil.ErrorContains(t, err, "failed to get digest") _, err = store.GetMetadata(id3, "tkey") - assert.Error(t, err, "failed to get digest") + testutil.ErrorContains(t, err, "failed to get digest") } func TestFSInvalidWalker(t *testing.T) { @@ -132,19 +133,19 @@ func TestFSInvalidWalker(t *testing.T) { defer cleanup() fooID, err := store.Set([]byte("foo")) - 
assert.NilError(t, err) + assert.NoError(t, err) err = ioutil.WriteFile(filepath.Join(store.(*fs).root, contentDirName, "sha256/foobar"), []byte("foobar"), 0600) - assert.NilError(t, err) + assert.NoError(t, err) n := 0 err = store.Walk(func(id digest.Digest) error { - assert.Equal(t, id, fooID) + assert.Equal(t, fooID, id) n++ return nil }) - assert.NilError(t, err) - assert.Equal(t, n, 1) + assert.NoError(t, err) + assert.Equal(t, 1, n) } func TestFSGetSet(t *testing.T) { @@ -161,12 +162,12 @@ func TestFSGetSet(t *testing.T) { randomInput := make([]byte, 8*1024) _, err := rand.Read(randomInput) - assert.NilError(t, err) + assert.NoError(t, err) // skipping use of digest pkg because it is used by the implementation h := sha256.New() _, err = h.Write(randomInput) - assert.NilError(t, err) + assert.NoError(t, err) tcases = append(tcases, tcase{ input: randomInput, @@ -175,13 +176,13 @@ func TestFSGetSet(t *testing.T) { for _, tc := range tcases { id, err := store.Set([]byte(tc.input)) - assert.NilError(t, err) - assert.Equal(t, id, tc.expected) + assert.NoError(t, err) + assert.Equal(t, tc.expected, id) } for _, tc := range tcases { data, err := store.Get(tc.expected) - assert.NilError(t, err) + assert.NoError(t, err) if bytes.Compare(data, tc.input) != 0 { t.Fatalf("expected data %q, got %q", tc.input, data) } @@ -194,7 +195,7 @@ func TestFSGetUnsetKey(t *testing.T) { for _, key := range []digest.Digest{"foobar:abc", "sha256:abc", "sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2a"} { _, err := store.Get(key) - assert.Error(t, err, "failed to get digest") + testutil.ErrorContains(t, err, "failed to get digest") } } @@ -204,7 +205,7 @@ func TestFSGetEmptyData(t *testing.T) { for _, emptyData := range [][]byte{nil, {}} { _, err := store.Set(emptyData) - assert.Error(t, err, "invalid empty data") + testutil.ErrorContains(t, err, "invalid empty data") } } @@ -213,25 +214,25 @@ func TestFSDelete(t *testing.T) { defer cleanup() id, err := 
store.Set([]byte("foo")) - assert.NilError(t, err) + assert.NoError(t, err) id2, err := store.Set([]byte("bar")) - assert.NilError(t, err) + assert.NoError(t, err) err = store.Delete(id) - assert.NilError(t, err) + assert.NoError(t, err) _, err = store.Get(id) - assert.Error(t, err, "failed to get digest") + testutil.ErrorContains(t, err, "failed to get digest") _, err = store.Get(id2) - assert.NilError(t, err) + assert.NoError(t, err) err = store.Delete(id2) - assert.NilError(t, err) + assert.NoError(t, err) _, err = store.Get(id2) - assert.Error(t, err, "failed to get digest") + testutil.ErrorContains(t, err, "failed to get digest") } func TestFSWalker(t *testing.T) { @@ -239,10 +240,10 @@ func TestFSWalker(t *testing.T) { defer cleanup() id, err := store.Set([]byte("foo")) - assert.NilError(t, err) + assert.NoError(t, err) id2, err := store.Set([]byte("bar")) - assert.NilError(t, err) + assert.NoError(t, err) tcases := make(map[digest.Digest]struct{}) tcases[id] = struct{}{} @@ -253,9 +254,9 @@ func TestFSWalker(t *testing.T) { n++ return nil }) - assert.NilError(t, err) - assert.Equal(t, n, 2) - assert.Equal(t, len(tcases), 0) + assert.NoError(t, err) + assert.Equal(t, 2, n) + assert.Len(t, tcases, 0) } func TestFSWalkerStopOnError(t *testing.T) { @@ -263,12 +264,12 @@ func TestFSWalkerStopOnError(t *testing.T) { defer cleanup() id, err := store.Set([]byte("foo")) - assert.NilError(t, err) + assert.NoError(t, err) tcases := make(map[digest.Digest]struct{}) tcases[id] = struct{}{} err = store.Walk(func(id digest.Digest) error { return errors.New("what") }) - assert.Error(t, err, "what") + testutil.ErrorContains(t, err, "what") } diff --git a/fn/vendor/github.com/docker/docker/image/image.go b/fn/vendor/github.com/docker/docker/image/image.go index 17935ac23..ab95d93da 100644 --- a/fn/vendor/github.com/docker/docker/image/image.go +++ b/fn/vendor/github.com/docker/docker/image/image.go @@ -4,9 +4,13 @@ import ( "encoding/json" "errors" "io" + "runtime" + 
"strings" "time" "github.com/docker/docker/api/types/container" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/layer" "github.com/opencontainers/go-digest" ) @@ -92,6 +96,15 @@ func (img *Image) RunConfig() *container.Config { return img.Config } +// Platform returns the image's operating system. If not populated, defaults to the host runtime OS. +func (img *Image) Platform() string { + os := img.OS + if os == "" { + os = runtime.GOOS + } + return os +} + // MarshalJSON serializes the image to JSON. It sorts the top-level keys so // that JSON that's been manipulated by a push/pull cycle with a legacy // registry won't end up with a different key order. @@ -110,6 +123,51 @@ func (img *Image) MarshalJSON() ([]byte, error) { return json.Marshal(c) } +// ChildConfig is the configuration to apply to an Image to create a new +// Child image. Other properties of the image are copied from the parent. +type ChildConfig struct { + ContainerID string + Author string + Comment string + DiffID layer.DiffID + ContainerConfig *container.Config + Config *container.Config +} + +// NewChildImage creates a new Image as a child of this image. 
+func NewChildImage(img *Image, child ChildConfig, platform string) *Image { + isEmptyLayer := layer.IsEmpty(child.DiffID) + rootFS := img.RootFS + if rootFS == nil { + rootFS = NewRootFS() + } + if !isEmptyLayer { + rootFS.Append(child.DiffID) + } + imgHistory := NewHistory( + child.Author, + child.Comment, + strings.Join(child.ContainerConfig.Cmd, " "), + isEmptyLayer) + + return &Image{ + V1Image: V1Image{ + DockerVersion: dockerversion.Version, + Config: child.Config, + Architecture: runtime.GOARCH, + OS: platform, + Container: child.ContainerID, + ContainerConfig: *child.ContainerConfig, + Author: child.Author, + Created: imgHistory.Created, + }, + RootFS: rootFS, + History: append(img.History, imgHistory), + OSFeatures: img.OSFeatures, + OSVersion: img.OSVersion, + } +} + // History stores build commands that were used to create an image type History struct { // Created is the timestamp at which the image was created @@ -126,6 +184,18 @@ type History struct { EmptyLayer bool `json:"empty_layer,omitempty"` } +// NewHistory creates a new history struct from arguments, and sets the created +// time to the current time in UTC +func NewHistory(author, comment, createdBy string, isEmptyLayer bool) History { + return History{ + Author: author, + Created: time.Now().UTC(), + CreatedBy: createdBy, + Comment: comment, + EmptyLayer: isEmptyLayer, + } +} + // Exporter provides interface for loading and saving images type Exporter interface { Load(io.ReadCloser, io.Writer, bool) error diff --git a/fn/vendor/github.com/docker/docker/image/image_test.go b/fn/vendor/github.com/docker/docker/image/image_test.go index b0561536d..1455947df 100644 --- a/fn/vendor/github.com/docker/docker/image/image_test.go +++ b/fn/vendor/github.com/docker/docker/image/image_test.go @@ -6,7 +6,8 @@ import ( "strings" "testing" - "github.com/docker/docker/pkg/testutil/assert" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const sampleImageJSON = `{ @@ -21,13 
+22,13 @@ const sampleImageJSON = `{ func TestNewFromJSON(t *testing.T) { img, err := NewFromJSON([]byte(sampleImageJSON)) - assert.NilError(t, err) - assert.Equal(t, string(img.RawJSON()), sampleImageJSON) + require.NoError(t, err) + assert.Equal(t, sampleImageJSON, string(img.RawJSON())) } func TestNewFromJSONWithInvalidJSON(t *testing.T) { _, err := NewFromJSON([]byte("{}")) - assert.Error(t, err, "invalid image JSON, no RootFS key") + assert.EqualError(t, err, "invalid image JSON, no RootFS key") } func TestMarshalKeyOrder(t *testing.T) { @@ -38,7 +39,7 @@ func TestMarshalKeyOrder(t *testing.T) { Architecture: "c", }, }) - assert.NilError(t, err) + assert.NoError(t, err) expectedOrder := []string{"architecture", "author", "comment"} var indexes []int diff --git a/fn/vendor/github.com/docker/docker/image/spec/v1.1.md b/fn/vendor/github.com/docker/docker/image/spec/v1.1.md index 6279da54a..ce761f112 100644 --- a/fn/vendor/github.com/docker/docker/image/spec/v1.1.md +++ b/fn/vendor/github.com/docker/docker/image/spec/v1.1.md @@ -88,7 +88,7 @@ This specification uses the following terms: A tag serves to map a descriptive, user-given name to any single image ID. Tag values are limited to the set of characters [a-zA-Z0-9_.-], except they may not start with a . - or - character. Tags are limited to 127 characters. + or - character. Tags are limited to 128 characters.
Repository diff --git a/fn/vendor/github.com/docker/docker/image/spec/v1.2.md b/fn/vendor/github.com/docker/docker/image/spec/v1.2.md index 483ce1e4c..789680c7a 100644 --- a/fn/vendor/github.com/docker/docker/image/spec/v1.2.md +++ b/fn/vendor/github.com/docker/docker/image/spec/v1.2.md @@ -88,7 +88,7 @@ This specification uses the following terms: A tag serves to map a descriptive, user-given name to any single image ID. Tag values are limited to the set of characters [a-zA-Z0-9_.-], except they may not start with a . - or - character. Tags are limited to 127 characters. + or - character. Tags are limited to 128 characters.
Repository diff --git a/fn/vendor/github.com/docker/docker/image/store.go b/fn/vendor/github.com/docker/docker/image/store.go index 26ae109a0..c85f8d683 100644 --- a/fn/vendor/github.com/docker/docker/image/store.go +++ b/fn/vendor/github.com/docker/docker/image/store.go @@ -2,14 +2,17 @@ package image import ( "encoding/json" - "errors" "fmt" + "strings" "sync" + "time" "github.com/Sirupsen/logrus" "github.com/docker/distribution/digestset" "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/system" "github.com/opencontainers/go-digest" + "github.com/pkg/errors" ) // Store is an interface for creating and accessing images @@ -20,6 +23,8 @@ type Store interface { Search(partialID string) (ID, error) SetParent(id ID, parent ID) error GetParent(id ID) (ID, error) + SetLastUpdated(id ID) error + GetLastUpdated(id ID) (time.Time, error) Children(id ID) []ID Map() map[ID]*Image Heads() map[ID]*Image @@ -37,20 +42,22 @@ type imageMeta struct { } type store struct { - sync.Mutex + sync.RWMutex ls LayerGetReleaser images map[ID]*imageMeta fs StoreBackend digestSet *digestset.Set + platform string } // NewImageStore returns new store object for given layer store -func NewImageStore(fs StoreBackend, ls LayerGetReleaser) (Store, error) { +func NewImageStore(fs StoreBackend, platform string, ls LayerGetReleaser) (Store, error) { is := &store{ ls: ls, images: make(map[ID]*imageMeta), fs: fs, digestSet: digestset.NewSet(), + platform: platform, } // load all current images and retain layers @@ -111,6 +118,14 @@ func (is *store) Create(config []byte) (ID, error) { return "", err } + // TODO @jhowardmsft - LCOW Support. This will need revisiting. 
+ // Integrity check - ensure we are creating something for the correct platform + if system.LCOWSupported() { + if strings.ToLower(img.Platform()) != strings.ToLower(is.platform) { + return "", fmt.Errorf("cannot create entry for platform %q in image store for platform %q", img.Platform(), is.platform) + } + } + // Must reject any config that references diffIDs from the history // which aren't among the rootfs layers. rootFSLayers := make(map[layer.DiffID]struct{}) @@ -147,7 +162,7 @@ func (is *store) Create(config []byte) (ID, error) { if layerID != "" { l, err = is.ls.Get(layerID) if err != nil { - return "", err + return "", errors.Wrapf(err, "failed to get layer %s", layerID) } } @@ -166,9 +181,6 @@ func (is *store) Create(config []byte) (ID, error) { } func (is *store) Search(term string) (ID, error) { - is.Lock() - defer is.Unlock() - dgst, err := is.digestSet.Lookup(term) if err != nil { if err == digestset.ErrDigestNotFound { @@ -250,9 +262,25 @@ func (is *store) GetParent(id ID) (ID, error) { return ID(d), nil // todo: validate? 
} +// SetLastUpdated time for the image ID to the current time +func (is *store) SetLastUpdated(id ID) error { + lastUpdated := []byte(time.Now().Format(time.RFC3339Nano)) + return is.fs.SetMetadata(id.Digest(), "lastUpdated", lastUpdated) +} + +// GetLastUpdated time for the image ID +func (is *store) GetLastUpdated(id ID) (time.Time, error) { + bytes, err := is.fs.GetMetadata(id.Digest(), "lastUpdated") + if err != nil || len(bytes) == 0 { + // No lastUpdated time + return time.Time{}, nil + } + return time.Parse(time.RFC3339Nano, string(bytes)) +} + func (is *store) Children(id ID) []ID { - is.Lock() - defer is.Unlock() + is.RLock() + defer is.RUnlock() return is.children(id) } @@ -276,8 +304,8 @@ func (is *store) Map() map[ID]*Image { } func (is *store) imagesMap(all bool) map[ID]*Image { - is.Lock() - defer is.Unlock() + is.RLock() + defer is.RUnlock() images := make(map[ID]*Image) diff --git a/fn/vendor/github.com/docker/docker/image/store_test.go b/fn/vendor/github.com/docker/docker/image/store_test.go index bc0148b03..fc6d461d9 100644 --- a/fn/vendor/github.com/docker/docker/image/store_test.go +++ b/fn/vendor/github.com/docker/docker/image/store_test.go @@ -1,11 +1,13 @@ package image import ( + "runtime" "testing" "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/testutil/assert" + "github.com/docker/docker/pkg/testutil" "github.com/opencontainers/go-digest" + "github.com/stretchr/testify/assert" ) func TestRestore(t *testing.T) { @@ -13,55 +15,55 @@ func TestRestore(t *testing.T) { defer cleanup() id1, err := fs.Set([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`)) - assert.NilError(t, err) + assert.NoError(t, err) _, err = fs.Set([]byte(`invalid`)) - assert.NilError(t, err) + assert.NoError(t, err) id2, err := fs.Set([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) - assert.NilError(t, err) + assert.NoError(t, err) err = 
fs.SetMetadata(id2, "parent", []byte(id1)) - assert.NilError(t, err) + assert.NoError(t, err) - is, err := NewImageStore(fs, &mockLayerGetReleaser{}) - assert.NilError(t, err) + is, err := NewImageStore(fs, runtime.GOOS, &mockLayerGetReleaser{}) + assert.NoError(t, err) - assert.Equal(t, len(is.Map()), 2) + assert.Len(t, is.Map(), 2) img1, err := is.Get(ID(id1)) - assert.NilError(t, err) - assert.Equal(t, img1.computedID, ID(id1)) - assert.Equal(t, img1.computedID.String(), string(id1)) + assert.NoError(t, err) + assert.Equal(t, ID(id1), img1.computedID) + assert.Equal(t, string(id1), img1.computedID.String()) img2, err := is.Get(ID(id2)) - assert.NilError(t, err) - assert.Equal(t, img1.Comment, "abc") - assert.Equal(t, img2.Comment, "def") + assert.NoError(t, err) + assert.Equal(t, "abc", img1.Comment) + assert.Equal(t, "def", img2.Comment) p, err := is.GetParent(ID(id1)) - assert.Error(t, err, "failed to read metadata") + testutil.ErrorContains(t, err, "failed to read metadata") p, err = is.GetParent(ID(id2)) - assert.NilError(t, err) - assert.Equal(t, p, ID(id1)) + assert.NoError(t, err) + assert.Equal(t, ID(id1), p) children := is.Children(ID(id1)) - assert.Equal(t, len(children), 1) - assert.Equal(t, children[0], ID(id2)) - assert.Equal(t, len(is.Heads()), 1) + assert.Len(t, children, 1) + assert.Equal(t, ID(id2), children[0]) + assert.Len(t, is.Heads(), 1) sid1, err := is.Search(string(id1)[:10]) - assert.NilError(t, err) - assert.Equal(t, sid1, ID(id1)) + assert.NoError(t, err) + assert.Equal(t, ID(id1), sid1) sid1, err = is.Search(digest.Digest(id1).Hex()[:6]) - assert.NilError(t, err) - assert.Equal(t, sid1, ID(id1)) + assert.NoError(t, err) + assert.Equal(t, ID(id1), sid1) invalidPattern := digest.Digest(id1).Hex()[1:6] _, err = is.Search(invalidPattern) - assert.Error(t, err, "No such image") + testutil.ErrorContains(t, err, "No such image") } func TestAddDelete(t *testing.T) { @@ -69,34 +71,34 @@ func TestAddDelete(t *testing.T) { defer cleanup() id1, 
err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) - assert.NilError(t, err) - assert.Equal(t, id1, ID("sha256:8d25a9c45df515f9d0fe8e4a6b1c64dd3b965a84790ddbcc7954bb9bc89eb993")) + assert.NoError(t, err) + assert.Equal(t, ID("sha256:8d25a9c45df515f9d0fe8e4a6b1c64dd3b965a84790ddbcc7954bb9bc89eb993"), id1) img, err := is.Get(id1) - assert.NilError(t, err) - assert.Equal(t, img.Comment, "abc") + assert.NoError(t, err) + assert.Equal(t, "abc", img.Comment) id2, err := is.Create([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) - assert.NilError(t, err) + assert.NoError(t, err) err = is.SetParent(id2, id1) - assert.NilError(t, err) + assert.NoError(t, err) pid1, err := is.GetParent(id2) - assert.NilError(t, err) + assert.NoError(t, err) assert.Equal(t, pid1, id1) _, err = is.Delete(id1) - assert.NilError(t, err) + assert.NoError(t, err) _, err = is.Get(id1) - assert.Error(t, err, "failed to get digest") + testutil.ErrorContains(t, err, "failed to get digest") _, err = is.Get(id2) - assert.NilError(t, err) + assert.NoError(t, err) _, err = is.GetParent(id2) - assert.Error(t, err, "failed to read metadata") + testutil.ErrorContains(t, err, "failed to read metadata") } func TestSearchAfterDelete(t *testing.T) { @@ -104,17 +106,17 @@ func TestSearchAfterDelete(t *testing.T) { defer cleanup() id, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`)) - assert.NilError(t, err) + assert.NoError(t, err) id1, err := is.Search(string(id)[:15]) - assert.NilError(t, err) + assert.NoError(t, err) assert.Equal(t, id1, id) _, err = is.Delete(id) - assert.NilError(t, err) + assert.NoError(t, err) _, err = is.Search(string(id)[:15]) - assert.Error(t, err, "No such image") + testutil.ErrorContains(t, err, "No such image") } func TestParentReset(t *testing.T) { @@ 
-122,31 +124,49 @@ func TestParentReset(t *testing.T) { defer cleanup() id, err := is.Create([]byte(`{"comment": "abc1", "rootfs": {"type": "layers"}}`)) - assert.NilError(t, err) + assert.NoError(t, err) id2, err := is.Create([]byte(`{"comment": "abc2", "rootfs": {"type": "layers"}}`)) - assert.NilError(t, err) + assert.NoError(t, err) id3, err := is.Create([]byte(`{"comment": "abc3", "rootfs": {"type": "layers"}}`)) - assert.NilError(t, err) + assert.NoError(t, err) - assert.NilError(t, is.SetParent(id, id2)) - assert.Equal(t, len(is.Children(id2)), 1) + assert.NoError(t, is.SetParent(id, id2)) + assert.Len(t, is.Children(id2), 1) - assert.NilError(t, is.SetParent(id, id3)) - assert.Equal(t, len(is.Children(id2)), 0) - assert.Equal(t, len(is.Children(id3)), 1) + assert.NoError(t, is.SetParent(id, id3)) + assert.Len(t, is.Children(id2), 0) + assert.Len(t, is.Children(id3), 1) } func defaultImageStore(t *testing.T) (Store, func()) { fsBackend, cleanup := defaultFSStoreBackend(t) - store, err := NewImageStore(fsBackend, &mockLayerGetReleaser{}) - assert.NilError(t, err) + store, err := NewImageStore(fsBackend, runtime.GOOS, &mockLayerGetReleaser{}) + assert.NoError(t, err) return store, cleanup } +func TestGetAndSetLastUpdated(t *testing.T) { + store, cleanup := defaultImageStore(t) + defer cleanup() + + id, err := store.Create([]byte(`{"comment": "abc1", "rootfs": {"type": "layers"}}`)) + assert.NoError(t, err) + + updated, err := store.GetLastUpdated(id) + assert.NoError(t, err) + assert.Equal(t, updated.IsZero(), true) + + assert.NoError(t, store.SetLastUpdated(id)) + + updated, err = store.GetLastUpdated(id) + assert.NoError(t, err) + assert.Equal(t, updated.IsZero(), false) +} + type mockLayerGetReleaser struct{} func (ls *mockLayerGetReleaser) Get(layer.ChainID) (layer.Layer, error) { diff --git a/fn/vendor/github.com/docker/docker/image/tarexport/load.go b/fn/vendor/github.com/docker/docker/image/tarexport/load.go index cdd377ab9..af8cefc6a 100644 --- 
a/fn/vendor/github.com/docker/docker/image/tarexport/load.go +++ b/fn/vendor/github.com/docker/docker/image/tarexport/load.go @@ -2,12 +2,14 @@ package tarexport import ( "encoding/json" + "errors" "fmt" "io" "io/ioutil" "os" "path/filepath" "reflect" + "runtime" "github.com/Sirupsen/logrus" "github.com/docker/distribution" @@ -26,14 +28,11 @@ import ( ) func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { - var ( - sf = streamformatter.NewJSONStreamFormatter() - progressOutput progress.Output - ) + var progressOutput progress.Output if !quiet { - progressOutput = sf.NewProgressOutput(outStream, false) + progressOutput = streamformatter.NewJSONProgressOutput(outStream, false) } - outStream = &streamformatter.StdoutFormatter{Writer: outStream, StreamFormatter: streamformatter.NewJSONStreamFormatter()} + outStream = streamformatter.NewStdoutWriter(outStream) tmpDir, err := ioutil.TempDir("", "docker-import-") if err != nil { @@ -80,6 +79,9 @@ func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) if err != nil { return err } + if err := checkCompatibleOS(img.OS); err != nil { + return err + } var rootFS image.RootFS rootFS = *img.RootFS rootFS.DiffIDs = nil @@ -88,6 +90,17 @@ func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) return fmt.Errorf("invalid manifest, layers length mismatch: expected %d, got %d", expected, actual) } + // On Windows, validate the platform, defaulting to windows if not present. 
+ platform := layer.Platform(img.OS) + if runtime.GOOS == "windows" { + if platform == "" { + platform = "windows" + } + if (platform != "windows") && (platform != "linux") { + return fmt.Errorf("configuration for this image has an unsupported platform: %s", platform) + } + } + for i, diffID := range img.RootFS.DiffIDs { layerPath, err := safePath(tmpDir, m.Layers[i]) if err != nil { @@ -97,7 +110,7 @@ func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) r.Append(diffID) newLayer, err := l.ls.Get(r.ChainID()) if err != nil { - newLayer, err = l.loadLayer(layerPath, rootFS, diffID.String(), m.LayerSources[diffID], progressOutput) + newLayer, err = l.loadLayer(layerPath, rootFS, diffID.String(), platform, m.LayerSources[diffID], progressOutput) if err != nil { return err } @@ -164,7 +177,7 @@ func (l *tarexporter) setParentID(id, parentID image.ID) error { return l.is.SetParent(id, parentID) } -func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, foreignSrc distribution.Descriptor, progressOutput progress.Output) (layer.Layer, error) { +func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, platform layer.Platform, foreignSrc distribution.Descriptor, progressOutput progress.Output) (layer.Layer, error) { // We use system.OpenSequential to use sequential file access on Windows, avoiding // depleting the standby list. On Linux, this equates to a regular os.Open. 
rawTar, err := system.OpenSequential(filename) @@ -194,9 +207,9 @@ func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, defer inflatedLayerData.Close() if ds, ok := l.ls.(layer.DescribableStore); ok { - return ds.RegisterWithDescriptor(inflatedLayerData, rootFS.ChainID(), foreignSrc) + return ds.RegisterWithDescriptor(inflatedLayerData, rootFS.ChainID(), platform, foreignSrc) } - return l.ls.Register(inflatedLayerData, rootFS.ChainID()) + return l.ls.Register(inflatedLayerData, rootFS.ChainID(), platform) } func (l *tarexporter) setLoadedTag(ref reference.NamedTagged, imgID digest.Digest, outStream io.Writer) error { @@ -211,6 +224,10 @@ func (l *tarexporter) setLoadedTag(ref reference.NamedTagged, imgID digest.Diges } func (l *tarexporter) legacyLoad(tmpDir string, outStream io.Writer, progressOutput progress.Output) error { + if runtime.GOOS == "windows" { + return errors.New("Windows does not support legacy loading of images") + } + legacyLoadedMap := make(map[string]image.ID) dirs, err := ioutil.ReadDir(tmpDir) @@ -278,11 +295,18 @@ func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[str return err } - var img struct{ Parent string } + var img struct { + OS string + Parent string + } if err := json.Unmarshal(imageJSON, &img); err != nil { return err } + if err := checkCompatibleOS(img.OS); err != nil { + return err + } + var parentID image.ID if img.Parent != "" { for { @@ -315,7 +339,7 @@ func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[str if err != nil { return err } - newLayer, err := l.loadLayer(layerPath, *rootFS, oldID, distribution.Descriptor{}, progressOutput) + newLayer, err := l.loadLayer(layerPath, *rootFS, oldID, "", distribution.Descriptor{}, progressOutput) if err != nil { return err } @@ -388,3 +412,20 @@ func checkValidParent(img, parent *image.Image) bool { } return true } + +func checkCompatibleOS(os string) error { + // TODO @jhowardmsft LCOW - revisit for 
simultaneous platforms + platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + // always compatible if the OS matches; also match an empty OS + if os == platform || os == "" { + return nil + } + // for compatibility, only fail if the image or runtime OS is Windows + if os == "windows" || platform == "windows" { + return fmt.Errorf("cannot load %s image on %s", os, platform) + } + return nil +} diff --git a/fn/vendor/github.com/docker/docker/image/tarexport/save.go b/fn/vendor/github.com/docker/docker/image/tarexport/save.go index 8d33a5872..d304a54c3 100644 --- a/fn/vendor/github.com/docker/docker/image/tarexport/save.go +++ b/fn/vendor/github.com/docker/docker/image/tarexport/save.go @@ -21,8 +21,10 @@ import ( ) type imageDescriptor struct { - refs []reference.NamedTagged - layers []string + refs []reference.NamedTagged + layers []string + image *image.Image + layerRef layer.Layer } type saveSession struct { @@ -39,33 +41,47 @@ func (l *tarexporter) Save(names []string, outStream io.Writer) error { return err } + // Release all the image top layer references + defer l.releaseLayerReferences(images) return (&saveSession{tarexporter: l, images: images}).save(outStream) } -func (l *tarexporter) parseNames(names []string) (map[image.ID]*imageDescriptor, error) { +// parseNames will parse the image names to a map which contains image.ID to *imageDescriptor. +// Each imageDescriptor holds an image top layer reference named 'layerRef'. It is taken here, should be released later. 
+func (l *tarexporter) parseNames(names []string) (desc map[image.ID]*imageDescriptor, rErr error) { imgDescr := make(map[image.ID]*imageDescriptor) + defer func() { + if rErr != nil { + l.releaseLayerReferences(imgDescr) + } + }() - addAssoc := func(id image.ID, ref reference.Named) { + addAssoc := func(id image.ID, ref reference.Named) error { if _, ok := imgDescr[id]; !ok { - imgDescr[id] = &imageDescriptor{} + descr := &imageDescriptor{} + if err := l.takeLayerReference(id, descr); err != nil { + return err + } + imgDescr[id] = descr } if ref != nil { if _, ok := ref.(reference.Canonical); ok { - return + return nil } tagged, ok := reference.TagNameOnly(ref).(reference.NamedTagged) if !ok { - return + return nil } for _, t := range imgDescr[id].refs { if tagged.String() == t.String() { - return + return nil } } imgDescr[id].refs = append(imgDescr[id].refs, tagged) } + return nil } for _, name := range names { @@ -78,11 +94,9 @@ func (l *tarexporter) parseNames(names []string) (map[image.ID]*imageDescriptor, // Check if digest ID reference if digested, ok := ref.(reference.Digested); ok { id := image.IDFromDigest(digested.Digest()) - _, err := l.is.Get(id) - if err != nil { + if err := addAssoc(id, nil); err != nil { return nil, err } - addAssoc(id, nil) continue } return nil, errors.Errorf("invalid reference: %v", name) @@ -93,20 +107,26 @@ func (l *tarexporter) parseNames(names []string) (map[image.ID]*imageDescriptor, if err != nil { return nil, err } - addAssoc(imgID, nil) + if err := addAssoc(imgID, nil); err != nil { + return nil, err + } continue } if reference.IsNameOnly(namedRef) { assocs := l.rs.ReferencesByName(namedRef) for _, assoc := range assocs { - addAssoc(image.IDFromDigest(assoc.ID), assoc.Ref) + if err := addAssoc(image.IDFromDigest(assoc.ID), assoc.Ref); err != nil { + return nil, err + } } if len(assocs) == 0 { imgID, err := l.is.Search(name) if err != nil { return nil, err } - addAssoc(imgID, nil) + if err := addAssoc(imgID, nil); err != 
nil { + return nil, err + } } continue } @@ -114,12 +134,43 @@ func (l *tarexporter) parseNames(names []string) (map[image.ID]*imageDescriptor, if err != nil { return nil, err } - addAssoc(image.IDFromDigest(id), namedRef) + if err := addAssoc(image.IDFromDigest(id), namedRef); err != nil { + return nil, err + } } return imgDescr, nil } +// takeLayerReference will take/Get the image top layer reference +func (l *tarexporter) takeLayerReference(id image.ID, imgDescr *imageDescriptor) error { + img, err := l.is.Get(id) + if err != nil { + return err + } + imgDescr.image = img + topLayerID := img.RootFS.ChainID() + if topLayerID == "" { + return nil + } + layer, err := l.ls.Get(topLayerID) + if err != nil { + return err + } + imgDescr.layerRef = layer + return nil +} + +// releaseLayerReferences will release all the image top layer references +func (l *tarexporter) releaseLayerReferences(imgDescr map[image.ID]*imageDescriptor) error { + for _, descr := range imgDescr { + if descr.layerRef != nil { + l.ls.Release(descr.layerRef) + } + } + return nil +} + func (s *saveSession) save(outStream io.Writer) error { s.savedLayers = make(map[string]struct{}) s.diffIDPaths = make(map[layer.DiffID]string) @@ -224,11 +275,7 @@ func (s *saveSession) save(outStream io.Writer) error { } func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Descriptor, error) { - img, err := s.is.Get(id) - if err != nil { - return nil, err - } - + img := s.images[id].image if len(img.RootFS.DiffIDs) == 0 { return nil, fmt.Errorf("empty export - not implemented") } diff --git a/fn/vendor/github.com/docker/docker/integration-cli/check_test.go b/fn/vendor/github.com/docker/docker/integration-cli/check_test.go index 3b797b463..cc3b80c94 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/check_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/check_test.go @@ -12,8 +12,9 @@ import ( "testing" "github.com/docker/docker/api/types/swarm" - cliconfig 
"github.com/docker/docker/cli/config" + "github.com/docker/docker/cli/config" "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build/fakestorage" "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/integration-cli/environment" "github.com/docker/docker/integration-cli/registry" @@ -36,7 +37,7 @@ var ( testEnv *environment.Execution // the docker client binary to use - dockerBinary = "docker" + dockerBinary = "" ) func init() { @@ -65,6 +66,7 @@ func TestMain(m *testing.M) { func Test(t *testing.T) { cli.EnsureTestEnvIsLoaded(t) + fakestorage.EnsureTestEnvIsLoaded(t) cmd := exec.Command(dockerBinary, "images", "-f", "dangling=false", "--format", "{{.Repository}}:{{.Tag}}") cmd.Env = appendBaseEnv(true) out, err := cmd.CombinedOutput() @@ -404,7 +406,7 @@ func (s *DockerTrustSuite) TearDownTest(c *check.C) { } // Remove trusted keys and metadata after test - os.RemoveAll(filepath.Join(cliconfig.Dir(), "trust")) + os.RemoveAll(filepath.Join(config.Dir(), "trust")) s.ds.TearDownTest(c) } diff --git a/fn/vendor/github.com/docker/docker/integration-cli/cli/build/build.go b/fn/vendor/github.com/docker/docker/integration-cli/cli/build/build.go index 4a186252f..8ffaa35b4 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/cli/build/build.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/cli/build/build.go @@ -1,11 +1,30 @@ package build import ( + "io" "strings" + "github.com/docker/docker/integration-cli/cli/build/fakecontext" icmd "github.com/docker/docker/pkg/testutil/cmd" ) +type testingT interface { + Fatal(args ...interface{}) + Fatalf(string, ...interface{}) +} + +// WithStdinContext sets the build context from the standard input with the specified reader +func WithStdinContext(closer io.ReadCloser) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Command = append(cmd.Command, "-") + cmd.Stdin = closer + return func() { + // FIXME(vdemeester) we should not 
ignore the error here… + closer.Close() + } + } +} + // WithDockerfile creates / returns a CmdOperator to set the Dockerfile for a build operation func WithDockerfile(dockerfile string) func(*icmd.Cmd) func() { return func(cmd *icmd.Cmd) func() { @@ -28,3 +47,36 @@ func WithContextPath(path string) func(*icmd.Cmd) func() { return nil } } + +// WithExternalBuildContext use the specified context as build context +func WithExternalBuildContext(ctx *fakecontext.Fake) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Dir = ctx.Dir + cmd.Command = append(cmd.Command, ".") + return nil + } +} + +// WithBuildContext sets up the build context +func WithBuildContext(t testingT, contextOperators ...func(*fakecontext.Fake) error) func(*icmd.Cmd) func() { + // FIXME(vdemeester) de-duplicate that + ctx := fakecontext.New(t, "", contextOperators...) + return func(cmd *icmd.Cmd) func() { + cmd.Dir = ctx.Dir + cmd.Command = append(cmd.Command, ".") + return closeBuildContext(t, ctx) + } +} + +// WithFile adds the specified file (with content) in the build context +func WithFile(name, content string) func(*fakecontext.Fake) error { + return fakecontext.WithFile(name, content) +} + +func closeBuildContext(t testingT, ctx *fakecontext.Fake) func() { + return func() { + if err := ctx.Close(); err != nil { + t.Fatal(err) + } + } +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/cli/build/fakecontext/context.go b/fn/vendor/github.com/docker/docker/integration-cli/cli/build/fakecontext/context.go new file mode 100644 index 000000000..8ecf4e3c6 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/integration-cli/cli/build/fakecontext/context.go @@ -0,0 +1,124 @@ +package fakecontext + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" +) + +type testingT interface { + Fatal(args ...interface{}) + Fatalf(string, ...interface{}) +} + +// New creates a fake build context +func New(t testingT, dir 
string, modifiers ...func(*Fake) error) *Fake { + fakeContext := &Fake{Dir: dir} + if dir == "" { + if err := newDir(fakeContext); err != nil { + t.Fatal(err) + } + } + + for _, modifier := range modifiers { + if err := modifier(fakeContext); err != nil { + t.Fatal(err) + } + } + + return fakeContext +} + +func newDir(fake *Fake) error { + tmp, err := ioutil.TempDir("", "fake-context") + if err != nil { + return err + } + if err := os.Chmod(tmp, 0755); err != nil { + return err + } + fake.Dir = tmp + return nil +} + +// WithFile adds the specified file (with content) in the build context +func WithFile(name, content string) func(*Fake) error { + return func(ctx *Fake) error { + return ctx.Add(name, content) + } +} + +// WithDockerfile adds the specified content as Dockerfile in the build context +func WithDockerfile(content string) func(*Fake) error { + return WithFile("Dockerfile", content) +} + +// WithFiles adds the specified files in the build context, content is a string +func WithFiles(files map[string]string) func(*Fake) error { + return func(fakeContext *Fake) error { + for file, content := range files { + if err := fakeContext.Add(file, content); err != nil { + return err + } + } + return nil + } +} + +// WithBinaryFiles adds the specified files in the build context, content is binary +func WithBinaryFiles(files map[string]*bytes.Buffer) func(*Fake) error { + return func(fakeContext *Fake) error { + for file, content := range files { + if err := fakeContext.Add(file, string(content.Bytes())); err != nil { + return err + } + } + return nil + } +} + +// Fake creates directories that can be used as a build context +type Fake struct { + Dir string +} + +// Add a file at a path, creating directories where necessary +func (f *Fake) Add(file, content string) error { + return f.addFile(file, []byte(content)) +} + +func (f *Fake) addFile(file string, content []byte) error { + fp := filepath.Join(f.Dir, filepath.FromSlash(file)) + dirpath := filepath.Dir(fp) + if 
dirpath != "." { + if err := os.MkdirAll(dirpath, 0755); err != nil { + return err + } + } + return ioutil.WriteFile(fp, content, 0644) + +} + +// Delete a file at a path +func (f *Fake) Delete(file string) error { + fp := filepath.Join(f.Dir, filepath.FromSlash(file)) + return os.RemoveAll(fp) +} + +// Close deletes the context +func (f *Fake) Close() error { + return os.RemoveAll(f.Dir) +} + +// AsTarReader returns a ReadCloser with the contents of Dir as a tar archive. +func (f *Fake) AsTarReader(t testingT) io.ReadCloser { + reader, err := archive.TarWithOptions(f.Dir, &archive.TarOptions{}) + if err != nil { + t.Fatalf("Failed to create tar from %s: %s", f.Dir, err) + } + return reader +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/cli/build/fakegit/fakegit.go b/fn/vendor/github.com/docker/docker/integration-cli/cli/build/fakegit/fakegit.go new file mode 100644 index 000000000..74faffd92 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/integration-cli/cli/build/fakegit/fakegit.go @@ -0,0 +1,125 @@ +package fakegit + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + + "github.com/docker/docker/integration-cli/cli/build/fakecontext" + "github.com/docker/docker/integration-cli/cli/build/fakestorage" +) + +type testingT interface { + logT + Fatal(args ...interface{}) + Fatalf(string, ...interface{}) +} + +type logT interface { + Logf(string, ...interface{}) +} + +type gitServer interface { + URL() string + Close() error +} + +type localGitServer struct { + *httptest.Server +} + +func (r *localGitServer) Close() error { + r.Server.Close() + return nil +} + +func (r *localGitServer) URL() string { + return r.Server.URL +} + +// FakeGit is a fake git server +type FakeGit struct { + root string + server gitServer + RepoURL string +} + +// Close closes the server, implements Closer interface +func (g *FakeGit) Close() { + g.server.Close() + os.RemoveAll(g.root) +} + +// New create a fake 
git server that can be used for git related tests +func New(c testingT, name string, files map[string]string, enforceLocalServer bool) *FakeGit { + ctx := fakecontext.New(c, "", fakecontext.WithFiles(files)) + defer ctx.Close() + curdir, err := os.Getwd() + if err != nil { + c.Fatal(err) + } + defer os.Chdir(curdir) + + if output, err := exec.Command("git", "init", ctx.Dir).CombinedOutput(); err != nil { + c.Fatalf("error trying to init repo: %s (%s)", err, output) + } + err = os.Chdir(ctx.Dir) + if err != nil { + c.Fatal(err) + } + if output, err := exec.Command("git", "config", "user.name", "Fake User").CombinedOutput(); err != nil { + c.Fatalf("error trying to set 'user.name': %s (%s)", err, output) + } + if output, err := exec.Command("git", "config", "user.email", "fake.user@example.com").CombinedOutput(); err != nil { + c.Fatalf("error trying to set 'user.email': %s (%s)", err, output) + } + if output, err := exec.Command("git", "add", "*").CombinedOutput(); err != nil { + c.Fatalf("error trying to add files to repo: %s (%s)", err, output) + } + if output, err := exec.Command("git", "commit", "-a", "-m", "Initial commit").CombinedOutput(); err != nil { + c.Fatalf("error trying to commit to repo: %s (%s)", err, output) + } + + root, err := ioutil.TempDir("", "docker-test-git-repo") + if err != nil { + c.Fatal(err) + } + repoPath := filepath.Join(root, name+".git") + if output, err := exec.Command("git", "clone", "--bare", ctx.Dir, repoPath).CombinedOutput(); err != nil { + os.RemoveAll(root) + c.Fatalf("error trying to clone --bare: %s (%s)", err, output) + } + err = os.Chdir(repoPath) + if err != nil { + os.RemoveAll(root) + c.Fatal(err) + } + if output, err := exec.Command("git", "update-server-info").CombinedOutput(); err != nil { + os.RemoveAll(root) + c.Fatalf("error trying to git update-server-info: %s (%s)", err, output) + } + err = os.Chdir(curdir) + if err != nil { + os.RemoveAll(root) + c.Fatal(err) + } + + var server gitServer + if 
!enforceLocalServer { + // use fakeStorage server, which might be local or remote (at test daemon) + server = fakestorage.New(c, root) + } else { + // always start a local http server on CLI test machine + httpServer := httptest.NewServer(http.FileServer(http.Dir(root))) + server = &localGitServer{httpServer} + } + return &FakeGit{ + root: root, + server: server, + RepoURL: fmt.Sprintf("%s/%s.git", server.URL(), name), + } +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/fixtures_test.go b/fn/vendor/github.com/docker/docker/integration-cli/cli/build/fakestorage/fixtures.go similarity index 88% rename from fn/vendor/github.com/docker/docker/integration-cli/fixtures_test.go rename to fn/vendor/github.com/docker/docker/integration-cli/cli/build/fakestorage/fixtures.go index 9683e4a87..f6a63dcf0 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/fixtures_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/cli/build/fakestorage/fixtures.go @@ -1,4 +1,4 @@ -package main +package fakestorage import ( "io/ioutil" @@ -6,6 +6,8 @@ import ( "os/exec" "path/filepath" "sync" + + "github.com/docker/docker/integration-cli/cli" ) var ensureHTTPServerOnce sync.Once @@ -61,7 +63,5 @@ func ensureHTTPServerImage(t testingT) { t.Fatalf("could not build http server: %v", string(out)) } - if out, err = exec.Command(dockerBinary, "build", "-q", "-t", "httpserver", tmp).CombinedOutput(); err != nil { - t.Fatalf("could not build http server: %v", string(out)) - } + cli.DockerCmd(t, "build", "-q", "-t", "httpserver", tmp) } diff --git a/fn/vendor/github.com/docker/docker/integration-cli/cli/build/fakestorage/storage.go b/fn/vendor/github.com/docker/docker/integration-cli/cli/build/fakestorage/storage.go new file mode 100644 index 000000000..49f47e436 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/integration-cli/cli/build/fakestorage/storage.go @@ -0,0 +1,176 @@ +package fakestorage + +import ( + "fmt" + "net" + "net/http" + 
"net/http/httptest" + "net/url" + "os" + "strings" + "sync" + + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/cli/build/fakecontext" + "github.com/docker/docker/integration-cli/environment" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/stringutils" +) + +var ( + testEnv *environment.Execution + onlyOnce sync.Once +) + +// EnsureTestEnvIsLoaded make sure the test environment is loaded for this package +func EnsureTestEnvIsLoaded(t testingT) { + var doIt bool + var err error + onlyOnce.Do(func() { + doIt = true + }) + + if !doIt { + return + } + testEnv, err = environment.New() + if err != nil { + t.Fatalf("error loading testenv : %v", err) + } +} + +type testingT interface { + logT + Fatal(args ...interface{}) + Fatalf(string, ...interface{}) +} + +type logT interface { + Logf(string, ...interface{}) +} + +// Fake is a static file server. It might be running locally or remotely +// on test host. +type Fake interface { + Close() error + URL() string + CtxDir() string +} + +// New returns a static file server that will be use as build context. +func New(t testingT, dir string, modifiers ...func(*fakecontext.Fake) error) Fake { + ctx := fakecontext.New(t, dir, modifiers...) 
+ if testEnv.LocalDaemon() { + return newLocalFakeStorage(t, ctx) + } + return newRemoteFileServer(t, ctx) +} + +// localFileStorage is a file storage on the running machine +type localFileStorage struct { + *fakecontext.Fake + *httptest.Server +} + +func (s *localFileStorage) URL() string { + return s.Server.URL +} + +func (s *localFileStorage) CtxDir() string { + return s.Fake.Dir +} + +func (s *localFileStorage) Close() error { + defer s.Server.Close() + return s.Fake.Close() +} + +func newLocalFakeStorage(t testingT, ctx *fakecontext.Fake) *localFileStorage { + handler := http.FileServer(http.Dir(ctx.Dir)) + server := httptest.NewServer(handler) + return &localFileStorage{ + Fake: ctx, + Server: server, + } +} + +// remoteFileServer is a containerized static file server started on the remote +// testing machine to be used in URL-accepting docker build functionality. +type remoteFileServer struct { + host string // hostname/port web server is listening to on docker host e.g. 0.0.0.0:43712 + container string + image string + ctx *fakecontext.Fake +} + +func (f *remoteFileServer) URL() string { + u := url.URL{ + Scheme: "http", + Host: f.host} + return u.String() +} + +func (f *remoteFileServer) CtxDir() string { + return f.ctx.Dir +} + +func (f *remoteFileServer) Close() error { + defer func() { + if f.ctx != nil { + f.ctx.Close() + } + if f.image != "" { + if err := cli.Docker(cli.Args("rmi", "-f", f.image)).Error; err != nil { + fmt.Fprintf(os.Stderr, "Error closing remote file server : %v\n", err) + } + } + }() + if f.container == "" { + return nil + } + return cli.Docker(cli.Args("rm", "-fv", f.container)).Error +} + +func newRemoteFileServer(t testingT, ctx *fakecontext.Fake) *remoteFileServer { + var ( + image = fmt.Sprintf("fileserver-img-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10))) + container = fmt.Sprintf("fileserver-cnt-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10))) + ) + + ensureHTTPServerImage(t) + + // 
Build the image + if err := ctx.Add("Dockerfile", `FROM httpserver +COPY . /static`); err != nil { + t.Fatal(err) + } + cli.BuildCmd(t, image, build.WithoutCache, build.WithExternalBuildContext(ctx)) + + // Start the container + cli.DockerCmd(t, "run", "-d", "-P", "--name", container, image) + + // Find out the system assigned port + out := cli.DockerCmd(t, "port", container, "80/tcp").Combined() + fileserverHostPort := strings.Trim(out, "\n") + _, port, err := net.SplitHostPort(fileserverHostPort) + if err != nil { + t.Fatalf("unable to parse file server host:port: %v", err) + } + + dockerHostURL, err := url.Parse(request.DaemonHost()) + if err != nil { + t.Fatalf("unable to parse daemon host URL: %v", err) + } + + host, _, err := net.SplitHostPort(dockerHostURL.Host) + if err != nil { + t.Fatalf("unable to parse docker daemon host:port: %v", err) + } + + return &remoteFileServer{ + container: container, + image: image, + host: fmt.Sprintf("%s:%s", host, port), + ctx: ctx} +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/cli/cli.go b/fn/vendor/github.com/docker/docker/integration-cli/cli/cli.go index b1f22dbfc..d8355217e 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/cli/cli.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/cli/cli.go @@ -39,6 +39,7 @@ func EnsureTestEnvIsLoaded(t testingT) { type CmdOperator func(*icmd.Cmd) func() type testingT interface { + Fatal(args ...interface{}) Fatalf(string, ...interface{}) } @@ -57,6 +58,58 @@ func InspectCmd(t testingT, name string, cmdOperators ...CmdOperator) *icmd.Resu return Docker(Inspect(name), cmdOperators...).Assert(t, icmd.Success) } +// WaitRun will wait for the specified container to be running, maximum 5 seconds. +func WaitRun(t testingT, name string, cmdOperators ...CmdOperator) { + WaitForInspectResult(t, name, "{{.State.Running}}", "true", 5*time.Second, cmdOperators...) 
+} + +// WaitExited will wait for the specified container to state exit, subject +// to a maximum time limit in seconds supplied by the caller +func WaitExited(t testingT, name string, timeout time.Duration, cmdOperators ...CmdOperator) { + WaitForInspectResult(t, name, "{{.State.Status}}", "exited", timeout, cmdOperators...) +} + +// WaitRestart will wait for the specified container to restart once +func WaitRestart(t testingT, name string, timeout time.Duration, cmdOperators ...CmdOperator) { + WaitForInspectResult(t, name, "{{.RestartCount}}", "1", timeout, cmdOperators...) +} + +// WaitForInspectResult waits for the specified expression to be equals to the specified expected string in the given time. +func WaitForInspectResult(t testingT, name, expr, expected string, timeout time.Duration, cmdOperators ...CmdOperator) { + after := time.After(timeout) + + args := []string{"inspect", "-f", expr, name} + for { + result := Docker(Args(args...), cmdOperators...) + if result.Error != nil { + if !strings.Contains(strings.ToLower(result.Stderr()), "no such") { + t.Fatalf("error executing docker inspect: %v\n%s", + result.Stderr(), result.Stdout()) + } + select { + case <-after: + t.Fatal(result.Error) + default: + time.Sleep(10 * time.Millisecond) + continue + } + } + + out := strings.TrimSpace(result.Stdout()) + if out == expected { + break + } + + select { + case <-after: + t.Fatalf("condition \"%q == %q\" not true in time (%v)", out, expected, timeout) + default: + } + + time.Sleep(100 * time.Millisecond) + } +} + // Docker executes the specified docker command func Docker(cmd icmd.Cmd, cmdOperators ...CmdOperator) *icmd.Result { for _, op := range cmdOperators { diff --git a/fn/vendor/github.com/docker/docker/integration-cli/daemon/daemon.go b/fn/vendor/github.com/docker/docker/integration-cli/daemon/daemon.go index 3546c378a..8b086c942 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/daemon/daemon.go +++ 
b/fn/vendor/github.com/docker/docker/integration-cli/daemon/daemon.go @@ -758,7 +758,7 @@ func (d *Daemon) ReloadConfig() error { } // WaitInspectWithArgs waits for the specified expression to be equals to the specified expected string in the given time. -// FIXME(vdemeester) Attach this to the Daemon struct +// Deprecated: use cli.WaitCmd instead func WaitInspectWithArgs(dockerBinary, name, expr, expected string, timeout time.Duration, arg ...string) error { after := time.After(timeout) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/daemon/daemon_swarm.go b/fn/vendor/github.com/docker/docker/integration-cli/daemon/daemon_swarm.go index 48ceae047..ba414066c 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/daemon/daemon_swarm.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/daemon/daemon_swarm.go @@ -1,6 +1,7 @@ package daemon import ( + "context" "encoding/json" "fmt" "net/http" @@ -10,6 +11,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" "github.com/pkg/errors" @@ -118,23 +120,35 @@ type NodeConstructor func(*swarm.Node) // SecretConstructor defines a swarm secret constructor type SecretConstructor func(*swarm.Secret) +// ConfigConstructor defines a swarm config constructor +type ConfigConstructor func(*swarm.Config) + // SpecConstructor defines a swarm spec constructor type SpecConstructor func(*swarm.Spec) -// CreateService creates a swarm service given the specified service constructor -func (d *Swarm) CreateService(c *check.C, f ...ServiceConstructor) string { +// CreateServiceWithOptions creates a swarm service given the specified service constructors +// and auth config +func (d *Swarm) CreateServiceWithOptions(c *check.C, opts types.ServiceCreateOptions, f ...ServiceConstructor) string { + cl, err := 
client.NewClient(d.Sock(), "", nil, nil) + c.Assert(err, checker.IsNil, check.Commentf("failed to create client")) + defer cl.Close() + var service swarm.Service for _, fn := range f { fn(&service) } - status, out, err := d.SockRequest("POST", "/services/create", service.Spec) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf("output: %q", string(out))) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() - var scr types.ServiceCreateResponse - c.Assert(json.Unmarshal(out, &scr), checker.IsNil) - return scr.ID + res, err := cl.ServiceCreate(ctx, service.Spec, opts) + c.Assert(err, checker.IsNil) + return res.ID +} + +// CreateService creates a swarm service given the specified service constructor +func (d *Swarm) CreateService(c *check.C, f ...ServiceConstructor) string { + return d.CreateServiceWithOptions(c, types.ServiceCreateOptions{}, f...) } // GetService returns the swarm service corresponding to the specified id @@ -197,6 +211,37 @@ func (d *Swarm) CheckServiceUpdateState(service string) func(*check.C) (interfac } } +// CheckPluginRunning returns the runtime state of the plugin +func (d *Swarm) CheckPluginRunning(plugin string) func(c *check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + status, out, err := d.SockRequest("GET", "/plugins/"+plugin+"/json", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + if status != http.StatusOK { + return false, nil + } + + var p types.Plugin + c.Assert(json.Unmarshal(out, &p), checker.IsNil, check.Commentf(string(out))) + + return p.Enabled, check.Commentf("%+v", p) + } +} + +// CheckPluginImage returns the runtime state of the plugin +func (d *Swarm) CheckPluginImage(plugin string) func(c *check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + status, out, err 
:= d.SockRequest("GET", "/plugins/"+plugin+"/json", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + if status != http.StatusOK { + return false, nil + } + + var p types.Plugin + c.Assert(json.Unmarshal(out, &p), checker.IsNil, check.Commentf(string(out))) + return p.PluginReference, check.Commentf("%+v", p) + } +} + // CheckServiceTasks returns the number of tasks for the specified service func (d *Swarm) CheckServiceTasks(service string) func(*check.C) (interface{}, check.CommentInterface) { return func(c *check.C) (interface{}, check.CommentInterface) { @@ -244,7 +289,7 @@ func (d *Swarm) CheckRunningTaskImages(c *check.C) (interface{}, check.CommentIn result := make(map[string]int) for _, task := range tasks { - if task.Status.State == swarm.TaskStateRunning { + if task.Status.State == swarm.TaskStateRunning && task.Spec.ContainerSpec != nil { result[task.Spec.ContainerSpec.Image]++ } } @@ -409,6 +454,59 @@ func (d *Swarm) UpdateSecret(c *check.C, id string, f ...SecretConstructor) { c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) } +// CreateConfig creates a config given the specified spec +func (d *Swarm) CreateConfig(c *check.C, configSpec swarm.ConfigSpec) string { + status, out, err := d.SockRequest("POST", "/configs/create", configSpec) + + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf("output: %q", string(out))) + + var scr types.ConfigCreateResponse + c.Assert(json.Unmarshal(out, &scr), checker.IsNil) + return scr.ID +} + +// ListConfigs returns the list of the current swarm configs +func (d *Swarm) ListConfigs(c *check.C) []swarm.Config { + status, out, err := d.SockRequest("GET", "/configs", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + + configs := []swarm.Config{} + c.Assert(json.Unmarshal(out, 
&configs), checker.IsNil) + return configs +} + +// GetConfig returns a swarm config identified by the specified id +func (d *Swarm) GetConfig(c *check.C, id string) *swarm.Config { + var config swarm.Config + status, out, err := d.SockRequest("GET", "/configs/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &config), checker.IsNil) + return &config +} + +// DeleteConfig removes the swarm config identified by the specified id +func (d *Swarm) DeleteConfig(c *check.C, id string) { + status, out, err := d.SockRequest("DELETE", "/configs/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusNoContent, check.Commentf("output: %q", string(out))) +} + +// UpdateConfig updates the swarm config identified by the specified id +// Currently, only label update is supported. +func (d *Swarm) UpdateConfig(c *check.C, id string, f ...ConfigConstructor) { + config := d.GetConfig(c, id) + for _, fn := range f { + fn(config) + } + url := fmt.Sprintf("/configs/%s/update?version=%d", config.ID, config.Version.Index) + status, out, err := d.SockRequest("POST", url, config.Spec) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + // GetSwarm returns the current swarm object func (d *Swarm) GetSwarm(c *check.C) swarm.Swarm { var sw swarm.Swarm diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_attach_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_attach_test.go index 88dba3e97..11f7340c1 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_attach_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_attach_test.go @@ -74,7 +74,7 @@ func (s *DockerSuite) 
TestGetContainersAttachWebsocket(c *check.C) { // regression gh14320 func (s *DockerSuite) TestPostContainersAttachContainerNotFound(c *check.C) { - client, err := request.NewClient(daemonHost()) + client, err := request.NewHTTPClient(daemonHost()) c.Assert(err, checker.IsNil) req, err := request.New(daemonHost(), "/containers/doesnotexist/attach", request.Method(http.MethodPost)) resp, err := client.Do(req) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_build_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_build_test.go index 0a1cd70dd..c1ab7661e 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_build_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_build_test.go @@ -3,15 +3,28 @@ package main import ( "archive/tar" "bytes" + "encoding/json" + "fmt" + "io" "io/ioutil" "net/http" "regexp" "strings" + "github.com/docker/docker/api/types" + "github.com/docker/docker/client/session" + "github.com/docker/docker/client/session/filesync" "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build/fakecontext" + "github.com/docker/docker/integration-cli/cli/build/fakegit" + "github.com/docker/docker/integration-cli/cli/build/fakestorage" "github.com/docker/docker/integration-cli/request" "github.com/docker/docker/pkg/testutil" "github.com/go-check/check" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/net/context" + "golang.org/x/sync/errgroup" ) func (s *DockerSuite) TestBuildAPIDockerFileRemote(c *check.C) { @@ -19,17 +32,15 @@ func (s *DockerSuite) TestBuildAPIDockerFileRemote(c *check.C) { var testD string if testEnv.DaemonPlatform() == "windows" { testD = `FROM busybox -COPY * /tmp/ RUN find / -name ba* RUN find /tmp/` } else { // -xdev is required because sysfs can cause EPERM testD = `FROM busybox -COPY * /tmp/ RUN find / -xdev -name ba* RUN find /tmp/` } - server := 
fakeStorage(c, map[string]string{"testD": testD}) + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{"testD": testD})) defer server.Close() res, body, err := request.Post("/build?dockerfile=baz&remote="+server.URL()+"/testD", request.JSON) @@ -42,7 +53,7 @@ RUN find /tmp/` // Make sure Dockerfile exists. // Make sure 'baz' doesn't exist ANYWHERE despite being mentioned in the URL out := string(buf) - c.Assert(out, checker.Contains, "/tmp/Dockerfile") + c.Assert(out, checker.Contains, "RUN find /tmp") c.Assert(out, checker.Not(checker.Contains), "baz") } @@ -66,9 +77,9 @@ func (s *DockerSuite) TestBuildAPIRemoteTarballContext(c *check.C) { // failed to close tar archive c.Assert(tw.Close(), checker.IsNil) - server := fakeBinaryStorage(c, map[string]*bytes.Buffer{ + server := fakestorage.New(c, "", fakecontext.WithBinaryFiles(map[string]*bytes.Buffer{ "testT.tar": buffer, - }) + })) defer server.Close() res, b, err := request.Post("/build?remote="+server.URL()+"/testT.tar", request.ContentType("application/tar")) @@ -113,9 +124,9 @@ RUN echo 'right' // failed to close tar archive c.Assert(tw.Close(), checker.IsNil) - server := fakeBinaryStorage(c, map[string]*bytes.Buffer{ + server := fakestorage.New(c, "", fakecontext.WithBinaryFiles(map[string]*bytes.Buffer{ "testT.tar": buffer, - }) + })) defer server.Close() url := "/build?dockerfile=custom&remote=" + server.URL() + "/testT.tar" @@ -132,7 +143,7 @@ RUN echo 'right' } func (s *DockerSuite) TestBuildAPILowerDockerfile(c *check.C) { - git := newFakeGit(c, "repo", map[string]string{ + git := fakegit.New(c, "repo", map[string]string{ "dockerfile": `FROM busybox RUN echo from dockerfile`, }, false) @@ -150,7 +161,7 @@ RUN echo from dockerfile`, } func (s *DockerSuite) TestBuildAPIBuildGitWithF(c *check.C) { - git := newFakeGit(c, "repo", map[string]string{ + git := fakegit.New(c, "repo", map[string]string{ "baz": `FROM busybox RUN echo from baz`, "Dockerfile": `FROM busybox @@ -172,7 +183,7 @@ 
RUN echo from Dockerfile`, func (s *DockerSuite) TestBuildAPIDoubleDockerfile(c *check.C) { testRequires(c, UnixCli) // dockerfile overwrites Dockerfile on Windows - git := newFakeGit(c, "repo", map[string]string{ + git := fakegit.New(c, "repo", map[string]string{ "Dockerfile": `FROM busybox RUN echo from Dockerfile`, "dockerfile": `FROM busybox @@ -248,3 +259,277 @@ func (s *DockerSuite) TestBuildAPIUnnormalizedTarPaths(c *check.C) { c.Assert(imageA, checker.Not(checker.Equals), imageB) } + +func (s *DockerSuite) TestBuildOnBuildWithCopy(c *check.C) { + dockerfile := ` + FROM ` + minimalBaseImage() + ` as onbuildbase + ONBUILD COPY file /file + + FROM onbuildbase + ` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFile("file", "some content"), + ) + defer ctx.Close() + + res, body, err := request.Post( + "/build", + request.RawContent(ctx.AsTarReader(c)), + request.ContentType("application/x-tar")) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + out, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(out), checker.Contains, "Successfully built") +} + +func (s *DockerSuite) TestBuildOnBuildCache(c *check.C) { + build := func(dockerfile string) []byte { + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + ) + defer ctx.Close() + + res, body, err := request.Post( + "/build", + request.RawContent(ctx.AsTarReader(c)), + request.ContentType("application/x-tar")) + require.NoError(c, err) + assert.Equal(c, http.StatusOK, res.StatusCode) + + out, err := testutil.ReadBody(body) + require.NoError(c, err) + assert.Contains(c, string(out), "Successfully built") + return out + } + + dockerfile := ` + FROM ` + minimalBaseImage() + ` as onbuildbase + ENV something=bar + ONBUILD ENV foo=bar + ` + build(dockerfile) + + dockerfile += "FROM onbuildbase" + out := build(dockerfile) + + imageIDs := getImageIDsFromBuild(c, out) + assert.Len(c, 
imageIDs, 2) + parentID, childID := imageIDs[0], imageIDs[1] + + client, err := request.NewClient() + require.NoError(c, err) + + // check parentID is correct + image, _, err := client.ImageInspectWithRaw(context.Background(), childID) + require.NoError(c, err) + assert.Equal(c, parentID, image.Parent) +} + +func (s *DockerRegistrySuite) TestBuildCopyFromForcePull(c *check.C) { + client, err := request.NewClient() + require.NoError(c, err) + + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + err = client.ImageTag(context.TODO(), "busybox", repoName) + assert.Nil(c, err) + // push the image to the registry + rc, err := client.ImagePush(context.TODO(), repoName, types.ImagePushOptions{RegistryAuth: "{}"}) + assert.Nil(c, err) + _, err = io.Copy(ioutil.Discard, rc) + assert.Nil(c, err) + + dockerfile := fmt.Sprintf(` + FROM %s AS foo + RUN touch abc + FROM %s + COPY --from=foo /abc / + `, repoName, repoName) + + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + ) + defer ctx.Close() + + res, body, err := request.Post( + "/build?pull=1", + request.RawContent(ctx.AsTarReader(c)), + request.ContentType("application/x-tar")) + require.NoError(c, err) + assert.Equal(c, http.StatusOK, res.StatusCode) + + out, err := testutil.ReadBody(body) + require.NoError(c, err) + assert.Contains(c, string(out), "Successfully built") +} + +func (s *DockerSuite) TestBuildAddRemoteNoDecompress(c *check.C) { + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + dt := []byte("contents") + err := tw.WriteHeader(&tar.Header{ + Name: "foo", + Size: int64(len(dt)), + Mode: 0600, + Typeflag: tar.TypeReg, + }) + require.NoError(c, err) + _, err = tw.Write(dt) + require.NoError(c, err) + err = tw.Close() + require.NoError(c, err) + + server := fakestorage.New(c, "", fakecontext.WithBinaryFiles(map[string]*bytes.Buffer{ + "test.tar": buffer, + })) + defer server.Close() + + dockerfile := 
fmt.Sprintf(` + FROM busybox + ADD %s/test.tar / + RUN [ -f test.tar ] + `, server.URL()) + + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + ) + defer ctx.Close() + + res, body, err := request.Post( + "/build", + request.RawContent(ctx.AsTarReader(c)), + request.ContentType("application/x-tar")) + require.NoError(c, err) + assert.Equal(c, http.StatusOK, res.StatusCode) + + out, err := testutil.ReadBody(body) + require.NoError(c, err) + assert.Contains(c, string(out), "Successfully built") +} + +func (s *DockerSuite) TestBuildWithSession(c *check.C) { + testRequires(c, ExperimentalDaemon) + + dockerfile := ` + FROM busybox + COPY file / + RUN cat /file + ` + + fctx := fakecontext.New(c, "", + fakecontext.WithFile("file", "some content"), + ) + defer fctx.Close() + + out := testBuildWithSession(c, fctx.Dir, dockerfile) + assert.Contains(c, out, "some content") + + fctx.Add("second", "contentcontent") + + dockerfile += ` + COPY second / + RUN cat /second + ` + + out = testBuildWithSession(c, fctx.Dir, dockerfile) + assert.Equal(c, strings.Count(out, "Using cache"), 2) + assert.Contains(c, out, "contentcontent") + + client, err := request.NewClient() + require.NoError(c, err) + + du, err := client.DiskUsage(context.TODO()) + assert.Nil(c, err) + assert.True(c, du.BuilderSize > 10) + + out = testBuildWithSession(c, fctx.Dir, dockerfile) + assert.Equal(c, strings.Count(out, "Using cache"), 4) + + du2, err := client.DiskUsage(context.TODO()) + assert.Nil(c, err) + assert.Equal(c, du.BuilderSize, du2.BuilderSize) + + // rebuild with regular tar, confirm cache still applies + fctx.Add("Dockerfile", dockerfile) + res, body, err := request.Post( + "/build", + request.RawContent(fctx.AsTarReader(c)), + request.ContentType("application/x-tar")) + require.NoError(c, err) + assert.Equal(c, http.StatusOK, res.StatusCode) + + outBytes, err := testutil.ReadBody(body) + require.NoError(c, err) + assert.Contains(c, string(outBytes), "Successfully built") + 
assert.Equal(c, strings.Count(string(outBytes), "Using cache"), 4) + + _, err = client.BuildCachePrune(context.TODO()) + assert.Nil(c, err) + + du, err = client.DiskUsage(context.TODO()) + assert.Nil(c, err) + assert.Equal(c, du.BuilderSize, int64(0)) +} + +func testBuildWithSession(c *check.C, dir, dockerfile string) (outStr string) { + client, err := request.NewClient() + require.NoError(c, err) + + sess, err := session.NewSession("foo1", "foo") + assert.Nil(c, err) + + fsProvider := filesync.NewFSSyncProvider(dir, nil) + sess.Allow(fsProvider) + + g, ctx := errgroup.WithContext(context.Background()) + + g.Go(func() error { + return sess.Run(ctx, client.DialSession) + }) + + g.Go(func() error { + res, body, err := request.Post("/build?remote=client-session&session="+sess.UUID(), func(req *http.Request) error { + req.Body = ioutil.NopCloser(strings.NewReader(dockerfile)) + return nil + }) + if err != nil { + return err + } + assert.Equal(c, res.StatusCode, http.StatusOK) + out, err := testutil.ReadBody(body) + require.NoError(c, err) + assert.Contains(c, string(out), "Successfully built") + sess.Close() + outStr = string(out) + return nil + }) + + err = g.Wait() + assert.Nil(c, err) + return +} + +type buildLine struct { + Stream string + Aux struct { + ID string + } +} + +func getImageIDsFromBuild(c *check.C, output []byte) []string { + ids := []string{} + for _, line := range bytes.Split(output, []byte("\n")) { + if len(line) == 0 { + continue + } + entry := buildLine{} + require.NoError(c, json.Unmarshal(line, &entry)) + if entry.Aux.ID != "" { + ids = append(ids, entry.Aux.ID) + } + } + return ids +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go index 104a0788c..25c724425 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go +++ 
b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go @@ -21,6 +21,7 @@ import ( mounttypes "github.com/docker/docker/api/types/mount" networktypes "github.com/docker/docker/api/types/network" "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" "github.com/docker/docker/integration-cli/cli/build" "github.com/docker/docker/integration-cli/request" "github.com/docker/docker/pkg/ioutils" @@ -211,7 +212,7 @@ func (s *DockerSuite) TestGetContainerStats(c *check.C) { } func (s *DockerSuite) TestGetContainerStatsRmRunning(c *check.C) { - out, _ := runSleepingContainer(c) + out := runSleepingContainer(c) id := strings.TrimSpace(out) buf := &testutil.ChannelBuffer{C: make(chan []byte, 1)} @@ -347,25 +348,29 @@ func (s *DockerSuite) TestGetStoppedContainerStats(c *check.C) { func (s *DockerSuite) TestContainerAPIPause(c *check.C) { // Problematic on Windows as Windows does not support pause testRequires(c, DaemonIsLinux) - defer unpauseAllContainers(c) - out, _ := dockerCmd(c, "run", "-d", "busybox", "sleep", "30") + + getPaused := func(c *check.C) []string { + return strings.Fields(cli.DockerCmd(c, "ps", "-f", "status=paused", "-q", "-a").Combined()) + } + + out := cli.DockerCmd(c, "run", "-d", "busybox", "sleep", "30").Combined() ContainerID := strings.TrimSpace(out) - status, _, err := request.SockRequest("POST", "/containers/"+ContainerID+"/pause", nil, daemonHost()) + resp, _, err := request.Post("/containers/" + ContainerID + "/pause") c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(resp.StatusCode, checker.Equals, http.StatusNoContent) - pausedContainers := getPausedContainers(c) + pausedContainers := getPaused(c) if len(pausedContainers) != 1 || stringid.TruncateID(ContainerID) != pausedContainers[0] { c.Fatalf("there should be one paused container and not %d", len(pausedContainers)) } - status, _, err = request.SockRequest("POST", 
"/containers/"+ContainerID+"/unpause", nil, daemonHost()) + resp, _, err = request.Post("/containers/" + ContainerID + "/unpause") c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(resp.StatusCode, checker.Equals, http.StatusNoContent) - pausedContainers = getPausedContainers(c) + pausedContainers = getPaused(c) c.Assert(pausedContainers, checker.HasLen, 0, check.Commentf("There should be no paused container.")) } @@ -396,7 +401,7 @@ func (s *DockerSuite) TestContainerAPITop(c *check.C) { func (s *DockerSuite) TestContainerAPITopWindows(c *check.C) { testRequires(c, DaemonIsWindows) - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") id := strings.TrimSpace(string(out)) c.Assert(waitRun(id), checker.IsNil) @@ -887,7 +892,7 @@ func (s *DockerSuite) TestContainerAPIRestart(c *check.C) { func (s *DockerSuite) TestContainerAPIRestartNotimeoutParam(c *check.C) { name := "test-api-restart-no-timeout-param" - out, _ := runSleepingContainer(c, "-di", "--name", name) + out := runSleepingContainer(c, "-di", "--name", name) id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) @@ -1042,7 +1047,7 @@ func (s *DockerSuite) TestContainerAPICopyContainerNotFoundPr124(c *check.C) { } func (s *DockerSuite) TestContainerAPIDelete(c *check.C) { - out, _ := runSleepingContainer(c) + out := runSleepingContainer(c) id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) @@ -1062,7 +1067,7 @@ func (s *DockerSuite) TestContainerAPIDeleteNotExist(c *check.C) { } func (s *DockerSuite) TestContainerAPIDeleteForce(c *check.C) { - out, _ := runSleepingContainer(c) + out := runSleepingContainer(c) id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) @@ -1097,7 +1102,7 @@ func (s *DockerSuite) TestContainerAPIDeleteRemoveLinks(c *check.C) { } func (s *DockerSuite) TestContainerAPIDeleteConflict(c *check.C) { - out, _ := runSleepingContainer(c) + out := runSleepingContainer(c) id := 
strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) @@ -1115,7 +1120,7 @@ func (s *DockerSuite) TestContainerAPIDeleteRemoveVolume(c *check.C) { vol = `c:\testvolume` } - out, _ := runSleepingContainer(c, "-v", vol) + out := runSleepingContainer(c, "-v", vol) id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) @@ -1153,7 +1158,7 @@ func (s *DockerSuite) TestContainerAPIChunkedEncoding(c *check.C) { } func (s *DockerSuite) TestContainerAPIPostContainerStop(c *check.C) { - out, _ := runSleepingContainer(c) + out := runSleepingContainer(c) containerID := strings.TrimSpace(out) c.Assert(waitRun(containerID), checker.IsNil) @@ -1262,7 +1267,6 @@ func (s *DockerSuite) TestPutContainerArchiveErrSymlinkInVolumeToReadOnlyRootfs( readOnly: true, volumes: defaultVolumes(testVol), // Our bind mount is at /vol2 }) - defer deleteContainer(cID) // Attempt to extract to a symlink in the volume which points to a // directory outside the volume. This should cause an error because the @@ -1444,7 +1448,7 @@ func (s *DockerSuite) TestPostContainersCreateMemorySwappinessHostConfigOmitted( var containerJSON types.ContainerJSON c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) - c.Assert(*containerJSON.HostConfig.MemorySwappiness, check.Equals, int64(-1)) + c.Assert(containerJSON.HostConfig.MemorySwappiness, check.IsNil) } // check validation is done daemon side and not only in cli @@ -1929,3 +1933,18 @@ func (s *DockerSuite) TestContainersAPICreateMountsTmpfs(c *check.C) { } } } + +// Regression test for #33334 +// Makes sure that when a container which has a custom stop signal + restart=always +// gets killed (with SIGKILL) by the kill API, that the restart policy is cancelled. 
+func (s *DockerSuite) TestContainerKillCustomStopSignal(c *check.C) { + id := strings.TrimSpace(runSleepingContainer(c, "--stop-signal=SIGTERM", "--restart=always")) + res, _, err := request.Post("/containers/" + id + "/kill") + c.Assert(err, checker.IsNil) + defer res.Body.Close() + + b, err := ioutil.ReadAll(res.Body) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent, check.Commentf(string(b))) + err = waitInspect(id, "{{.State.Running}} {{.State.Restarting}}", "false false", 30*time.Second) + c.Assert(err, checker.IsNil) +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_create_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_create_test.go index 7328f4d06..e404b6cf5 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_create_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_create_test.go @@ -1,9 +1,11 @@ package main import ( + "fmt" "net/http" "time" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/integration-cli/request" "github.com/go-check/check" @@ -91,8 +93,8 @@ func (s *DockerSuite) TestAPICreateWithInvalidHealthcheckParams(c *check.C) { config := map[string]interface{}{ "Image": "busybox", "Healthcheck": map[string]interface{}{ - "Interval": time.Duration(-10000000), - "Timeout": time.Duration(1000000000), + "Interval": -10 * time.Millisecond, + "Timeout": time.Second, "Retries": int(1000), }, } @@ -100,39 +102,38 @@ func (s *DockerSuite) TestAPICreateWithInvalidHealthcheckParams(c *check.C) { status, body, err := request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusInternalServerError) - expected := "Interval in Healthcheck cannot be less than one second" + expected := fmt.Sprintf("Interval in Healthcheck cannot be less than %s", container.MinimumDuration) 
c.Assert(getErrorMessage(c, body), checker.Contains, expected) - // test invalid Interval in Healthcheck: larger than 0s but less than 1s + // test invalid Interval in Healthcheck: larger than 0s but less than 1ms name = "test2" config = map[string]interface{}{ "Image": "busybox", "Healthcheck": map[string]interface{}{ - "Interval": time.Duration(500000000), - "Timeout": time.Duration(1000000000), + "Interval": 500 * time.Microsecond, + "Timeout": time.Second, "Retries": int(1000), }, } status, body, err = request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusInternalServerError) - expected = "Interval in Healthcheck cannot be less than one second" c.Assert(getErrorMessage(c, body), checker.Contains, expected) - // test invalid Timeout in Healthcheck: less than 1s + // test invalid Timeout in Healthcheck: less than 1ms name = "test3" config = map[string]interface{}{ "Image": "busybox", "Healthcheck": map[string]interface{}{ - "Interval": time.Duration(1000000000), - "Timeout": time.Duration(-100000000), + "Interval": time.Second, + "Timeout": -100 * time.Millisecond, "Retries": int(1000), }, } status, body, err = request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusInternalServerError) - expected = "Timeout in Healthcheck cannot be less than one second" + expected = fmt.Sprintf("Timeout in Healthcheck cannot be less than %s", container.MinimumDuration) c.Assert(getErrorMessage(c, body), checker.Contains, expected) // test invalid Retries in Healthcheck: less than 0 @@ -140,8 +141,8 @@ func (s *DockerSuite) TestAPICreateWithInvalidHealthcheckParams(c *check.C) { config = map[string]interface{}{ "Image": "busybox", "Healthcheck": map[string]interface{}{ - "Interval": time.Duration(1000000000), - "Timeout": time.Duration(1000000000), + "Interval": time.Second, + "Timeout": 
time.Second, "Retries": int(-10), }, } @@ -150,4 +151,21 @@ func (s *DockerSuite) TestAPICreateWithInvalidHealthcheckParams(c *check.C) { c.Assert(status, check.Equals, http.StatusInternalServerError) expected = "Retries in Healthcheck cannot be negative" c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + // test invalid StartPeriod in Healthcheck: not 0 and less than 1ms + name = "test3" + config = map[string]interface{}{ + "Image": "busybox", + "Healthcheck": map[string]interface{}{ + "Interval": time.Second, + "Timeout": time.Second, + "Retries": int(1000), + "StartPeriod": 100 * time.Microsecond, + }, + } + status, body, err = request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected = fmt.Sprintf("StartPeriod in Healthcheck cannot be less than %s", container.MinimumDuration) + c.Assert(getErrorMessage(c, body), checker.Contains, expected) } diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_events_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_events_test.go index 0f18bf8bd..a95422f58 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_events_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_events_test.go @@ -40,7 +40,7 @@ func (s *DockerSuite) TestEventsAPIBackwardsCompatible(c *check.C) { since := daemonTime(c).Unix() ts := strconv.FormatInt(since, 10) - out, _ := runSleepingContainer(c, "--name=foo", "-d") + out := runSleepingContainer(c, "--name=foo", "-d") containerID := strings.TrimSpace(out) c.Assert(waitRun(containerID), checker.IsNil) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_images_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_images_test.go index f27c9a238..d44b307fa 100644 --- 
a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_images_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_images_test.go @@ -3,6 +3,7 @@ package main import ( "encoding/json" "net/http" + "net/http/httptest" "net/url" "strings" @@ -120,13 +121,16 @@ func (s *DockerSuite) TestAPIImagesHistory(c *check.C) { func (s *DockerSuite) TestAPIImagesImportBadSrc(c *check.C) { testRequires(c, Network) + server := httptest.NewServer(http.NewServeMux()) + defer server.Close() + tt := []struct { statusExp int fromSrc string }{ - {http.StatusNotFound, "http://example.com/nofile.tar"}, - {http.StatusNotFound, "example.com/nofile.tar"}, - {http.StatusNotFound, "example.com%2Fdata%2Ffile.tar"}, + {http.StatusNotFound, server.URL + "/nofile.tar"}, + {http.StatusNotFound, strings.TrimPrefix(server.URL, "http://") + "/nofile.tar"}, + {http.StatusNotFound, strings.TrimPrefix(server.URL, "http://") + "%2Fdata%2Ffile.tar"}, {http.StatusInternalServerError, "%2Fdata%2Ffile.tar"}, } diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_info_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_info_test.go index 57dac36e1..9cb873d60 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_info_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_info_test.go @@ -3,8 +3,11 @@ package main import ( "net/http" + "encoding/json" + "github.com/docker/docker/api/types" "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/testutil" "github.com/go-check/check" ) @@ -40,6 +43,25 @@ func (s *DockerSuite) TestInfoAPI(c *check.C) { } } +// TestInfoAPIRuncCommit tests that dockerd is able to obtain RunC version +// information, and that the version matches the expected version +func (s *DockerSuite) TestInfoAPIRuncCommit(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not have RunC version 
information + + res, body, err := request.Get("/v1.30/info") + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + b, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + + var i types.Info + + c.Assert(json.Unmarshal(b, &i), checker.IsNil) + c.Assert(i.RuncCommit.ID, checker.Not(checker.Equals), "N/A") + c.Assert(i.RuncCommit.ID, checker.Equals, i.RuncCommit.Expected) +} + func (s *DockerSuite) TestInfoAPIVersioned(c *check.C) { testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later endpoint := "/v1.20/info" diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_logs_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_logs_test.go index 535b210a3..5e953b79d 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_logs_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_logs_test.go @@ -19,34 +19,31 @@ func (s *DockerSuite) TestLogsAPIWithStdout(c *check.C) { type logOut struct { out string - res *http.Response err error } + chLog := make(chan logOut) + res, body, err := request.Get(fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1×tamps=1", id)) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) go func() { - res, body, err := request.Get(fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1×tamps=1", id)) - if err != nil { - chLog <- logOut{"", nil, err} - return - } defer body.Close() out, err := bufio.NewReader(body).ReadString('\n') if err != nil { - chLog <- logOut{"", nil, err} + chLog <- logOut{"", err} return } - chLog <- logOut{strings.TrimSpace(out), res, err} + chLog <- logOut{strings.TrimSpace(out), err} }() select { case l := <-chLog: c.Assert(l.err, checker.IsNil) - c.Assert(l.res.StatusCode, checker.Equals, http.StatusOK) if !strings.HasSuffix(l.out, "hello") { c.Fatalf("expected log output to container 'hello', but it does not") } - case <-time.After(20 * time.Second): 
+ case <-time.After(30 * time.Second): c.Fatal("timeout waiting for logs to exit") } } diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_resize_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_resize_test.go index 682f315a5..4a07fc737 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_resize_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_resize_test.go @@ -10,7 +10,7 @@ import ( ) func (s *DockerSuite) TestResizeAPIResponse(c *check.C) { - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") cleanedContainerID := strings.TrimSpace(out) endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" @@ -20,7 +20,7 @@ func (s *DockerSuite) TestResizeAPIResponse(c *check.C) { } func (s *DockerSuite) TestResizeAPIHeightWidthNoInt(c *check.C) { - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") cleanedContainerID := strings.TrimSpace(out) endpoint := "/containers/" + cleanedContainerID + "/resize?h=foo&w=bar" diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_session_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_session_test.go new file mode 100644 index 000000000..e1ad880ea --- /dev/null +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_session_test.go @@ -0,0 +1,49 @@ +package main + +import ( + "net/http" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/testutil" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestSessionCreate(c *check.C) { + testRequires(c, ExperimentalDaemon) + + res, body, err := request.Post("/session", func(r *http.Request) error { + r.Header.Set("X-Docker-Expose-Session-Uuid", "testsessioncreate") // so we don't block default name if something else is using it + r.Header.Set("Upgrade", "h2c") + return nil + }) + 
c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusSwitchingProtocols) + c.Assert(res.Header.Get("Upgrade"), checker.Equals, "h2c") + c.Assert(body.Close(), checker.IsNil) +} + +func (s *DockerSuite) TestSessionCreateWithBadUpgrade(c *check.C) { + testRequires(c, ExperimentalDaemon) + + res, body, err := request.Post("/session") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest) + buf, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + + out := string(buf) + c.Assert(out, checker.Contains, "no upgrade") + + res, body, err = request.Post("/session", func(r *http.Request) error { + r.Header.Set("Upgrade", "foo") + return nil + }) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest) + buf, err = testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + + out = string(buf) + c.Assert(out, checker.Contains, "not supported") +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_stats_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_stats_test.go index f4f9ab5cb..f1cb5bb4a 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_stats_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_stats_test.go @@ -21,11 +21,10 @@ import ( var expectedNetworkInterfaceStats = strings.Split("rx_bytes rx_dropped rx_errors rx_packets tx_bytes tx_dropped tx_errors tx_packets", " ") func (s *DockerSuite) TestAPIStatsNoStreamGetCpu(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true;do echo 'Hello'; usleep 100000; done") + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true;usleep 100; do echo 'Hello'; done") id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) - resp, body, err := request.Get(fmt.Sprintf("/containers/%s/stats?stream=false", id)) c.Assert(err, checker.IsNil) 
c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) @@ -98,7 +97,7 @@ func (s *DockerSuite) TestAPIStatsStoppedContainerInGoroutines(c *check.C) { func (s *DockerSuite) TestAPIStatsNetworkStats(c *check.C) { testRequires(c, SameHostDaemon) - out, _ := runSleepingContainer(c) + out := runSleepingContainer(c) id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) @@ -166,7 +165,7 @@ func (s *DockerSuite) TestAPIStatsNetworkStatsVersioning(c *check.C) { // Windows doesn't support API versions less than 1.25, so no point testing 1.17 .. 1.21 testRequires(c, SameHostDaemon, DaemonIsLinux) - out, _ := runSleepingContainer(c) + out := runSleepingContainer(c) id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) wg := sync.WaitGroup{} @@ -274,11 +273,11 @@ func (s *DockerSuite) TestAPIStatsContainerNotFound(c *check.C) { func (s *DockerSuite) TestAPIStatsNoStreamConnectedContainers(c *check.C) { testRequires(c, DaemonIsLinux) - out1, _ := runSleepingContainer(c) + out1 := runSleepingContainer(c) id1 := strings.TrimSpace(out1) c.Assert(waitRun(id1), checker.IsNil) - out2, _ := runSleepingContainer(c, "--net", "container:"+id1) + out2 := runSleepingContainer(c, "--net", "container:"+id1) id2 := strings.TrimSpace(out2) c.Assert(waitRun(id2), checker.IsNil) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_config_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_config_test.go new file mode 100644 index 000000000..fab65ccbd --- /dev/null +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_config_test.go @@ -0,0 +1,118 @@ +// +build !windows + +package main + +import ( + "fmt" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestAPISwarmConfigsEmptyList(c *check.C) { + d := s.AddDaemon(c, true, true) + + configs := d.ListConfigs(c) + 
c.Assert(configs, checker.NotNil) + c.Assert(len(configs), checker.Equals, 0, check.Commentf("configs: %#v", configs)) +} + +func (s *DockerSwarmSuite) TestAPISwarmConfigsCreate(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + configs := d.ListConfigs(c) + c.Assert(len(configs), checker.Equals, 1, check.Commentf("configs: %#v", configs)) + name := configs[0].Spec.Annotations.Name + c.Assert(name, checker.Equals, testName, check.Commentf("configs: %s", name)) +} + +func (s *DockerSwarmSuite) TestAPISwarmConfigsDelete(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + config := d.GetConfig(c, id) + c.Assert(config.ID, checker.Equals, id, check.Commentf("config: %v", config)) + + d.DeleteConfig(c, config.ID) + status, out, err := d.SockRequest("GET", "/configs/"+id, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound, check.Commentf("config delete: %s", string(out))) +} + +func (s *DockerSwarmSuite) TestAPISwarmConfigsUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + Labels: map[string]string{ + "test": "test1", + }, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + config := d.GetConfig(c, id) + c.Assert(config.ID, checker.Equals, id, check.Commentf("config: %v", config)) + + // test UpdateConfig with full ID + d.UpdateConfig(c, 
id, func(s *swarm.Config) { + s.Spec.Labels = map[string]string{ + "test": "test1", + } + }) + + config = d.GetConfig(c, id) + c.Assert(config.Spec.Labels["test"], checker.Equals, "test1", check.Commentf("config: %v", config)) + + // test UpdateConfig with full name + d.UpdateConfig(c, config.Spec.Name, func(s *swarm.Config) { + s.Spec.Labels = map[string]string{ + "test": "test2", + } + }) + + config = d.GetConfig(c, id) + c.Assert(config.Spec.Labels["test"], checker.Equals, "test2", check.Commentf("config: %v", config)) + + // test UpdateConfig with prefix ID + d.UpdateConfig(c, id[:1], func(s *swarm.Config) { + s.Spec.Labels = map[string]string{ + "test": "test3", + } + }) + + config = d.GetConfig(c, id) + c.Assert(config.Spec.Labels["test"], checker.Equals, "test3", check.Commentf("config: %v", config)) + + // test UpdateConfig in updating Data which is not supported in daemon + // this test will produce an error in func UpdateConfig + config = d.GetConfig(c, id) + config.Spec.Data = []byte("TESTINGDATA2") + + url := fmt.Sprintf("/configs/%s/update?version=%d", config.ID, config.Version.Index) + status, out, err := d.SockRequest("POST", url, config.Spec) + + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusBadRequest, check.Commentf("output: %q", string(out))) +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_secret_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_secret_test.go index 64a1fc8d8..cb82af8e2 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_secret_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_secret_test.go @@ -23,18 +23,25 @@ func (s *DockerSwarmSuite) TestAPISwarmSecretsCreate(c *check.C) { d := s.AddDaemon(c, true, true) testName := "test_secret" - id := d.CreateSecret(c, swarm.SecretSpec{ + secretSpec := swarm.SecretSpec{ Annotations: swarm.Annotations{ Name: 
testName, }, Data: []byte("TESTINGDATA"), - }) + } + + id := d.CreateSecret(c, secretSpec) c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) secrets := d.ListSecrets(c) c.Assert(len(secrets), checker.Equals, 1, check.Commentf("secrets: %#v", secrets)) name := secrets[0].Spec.Annotations.Name c.Assert(name, checker.Equals, testName, check.Commentf("secret: %s", name)) + + // create an already existing secret, daemon should return a status code of 409 + status, out, err := d.SockRequest("POST", "/secrets/create", secretSpec) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusConflict, check.Commentf("secret create: %s", string(out))) } func (s *DockerSwarmSuite) TestAPISwarmSecretsDelete(c *check.C) { @@ -55,6 +62,13 @@ func (s *DockerSwarmSuite) TestAPISwarmSecretsDelete(c *check.C) { status, out, err := d.SockRequest("GET", "/secrets/"+id, nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusNotFound, check.Commentf("secret delete: %s", string(out))) + + // delete non-existing secret, daemon should return a status code of 404 + id = "non-existing" + status, out, err = d.SockRequest("DELETE", "/secrets/"+id, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound, check.Commentf("secret delete: %s", string(out))) + } func (s *DockerSwarmSuite) TestAPISwarmSecretsUpdate(c *check.C) { @@ -114,5 +128,5 @@ func (s *DockerSwarmSuite) TestAPISwarmSecretsUpdate(c *check.C) { status, out, err := d.SockRequest("POST", url, secret.Spec) c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusInternalServerError, check.Commentf("output: %q", string(out))) + c.Assert(status, checker.Equals, http.StatusBadRequest, check.Commentf("output: %q", string(out))) } diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_service_test.go 
b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_service_test.go index 6a3c9f170..66e17c4de 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_service_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_service_test.go @@ -4,15 +4,19 @@ package main import ( "fmt" + "path" "strconv" "strings" "syscall" "time" "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/swarm/runtime" "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/integration-cli/fixtures/plugin" "github.com/go-check/check" + "golang.org/x/net/context" ) func setPortConfig(portConfig []swarm.PortConfig) daemon.ServiceConstructor { @@ -196,7 +200,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateStartFirst(c *check.C) { // service started from this image won't pass health check _, _, err := d.BuildImageWithOut(image2, `FROM busybox - HEALTHCHECK --interval=1s --timeout=1s --retries=1024\ + HEALTHCHECK --interval=1s --timeout=30s --retries=1024 \ CMD cat /status`, true) c.Check(err, check.IsNil) @@ -596,3 +600,77 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *check.C) { } } } + +// Test plugins deployed via swarm services +func (s *DockerSwarmSuite) TestAPISwarmServicesPlugin(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64) + reg := setupRegistry(c, false, "", "") + defer reg.Close() + + repo := path.Join(privateRegistryURL, "swarm", "test:v1") + repo2 := path.Join(privateRegistryURL, "swarm", "test:v2") + name := "test" + + err := plugin.CreateInRegistry(context.Background(), repo, nil) + c.Assert(err, checker.IsNil, check.Commentf("failed to create plugin")) + err = plugin.CreateInRegistry(context.Background(), repo2, nil) + c.Assert(err, checker.IsNil, check.Commentf("failed to create plugin")) + + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := 
s.AddDaemon(c, true, false) + + makePlugin := func(repo, name string, constraints []string) func(*swarm.Service) { + return func(s *swarm.Service) { + s.Spec.TaskTemplate.Runtime = "plugin" + s.Spec.TaskTemplate.PluginSpec = &runtime.PluginSpec{ + Name: name, + Remote: repo, + } + if constraints != nil { + s.Spec.TaskTemplate.Placement = &swarm.Placement{ + Constraints: constraints, + } + } + } + } + + id := d1.CreateService(c, makePlugin(repo, name, nil)) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckPluginRunning(name), checker.True) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckPluginRunning(name), checker.True) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckPluginRunning(name), checker.True) + + service := d1.GetService(c, id) + d1.UpdateService(c, service, makePlugin(repo2, name, nil)) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckPluginImage(name), checker.Equals, repo2) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckPluginImage(name), checker.Equals, repo2) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckPluginImage(name), checker.Equals, repo2) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckPluginRunning(name), checker.True) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckPluginRunning(name), checker.True) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckPluginRunning(name), checker.True) + + d1.RemoveService(c, id) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckPluginRunning(name), checker.False) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckPluginRunning(name), checker.False) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckPluginRunning(name), checker.False) + + // constrain to managers only + id = d1.CreateService(c, makePlugin(repo, name, []string{"node.role==manager"})) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckPluginRunning(name), checker.True) + waitAndAssert(c, defaultReconciliationTimeout, 
d2.CheckPluginRunning(name), checker.True) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckPluginRunning(name), checker.False) // Not a manager, not running it + d1.RemoveService(c, id) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckPluginRunning(name), checker.False) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckPluginRunning(name), checker.False) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckPluginRunning(name), checker.False) + + // with no name + id = d1.CreateService(c, makePlugin(repo, "", nil)) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckPluginRunning(repo), checker.True) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckPluginRunning(repo), checker.True) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckPluginRunning(repo), checker.True) + d1.RemoveService(c, id) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckPluginRunning(repo), checker.False) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckPluginRunning(repo), checker.False) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckPluginRunning(repo), checker.False) +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go index 7b131000c..9d24757b4 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go @@ -8,18 +8,22 @@ import ( "io/ioutil" "net" "net/http" + "net/url" "os" "path/filepath" "strings" "sync" "time" + "github.com/cloudflare/cfssl/csr" "github.com/cloudflare/cfssl/helpers" + "github.com/cloudflare/cfssl/initca" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/swarmkit/ca" 
"github.com/go-check/check" ) @@ -32,6 +36,7 @@ func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) { c.Assert(err, checker.IsNil) c.Assert(info.ControlAvailable, checker.True) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(info.Cluster.RootRotationInProgress, checker.False) d2 := s.AddDaemon(c, true, false) info, err = d2.SwarmInfo() @@ -146,9 +151,6 @@ func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) { } func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *check.C) { - // TODO: when root rotation is in, convert to a series of root rotation tests instead. - // currently just makes sure that we don't have to provide a CA certificate when - // providing an external CA d1 := s.AddDaemon(c, false, false) c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil) d1.UpdateSwarm(c, func(s *swarm.Spec) { @@ -157,11 +159,18 @@ func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *check.C) { Protocol: swarm.ExternalCAProtocolCFSSL, URL: "https://thishasnoca.org", }, + { + Protocol: swarm.ExternalCAProtocolCFSSL, + URL: "https://thishasacacert.org", + CACert: "cacert", + }, } }) info, err := d1.SwarmInfo() c.Assert(err, checker.IsNil) - c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs, checker.HasLen, 1) + c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs, checker.HasLen, 2) + c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs[0].CACert, checker.Equals, "") + c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs[1].CACert, checker.Equals, "cacert") } func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *check.C) { @@ -221,7 +230,7 @@ func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *check.C) { url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index) status, out, err := d1.SockRequest("POST", url, node.Spec) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError, check.Commentf("output: %q", string(out))) + c.Assert(status, checker.Equals, http.StatusBadRequest, 
check.Commentf("output: %q", string(out))) // The warning specific to demoting the last manager is best-effort and // won't appear until the Role field of the demoted manager has been // updated. @@ -350,9 +359,6 @@ func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) { d3.Stop(c) - // make sure there is a leader - waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil) - var service swarm.Service simpleTestService(&service) service.Spec.Name = "top2" @@ -554,7 +560,7 @@ func simpleTestService(s *swarm.Service) { s.Spec = swarm.ServiceSpec{ TaskTemplate: swarm.TaskSpec{ - ContainerSpec: swarm.ContainerSpec{ + ContainerSpec: &swarm.ContainerSpec{ Image: "busybox:latest", Command: []string{"/bin/top"}, }, @@ -577,7 +583,7 @@ func serviceForUpdate(s *swarm.Service) { s.Spec = swarm.ServiceSpec{ TaskTemplate: swarm.TaskSpec{ - ContainerSpec: swarm.ContainerSpec{ + ContainerSpec: &swarm.ContainerSpec{ Image: "busybox:latest", Command: []string{"/bin/top"}, }, @@ -635,6 +641,9 @@ func setRollbackOrder(order string) daemon.ServiceConstructor { func setImage(image string) daemon.ServiceConstructor { return func(s *swarm.Service) { + if s.Spec.TaskTemplate.ContainerSpec == nil { + s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} + } s.Spec.TaskTemplate.ContainerSpec.Image = image } } @@ -915,6 +924,9 @@ func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *check.C) { instances := 1 d.CreateService(c, simpleTestService, setInstances(instances), func(s *swarm.Service) { + if s.Spec.TaskTemplate.ContainerSpec == nil { + s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} + } s.Spec.TaskTemplate.ContainerSpec.Healthcheck = &container.HealthConfig{} s.Spec.TaskTemplate.Networks = []swarm.NetworkAttachmentConfig{ {Target: "lb"}, @@ -928,3 +940,106 @@ func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *check.C) { out, err = d.Cmd("exec", containers[0], "ping", "-c1", "-W3", "top") c.Assert(err, checker.IsNil, 
check.Commentf(out)) } + +func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *check.C) { + m := s.AddDaemon(c, true, true) + w := s.AddDaemon(c, true, false) + + info, err := m.SwarmInfo() + c.Assert(err, checker.IsNil) + + currentTrustRoot := info.Cluster.TLSInfo.TrustRoot + + // rotate multiple times + for i := 0; i < 4; i++ { + var cert, key []byte + if i%2 != 0 { + cert, _, key, err = initca.New(&csr.CertificateRequest{ + CN: "newRoot", + KeyRequest: csr.NewBasicKeyRequest(), + CA: &csr.CAConfig{Expiry: ca.RootCAExpiration}, + }) + c.Assert(err, checker.IsNil) + } + expectedCert := string(cert) + m.UpdateSwarm(c, func(s *swarm.Spec) { + s.CAConfig.SigningCACert = expectedCert + s.CAConfig.SigningCAKey = string(key) + s.CAConfig.ForceRotate++ + }) + + // poll to make sure update succeeds + var clusterTLSInfo swarm.TLSInfo + for j := 0; j < 18; j++ { + info, err := m.SwarmInfo() + c.Assert(err, checker.IsNil) + + // the desired CA cert and key is always redacted + c.Assert(info.Cluster.Spec.CAConfig.SigningCAKey, checker.Equals, "") + c.Assert(info.Cluster.Spec.CAConfig.SigningCACert, checker.Equals, "") + + clusterTLSInfo = info.Cluster.TLSInfo + + // if root rotation is done and the trust root has changed, we don't have to poll anymore + if !info.Cluster.RootRotationInProgress && clusterTLSInfo.TrustRoot != currentTrustRoot { + break + } + + // root rotation not done + time.Sleep(250 * time.Millisecond) + } + if cert != nil { + c.Assert(clusterTLSInfo.TrustRoot, checker.Equals, expectedCert) + } + // could take another second or two for the nodes to trust the new roots after they've all gotten + // new TLS certificates + for j := 0; j < 18; j++ { + mInfo := m.GetNode(c, m.NodeID).Description.TLSInfo + wInfo := m.GetNode(c, w.NodeID).Description.TLSInfo + + if mInfo.TrustRoot == clusterTLSInfo.TrustRoot && wInfo.TrustRoot == clusterTLSInfo.TrustRoot { + break + } + + // nodes don't trust root certs yet + time.Sleep(250 * time.Millisecond) + } + + 
c.Assert(m.GetNode(c, m.NodeID).Description.TLSInfo, checker.DeepEquals, clusterTLSInfo) + c.Assert(m.GetNode(c, w.NodeID).Description.TLSInfo, checker.DeepEquals, clusterTLSInfo) + currentTrustRoot = clusterTLSInfo.TrustRoot + } +} + +func (s *DockerSwarmSuite) TestAPINetworkInspectWithScope(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "foo" + networkCreateRequest := types.NetworkCreateRequest{ + Name: name, + } + + var n types.NetworkCreateResponse + networkCreateRequest.NetworkCreate.Driver = "overlay" + + status, out, err := d.SockRequest("POST", "/networks/create", networkCreateRequest) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out))) + c.Assert(json.Unmarshal(out, &n), checker.IsNil) + + var r types.NetworkResource + + status, body, err := d.SockRequest("GET", "/networks/"+name, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(out))) + c.Assert(json.Unmarshal(body, &r), checker.IsNil) + c.Assert(r.Scope, checker.Equals, "swarm") + c.Assert(r.ID, checker.Equals, n.ID) + + v := url.Values{} + v.Set("scope", "local") + + status, body, err = d.SockRequest("GET", "/networks/"+name+"?"+v.Encode(), nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusNotFound, check.Commentf(string(out))) +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_volumes_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_volumes_test.go index 3cc03dfb3..f354856d3 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_api_volumes_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_api_volumes_test.go @@ -2,8 +2,11 @@ package main import ( "encoding/json" + "fmt" "net/http" "path/filepath" + "strings" + "time" "github.com/docker/docker/api/types" volumetypes 
"github.com/docker/docker/api/types/volume" @@ -69,6 +72,8 @@ func (s *DockerSuite) TestVolumesAPIInspect(c *check.C) { config := volumetypes.VolumesCreateBody{ Name: "test", } + // sampling current time minus a minute so to now have false positive in case of delays + now := time.Now().Truncate(time.Minute) status, b, err := request.SockRequest("POST", "/volumes/create", config, daemonHost()) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusCreated, check.Commentf(string(b))) @@ -87,4 +92,12 @@ func (s *DockerSuite) TestVolumesAPIInspect(c *check.C) { c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(b))) c.Assert(json.Unmarshal(b, &vol), checker.IsNil) c.Assert(vol.Name, checker.Equals, config.Name) + + // comparing CreatedAt field time for the new volume to now. Removing a minute from both to avoid false positive + testCreatedAt, err := time.Parse(time.RFC3339, strings.TrimSpace(vol.CreatedAt)) + c.Assert(err, check.IsNil) + testCreatedAt = testCreatedAt.Truncate(time.Minute) + if !testCreatedAt.Equal(now) { + c.Assert(fmt.Errorf("Time Volume is CreatedAt not equal to current time"), check.NotNil) + } } diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_test.go index a776b83ab..33ecb44b6 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_test.go @@ -10,6 +10,7 @@ import ( "sync" "time" + "github.com/docker/docker/integration-cli/cli" icmd "github.com/docker/docker/pkg/testutil/cmd" "github.com/go-check/check" ) @@ -22,8 +23,8 @@ func (s *DockerSuite) TestAttachMultipleAndRestart(c *check.C) { endGroup.Add(3) startGroup.Add(3) - err := waitForContainer("attacher", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 1; echo hello; done") - c.Assert(err, check.IsNil) + cli.DockerCmd(c, "run", "--name", 
"attacher", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 1; echo hello; done") + cli.WaitRun(c, "attacher") startDone := make(chan struct{}) endDone := make(chan struct{}) @@ -77,7 +78,7 @@ func (s *DockerSuite) TestAttachMultipleAndRestart(c *check.C) { c.Fatalf("Attaches did not initialize properly") } - dockerCmd(c, "kill", "attacher") + cli.DockerCmd(c, "kill", "attacher") select { case <-endDone: @@ -155,7 +156,6 @@ func (s *DockerSuite) TestAttachDisconnect(c *check.C) { func (s *DockerSuite) TestAttachPausedContainer(c *check.C) { testRequires(c, IsPausable) - defer unpauseAllContainers(c) runSleepingContainer(c, "-d", "--name=test") dockerCmd(c, "pause", "test") diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go index b046330d6..5a3d3efc6 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go @@ -20,11 +20,15 @@ import ( "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/integration-cli/cli" "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/cli/build/fakecontext" + "github.com/docker/docker/integration-cli/cli/build/fakegit" + "github.com/docker/docker/integration-cli/cli/build/fakestorage" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/stringutils" "github.com/docker/docker/pkg/testutil" icmd "github.com/docker/docker/pkg/testutil/cmd" "github.com/go-check/check" + "github.com/opencontainers/go-digest" ) func (s *DockerSuite) TestBuildJSONEmptyRun(c *check.C) { @@ -143,8 +147,8 @@ func (s *DockerSuite) TestBuildEnvironmentReplacementWorkdir(c *check.C) { func (s *DockerSuite) TestBuildEnvironmentReplacementAddCopy(c *check.C) { name := "testbuildenvironmentreplacement" - buildImageSuccessfully(c, name, withBuildContext(c, - 
withFile("Dockerfile", ` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` FROM `+minimalBaseImage()+` ENV baz foo ENV quux bar @@ -157,10 +161,10 @@ func (s *DockerSuite) TestBuildEnvironmentReplacementAddCopy(c *check.C) { ADD ${zzz:-${fee}} ${dot} COPY ${zzz:-${gee}} ${dot} `), - withFile("foo", "test1"), - withFile("bar", "test2"), - withFile("fff", "test3"), - withFile("ggg", "test4"), + build.WithFile("foo", "test1"), + build.WithFile("bar", "test2"), + build.WithFile("fff", "test3"), + build.WithFile("ggg", "test4"), )) } @@ -182,6 +186,14 @@ func (s *DockerSuite) TestBuildEnvironmentReplacementEnv(c *check.C) { RUN [ "$abc3" = '$foo' ] && (echo "$abc3" | grep -q foo) ENV abc4 "\$foo" RUN [ "$abc4" = '$foo' ] && (echo "$abc4" | grep -q foo) + ENV foo2="abc\def" + RUN [ "$foo2" = 'abc\def' ] + ENV foo3="abc\\def" + RUN [ "$foo3" = 'abc\def' ] + ENV foo4='abc\\def' + RUN [ "$foo4" = 'abc\\def' ] + ENV foo5='abc\def' + RUN [ "$foo5" = 'abc\def' ] `)) envResult := []string{} @@ -326,19 +338,16 @@ func (s *DockerSuite) TestBuildOnBuildCmdEntrypointJSON(c *check.C) { name1 := "onbuildcmd" name2 := "onbuildgenerated" - buildImageSuccessfully(c, name1, build.WithDockerfile(` + cli.BuildCmd(c, name1, build.WithDockerfile(` FROM busybox ONBUILD CMD ["hello world"] ONBUILD ENTRYPOINT ["echo"] ONBUILD RUN ["true"]`)) - buildImageSuccessfully(c, name2, build.WithDockerfile(fmt.Sprintf(`FROM %s`, name1))) - - out, _ := dockerCmd(c, "run", name2) - if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) { - c.Fatalf("did not get echo output from onbuild. Got: %q", out) - } + cli.BuildCmd(c, name2, build.WithDockerfile(fmt.Sprintf(`FROM %s`, name1))) + result := cli.DockerCmd(c, "run", name2) + result.Assert(c, icmd.Expected{Out: "hello world"}) } // FIXME(vdemeester) why we disabled cache here ? 
@@ -362,10 +371,10 @@ ONBUILD ENTRYPOINT ["echo"]`)) func (s *DockerSuite) TestBuildCacheAdd(c *check.C) { testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet name := "testbuildtwoimageswithadd" - server := fakeStorage(c, map[string]string{ + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{ "robots.txt": "hello", "index.html": "world", - }) + })) defer server.Close() cli.BuildCmd(c, name, build.WithDockerfile(fmt.Sprintf(`FROM scratch @@ -386,9 +395,9 @@ func (s *DockerSuite) TestBuildLastModified(c *check.C) { name := "testbuildlastmodified" - server := fakeStorage(c, map[string]string{ + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{ "file": "hello", - }) + })) defer server.Close() var out, out2 string @@ -397,15 +406,15 @@ func (s *DockerSuite) TestBuildLastModified(c *check.C) { ADD %s/file /` dockerfile := fmt.Sprintf(dFmt, server.URL()) - buildImageSuccessfully(c, name, build.WithoutCache, build.WithDockerfile(dockerfile)) - out, _ = dockerCmd(c, "run", name, "ls", "-le", "/file") + cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile)) + out = cli.DockerCmd(c, "run", name, "ls", "-le", "/file").Combined() // Build it again and make sure the mtime of the file didn't change. 
// Wait a few seconds to make sure the time changed enough to notice time.Sleep(2 * time.Second) - buildImageSuccessfully(c, name, build.WithoutCache, build.WithDockerfile(dockerfile)) - out2, _ = dockerCmd(c, "run", name, "ls", "-le", "/file") + cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile)) + out2 = cli.DockerCmd(c, "run", name, "ls", "-le", "/file").Combined() if out != out2 { c.Fatalf("MTime changed:\nOrigin:%s\nNew:%s", out, out2) @@ -413,14 +422,14 @@ ADD %s/file /` // Now 'touch' the file and make sure the timestamp DID change this time // Create a new fakeStorage instead of just using Add() to help windows - server = fakeStorage(c, map[string]string{ + server = fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{ "file": "hello", - }) + })) defer server.Close() dockerfile = fmt.Sprintf(dFmt, server.URL()) - buildImageSuccessfully(c, name, build.WithoutCache, build.WithDockerfile(dockerfile)) - out2, _ = dockerCmd(c, "run", name, "ls", "-le", "/file") + cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile)) + out2 = cli.DockerCmd(c, "run", name, "ls", "-le", "/file").Combined() if out == out2 { c.Fatalf("MTime didn't change:\nOrigin:%s\nNew:%s", out, out2) @@ -434,20 +443,19 @@ ADD %s/file /` func (s *DockerSuite) TestBuildModifyFileInFolder(c *check.C) { name := "testbuildmodifyfileinfolder" - ctx := fakeContext(c, `FROM busybox + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox RUN ["mkdir", "/test"] -ADD folder/file /test/changetarget`, - map[string]string{}) +ADD folder/file /test/changetarget`)) defer ctx.Close() if err := ctx.Add("folder/file", "first"); err != nil { c.Fatal(err) } - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) id1 := getIDByName(c, name) if err := ctx.Add("folder/file", "second"); err != nil { c.Fatal(err) } - buildImageSuccessfully(c, name, 
withExternalBuildContext(ctx)) + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) id2 := getIDByName(c, name) if id1 == id2 { c.Fatal("cache was used even though file contents in folder was changed") @@ -456,8 +464,8 @@ ADD folder/file /test/changetarget`, func (s *DockerSuite) TestBuildAddSingleFileToRoot(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - buildImageSuccessfully(c, "testaddimg", withBuildContext(c, - withFile("Dockerfile", fmt.Sprintf(`FROM busybox + buildImageSuccessfully(c, "testaddimg", build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists @@ -466,22 +474,23 @@ ADD test_file / RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)), - withFile("test_file", "test1"))) + build.WithFile("test_file", "test1"))) } // Issue #3960: "ADD src ." 
hangs func (s *DockerSuite) TestBuildAddSingleFileToWorkdir(c *check.C) { name := "testaddsinglefiletoworkdir" - ctx := fakeContext(c, `FROM busybox -ADD test_file .`, - map[string]string{ + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile( + `FROM busybox + ADD test_file .`), + fakecontext.WithFiles(map[string]string{ "test_file": "test1", - }) + })) defer ctx.Close() errChan := make(chan error) go func() { - errChan <- buildImage(name, withExternalBuildContext(ctx)).Error + errChan <- buildImage(name, build.WithExternalBuildContext(ctx)).Error close(errChan) }() select { @@ -494,8 +503,8 @@ ADD test_file .`, func (s *DockerSuite) TestBuildAddSingleFileToExistDir(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - buildImageSuccessfully(c, "testaddsinglefiletoexistdir", withBuildContext(c, - withFile("Dockerfile", `FROM busybox + cli.BuildCmd(c, "testaddsinglefiletoexistdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists @@ -505,18 +514,18 @@ ADD test_file /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), - withFile("test_file", "test1"))) + build.WithFile("test_file", "test1"))) } func (s *DockerSuite) TestBuildCopyAddMultipleFiles(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - server := fakeStorage(c, map[string]string{ + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{ "robots.txt": "hello", - }) + })) defer server.Close() - buildImageSuccessfully(c, "testcopymultiplefilestofile", withBuildContext(c, - withFile("Dockerfile", fmt.Sprintf(`FROM busybox + cli.BuildCmd(c, "testcopymultiplefilestofile", build.WithBuildContext(c, + 
build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists @@ -532,11 +541,11 @@ RUN [ $(ls -l /exists/test_file4 | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/robots.txt | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] `, server.URL())), - withFile("test_file1", "test1"), - withFile("test_file2", "test2"), - withFile("test_file3", "test3"), - withFile("test_file3", "test3"), - withFile("test_file4", "test4"))) + build.WithFile("test_file1", "test1"), + build.WithFile("test_file2", "test2"), + build.WithFile("test_file3", "test3"), + build.WithFile("test_file3", "test3"), + build.WithFile("test_file4", "test4"))) } // These tests are mainly for user namespaces to verify that new directories @@ -550,13 +559,13 @@ func (s *DockerSuite) TestBuildUsernamespaceValidateRemappedRoot(c *check.C) { } name := "testbuildusernamespacevalidateremappedroot" for _, tc := range testCases { - buildImageSuccessfully(c, name, withBuildContext(c, - withFile("Dockerfile", fmt.Sprintf(`FROM busybox + cli.BuildCmd(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox %s RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, tc)), - withFile("test_dir/test_file", "test file"))) + build.WithFile("test_dir/test_file", "test file"))) - dockerCmd(c, "rmi", name) + cli.DockerCmd(c, "rmi", name) } } @@ -565,8 +574,8 @@ func (s *DockerSuite) TestBuildAddAndCopyFileWithWhitespace(c *check.C) { name := "testaddfilewithwhitespace" for _, command := range []string{"ADD", "COPY"} { - buildImageSuccessfully(c, name, withBuildContext(c, - withFile("Dockerfile", fmt.Sprintf(`FROM busybox + cli.BuildCmd(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox RUN mkdir "/test dir" RUN mkdir "/test_dir" %s 
[ "test file1", "/test_file1" ] @@ -581,15 +590,15 @@ RUN [ $(cat "/test file3") = 'test3' ] RUN [ $(cat "/test_dir/test_file4") = 'test4' ] RUN [ $(cat "/test dir/test_file5") = 'test5' ] RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, command, command, command, command, command, command)), - withFile("test file1", "test1"), - withFile("test_file2", "test2"), - withFile("test file3", "test3"), - withFile("test dir/test_file4", "test4"), - withFile("test_dir/test_file5", "test5"), - withFile("test dir/test_file6", "test6"), + build.WithFile("test file1", "test1"), + build.WithFile("test_file2", "test2"), + build.WithFile("test file3", "test3"), + build.WithFile("test dir/test_file4", "test4"), + build.WithFile("test_dir/test_file5", "test5"), + build.WithFile("test dir/test_file6", "test6"), )) - dockerCmd(c, "rmi", name) + cli.DockerCmd(c, "rmi", name) } } @@ -612,26 +621,26 @@ RUN find "test5" "C:/test dir/test_file5" RUN find "test6" "C:/test dir/test_file6"` name := "testcopyfilewithwhitespace" - buildImageSuccessfully(c, name, withBuildContext(c, - withFile("Dockerfile", dockerfile), - withFile("test file1", "test1"), - withFile("test_file2", "test2"), - withFile("test file3", "test3"), - withFile("test dir/test_file4", "test4"), - withFile("test_dir/test_file5", "test5"), - withFile("test dir/test_file6", "test6"), + cli.BuildCmd(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("test file1", "test1"), + build.WithFile("test_file2", "test2"), + build.WithFile("test file3", "test3"), + build.WithFile("test dir/test_file4", "test4"), + build.WithFile("test_dir/test_file5", "test5"), + build.WithFile("test dir/test_file6", "test6"), )) } func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) { name := "testcopywildcard" - server := fakeStorage(c, map[string]string{ + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{ "robots.txt": "hello", "index.html": "world", - }) + })) defer 
server.Close() - ctx := fakeContext(c, fmt.Sprintf(`FROM busybox + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM busybox COPY file*.txt /tmp/ RUN ls /tmp/file1.txt /tmp/file2.txt RUN [ "mkdir", "/tmp1" ] @@ -640,21 +649,21 @@ func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) { RUN [ "mkdir", "/tmp2" ] ADD dir/*dir %s/robots.txt /tmp2/ RUN ls /tmp2/nest_nest_file /tmp2/robots.txt - `, server.URL()), - map[string]string{ + `, server.URL())), + fakecontext.WithFiles(map[string]string{ "file1.txt": "test1", "file2.txt": "test2", "dir/nested_file": "nested file", "dir/nested_dir/nest_nest_file": "2 times nested", "dirt": "dirty", - }) + })) defer ctx.Close() - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) id1 := getIDByName(c, name) // Now make sure we use a cache the 2nd time - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) id2 := getIDByName(c, name) if id1 != id2 { @@ -674,25 +683,25 @@ func (s *DockerSuite) TestBuildCopyWildcardInName(c *check.C) { // say which OSs this works on or not. 
testRequires(c, DaemonIsLinux, UnixCli) - buildImageSuccessfully(c, "testcopywildcardinname", withBuildContext(c, - withFile("Dockerfile", `FROM busybox + buildImageSuccessfully(c, "testcopywildcardinname", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox COPY *.txt /tmp/ RUN [ "$(cat /tmp/\*.txt)" = 'hi there' ] `), - withFile("*.txt", "hi there"), + build.WithFile("*.txt", "hi there"), )) } func (s *DockerSuite) TestBuildCopyWildcardCache(c *check.C) { name := "testcopywildcardcache" - ctx := fakeContext(c, `FROM busybox - COPY file1.txt /tmp/`, - map[string]string{ + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox + COPY file1.txt /tmp/`), + fakecontext.WithFiles(map[string]string{ "file1.txt": "test1", - }) + })) defer ctx.Close() - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) id1 := getIDByName(c, name) // Now make sure we use a cache the 2nd time even with wild cards. 
@@ -700,7 +709,7 @@ func (s *DockerSuite) TestBuildCopyWildcardCache(c *check.C) { ctx.Add("Dockerfile", `FROM busybox COPY file*.txt /tmp/`) - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) id2 := getIDByName(c, name) if id1 != id2 { @@ -711,8 +720,8 @@ func (s *DockerSuite) TestBuildCopyWildcardCache(c *check.C) { func (s *DockerSuite) TestBuildAddSingleFileToNonExistingDir(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - buildImageSuccessfully(c, "testaddsinglefiletononexistingdir", withBuildContext(c, - withFile("Dockerfile", `FROM busybox + buildImageSuccessfully(c, "testaddsinglefiletononexistingdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists @@ -721,13 +730,13 @@ ADD test_file /test_dir/ RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), - withFile("test_file", "test1"))) + build.WithFile("test_file", "test1"))) } func (s *DockerSuite) TestBuildAddDirContentToRoot(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - buildImageSuccessfully(c, "testadddircontenttoroot", withBuildContext(c, - withFile("Dockerfile", `FROM busybox + buildImageSuccessfully(c, "testadddircontenttoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists @@ -735,13 +744,13 @@ RUN chown dockerio.dockerio exists ADD test_dir / RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), - withFile("test_dir/test_file", 
"test1"))) + build.WithFile("test_dir/test_file", "test1"))) } func (s *DockerSuite) TestBuildAddDirContentToExistingDir(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - buildImageSuccessfully(c, "testadddircontenttoexistingdir", withBuildContext(c, - withFile("Dockerfile", `FROM busybox + buildImageSuccessfully(c, "testadddircontenttoexistingdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists @@ -751,13 +760,13 @@ ADD test_dir/ /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`), - withFile("test_dir/test_file", "test1"))) + build.WithFile("test_dir/test_file", "test1"))) } func (s *DockerSuite) TestBuildAddWholeDirToRoot(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - buildImageSuccessfully(c, "testaddwholedirtoroot", withBuildContext(c, - withFile("Dockerfile", fmt.Sprintf(`FROM busybox + buildImageSuccessfully(c, "testaddwholedirtoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists @@ -768,39 +777,39 @@ RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)), - withFile("test_dir/test_file", "test1"))) + build.WithFile("test_dir/test_file", "test1"))) } // Testing #5941 : Having an etc directory in context conflicts with the /etc/mtab func (s *DockerSuite) 
TestBuildAddOrCopyEtcToRootShouldNotConflict(c *check.C) { - buildImageSuccessfully(c, "testaddetctoroot", withBuildContext(c, - withFile("Dockerfile", `FROM `+minimalBaseImage()+` + buildImageSuccessfully(c, "testaddetctoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM `+minimalBaseImage()+` ADD . /`), - withFile("etc/test_file", "test1"))) - buildImageSuccessfully(c, "testcopyetctoroot", withBuildContext(c, - withFile("Dockerfile", `FROM `+minimalBaseImage()+` + build.WithFile("etc/test_file", "test1"))) + buildImageSuccessfully(c, "testcopyetctoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM `+minimalBaseImage()+` COPY . /`), - withFile("etc/test_file", "test1"))) + build.WithFile("etc/test_file", "test1"))) } // Testing #9401 : Losing setuid flag after a ADD func (s *DockerSuite) TestBuildAddPreservesFilesSpecialBits(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - buildImageSuccessfully(c, "testaddetctoroot", withBuildContext(c, - withFile("Dockerfile", `FROM busybox + buildImageSuccessfully(c, "testaddetctoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox ADD suidbin /usr/bin/suidbin RUN chmod 4755 /usr/bin/suidbin RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ] ADD ./data/ / RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]`), - withFile("suidbin", "suidbin"), - withFile("/data/usr/test_file", "test1"))) + build.WithFile("suidbin", "suidbin"), + build.WithFile("/data/usr/test_file", "test1"))) } func (s *DockerSuite) TestBuildCopySingleFileToRoot(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - buildImageSuccessfully(c, "testcopysinglefiletoroot", withBuildContext(c, - withFile("Dockerfile", fmt.Sprintf(`FROM busybox + buildImageSuccessfully(c, "testcopysinglefiletoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> 
/etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists @@ -809,22 +818,22 @@ COPY test_file / RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)), - withFile("test_file", "test1"))) + build.WithFile("test_file", "test1"))) } // Issue #3960: "ADD src ." hangs - adapted for COPY func (s *DockerSuite) TestBuildCopySingleFileToWorkdir(c *check.C) { name := "testcopysinglefiletoworkdir" - ctx := fakeContext(c, `FROM busybox -COPY test_file .`, - map[string]string{ + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox +COPY test_file .`), + fakecontext.WithFiles(map[string]string{ "test_file": "test1", - }) + })) defer ctx.Close() errChan := make(chan error) go func() { - errChan <- buildImage(name, withExternalBuildContext(ctx)).Error + errChan <- buildImage(name, build.WithExternalBuildContext(ctx)).Error close(errChan) }() select { @@ -837,8 +846,8 @@ COPY test_file .`, func (s *DockerSuite) TestBuildCopySingleFileToExistDir(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - buildImageSuccessfully(c, "testcopysinglefiletoexistdir", withBuildContext(c, - withFile("Dockerfile", `FROM busybox + buildImageSuccessfully(c, "testcopysinglefiletoexistdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists @@ -848,13 +857,13 @@ COPY test_file /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), - withFile("test_file", "test1"))) + build.WithFile("test_file", "test1"))) } func (s *DockerSuite) TestBuildCopySingleFileToNonExistDir(c 
*check.C) { testRequires(c, DaemonIsLinux) // Linux specific - buildImageSuccessfully(c, "testcopysinglefiletononexistdir", withBuildContext(c, - withFile("Dockerfile", `FROM busybox + buildImageSuccessfully(c, "testcopysinglefiletononexistdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists @@ -863,13 +872,13 @@ COPY test_file /test_dir/ RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), - withFile("test_file", "test1"))) + build.WithFile("test_file", "test1"))) } func (s *DockerSuite) TestBuildCopyDirContentToRoot(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - buildImageSuccessfully(c, "testcopydircontenttoroot", withBuildContext(c, - withFile("Dockerfile", `FROM busybox + buildImageSuccessfully(c, "testcopydircontenttoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists @@ -877,13 +886,13 @@ RUN chown dockerio.dockerio exists COPY test_dir / RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), - withFile("test_dir/test_file", "test1"))) + build.WithFile("test_dir/test_file", "test1"))) } func (s *DockerSuite) TestBuildCopyDirContentToExistDir(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - buildImageSuccessfully(c, "testcopydircontenttoexistdir", withBuildContext(c, - withFile("Dockerfile", `FROM busybox + buildImageSuccessfully(c, "testcopydircontenttoexistdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox RUN echo 
'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists @@ -893,13 +902,13 @@ COPY test_dir/ /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`), - withFile("test_dir/test_file", "test1"))) + build.WithFile("test_dir/test_file", "test1"))) } func (s *DockerSuite) TestBuildCopyWholeDirToRoot(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - buildImageSuccessfully(c, "testcopywholedirtoroot", withBuildContext(c, - withFile("Dockerfile", fmt.Sprintf(`FROM busybox + buildImageSuccessfully(c, "testcopywholedirtoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists @@ -910,7 +919,7 @@ RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)), - withFile("test_dir/test_file", "test1"))) + build.WithFile("test_dir/test_file", "test1"))) } func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) { @@ -925,7 +934,7 @@ func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) { var ( name = "test-link-absolute" ) - ctx := fakeContext(c, dockerfile, nil) + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile)) defer ctx.Close() tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-") @@ -986,7 +995,7 @@ func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) { c.Fatal(err) } - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + buildImageSuccessfully(c, name, 
build.WithExternalBuildContext(ctx)) if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) } @@ -1017,7 +1026,7 @@ func (s *DockerSuite) TestBuildAddBadLinksVolume(c *check.C) { dockerfile = fmt.Sprintf(dockerfileTemplate, tempDir) nonExistingFile := filepath.Join(tempDir, targetFile) - ctx := fakeContext(c, dockerfile, nil) + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile)) defer ctx.Close() fooPath := filepath.Join(ctx.Dir, targetFile) @@ -1031,7 +1040,7 @@ func (s *DockerSuite) TestBuildAddBadLinksVolume(c *check.C) { c.Fatal(err) } - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) } @@ -1045,7 +1054,10 @@ func (s *DockerSuite) TestBuildWithInaccessibleFilesInContext(c *check.C) { { name := "testbuildinaccessiblefiles" - ctx := fakeContext(c, "FROM scratch\nADD . /foo/", map[string]string{"fileWithoutReadAccess": "foo"}) + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile("FROM scratch\nADD . 
/foo/"), + fakecontext.WithFiles(map[string]string{"fileWithoutReadAccess": "foo"}), + ) defer ctx.Close() // This is used to ensure we detect inaccessible files early during build in the cli client pathToFileWithoutReadAccess := filepath.Join(ctx.Dir, "fileWithoutReadAccess") @@ -1069,13 +1081,16 @@ func (s *DockerSuite) TestBuildWithInaccessibleFilesInContext(c *check.C) { c.Fatalf("output should've contained the string: no permission to read from but contained: %s", result.Combined()) } - if !strings.Contains(result.Combined(), "Error checking context") { - c.Fatalf("output should've contained the string: Error checking context") + if !strings.Contains(result.Combined(), "error checking context") { + c.Fatalf("output should've contained the string: error checking context") } } { name := "testbuildinaccessibledirectory" - ctx := fakeContext(c, "FROM scratch\nADD . /foo/", map[string]string{"directoryWeCantStat/bar": "foo"}) + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile("FROM scratch\nADD . /foo/"), + fakecontext.WithFiles(map[string]string{"directoryWeCantStat/bar": "foo"}), + ) defer ctx.Close() // This is used to ensure we detect inaccessible directories early during build in the cli client pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") @@ -1104,14 +1119,14 @@ func (s *DockerSuite) TestBuildWithInaccessibleFilesInContext(c *check.C) { c.Fatalf("output should've contained the string: can't access %s", result.Combined()) } - if !strings.Contains(result.Combined(), "Error checking context") { - c.Fatalf("output should've contained the string: Error checking context\ngot:%s", result.Combined()) + if !strings.Contains(result.Combined(), "error checking context") { + c.Fatalf("output should've contained the string: error checking context\ngot:%s", result.Combined()) } } { name := "testlinksok" - ctx := fakeContext(c, "FROM scratch\nADD . 
/foo/", nil) + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile("FROM scratch\nADD . /foo/")) defer ctx.Close() target := "../../../../../../../../../../../../../../../../../../../azA" @@ -1121,15 +1136,17 @@ func (s *DockerSuite) TestBuildWithInaccessibleFilesInContext(c *check.C) { defer os.Remove(target) // This is used to ensure we don't follow links when checking if everything in the context is accessible // This test doesn't require that we run commands as an unprivileged user - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) } { name := "testbuildignoredinaccessible" - ctx := fakeContext(c, "FROM scratch\nADD . /foo/", - map[string]string{ + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile("FROM scratch\nADD . /foo/"), + fakecontext.WithFiles(map[string]string{ "directoryWeCantStat/bar": "foo", ".dockerignore": "directoryWeCantStat", - }) + }), + ) defer ctx.Close() // This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") @@ -1157,8 +1174,8 @@ func (s *DockerSuite) TestBuildForceRm(c *check.C) { containerCountBefore := getContainerCount(c) name := "testbuildforcerm" - buildImage(name, cli.WithFlags("--force-rm"), withBuildContext(c, - withFile("Dockerfile", `FROM `+minimalBaseImage()+` + buildImage(name, cli.WithFlags("--force-rm"), build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM `+minimalBaseImage()+` RUN true RUN thiswillfail`))).Assert(c, icmd.Expected{ ExitCode: 1, @@ -1335,8 +1352,8 @@ func (s *DockerSuite) TestBuildWindowsAddCopyPathProcessing(c *check.C) { // support backslash such as .\\ being equivalent to ./ and c:\\ being // equivalent to c:/. This is not currently (nor ever has been) supported // by docker on the Windows platform. 
- buildImageSuccessfully(c, "testbuildwindowsaddcopypathprocessing", withBuildContext(c, - withFile("Dockerfile", `FROM busybox + buildImageSuccessfully(c, "testbuildwindowsaddcopypathprocessing", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox # No trailing slash on COPY/ADD # Results in dir being changed to a file WORKDIR /wc1 @@ -1355,10 +1372,10 @@ func (s *DockerSuite) TestBuildWindowsAddCopyPathProcessing(c *check.C) { RUN sh -c "[ $(cat c:/wd1/wd1) = 'hellowd1' ]" RUN sh -c "[ $(cat c:/wd2/wd2) = 'worldwd2' ]" `), - withFile("wc1", "hellowc1"), - withFile("wc2", "worldwc2"), - withFile("wd1", "hellowd1"), - withFile("wd2", "worldwd2"), + build.WithFile("wc1", "hellowc1"), + build.WithFile("wc2", "worldwc2"), + build.WithFile("wd1", "hellowd1"), + build.WithFile("wd2", "worldwd2"), )) } @@ -1394,8 +1411,8 @@ func (s *DockerSuite) TestBuildRelativeCopy(c *check.C) { expected = `/test1/test2` } - buildImageSuccessfully(c, "testbuildrelativecopy", withBuildContext(c, - withFile("Dockerfile", `FROM busybox + buildImageSuccessfully(c, "testbuildrelativecopy", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox WORKDIR /test1 WORKDIR test2 RUN sh -c "[ "$PWD" = '`+expected+`' ]" @@ -1417,7 +1434,7 @@ func (s *DockerSuite) TestBuildRelativeCopy(c *check.C) { COPY foo ../ RUN sh -c "[ $(cat /test5/foo) = 'hello' ]" `), - withFile("foo", "hello"), + build.WithFile("foo", "hello"), )) } @@ -1695,7 +1712,7 @@ func (s *DockerSuite) TestBuildEntrypoint(c *check.C) { } // #6445 ensure ONBUILD triggers aren't committed to grandchildren -func (s *DockerSuite) TestBuildOnBuildLimitedInheritence(c *check.C) { +func (s *DockerSuite) TestBuildOnBuildLimitedInheritance(c *check.C) { buildImageSuccessfully(c, "testonbuildtrigger1", build.WithDockerfile(` FROM busybox RUN echo "GRANDPARENT" @@ -1741,12 +1758,14 @@ func (s *DockerSuite) TestBuildConditionalCache(c *check.C) { dockerfile := ` FROM busybox ADD foo /tmp/` - ctx := 
fakeContext(c, dockerfile, map[string]string{ - "foo": "hello", - }) + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "hello", + })) defer ctx.Close() - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) id1 := getIDByName(c, name) if err := ctx.Add("foo", "bye"); err != nil { @@ -1754,42 +1773,48 @@ func (s *DockerSuite) TestBuildConditionalCache(c *check.C) { } // Updating a file should invalidate the cache - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) id2 := getIDByName(c, name) if id2 == id1 { c.Fatal("Should not have used the cache") } - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) id3 := getIDByName(c, name) if id3 != id2 { c.Fatal("Should have used the cache") } } -// FIXME(vdemeester) this really seems to test the same thing as before func (s *DockerSuite) TestBuildAddMultipleLocalFileWithAndWithoutCache(c *check.C) { name := "testbuildaddmultiplelocalfilewithcache" - dockerfile := ` + baseName := name + "-base" + + cli.BuildCmd(c, baseName, build.WithDockerfile(` FROM busybox + ENTRYPOINT ["/bin/sh"] + `)) + + dockerfile := ` + FROM testbuildaddmultiplelocalfilewithcache-base MAINTAINER dockerio ADD foo Dockerfile /usr/lib/bla/ RUN sh -c "[ $(cat /usr/lib/bla/foo) = "hello" ]"` - ctx := fakeContext(c, dockerfile, map[string]string{ + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{ "foo": "hello", - }) + })) defer ctx.Close() - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) id1 := getIDByName(c, name) - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + result2 := cli.BuildCmd(c, name, 
build.WithExternalBuildContext(ctx)) id2 := getIDByName(c, name) - buildImageSuccessfully(c, name, build.WithoutCache, withExternalBuildContext(ctx)) + result3 := cli.BuildCmd(c, name, build.WithoutCache, build.WithExternalBuildContext(ctx)) id3 := getIDByName(c, name) if id1 != id2 { - c.Fatal("The cache should have been used but hasn't.") + c.Fatalf("The cache should have been used but hasn't: %s", result2.Stdout()) } if id1 == id3 { - c.Fatal("The cache should have been invalided but hasn't.") + c.Fatalf("The cache should have been invalided but hasn't: %s", result3.Stdout()) } } @@ -1800,17 +1825,17 @@ func (s *DockerSuite) TestBuildCopyDirButNotFile(c *check.C) { dockerfile := ` FROM ` + minimalBaseImage() + ` COPY dir /tmp/` - ctx := fakeContext(c, dockerfile, map[string]string{ + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{ "dir/foo": "hello", - }) + })) defer ctx.Close() - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) id1 := getIDByName(c, name) // Check that adding file with similar name doesn't mess with cache if err := ctx.Add("dir_file", "hello2"); err != nil { c.Fatal(err) } - buildImageSuccessfully(c, name2, withExternalBuildContext(ctx)) + cli.BuildCmd(c, name2, build.WithExternalBuildContext(ctx)) id2 := getIDByName(c, name2) if id1 != id2 { c.Fatal("The cache should have been used but wasn't") @@ -1826,17 +1851,17 @@ func (s *DockerSuite) TestBuildAddCurrentDirWithCache(c *check.C) { FROM ` + minimalBaseImage() + ` MAINTAINER dockerio ADD . 
/usr/lib/bla` - ctx := fakeContext(c, dockerfile, map[string]string{ + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{ "foo": "hello", - }) + })) defer ctx.Close() - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) id1 := getIDByName(c, name) // Check that adding file invalidate cache of "ADD ." if err := ctx.Add("bar", "hello2"); err != nil { c.Fatal(err) } - buildImageSuccessfully(c, name2, withExternalBuildContext(ctx)) + buildImageSuccessfully(c, name2, build.WithExternalBuildContext(ctx)) id2 := getIDByName(c, name2) if id1 == id2 { c.Fatal("The cache should have been invalided but hasn't.") @@ -1845,7 +1870,7 @@ func (s *DockerSuite) TestBuildAddCurrentDirWithCache(c *check.C) { if err := ctx.Add("foo", "hello1"); err != nil { c.Fatal(err) } - buildImageSuccessfully(c, name3, withExternalBuildContext(ctx)) + buildImageSuccessfully(c, name3, build.WithExternalBuildContext(ctx)) id3 := getIDByName(c, name3) if id2 == id3 { c.Fatal("The cache should have been invalided but hasn't.") @@ -1856,7 +1881,7 @@ func (s *DockerSuite) TestBuildAddCurrentDirWithCache(c *check.C) { if err := ctx.Add("foo", "hello1"); err != nil { c.Fatal(err) } - buildImageSuccessfully(c, name4, withExternalBuildContext(ctx)) + buildImageSuccessfully(c, name4, build.WithExternalBuildContext(ctx)) id4 := getIDByName(c, name4) if id3 != id4 { c.Fatal("The cache should have been used but hasn't.") @@ -1870,13 +1895,13 @@ func (s *DockerSuite) TestBuildAddCurrentDirWithoutCache(c *check.C) { FROM ` + minimalBaseImage() + ` MAINTAINER dockerio ADD . 
/usr/lib/bla` - ctx := fakeContext(c, dockerfile, map[string]string{ + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{ "foo": "hello", - }) + })) defer ctx.Close() - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) id1 := getIDByName(c, name) - buildImageSuccessfully(c, name, build.WithoutCache, withExternalBuildContext(ctx)) + buildImageSuccessfully(c, name, build.WithoutCache, build.WithExternalBuildContext(ctx)) id2 := getIDByName(c, name) if id1 == id2 { c.Fatal("The cache should have been invalided but hasn't.") @@ -1885,19 +1910,19 @@ func (s *DockerSuite) TestBuildAddCurrentDirWithoutCache(c *check.C) { func (s *DockerSuite) TestBuildAddRemoteFileWithAndWithoutCache(c *check.C) { name := "testbuildaddremotefilewithcache" - server := fakeStorage(c, map[string]string{ + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{ "baz": "hello", - }) + })) defer server.Close() dockerfile := fmt.Sprintf(`FROM `+minimalBaseImage()+` MAINTAINER dockerio ADD %s/baz /usr/lib/baz/quux`, server.URL()) - buildImageSuccessfully(c, name, build.WithDockerfile(dockerfile)) + cli.BuildCmd(c, name, build.WithDockerfile(dockerfile)) id1 := getIDByName(c, name) - buildImageSuccessfully(c, name, build.WithDockerfile(dockerfile)) + cli.BuildCmd(c, name, build.WithDockerfile(dockerfile)) id2 := getIDByName(c, name) - buildImageSuccessfully(c, name, build.WithoutCache, build.WithDockerfile(dockerfile)) + cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile)) id3 := getIDByName(c, name) if id1 != id2 { @@ -1914,17 +1939,17 @@ func (s *DockerSuite) TestBuildAddRemoteFileMTime(c *check.C) { name3 := name + "3" files := map[string]string{"baz": "hello"} - server := fakeStorage(c, files) + server := fakestorage.New(c, "", fakecontext.WithFiles(files)) defer server.Close() - ctx := fakeContext(c, 
fmt.Sprintf(`FROM `+minimalBaseImage()+` + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM `+minimalBaseImage()+` MAINTAINER dockerio - ADD %s/baz /usr/lib/baz/quux`, server.URL()), nil) + ADD %s/baz /usr/lib/baz/quux`, server.URL()))) defer ctx.Close() - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) id1 := getIDByName(c, name) - buildImageSuccessfully(c, name2, withExternalBuildContext(ctx)) + cli.BuildCmd(c, name2, build.WithExternalBuildContext(ctx)) id2 := getIDByName(c, name2) if id1 != id2 { c.Fatal("The cache should have been used but wasn't - #1") @@ -1936,14 +1961,14 @@ func (s *DockerSuite) TestBuildAddRemoteFileMTime(c *check.C) { // allow some time for clock to pass as mtime precision is only 1s time.Sleep(2 * time.Second) - server2 := fakeStorage(c, files) + server2 := fakestorage.New(c, "", fakecontext.WithFiles(files)) defer server2.Close() - ctx2 := fakeContext(c, fmt.Sprintf(`FROM `+minimalBaseImage()+` + ctx2 := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM `+minimalBaseImage()+` MAINTAINER dockerio - ADD %s/baz /usr/lib/baz/quux`, server2.URL()), nil) + ADD %s/baz /usr/lib/baz/quux`, server2.URL()))) defer ctx2.Close() - buildImageSuccessfully(c, name3, withExternalBuildContext(ctx2)) + cli.BuildCmd(c, name3, build.WithExternalBuildContext(ctx2)) id3 := getIDByName(c, name3) if id1 != id3 { c.Fatal("The cache should have been used but wasn't") @@ -1953,24 +1978,24 @@ func (s *DockerSuite) TestBuildAddRemoteFileMTime(c *check.C) { // FIXME(vdemeester) this really seems to test the same thing as before (combined) func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithAndWithoutCache(c *check.C) { name := "testbuildaddlocalandremotefilewithcache" - server := fakeStorage(c, map[string]string{ + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{ "baz": "hello", - }) + })) defer server.Close() - ctx := 
fakeContext(c, fmt.Sprintf(`FROM `+minimalBaseImage()+` + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM `+minimalBaseImage()+` MAINTAINER dockerio ADD foo /usr/lib/bla/bar - ADD %s/baz /usr/lib/baz/quux`, server.URL()), - map[string]string{ + ADD %s/baz /usr/lib/baz/quux`, server.URL())), + fakecontext.WithFiles(map[string]string{ "foo": "hello world", - }) + })) defer ctx.Close() - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) id1 := getIDByName(c, name) - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) id2 := getIDByName(c, name) - buildImageSuccessfully(c, name, build.WithoutCache, withExternalBuildContext(ctx)) + buildImageSuccessfully(c, name, build.WithoutCache, build.WithExternalBuildContext(ctx)) id3 := getIDByName(c, name) if id1 != id2 { c.Fatal("The cache should have been used but hasn't.") @@ -1981,13 +2006,13 @@ func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithAndWithoutCache(c *chec } func testContextTar(c *check.C, compression archive.Compression) { - ctx := fakeContext(c, - `FROM busybox + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(`FROM busybox ADD foo /foo -CMD ["cat", "/foo"]`, - map[string]string{ +CMD ["cat", "/foo"]`), + fakecontext.WithFiles(map[string]string{ "foo": "bar", - }, + }), ) defer ctx.Close() context, err := archive.Tar(ctx.Dir, compression) @@ -1996,10 +2021,7 @@ CMD ["cat", "/foo"]`, } name := "contexttar" - icmd.RunCmd(icmd.Cmd{ - Command: []string{dockerBinary, "build", "-t", name, "-"}, - Stdin: context, - }).Assert(c, icmd.Success) + cli.BuildCmd(c, name, build.WithStdinContext(context)) } func (s *DockerSuite) TestBuildContextTarGzip(c *check.C) { @@ -2130,12 +2152,12 @@ func (s *DockerSuite) TestBuildEntrypointRunCleanup(c *check.C) { buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox 
RUN echo "hello"`)) - buildImageSuccessfully(c, name, withBuildContext(c, - withFile("Dockerfile", `FROM busybox + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox RUN echo "hello" ADD foo /foo ENTRYPOINT ["/bin/echo"]`), - withFile("foo", "hello"))) + build.WithFile("foo", "hello"))) res := inspectField(c, name, "Config.Cmd") // Cmd must be cleaned up @@ -2152,10 +2174,10 @@ func (s *DockerSuite) TestBuildAddFileNotFound(c *check.C) { expected = "foo: The system cannot find the file specified" } - buildImage(name, withBuildContext(c, - withFile("Dockerfile", `FROM `+minimalBaseImage()+` + buildImage(name, build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM `+minimalBaseImage()+` ADD foo /usr/local/bar`), - withFile("bar", "hello"))).Assert(c, icmd.Expected{ + build.WithFile("bar", "hello"))).Assert(c, icmd.Expected{ ExitCode: 1, Err: expected, }) @@ -2206,15 +2228,15 @@ func (s *DockerSuite) TestBuildAddToSymlinkDest(c *check.C) { makeLink = `mklink /D C:\bar C:\foo` } name := "testbuildaddtosymlinkdest" - buildImageSuccessfully(c, name, withBuildContext(c, - withFile("Dockerfile", ` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` FROM busybox RUN sh -c "mkdir /foo" RUN `+makeLink+` ADD foo /bar/ RUN sh -c "[ -f /bar/foo ]" RUN sh -c "[ -f /foo/foo ]"`), - withFile("foo", "hello"), + build.WithFile("foo", "hello"), )) } @@ -2253,8 +2275,8 @@ func (s *DockerSuite) TestBuildVerifyIntString(c *check.C) { func (s *DockerSuite) TestBuildDockerignore(c *check.C) { name := "testbuilddockerignore" - buildImageSuccessfully(c, name, withBuildContext(c, - withFile("Dockerfile", ` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` FROM busybox ADD . /bla RUN sh -c "[[ -f /bla/src/x.go ]]" @@ -2268,17 +2290,17 @@ func (s *DockerSuite) TestBuildDockerignore(c *check.C) { RUN sh -c "[[ ! -e v.cc ]]" RUN sh -c "[[ ! 
-e src/v.cc ]]" RUN sh -c "[[ ! -e src/_vendor/v.cc ]]"`), - withFile("Makefile", "all:"), - withFile(".git/HEAD", "ref: foo"), - withFile("src/x.go", "package main"), - withFile("src/_vendor/v.go", "package main"), - withFile("src/_vendor/v.cc", "package main"), - withFile("src/v.cc", "package main"), - withFile("v.cc", "package main"), - withFile("dir/foo", ""), - withFile(".gitignore", ""), - withFile("README.md", "readme"), - withFile(".dockerignore", ` + build.WithFile("Makefile", "all:"), + build.WithFile(".git/HEAD", "ref: foo"), + build.WithFile("src/x.go", "package main"), + build.WithFile("src/_vendor/v.go", "package main"), + build.WithFile("src/_vendor/v.cc", "package main"), + build.WithFile("src/v.cc", "package main"), + build.WithFile("v.cc", "package main"), + build.WithFile("dir/foo", ""), + build.WithFile(".gitignore", ""), + build.WithFile("README.md", "readme"), + build.WithFile(".dockerignore", ` .git pkg .gitignore @@ -2291,22 +2313,22 @@ dir`), func (s *DockerSuite) TestBuildDockerignoreCleanPaths(c *check.C) { name := "testbuilddockerignorecleanpaths" - buildImageSuccessfully(c, name, withBuildContext(c, - withFile("Dockerfile", ` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` FROM busybox ADD . /tmp/ RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (! 
ls /tmp/dir1/foo)"`), - withFile("foo", "foo"), - withFile("foo2", "foo2"), - withFile("dir1/foo", "foo in dir1"), - withFile(".dockerignore", "./foo\ndir1//foo\n./dir1/../foo2"), + build.WithFile("foo", "foo"), + build.WithFile("foo2", "foo2"), + build.WithFile("dir1/foo", "foo in dir1"), + build.WithFile(".dockerignore", "./foo\ndir1//foo\n./dir1/../foo2"), )) } func (s *DockerSuite) TestBuildDockerignoreExceptions(c *check.C) { name := "testbuilddockerignoreexceptions" - buildImageSuccessfully(c, name, withBuildContext(c, - withFile("Dockerfile", ` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` FROM busybox ADD . /bla RUN sh -c "[[ -f /bla/src/x.go ]]" @@ -2321,20 +2343,20 @@ func (s *DockerSuite) TestBuildDockerignoreExceptions(c *check.C) { RUN sh -c "[[ ! -e /bla/foo ]]" RUN sh -c "[[ ! -e /bla/.git ]]" RUN sh -c "[[ -e /bla/dir/a.cc ]]"`), - withFile("Makefile", "all:"), - withFile(".git/HEAD", "ref: foo"), - withFile("src/x.go", "package main"), - withFile("src/_vendor/v.go", "package main"), - withFile("dir/foo", ""), - withFile("dir/foo1", ""), - withFile("dir/dir/f1", ""), - withFile("dir/dir/foo", ""), - withFile("dir/e", ""), - withFile("dir/e-dir/foo", ""), - withFile(".gitignore", ""), - withFile("README.md", "readme"), - withFile("dir/a.cc", "hello"), - withFile(".dockerignore", ` + build.WithFile("Makefile", "all:"), + build.WithFile(".git/HEAD", "ref: foo"), + build.WithFile("src/x.go", "package main"), + build.WithFile("src/_vendor/v.go", "package main"), + build.WithFile("dir/foo", ""), + build.WithFile("dir/foo1", ""), + build.WithFile("dir/dir/f1", ""), + build.WithFile("dir/dir/foo", ""), + build.WithFile("dir/e", ""), + build.WithFile("dir/e-dir/foo", ""), + build.WithFile(".gitignore", ""), + build.WithFile("README.md", "readme"), + build.WithFile("dir/a.cc", "hello"), + build.WithFile(".dockerignore", ` .git pkg .gitignore @@ -2355,13 +2377,13 @@ func (s *DockerSuite) 
TestBuildDockerignoringDockerfile(c *check.C) { ADD . /tmp/ RUN sh -c "! ls /tmp/Dockerfile" RUN ls /tmp/.dockerignore` - buildImageSuccessfully(c, name, withBuildContext(c, - withFile("Dockerfile", dockerfile), - withFile(".dockerignore", "Dockerfile\n"), + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", "Dockerfile\n"), )) - buildImageSuccessfully(c, name, withBuildContext(c, - withFile("Dockerfile", dockerfile), - withFile(".dockerignore", "./Dockerfile\n"), + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", "./Dockerfile\n"), )) } @@ -2373,15 +2395,15 @@ func (s *DockerSuite) TestBuildDockerignoringRenamedDockerfile(c *check.C) { RUN ls /tmp/Dockerfile RUN sh -c "! ls /tmp/MyDockerfile" RUN ls /tmp/.dockerignore` - buildImageSuccessfully(c, name, cli.WithFlags("-f", "MyDockerfile"), withBuildContext(c, - withFile("Dockerfile", "Should not use me"), - withFile("MyDockerfile", dockerfile), - withFile(".dockerignore", "MyDockerfile\n"), + buildImageSuccessfully(c, name, cli.WithFlags("-f", "MyDockerfile"), build.WithBuildContext(c, + build.WithFile("Dockerfile", "Should not use me"), + build.WithFile("MyDockerfile", dockerfile), + build.WithFile(".dockerignore", "MyDockerfile\n"), )) - buildImageSuccessfully(c, name, cli.WithFlags("-f", "MyDockerfile"), withBuildContext(c, - withFile("Dockerfile", "Should not use me"), - withFile("MyDockerfile", dockerfile), - withFile(".dockerignore", "./MyDockerfile\n"), + buildImageSuccessfully(c, name, cli.WithFlags("-f", "MyDockerfile"), build.WithBuildContext(c, + build.WithFile("Dockerfile", "Should not use me"), + build.WithFile("MyDockerfile", dockerfile), + build.WithFile(".dockerignore", "./MyDockerfile\n"), )) } @@ -2392,9 +2414,9 @@ func (s *DockerSuite) TestBuildDockerignoringDockerignore(c *check.C) { ADD . /tmp/ RUN sh -c "! 
ls /tmp/.dockerignore" RUN ls /tmp/Dockerfile` - buildImageSuccessfully(c, name, withBuildContext(c, - withFile("Dockerfile", dockerfile), - withFile(".dockerignore", ".dockerignore\n"), + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", ".dockerignore\n"), )) } @@ -2403,16 +2425,17 @@ func (s *DockerSuite) TestBuildDockerignoreTouchDockerfile(c *check.C) { dockerfile := ` FROM busybox ADD . /tmp/` - ctx := fakeContext(c, dockerfile, map[string]string{ - "Dockerfile": dockerfile, - ".dockerignore": "Dockerfile\n", - }) + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + ".dockerignore": "Dockerfile\n", + })) defer ctx.Close() - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) id1 := getIDByName(c, name) - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) id2 := getIDByName(c, name) if id1 != id2 { c.Fatalf("Didn't use the cache - 1") @@ -2422,7 +2445,7 @@ func (s *DockerSuite) TestBuildDockerignoreTouchDockerfile(c *check.C) { if err := ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { c.Fatalf("Didn't add Dockerfile: %s", err) } - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) id2 = getIDByName(c, name) if id1 != id2 { c.Fatalf("Didn't use the cache - 2") @@ -2432,7 +2455,7 @@ func (s *DockerSuite) TestBuildDockerignoreTouchDockerfile(c *check.C) { if err := ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { c.Fatalf("Didn't add Dockerfile: %s", err) } - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) id2 = getIDByName(c, name) if id1 != id2 { c.Fatalf("Didn't use the cache - 3") @@ -2448,11 +2471,11 @@ 
func (s *DockerSuite) TestBuildDockerignoringWholeDir(c *check.C) { RUN sh -c "[[ ! -e /.gitignore ]]" RUN sh -c "[[ ! -e /Makefile ]]"` - buildImageSuccessfully(c, name, withBuildContext(c, - withFile("Dockerfile", dockerfile), - withFile(".dockerignore", "*\n"), - withFile("Makefile", "all:"), - withFile(".gitignore", ""), + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", "*\n"), + build.WithFile("Makefile", "all:"), + build.WithFile(".gitignore", ""), )) } @@ -2465,28 +2488,28 @@ func (s *DockerSuite) TestBuildDockerignoringOnlyDotfiles(c *check.C) { RUN sh -c "[[ ! -e /.gitignore ]]" RUN sh -c "[[ -f /Makefile ]]"` - buildImageSuccessfully(c, name, withBuildContext(c, - withFile("Dockerfile", dockerfile), - withFile(".dockerignore", ".*"), - withFile("Makefile", "all:"), - withFile(".gitignore", ""), + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", ".*"), + build.WithFile("Makefile", "all:"), + build.WithFile(".gitignore", ""), )) } func (s *DockerSuite) TestBuildDockerignoringBadExclusion(c *check.C) { name := "testbuilddockerignorebadexclusion" - buildImage(name, withBuildContext(c, - withFile("Dockerfile", ` + buildImage(name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` FROM busybox COPY . / RUN sh -c "[[ ! 
-e /.gitignore ]]" RUN sh -c "[[ -f /Makefile ]]"`), - withFile("Makefile", "all:"), - withFile(".gitignore", ""), - withFile(".dockerignore", "!\n"), + build.WithFile("Makefile", "all:"), + build.WithFile(".gitignore", ""), + build.WithFile(".dockerignore", "!\n"), )).Assert(c, icmd.Expected{ ExitCode: 1, - Err: "Error checking context: 'illegal exclusion pattern: \"!\"", + Err: "error checking context: 'illegal exclusion pattern: \"!\"", }) } @@ -2501,11 +2524,11 @@ func (s *DockerSuite) TestBuildDockerignoringWildTopDir(c *check.C) { // All of these should result in ignoring all files for _, variant := range []string{"**", "**/", "**/**", "*"} { - buildImageSuccessfully(c, "noname", withBuildContext(c, - withFile("Dockerfile", dockerfile), - withFile("file1", ""), - withFile("dir/file1", ""), - withFile(".dockerignore", variant), + buildImageSuccessfully(c, "noname", build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("file1", ""), + build.WithFile("dir/file1", ""), + build.WithFile(".dockerignore", variant), )) dockerCmd(c, "rmi", "noname") @@ -2553,25 +2576,25 @@ dir1/dir3/** **/dir5/file. 
` - buildImageSuccessfully(c, "noname", withBuildContext(c, - withFile("Dockerfile", dockerfile), - withFile(".dockerignore", dockerignore), - withFile("dir1/file0", ""), - withFile("dir1/dir2/file0", ""), - withFile("file1", ""), - withFile("dir1/file1", ""), - withFile("dir1/dir2/file1", ""), - withFile("dir1/file2", ""), - withFile("dir1/dir2/file2", ""), // remains - withFile("dir1/dir2/file4", ""), - withFile("dir1/dir2/file5", ""), - withFile("dir1/dir2/file6", ""), - withFile("dir1/dir3/file7", ""), - withFile("dir1/dir3/file8", ""), - withFile("dir1/dir4/file9", ""), - withFile("dir1/dir5/fileAA", ""), - withFile("dir1/dir5/fileAB", ""), - withFile("dir1/dir5/fileB", ""), + buildImageSuccessfully(c, "noname", build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", dockerignore), + build.WithFile("dir1/file0", ""), + build.WithFile("dir1/dir2/file0", ""), + build.WithFile("file1", ""), + build.WithFile("dir1/file1", ""), + build.WithFile("dir1/dir2/file1", ""), + build.WithFile("dir1/file2", ""), + build.WithFile("dir1/dir2/file2", ""), // remains + build.WithFile("dir1/dir2/file4", ""), + build.WithFile("dir1/dir2/file5", ""), + build.WithFile("dir1/dir2/file6", ""), + build.WithFile("dir1/dir3/file7", ""), + build.WithFile("dir1/dir3/file8", ""), + build.WithFile("dir1/dir4/file9", ""), + build.WithFile("dir1/dir5/fileAA", ""), + build.WithFile("dir1/dir5/fileAB", ""), + build.WithFile("dir1/dir5/fileB", ""), )) } @@ -2691,9 +2714,9 @@ ENV abc=def ENV ghi=$abc RUN [ "$ghi" = "def" ] ` - buildImageSuccessfully(c, name, withBuildContext(c, - withFile("Dockerfile", dockerfile), - withFile("hello/docker/world", "hello"), + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("hello/docker/world", "hello"), )) } @@ -2761,9 +2784,9 @@ ENV eee4 'foo' RUN [ "$eee1,$eee2,$eee3,$eee4" = 'foo,foo,foo,foo' ] ` - buildImageSuccessfully(c, name, 
withBuildContext(c, - withFile("Dockerfile", dockerfile), - withFile("hello/docker/world", "hello"), + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("hello/docker/world", "hello"), )) } @@ -2777,9 +2800,9 @@ RUN ["chmod","+x","/test"] RUN ["/test"] RUN [ "$(cat /testfile)" = 'test!' ]` - buildImageSuccessfully(c, name, withBuildContext(c, - withFile("Dockerfile", dockerfile), - withFile("test", "#!/bin/sh\necho 'test!' > /testfile"), + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("test", "#!/bin/sh\necho 'test!' > /testfile"), )) } @@ -2788,7 +2811,7 @@ func (s *DockerSuite) TestBuildAddTar(c *check.C) { testRequires(c, NotUserNamespace) name := "testbuildaddtar" - ctx := func() *FakeContext { + ctx := func() *fakecontext.Fake { dockerfile := ` FROM busybox ADD test.tar / @@ -2830,17 +2853,17 @@ RUN cat /existing-directory-trailing-slash/test/foo | grep Hi` if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { c.Fatalf("failed to open destination dockerfile: %v", err) } - return fakeContextFromDir(tmpDir) + return fakecontext.New(c, tmpDir) }() defer ctx.Close() - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) } func (s *DockerSuite) TestBuildAddBrokenTar(c *check.C) { name := "testbuildaddbrokentar" - ctx := func() *FakeContext { + ctx := func() *fakecontext.Fake { dockerfile := ` FROM busybox ADD test.tar /` @@ -2879,11 +2902,11 @@ ADD test.tar /` if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { c.Fatalf("failed to open destination dockerfile: %v", err) } - return fakeContextFromDir(tmpDir) + return fakecontext.New(c, tmpDir) }() defer ctx.Close() - buildImage(name, withExternalBuildContext(ctx)).Assert(c, icmd.Expected{ + 
buildImage(name, build.WithExternalBuildContext(ctx)).Assert(c, icmd.Expected{ ExitCode: 1, }) } @@ -2892,12 +2915,12 @@ func (s *DockerSuite) TestBuildAddNonTar(c *check.C) { name := "testbuildaddnontar" // Should not try to extract test.tar - buildImageSuccessfully(c, name, withBuildContext(c, - withFile("Dockerfile", ` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` FROM busybox ADD test.tar / RUN test -f /test.tar`), - withFile("test.tar", "not_a_tar_file"), + build.WithFile("test.tar", "not_a_tar_file"), )) } @@ -2907,7 +2930,7 @@ func (s *DockerSuite) TestBuildAddTarXz(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildaddtarxz" - ctx := func() *FakeContext { + ctx := func() *fakecontext.Fake { dockerfile := ` FROM busybox ADD test.tar.xz / @@ -2942,19 +2965,19 @@ func (s *DockerSuite) TestBuildAddTarXz(c *check.C) { if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { c.Fatalf("failed to open destination dockerfile: %v", err) } - return fakeContextFromDir(tmpDir) + return fakecontext.New(c, tmpDir) }() defer ctx.Close() - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) } func (s *DockerSuite) TestBuildAddTarXzGz(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildaddtarxzgz" - ctx := func() *FakeContext { + ctx := func() *fakecontext.Fake { dockerfile := ` FROM busybox ADD test.tar.xz.gz / @@ -2994,17 +3017,17 @@ func (s *DockerSuite) TestBuildAddTarXzGz(c *check.C) { if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { c.Fatalf("failed to open destination dockerfile: %v", err) } - return fakeContextFromDir(tmpDir) + return fakecontext.New(c, tmpDir) }() defer ctx.Close() - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) } 
func (s *DockerSuite) TestBuildFromGit(c *check.C) { name := "testbuildfromgit" - git := newFakeGit(c, "repo", map[string]string{ + git := fakegit.New(c, "repo", map[string]string{ "Dockerfile": `FROM busybox ADD first /first RUN [ -f /first ] @@ -3023,7 +3046,7 @@ func (s *DockerSuite) TestBuildFromGit(c *check.C) { func (s *DockerSuite) TestBuildFromGitWithContext(c *check.C) { name := "testbuildfromgit" - git := newFakeGit(c, "repo", map[string]string{ + git := fakegit.New(c, "repo", map[string]string{ "docker/Dockerfile": `FROM busybox ADD first /first RUN [ -f /first ] @@ -3040,9 +3063,9 @@ func (s *DockerSuite) TestBuildFromGitWithContext(c *check.C) { } } -func (s *DockerSuite) TestBuildFromGitwithF(c *check.C) { +func (s *DockerSuite) TestBuildFromGitWithF(c *check.C) { name := "testbuildfromgitwithf" - git := newFakeGit(c, "repo", map[string]string{ + git := fakegit.New(c, "repo", map[string]string{ "myApp/myDockerfile": `FROM busybox RUN echo hi from Dockerfile`, }, true) @@ -3075,12 +3098,12 @@ func (s *DockerSuite) TestBuildFromRemoteTarball(c *check.C) { c.Fatalf("failed to close tar archive: %v", err) } - server := fakeBinaryStorage(c, map[string]*bytes.Buffer{ + server := fakestorage.New(c, "", fakecontext.WithBinaryFiles(map[string]*bytes.Buffer{ "testT.tar": buffer, - }) + })) defer server.Close() - buildImageSuccessfully(c, name, build.WithContextPath(server.URL()+"/testT.tar")) + cli.BuildCmd(c, name, build.WithContextPath(server.URL()+"/testT.tar")) res := inspectField(c, name, "Author") if res != "docker" { @@ -3202,7 +3225,7 @@ func (s *DockerSuite) TestBuildCmdJSONNoShDashC(c *check.C) { } } -func (s *DockerSuite) TestBuildEntrypointCanBeOverridenByChild(c *check.C) { +func (s *DockerSuite) TestBuildEntrypointCanBeOverriddenByChild(c *check.C) { buildImageSuccessfully(c, "parent", build.WithDockerfile(` FROM busybox ENTRYPOINT exit 130 @@ -3222,7 +3245,7 @@ func (s *DockerSuite) TestBuildEntrypointCanBeOverridenByChild(c *check.C) { }) } 
-func (s *DockerSuite) TestBuildEntrypointCanBeOverridenByChildInspect(c *check.C) { +func (s *DockerSuite) TestBuildEntrypointCanBeOverriddenByChildInspect(c *check.C) { var ( name = "testbuildepinherit" name2 = "testbuildepinherit2" @@ -3398,16 +3421,16 @@ func (s *DockerSuite) TestBuildNotVerboseSuccess(c *check.C) { { Name: "quiet_build_ctx_success", BuildFunc: func(name string) *icmd.Result { - return buildImage(name, buildFlags, withBuildContext(c, - withFile("Dockerfile", "FROM busybox"), - withFile("quiet_build_success_fctx", "test"), + return buildImage(name, buildFlags, build.WithBuildContext(c, + build.WithFile("Dockerfile", "FROM busybox"), + build.WithFile("quiet_build_success_fctx", "test"), )) }, }, { Name: "quiet_build_git_success", BuildFunc: func(name string) *icmd.Result { - git := newFakeGit(c, "repo", map[string]string{ + git := fakegit.New(c, "repo", map[string]string{ "Dockerfile": "FROM busybox", }, true) return buildImage(name, buildFlags, build.WithContextPath(git.RepoURL)) @@ -3518,21 +3541,23 @@ func (s *DockerSuite) TestBuildChownSingleFile(c *check.C) { name := "testbuildchownsinglefile" - ctx := fakeContext(c, ` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` FROM busybox COPY test / RUN ls -l /test RUN [ $(ls -l /test | awk '{print $3":"$4}') = 'root:root' ] -`, map[string]string{ - "test": "test", - }) +`), + fakecontext.WithFiles(map[string]string{ + "test": "test", + })) defer ctx.Close() if err := os.Chown(filepath.Join(ctx.Dir, "test"), 4242, 4242); err != nil { c.Fatal(err) } - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) } func (s *DockerSuite) TestBuildSymlinkBreakout(c *check.C) { @@ -3577,7 +3602,7 @@ func (s *DockerSuite) TestBuildSymlinkBreakout(c *check.C) { w.Close() f.Close() - buildImageSuccessfully(c, name, build.WithoutCache, withExternalBuildContext(fakeContextFromDir(ctx))) + buildImageSuccessfully(c, name, 
build.WithoutCache, build.WithExternalBuildContext(fakecontext.New(c, ctx))) if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil { c.Fatal("symlink breakout - inject") } else if !os.IsNotExist(err) { @@ -3591,15 +3616,15 @@ func (s *DockerSuite) TestBuildXZHost(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildxzhost" - buildImageSuccessfully(c, name, withBuildContext(c, - withFile("Dockerfile", ` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` FROM busybox ADD xz /usr/local/sbin/ RUN chmod 755 /usr/local/sbin/xz ADD test.xz / RUN [ ! -e /injected ]`), - withFile("test.xz", "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00"+"\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd"+"\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21"), - withFile("xz", "#!/bin/sh\ntouch /injected"), + build.WithFile("test.xz", "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00"+"\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd"+"\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21"), + build.WithFile("xz", "#!/bin/sh\ntouch /injected"), )) } @@ -3617,13 +3642,13 @@ func (s *DockerSuite) TestBuildVolumesRetainContents(c *check.C) { volName = "C:/foo" } - buildImageSuccessfully(c, name, withBuildContext(c, - withFile("Dockerfile", ` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` FROM busybox COPY content /foo/file VOLUME `+volName+` CMD cat /foo/file`), - withFile("content", expected), + build.WithFile("content", expected), )) out, _ := dockerCmd(c, "run", "--rm", name) @@ -3635,15 +3660,13 @@ CMD cat /foo/file`), // FIXME(vdemeester) part of this should be unit test, other part should be clearer func (s *DockerSuite) TestBuildRenamedDockerfile(c *check.C) { - ctx := fakeContext(c, `FROM busybox - RUN echo from Dockerfile`, - map[string]string{ - "Dockerfile": "FROM busybox\nRUN echo from Dockerfile", - "files/Dockerfile": "FROM 
busybox\nRUN echo from files/Dockerfile", - "files/dFile": "FROM busybox\nRUN echo from files/dFile", - "dFile": "FROM busybox\nRUN echo from dFile", - "files/dFile2": "FROM busybox\nRUN echo from files/dFile2", - }) + ctx := fakecontext.New(c, "", fakecontext.WithFiles(map[string]string{ + "Dockerfile": "FROM busybox\nRUN echo from Dockerfile", + "files/Dockerfile": "FROM busybox\nRUN echo from files/Dockerfile", + "files/dFile": "FROM busybox\nRUN echo from files/dFile", + "dFile": "FROM busybox\nRUN echo from dFile", + "files/dFile2": "FROM busybox\nRUN echo from files/dFile2", + })) defer ctx.Close() cli.Docker(cli.Args("build", "-t", "test1", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{ @@ -3670,7 +3693,7 @@ func (s *DockerSuite) TestBuildRenamedDockerfile(c *check.C) { } cli.Docker(cli.Args("build", fmt.Sprintf("--file=%s", nonDockerfileFile), "-t", "test5", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{ ExitCode: 1, - Err: fmt.Sprintf("The Dockerfile (%s) must be within the build context (.)", nonDockerfileFile), + Err: fmt.Sprintf("unable to prepare context: the Dockerfile (%s) must be within the build context", nonDockerfileFile), }) cli.Docker(cli.Args("build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test6", ".."), cli.InDir(filepath.Join(ctx.Dir, "files"))).Assert(c, icmd.Expected{ @@ -3701,18 +3724,18 @@ func (s *DockerSuite) TestBuildFromMixedcaseDockerfile(c *check.C) { testRequires(c, DaemonIsLinux) // If Dockerfile is not present, use dockerfile - buildImage("test1", withBuildContext(c, - withFile("dockerfile", `FROM busybox + buildImage("test1", build.WithBuildContext(c, + build.WithFile("dockerfile", `FROM busybox RUN echo from dockerfile`), )).Assert(c, icmd.Expected{ Out: "from dockerfile", }) // Prefer Dockerfile in place of dockerfile - buildImage("test1", withBuildContext(c, - withFile("dockerfile", `FROM busybox + buildImage("test1", build.WithBuildContext(c, + build.WithFile("dockerfile", `FROM busybox RUN echo from 
dockerfile`), - withFile("Dockerfile", `FROM busybox + build.WithFile("Dockerfile", `FROM busybox RUN echo from Dockerfile`), )).Assert(c, icmd.Expected{ Out: "from Dockerfile", @@ -3720,24 +3743,22 @@ func (s *DockerSuite) TestBuildFromMixedcaseDockerfile(c *check.C) { } func (s *DockerSuite) TestBuildFromURLWithF(c *check.C) { - server := fakeStorage(c, map[string]string{"baz": `FROM busybox + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{"baz": `FROM busybox RUN echo from baz COPY * /tmp/ -RUN find /tmp/`}) +RUN find /tmp/`})) defer server.Close() - ctx := fakeContext(c, `FROM busybox -RUN echo from Dockerfile`, - map[string]string{}) + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox + RUN echo from Dockerfile`)) defer ctx.Close() // Make sure that -f is ignored and that we don't use the Dockerfile // that's in the current dir - result := buildImage("test1", cli.WithFlags("-f", "baz", server.URL()+"/baz"), func(cmd *icmd.Cmd) func() { + result := cli.BuildCmd(c, "test1", cli.WithFlags("-f", "baz", server.URL()+"/baz"), func(cmd *icmd.Cmd) func() { cmd.Dir = ctx.Dir return nil }) - result.Assert(c, icmd.Success) if !strings.Contains(result.Combined(), "from baz") || strings.Contains(result.Combined(), "/tmp/baz") || @@ -3749,14 +3770,13 @@ RUN echo from Dockerfile`, func (s *DockerSuite) TestBuildFromStdinWithF(c *check.C) { testRequires(c, DaemonIsLinux) // TODO Windows: This test is flaky; no idea why - ctx := fakeContext(c, `FROM busybox -RUN echo "from Dockerfile"`, - map[string]string{}) + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox +RUN echo "from Dockerfile"`)) defer ctx.Close() // Make sure that -f is ignored and that we don't use the Dockerfile // that's in the current dir - result := buildImage("test1", cli.WithFlags("-f", "baz", "-"), func(cmd *icmd.Cmd) func() { + result := cli.BuildCmd(c, "test1", cli.WithFlags("-f", "baz", "-"), func(cmd *icmd.Cmd) func() { cmd.Dir = 
ctx.Dir cmd.Stdin = strings.NewReader(`FROM busybox RUN echo "from baz" @@ -3764,7 +3784,6 @@ COPY * /tmp/ RUN sh -c "find /tmp/" # sh -c is needed on Windows to use the correct find`) return nil }) - result.Assert(c, icmd.Success) if !strings.Contains(result.Combined(), "from baz") || strings.Contains(result.Combined(), "/tmp/baz") || @@ -3851,19 +3870,16 @@ func (s *DockerSuite) TestBuildSpaces(c *check.C) { // Test to make sure that leading/trailing spaces on a command // doesn't change the error msg we get name := "testspaces" - ctx := fakeContext(c, "FROM busybox\nCOPY\n", - map[string]string{ - "Dockerfile": "FROM busybox\nCOPY\n", - }) + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile("FROM busybox\nCOPY\n")) defer ctx.Close() - result1 := buildImage(name, withExternalBuildContext(ctx)) + result1 := cli.Docker(cli.Build(name), build.WithExternalBuildContext(ctx)) result1.Assert(c, icmd.Expected{ ExitCode: 1, }) ctx.Add("Dockerfile", "FROM busybox\nCOPY ") - result2 := buildImage(name, withExternalBuildContext(ctx)) + result2 := cli.Docker(cli.Build(name), build.WithExternalBuildContext(ctx)) result2.Assert(c, icmd.Expected{ ExitCode: 1, }) @@ -3882,7 +3898,7 @@ func (s *DockerSuite) TestBuildSpaces(c *check.C) { } ctx.Add("Dockerfile", "FROM busybox\n COPY") - result2 = buildImage(name, build.WithoutCache, withExternalBuildContext(ctx)) + result2 = cli.Docker(cli.Build(name), build.WithoutCache, build.WithExternalBuildContext(ctx)) result2.Assert(c, icmd.Expected{ ExitCode: 1, }) @@ -3897,7 +3913,7 @@ func (s *DockerSuite) TestBuildSpaces(c *check.C) { } ctx.Add("Dockerfile", "FROM busybox\n COPY ") - result2 = buildImage(name, build.WithoutCache, withExternalBuildContext(ctx)) + result2 = cli.Docker(cli.Build(name), build.WithoutCache, build.WithExternalBuildContext(ctx)) result2.Assert(c, icmd.Expected{ ExitCode: 1, }) @@ -4000,9 +4016,9 @@ func (s *DockerSuite) TestBuildEmptyScratch(c *check.C) { } func (s *DockerSuite) TestBuildDotDotFile(c 
*check.C) { - buildImageSuccessfully(c, "sc", withBuildContext(c, - withFile("Dockerfile", "FROM busybox\n"), - withFile("..gitme", ""), + buildImageSuccessfully(c, "sc", build.WithBuildContext(c, + build.WithFile("Dockerfile", "FROM busybox\n"), + build.WithFile("..gitme", ""), )) } @@ -4188,21 +4204,20 @@ func (s *DockerTrustSuite) TestTrustedBuildTagFromReleasesRole(c *check.C) { // push a different tag to the releases role otherTag := fmt.Sprintf("%s:other", repoName) - dockerCmd(c, "tag", "busybox", otherTag) + cli.DockerCmd(c, "tag", "busybox", otherTag) - icmd.RunCmd(icmd.Command(dockerBinary, "push", otherTag), trustedCmd).Assert(c, icmd.Success) + cli.Docker(cli.Args("push", otherTag), trustedCmd).Assert(c, icmd.Success) s.assertTargetInRoles(c, repoName, "other", "targets/releases") s.assertTargetNotInRoles(c, repoName, "other", "targets") - out, status := dockerCmd(c, "rmi", otherTag) - c.Assert(status, check.Equals, 0, check.Commentf("docker rmi failed: %s", out)) + cli.DockerCmd(c, "rmi", otherTag) dockerFile := fmt.Sprintf(` FROM %s RUN [] `, otherTag) name := "testtrustedbuildreleasesrole" - buildImage(name, trustedBuild, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{ + cli.BuildCmd(c, name, trustedCmd, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{ Out: fmt.Sprintf("FROM %s@sha", repoName), }) } @@ -4220,14 +4235,13 @@ func (s *DockerTrustSuite) TestTrustedBuildTagIgnoresOtherDelegationRoles(c *che // push a different tag to the other role otherTag := fmt.Sprintf("%s:other", repoName) - dockerCmd(c, "tag", "busybox", otherTag) + cli.DockerCmd(c, "tag", "busybox", otherTag) - icmd.RunCmd(icmd.Command(dockerBinary, "push", otherTag), trustedCmd).Assert(c, icmd.Success) + cli.Docker(cli.Args("push", otherTag), trustedCmd).Assert(c, icmd.Success) s.assertTargetInRoles(c, repoName, "other", "targets/other") s.assertTargetNotInRoles(c, repoName, "other", "targets") - out, status := dockerCmd(c, "rmi", otherTag) - c.Assert(status, 
check.Equals, 0, check.Commentf("docker rmi failed: %s", out)) + cli.DockerCmd(c, "rmi", otherTag) dockerFile := fmt.Sprintf(` FROM %s @@ -4235,7 +4249,7 @@ func (s *DockerTrustSuite) TestTrustedBuildTagIgnoresOtherDelegationRoles(c *che `, otherTag) name := "testtrustedbuildotherrole" - buildImage(name, trustedBuild, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{ + cli.Docker(cli.Build(name), trustedCmd, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{ ExitCode: 1, }) } @@ -4248,16 +4262,16 @@ func (s *DockerSuite) TestBuildNullStringInAddCopyVolume(c *check.C) { volName = `C:\\nullvolume` } - buildImageSuccessfully(c, name, withBuildContext(c, - withFile("Dockerfile", ` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` FROM busybox ADD null / COPY nullfile / VOLUME `+volName+` `), - withFile("null", "test1"), - withFile("nullfile", "test2"), + build.WithFile("null", "test1"), + build.WithFile("nullfile", "test2"), )) } @@ -4366,12 +4380,8 @@ func (s *DockerSuite) TestBuildTimeArgHistoryExclusions(c *check.C) { if strings.Contains(out, "https_proxy") { c.Fatalf("failed to exclude proxy settings from history!") } - if !strings.Contains(out, fmt.Sprintf("%s=%s", envKey, envVal)) { - c.Fatalf("explicitly defined ARG %s is not in output", explicitProxyKey) - } - if !strings.Contains(out, fmt.Sprintf("%s=%s", envKey, envVal)) { - c.Fatalf("missing build arguments from output") - } + result.Assert(c, icmd.Expected{Out: fmt.Sprintf("%s=%s", envKey, envVal)}) + result.Assert(c, icmd.Expected{Out: fmt.Sprintf("%s=%s", explicitProxyKey, explicitProxyVal)}) cacheID := buildImage(imgName + "-two") c.Assert(origID, checker.Equals, cacheID) @@ -4462,26 +4472,26 @@ func (s *DockerSuite) TestBuildBuildTimeArgOverrideArgDefinedBeforeEnv(c *check. 
imgName := "bldargtest" envKey := "foo" envVal := "bar" - envValOveride := "barOverride" + envValOverride := "barOverride" dockerfile := fmt.Sprintf(`FROM busybox ARG %s ENV %s %s RUN echo $%s CMD echo $%s - `, envKey, envKey, envValOveride, envKey, envKey) + `, envKey, envKey, envValOverride, envKey, envKey) result := buildImage(imgName, cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), build.WithDockerfile(dockerfile), ) result.Assert(c, icmd.Success) - if strings.Count(result.Combined(), envValOveride) != 2 { - c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOveride) + if strings.Count(result.Combined(), envValOverride) != 2 { + c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOverride) } containerName := "bldargCont" - if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { - c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOverride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOverride) } } @@ -4491,25 +4501,25 @@ func (s *DockerSuite) TestBuildBuildTimeArgOverrideEnvDefinedBeforeArg(c *check. 
imgName := "bldargtest" envKey := "foo" envVal := "bar" - envValOveride := "barOverride" + envValOverride := "barOverride" dockerfile := fmt.Sprintf(`FROM busybox ENV %s %s ARG %s RUN echo $%s CMD echo $%s - `, envKey, envValOveride, envKey, envKey, envKey) + `, envKey, envValOverride, envKey, envKey, envKey) result := buildImage(imgName, cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), build.WithDockerfile(dockerfile), ) result.Assert(c, icmd.Success) - if strings.Count(result.Combined(), envValOveride) != 2 { - c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOveride) + if strings.Count(result.Combined(), envValOverride) != 2 { + c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOverride) } containerName := "bldargCont" - if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { - c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOverride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOverride) } } @@ -4542,8 +4552,8 @@ func (s *DockerSuite) TestBuildBuildTimeArgExpansion(c *check.C) { "--build-arg", fmt.Sprintf("%s=%s", userVar, userVal), "--build-arg", fmt.Sprintf("%s=%s", volVar, volVal), ), - withBuildContext(c, - withFile("Dockerfile", fmt.Sprintf(`FROM busybox + build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox ARG %s WORKDIR ${%s} ARG %s @@ -4560,15 +4570,13 @@ func (s *DockerSuite) TestBuildBuildTimeArgExpansion(c *check.C) { VOLUME ${%s}`, wdVar, wdVar, addVar, addVar, copyVar, copyVar, envVar, envVar, envVar, exposeVar, exposeVar, userVar, userVar, volVar, volVar)), - withFile(addVal, "some stuff"), - withFile(copyVal, "some stuff"), + build.WithFile(addVal, "some stuff"), + build.WithFile(copyVal, 
"some stuff"), ), ) res := inspectField(c, imgName, "Config.WorkingDir") - if res != filepath.ToSlash(filepath.Clean(wdVal)) { - c.Fatalf("Config.WorkingDir value mismatch. Expected: %s, got: %s", filepath.ToSlash(filepath.Clean(wdVal)), res) - } + c.Check(res, check.Equals, filepath.ToSlash(wdVal)) var resArr []string inspectFieldAndUnmarshall(c, imgName, "Config.Env", &resArr) @@ -4608,25 +4616,25 @@ func (s *DockerSuite) TestBuildBuildTimeArgExpansionOverride(c *check.C) { envKey := "foo" envVal := "bar" envKey1 := "foo1" - envValOveride := "barOverride" + envValOverride := "barOverride" dockerfile := fmt.Sprintf(`FROM busybox ARG %s ENV %s %s ENV %s ${%s} RUN echo $%s - CMD echo $%s`, envKey, envKey, envValOveride, envKey1, envKey, envKey1, envKey1) + CMD echo $%s`, envKey, envKey, envValOverride, envKey1, envKey, envKey1, envKey1) result := buildImage(imgName, cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), build.WithDockerfile(dockerfile), ) result.Assert(c, icmd.Success) - if strings.Count(result.Combined(), envValOveride) != 2 { - c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOveride) + if strings.Count(result.Combined(), envValOverride) != 2 { + c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOverride) } containerName := "bldargCont" - if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { - c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOverride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOverride) } } @@ -4682,24 +4690,24 @@ func (s *DockerSuite) TestBuildBuildTimeArgDefaultOverride(c *check.C) { imgName := "bldargtest" envKey := "foo" envVal := "bar" - envValOveride := "barOverride" + envValOverride := "barOverride" 
dockerfile := fmt.Sprintf(`FROM busybox ARG %s=%s ENV %s $%s RUN echo $%s CMD echo $%s`, envKey, envVal, envKey, envKey, envKey, envKey) result := buildImage(imgName, - cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envValOveride)), + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envValOverride)), build.WithDockerfile(dockerfile), ) result.Assert(c, icmd.Success) - if strings.Count(result.Combined(), envValOveride) != 1 { - c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOveride) + if strings.Count(result.Combined(), envValOverride) != 1 { + c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOverride) } containerName := "bldargCont" - if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { - c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOverride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOverride) } } @@ -4759,8 +4767,8 @@ func (s *DockerSuite) TestBuildBuildTimeArgEnv(c *check.C) { "FOO1=fromenv", "FOO2=fromenv", "FOO3=fromenv")...), - withBuildContext(c, - withFile("Dockerfile", dockerfile), + build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), ), ) result.Assert(c, icmd.Success) @@ -4816,7 +4824,7 @@ func (s *DockerSuite) TestBuildBuildTimeArgEmptyValVariants(c *check.C) { buildImageSuccessfully(c, imgName, build.WithDockerfile(dockerfile)) } -func (s *DockerSuite) TestBuildBuildTimeArgDefintionWithNoEnvInjection(c *check.C) { +func (s *DockerSuite) TestBuildBuildTimeArgDefinitionWithNoEnvInjection(c *check.C) { imgName := "bldargtest" envKey := "foo" dockerfile := fmt.Sprintf(`FROM busybox @@ -4955,26 +4963,26 @@ func (s *DockerSuite) TestBuildMultipleTags(c *check.C) { // #17290 func (s *DockerSuite) 
TestBuildCacheBrokenSymlink(c *check.C) { name := "testbuildbrokensymlink" - ctx := fakeContext(c, ` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` FROM busybox - COPY . ./`, - map[string]string{ + COPY . ./`), + fakecontext.WithFiles(map[string]string{ "foo": "bar", - }) + })) defer ctx.Close() err := os.Symlink(filepath.Join(ctx.Dir, "nosuchfile"), filepath.Join(ctx.Dir, "asymlink")) c.Assert(err, checker.IsNil) // warm up cache - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) // add new file to context, should invalidate cache err = ioutil.WriteFile(filepath.Join(ctx.Dir, "newfile"), []byte("foo"), 0644) c.Assert(err, checker.IsNil) - result := buildImage(name, withExternalBuildContext(ctx)) - result.Assert(c, icmd.Success) + result := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) if strings.Contains(result.Combined(), "Using cache") { c.Fatal("2nd build used cache on ADD, it shouldn't") } @@ -4982,62 +4990,62 @@ func (s *DockerSuite) TestBuildCacheBrokenSymlink(c *check.C) { func (s *DockerSuite) TestBuildFollowSymlinkToFile(c *check.C) { name := "testbuildbrokensymlink" - ctx := fakeContext(c, ` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` FROM busybox - COPY asymlink target`, - map[string]string{ + COPY asymlink target`), + fakecontext.WithFiles(map[string]string{ "foo": "bar", - }) + })) defer ctx.Close() err := os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) c.Assert(err, checker.IsNil) - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) - out, _ := dockerCmd(c, "run", "--rm", name, "cat", "target") + out := cli.DockerCmd(c, "run", "--rm", name, "cat", "target").Combined() c.Assert(out, checker.Matches, "bar") // change target file should invalidate cache err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644) c.Assert(err, checker.IsNil) 
- result := buildImage(name, withExternalBuildContext(ctx)) - result.Assert(c, icmd.Success) + result := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) c.Assert(result.Combined(), checker.Not(checker.Contains), "Using cache") - out, _ = dockerCmd(c, "run", "--rm", name, "cat", "target") + out = cli.DockerCmd(c, "run", "--rm", name, "cat", "target").Combined() c.Assert(out, checker.Matches, "baz") } func (s *DockerSuite) TestBuildFollowSymlinkToDir(c *check.C) { name := "testbuildbrokensymlink" - ctx := fakeContext(c, ` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` FROM busybox - COPY asymlink /`, - map[string]string{ + COPY asymlink /`), + fakecontext.WithFiles(map[string]string{ "foo/abc": "bar", "foo/def": "baz", - }) + })) defer ctx.Close() err := os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) c.Assert(err, checker.IsNil) - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) - out, _ := dockerCmd(c, "run", "--rm", name, "cat", "abc", "def") + out := cli.DockerCmd(c, "run", "--rm", name, "cat", "abc", "def").Combined() c.Assert(out, checker.Matches, "barbaz") // change target file should invalidate cache err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo/def"), []byte("bax"), 0644) c.Assert(err, checker.IsNil) - result := buildImage(name, withExternalBuildContext(ctx)) - result.Assert(c, icmd.Success) + result := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) c.Assert(result.Combined(), checker.Not(checker.Contains), "Using cache") - out, _ = dockerCmd(c, "run", "--rm", name, "cat", "abc", "def") + out = cli.DockerCmd(c, "run", "--rm", name, "cat", "abc", "def").Combined() c.Assert(out, checker.Matches, "barbax") } @@ -5046,43 +5054,44 @@ func (s *DockerSuite) TestBuildFollowSymlinkToDir(c *check.C) { // not from the target file. 
func (s *DockerSuite) TestBuildSymlinkBasename(c *check.C) { name := "testbuildbrokensymlink" - ctx := fakeContext(c, ` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` FROM busybox - COPY asymlink /`, - map[string]string{ + COPY asymlink /`), + fakecontext.WithFiles(map[string]string{ "foo": "bar", - }) + })) defer ctx.Close() err := os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) c.Assert(err, checker.IsNil) - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) - out, _ := dockerCmd(c, "run", "--rm", name, "cat", "asymlink") + out := cli.DockerCmd(c, "run", "--rm", name, "cat", "asymlink").Combined() c.Assert(out, checker.Matches, "bar") } // #17827 func (s *DockerSuite) TestBuildCacheRootSource(c *check.C) { name := "testbuildrootsource" - ctx := fakeContext(c, ` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` FROM busybox - COPY / /data`, - map[string]string{ + COPY / /data`), + fakecontext.WithFiles(map[string]string{ "foo": "bar", - }) + })) defer ctx.Close() // warm up cache - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) // change file, should invalidate cache err := ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644) c.Assert(err, checker.IsNil) - result := buildImage(name, withExternalBuildContext(ctx)) - result.Assert(c, icmd.Success) + result := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) c.Assert(result.Combined(), checker.Not(checker.Contains), "Using cache") } @@ -5354,14 +5363,14 @@ func (s *DockerSuite) TestBuildDockerignoreComment(c *check.C) { RUN sh -c "(ls -la /tmp/#1)" RUN sh -c "(! ls -la /tmp/#2)" RUN sh -c "(! ls /tmp/foo) && (! 
ls /tmp/foo2) && (ls /tmp/dir1/foo)"` - buildImageSuccessfully(c, name, withBuildContext(c, - withFile("Dockerfile", dockerfile), - withFile("foo", "foo"), - withFile("foo2", "foo2"), - withFile("dir1/foo", "foo in dir1"), - withFile("#1", "# file 1"), - withFile("#2", "# file 2"), - withFile(".dockerignore", `# Visual C++ cache files + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("foo", "foo"), + build.WithFile("foo2", "foo2"), + build.WithFile("dir1/foo", "foo in dir1"), + build.WithFile("#1", "# file 1"), + build.WithFile("#2", "# file 2"), + build.WithFile(".dockerignore", `# Visual C++ cache files # because we have git ;-) # The above comment is from #20083 foo @@ -5379,8 +5388,8 @@ func (s *DockerSuite) TestBuildWithUTF8BOM(c *check.C) { name := "test-with-utf8-bom" dockerfile := []byte(`FROM busybox`) bomDockerfile := append([]byte{0xEF, 0xBB, 0xBF}, dockerfile...) - buildImageSuccessfully(c, name, withBuildContext(c, - withFile("Dockerfile", string(bomDockerfile)), + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", string(bomDockerfile)), )) } @@ -5395,9 +5404,9 @@ func (s *DockerSuite) TestBuildWithUTF8BOMDockerignore(c *check.C) { RUN ls /tmp/.dockerignore` dockerignore := []byte("./Dockerfile\n") bomDockerignore := append([]byte{0xEF, 0xBB, 0xBF}, dockerignore...) 
- buildImageSuccessfully(c, name, withBuildContext(c, - withFile("Dockerfile", dockerfile), - withFile(".dockerignore", string(bomDockerignore)), + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", string(bomDockerignore)), )) } @@ -5585,17 +5594,18 @@ func (s *DockerSuite) TestBuildCacheFromEqualDiffIDsLength(c *check.C) { FROM busybox RUN echo "test" ENTRYPOINT ["sh"]` - ctx := fakeContext(c, dockerfile, map[string]string{ - "Dockerfile": dockerfile, - }) + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "Dockerfile": dockerfile, + })) defer ctx.Close() - buildImageSuccessfully(c, "build1", withExternalBuildContext(ctx)) + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) id1 := getIDByName(c, "build1") // rebuild with cache-from - result := buildImage("build2", cli.WithFlags("--cache-from=build1"), withExternalBuildContext(ctx)) - result.Assert(c, icmd.Success) + result := cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) id2 := getIDByName(c, "build2") c.Assert(id1, checker.Equals, id2) c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 2) @@ -5608,30 +5618,30 @@ func (s *DockerSuite) TestBuildCacheFrom(c *check.C) { ENV FOO=bar ADD baz / RUN touch bax` - ctx := fakeContext(c, dockerfile, map[string]string{ - "Dockerfile": dockerfile, - "baz": "baz", - }) + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "Dockerfile": dockerfile, + "baz": "baz", + })) defer ctx.Close() - buildImageSuccessfully(c, "build1", withExternalBuildContext(ctx)) + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) id1 := getIDByName(c, "build1") // rebuild with cache-from - result := buildImage("build2", cli.WithFlags("--cache-from=build1"), 
withExternalBuildContext(ctx)) - result.Assert(c, icmd.Success) + result := cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) id2 := getIDByName(c, "build2") c.Assert(id1, checker.Equals, id2) c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 3) - dockerCmd(c, "rmi", "build2") + cli.DockerCmd(c, "rmi", "build2") // no cache match with unknown source - result = buildImage("build2", cli.WithFlags("--cache-from=nosuchtag"), withExternalBuildContext(ctx)) - result.Assert(c, icmd.Success) + result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=nosuchtag"), build.WithExternalBuildContext(ctx)) id2 = getIDByName(c, "build2") c.Assert(id1, checker.Not(checker.Equals), id2) c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 0) - dockerCmd(c, "rmi", "build2") + cli.DockerCmd(c, "rmi", "build2") // clear parent images tempDir, err := ioutil.TempDir("", "test-build-cache-from-") @@ -5640,33 +5650,31 @@ func (s *DockerSuite) TestBuildCacheFrom(c *check.C) { } defer os.RemoveAll(tempDir) tempFile := filepath.Join(tempDir, "img.tar") - dockerCmd(c, "save", "-o", tempFile, "build1") - dockerCmd(c, "rmi", "build1") - dockerCmd(c, "load", "-i", tempFile) - parentID, _ := dockerCmd(c, "inspect", "-f", "{{.Parent}}", "build1") + cli.DockerCmd(c, "save", "-o", tempFile, "build1") + cli.DockerCmd(c, "rmi", "build1") + cli.DockerCmd(c, "load", "-i", tempFile) + parentID := cli.DockerCmd(c, "inspect", "-f", "{{.Parent}}", "build1").Combined() c.Assert(strings.TrimSpace(parentID), checker.Equals, "") // cache still applies without parents - result = buildImage("build2", cli.WithFlags("--cache-from=build1"), withExternalBuildContext(ctx)) - result.Assert(c, icmd.Success) + result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) id2 = getIDByName(c, "build2") c.Assert(id1, checker.Equals, id2) 
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 3) - history1, _ := dockerCmd(c, "history", "-q", "build2") + history1 := cli.DockerCmd(c, "history", "-q", "build2").Combined() // Retry, no new intermediate images - result = buildImage("build3", cli.WithFlags("--cache-from=build1"), withExternalBuildContext(ctx)) - result.Assert(c, icmd.Success) + result = cli.BuildCmd(c, "build3", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) id3 := getIDByName(c, "build3") c.Assert(id1, checker.Equals, id3) c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 3) - history2, _ := dockerCmd(c, "history", "-q", "build3") + history2 := cli.DockerCmd(c, "history", "-q", "build3").Combined() c.Assert(history1, checker.Equals, history2) - dockerCmd(c, "rmi", "build2") - dockerCmd(c, "rmi", "build3") - dockerCmd(c, "rmi", "build1") - dockerCmd(c, "load", "-i", tempFile) + cli.DockerCmd(c, "rmi", "build2") + cli.DockerCmd(c, "rmi", "build3") + cli.DockerCmd(c, "rmi", "build1") + cli.DockerCmd(c, "load", "-i", tempFile) // Modify file, everything up to last command and layers are reused dockerfile = ` @@ -5677,14 +5685,13 @@ func (s *DockerSuite) TestBuildCacheFrom(c *check.C) { err = ioutil.WriteFile(filepath.Join(ctx.Dir, "Dockerfile"), []byte(dockerfile), 0644) c.Assert(err, checker.IsNil) - result = buildImage("build2", cli.WithFlags("--cache-from=build1"), withExternalBuildContext(ctx)) - result.Assert(c, icmd.Success) + result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) id2 = getIDByName(c, "build2") c.Assert(id1, checker.Not(checker.Equals), id2) c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 2) - layers1Str, _ := dockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build1") - layers2Str, _ := dockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build2") + layers1Str := cli.DockerCmd(c, "inspect", "-f", "{{json 
.RootFS.Layers}}", "build1").Combined() + layers2Str := cli.DockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build2").Combined() var layers1 []string var layers2 []string @@ -5705,19 +5712,19 @@ func (s *DockerSuite) TestBuildCacheMultipleFrom(c *check.C) { ADD baz / FROM busybox ADD baz /` - ctx := fakeContext(c, dockerfile, map[string]string{ - "Dockerfile": dockerfile, - "baz": "baz", - }) + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "Dockerfile": dockerfile, + "baz": "baz", + })) defer ctx.Close() - result := buildImage("build1", withExternalBuildContext(ctx)) - result.Assert(c, icmd.Success) + result := cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) // second part of dockerfile was a repeat of first so should be cached c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 1) - result = buildImage("build2", cli.WithFlags("--cache-from=build1"), withExternalBuildContext(ctx)) - result.Assert(c, icmd.Success) + result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) // now both parts of dockerfile should be cached c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 2) } @@ -5778,7 +5785,7 @@ func (s *DockerSuite) TestBuildWithExtraHostInvalidFormat(c *check.C) { buildFlag string }{ {"extra_host_missing_ip", dockerfile, "--add-host=foo"}, - {"extra_host_missing_ip_with_delimeter", dockerfile, "--add-host=foo:"}, + {"extra_host_missing_ip_with_delimiter", dockerfile, "--add-host=foo:"}, {"extra_host_missing_hostname", dockerfile, "--add-host=:127.0.0.1"}, {"extra_host_invalid_ipv4", dockerfile, "--add-host=foo:101.10.2"}, {"extra_host_invalid_ipv6", dockerfile, "--add-host=foo:2001::1::3F"}, @@ -5830,7 +5837,7 @@ func (s *DockerSuite) TestBuildSquashParent(c *check.C) { c.Assert(len(splitTestHistory), checker.Equals, len(splitOrigHistory)+1) out = inspectImage(c, id, "len 
.RootFS.Layers") - c.Assert(strings.TrimSpace(out), checker.Equals, "3") + c.Assert(strings.TrimSpace(out), checker.Equals, "2") } func (s *DockerSuite) TestBuildContChar(c *check.C) { @@ -5863,162 +5870,137 @@ func (s *DockerSuite) TestBuildCopyFromPreviousRootFS(c *check.C) { dockerfile := ` FROM busybox AS first COPY foo bar - FROM busybox - %s - COPY baz baz - RUN echo mno > baz/cc - FROM busybox - COPY bar / - COPY --from=1 baz sub/ - COPY --from=0 bar baz - COPY --from=first bar bay` - ctx := fakeContext(c, fmt.Sprintf(dockerfile, ""), map[string]string{ - "Dockerfile": dockerfile, - "foo": "abc", - "bar": "def", - "baz/aa": "ghi", - "baz/bb": "jkl", - }) + FROM busybox + %s + COPY baz baz + RUN echo mno > baz/cc + + FROM busybox + COPY bar / + COPY --from=1 baz sub/ + COPY --from=0 bar baz + COPY --from=first bar bay` + + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(fmt.Sprintf(dockerfile, "")), + fakecontext.WithFiles(map[string]string{ + "foo": "abc", + "bar": "def", + "baz/aa": "ghi", + "baz/bb": "jkl", + })) defer ctx.Close() - result := buildImage("build1", withExternalBuildContext(ctx)) - result.Assert(c, icmd.Success) + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) - out, _ := dockerCmd(c, "run", "build1", "cat", "bar") - c.Assert(strings.TrimSpace(out), check.Equals, "def") - out, _ = dockerCmd(c, "run", "build1", "cat", "sub/aa") - c.Assert(strings.TrimSpace(out), check.Equals, "ghi") - out, _ = dockerCmd(c, "run", "build1", "cat", "sub/cc") - c.Assert(strings.TrimSpace(out), check.Equals, "mno") - out, _ = dockerCmd(c, "run", "build1", "cat", "baz") - c.Assert(strings.TrimSpace(out), check.Equals, "abc") - out, _ = dockerCmd(c, "run", "build1", "cat", "bay") - c.Assert(strings.TrimSpace(out), check.Equals, "abc") + cli.DockerCmd(c, "run", "build1", "cat", "bar").Assert(c, icmd.Expected{Out: "def"}) + cli.DockerCmd(c, "run", "build1", "cat", "sub/aa").Assert(c, icmd.Expected{Out: "ghi"}) + cli.DockerCmd(c, "run", 
"build1", "cat", "sub/cc").Assert(c, icmd.Expected{Out: "mno"}) + cli.DockerCmd(c, "run", "build1", "cat", "baz").Assert(c, icmd.Expected{Out: "abc"}) + cli.DockerCmd(c, "run", "build1", "cat", "bay").Assert(c, icmd.Expected{Out: "abc"}) - result = buildImage("build2", withExternalBuildContext(ctx)) - result.Assert(c, icmd.Success) + result := cli.BuildCmd(c, "build2", build.WithExternalBuildContext(ctx)) // all commands should be cached c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 7) + c.Assert(getIDByName(c, "build1"), checker.Equals, getIDByName(c, "build2")) err := ioutil.WriteFile(filepath.Join(ctx.Dir, "Dockerfile"), []byte(fmt.Sprintf(dockerfile, "COPY baz/aa foo")), 0644) c.Assert(err, checker.IsNil) // changing file in parent block should not affect last block - result = buildImage("build3", withExternalBuildContext(ctx)) - result.Assert(c, icmd.Success) - + result = cli.BuildCmd(c, "build3", build.WithExternalBuildContext(ctx)) c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 5) - c.Assert(getIDByName(c, "build1"), checker.Equals, getIDByName(c, "build2")) - err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("pqr"), 0644) c.Assert(err, checker.IsNil) // changing file in parent block should affect both first and last block - result = buildImage("build4", withExternalBuildContext(ctx)) - result.Assert(c, icmd.Success) + result = cli.BuildCmd(c, "build4", build.WithExternalBuildContext(ctx)) c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 5) - out, _ = dockerCmd(c, "run", "build4", "cat", "bay") - c.Assert(strings.TrimSpace(out), check.Equals, "pqr") - out, _ = dockerCmd(c, "run", "build4", "cat", "baz") - c.Assert(strings.TrimSpace(out), check.Equals, "pqr") + cli.DockerCmd(c, "run", "build4", "cat", "bay").Assert(c, icmd.Expected{Out: "pqr"}) + cli.DockerCmd(c, "run", "build4", "cat", "baz").Assert(c, icmd.Expected{Out: "pqr"}) } func (s *DockerSuite) 
TestBuildCopyFromPreviousRootFSErrors(c *check.C) { - dockerfile := ` + testCases := []struct { + dockerfile string + expectedError string + }{ + { + dockerfile: ` FROM busybox - COPY --from=foo foo bar` - - ctx := fakeContext(c, dockerfile, map[string]string{ - "Dockerfile": dockerfile, - "foo": "abc", - }) - defer ctx.Close() - - buildImage("build1", withExternalBuildContext(ctx)).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: "invalid from flag value foo", - }) - - dockerfile = ` + COPY --from=foo foo bar`, + expectedError: "invalid from flag value foo", + }, + { + dockerfile: ` FROM busybox - COPY --from=0 foo bar` - - ctx = fakeContext(c, dockerfile, map[string]string{ - "Dockerfile": dockerfile, - "foo": "abc", - }) - defer ctx.Close() - - buildImage("build1", withExternalBuildContext(ctx)).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: "invalid from flag value 0 refers current build block", - }) - - dockerfile = ` + COPY --from=0 foo bar`, + expectedError: "invalid from flag value 0: refers to current build stage", + }, + { + dockerfile: ` FROM busybox AS foo - COPY --from=bar foo bar` - - ctx = fakeContext(c, dockerfile, map[string]string{ - "Dockerfile": dockerfile, - "foo": "abc", - }) - defer ctx.Close() - - buildImage("build1", withExternalBuildContext(ctx)).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: "invalid from flag value bar", - }) - - dockerfile = ` + COPY --from=bar foo bar`, + expectedError: "invalid from flag value bar", + }, + { + dockerfile: ` FROM busybox AS 1 - COPY --from=1 foo bar` + COPY --from=1 foo bar`, + expectedError: "invalid name for build stage", + }, + } - ctx = fakeContext(c, dockerfile, map[string]string{ - "Dockerfile": dockerfile, - "foo": "abc", - }) - defer ctx.Close() + for _, tc := range testCases { + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(tc.dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "abc", + })) - buildImage("build1", withExternalBuildContext(ctx)).Assert(c, icmd.Expected{ - 
ExitCode: 1, - Err: "invalid name for build stage", - }) + cli.Docker(cli.Build("build1"), build.WithExternalBuildContext(ctx)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: tc.expectedError, + }) + + ctx.Close() + } } func (s *DockerSuite) TestBuildCopyFromPreviousFrom(c *check.C) { dockerfile := ` FROM busybox COPY foo bar` - ctx := fakeContext(c, dockerfile, map[string]string{ - "Dockerfile": dockerfile, - "foo": "abc", - }) + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "abc", + })) defer ctx.Close() - result := buildImage("build1", withExternalBuildContext(ctx)) - result.Assert(c, icmd.Success) + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) dockerfile = ` FROM build1:latest AS foo FROM busybox COPY --from=foo bar / COPY foo /` - ctx = fakeContext(c, dockerfile, map[string]string{ - "Dockerfile": dockerfile, - "foo": "def", - }) + ctx = fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "def", + })) defer ctx.Close() - result = buildImage("build2", withExternalBuildContext(ctx)) - result.Assert(c, icmd.Success) + cli.BuildCmd(c, "build2", build.WithExternalBuildContext(ctx)) - out, _ := dockerCmd(c, "run", "build2", "cat", "bar") + out := cli.DockerCmd(c, "run", "build2", "cat", "bar").Combined() c.Assert(strings.TrimSpace(out), check.Equals, "abc") - out, _ = dockerCmd(c, "run", "build2", "cat", "foo") + out = cli.DockerCmd(c, "run", "build2", "cat", "foo").Combined() c.Assert(strings.TrimSpace(out), check.Equals, "def") } @@ -6034,18 +6016,17 @@ func (s *DockerSuite) TestBuildCopyFromImplicitFrom(c *check.C) { COPY --from=busybox License.txt foo` } - ctx := fakeContext(c, dockerfile, map[string]string{ - "Dockerfile": dockerfile, - }) + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + ) defer ctx.Close() - result := buildImage("build1", withExternalBuildContext(ctx)) - 
result.Assert(c, icmd.Success) + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) if DaemonIsWindows() { - out, _ := dockerCmd(c, "run", "build1", "cat", "License.txt") + out := cli.DockerCmd(c, "run", "build1", "cat", "License.txt").Combined() c.Assert(len(out), checker.GreaterThan, 10) - out2, _ := dockerCmd(c, "run", "build1", "cat", "foo") + out2 := cli.DockerCmd(c, "run", "build1", "cat", "foo").Combined() c.Assert(out, check.Equals, out2) } } @@ -6056,31 +6037,28 @@ func (s *DockerRegistrySuite) TestBuildCopyFromImplicitPullingFrom(c *check.C) { dockerfile := ` FROM busybox COPY foo bar` - ctx := fakeContext(c, dockerfile, map[string]string{ - "Dockerfile": dockerfile, - "foo": "abc", - }) + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "abc", + })) defer ctx.Close() - result := buildImage(repoName, withExternalBuildContext(ctx)) - result.Assert(c, icmd.Success) + cli.BuildCmd(c, repoName, build.WithExternalBuildContext(ctx)) - dockerCmd(c, "push", repoName) - dockerCmd(c, "rmi", repoName) + cli.DockerCmd(c, "push", repoName) + cli.DockerCmd(c, "rmi", repoName) dockerfile = ` FROM busybox COPY --from=%s bar baz` - ctx = fakeContext(c, fmt.Sprintf(dockerfile, repoName), map[string]string{ - "Dockerfile": dockerfile, - }) + ctx = fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(dockerfile, repoName))) defer ctx.Close() - result = buildImage("build1", withExternalBuildContext(ctx)) - result.Assert(c, icmd.Success) + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) - dockerCmdWithResult("run", "build1", "cat", "baz").Assert(c, icmd.Expected{Out: "abc"}) + cli.Docker(cli.Args("run", "build1", "cat", "baz")).Assert(c, icmd.Expected{Out: "abc"}) } func (s *DockerSuite) TestBuildFromPreviousBlock(c *check.C) { @@ -6095,20 +6073,17 @@ func (s *DockerSuite) TestBuildFromPreviousBlock(c *check.C) { COPY --from=foo1 foo f1 COPY --from=FOo2 foo f2 ` // 
foo2 case also tests that names are canse insensitive - ctx := fakeContext(c, dockerfile, map[string]string{ - "Dockerfile": dockerfile, - "foo": "bar", - }) + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "bar", + })) defer ctx.Close() - result := buildImage("build1", withExternalBuildContext(ctx)) - result.Assert(c, icmd.Success) - - dockerCmdWithResult("run", "build1", "cat", "foo").Assert(c, icmd.Expected{Out: "bar"}) - - dockerCmdWithResult("run", "build1", "cat", "f1").Assert(c, icmd.Expected{Out: "bar1"}) - - dockerCmdWithResult("run", "build1", "cat", "f2").Assert(c, icmd.Expected{Out: "bar2"}) + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + cli.Docker(cli.Args("run", "build1", "cat", "foo")).Assert(c, icmd.Expected{Out: "bar"}) + cli.Docker(cli.Args("run", "build1", "cat", "f1")).Assert(c, icmd.Expected{Out: "bar1"}) + cli.Docker(cli.Args("run", "build1", "cat", "f2")).Assert(c, icmd.Expected{Out: "bar2"}) } func (s *DockerTrustSuite) TestCopyFromTrustedBuild(c *check.C) { @@ -6138,32 +6113,32 @@ func (s *DockerSuite) TestBuildCopyFromPreviousFromWindows(c *check.C) { dockerfile := ` FROM ` + testEnv.MinimalBaseImage() + ` COPY foo c:\\bar` - ctx := fakeContext(c, dockerfile, map[string]string{ - "Dockerfile": dockerfile, - "foo": "abc", - }) + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "abc", + })) defer ctx.Close() - result := buildImage("build1", withExternalBuildContext(ctx)) - result.Assert(c, icmd.Success) + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) dockerfile = ` FROM build1:latest FROM ` + testEnv.MinimalBaseImage() + ` COPY --from=0 c:\\bar / COPY foo /` - ctx = fakeContext(c, dockerfile, map[string]string{ - "Dockerfile": dockerfile, - "foo": "def", - }) + ctx = fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + 
fakecontext.WithFiles(map[string]string{ + "foo": "def", + })) defer ctx.Close() - result = buildImage("build2", withExternalBuildContext(ctx)) - result.Assert(c, icmd.Success) + cli.BuildCmd(c, "build2", build.WithExternalBuildContext(ctx)) - out, _ := dockerCmd(c, "run", "build2", "cmd.exe", "/s", "/c", "type", "c:\\bar") + out := cli.DockerCmd(c, "run", "build2", "cmd.exe", "/s", "/c", "type", "c:\\bar").Combined() c.Assert(strings.TrimSpace(out), check.Equals, "abc") - out, _ = dockerCmd(c, "run", "build2", "cmd.exe", "/s", "/c", "type", "c:\\foo") + out = cli.DockerCmd(c, "run", "build2", "cmd.exe", "/s", "/c", "type", "c:\\foo").Combined() c.Assert(strings.TrimSpace(out), check.Equals, "def") } @@ -6211,17 +6186,34 @@ func (s *DockerSuite) TestBuildCopyFromWindowsIsCaseInsensitive(c *check.C) { COPY --from=0 c:\\fOo c:\\copied RUN type c:\\copied ` - ctx := fakeContext(c, dockerfile, map[string]string{ - "Dockerfile": dockerfile, - "foo": "hello world", - }) - defer ctx.Close() - exp := icmd.Expected{ + cli.Docker(cli.Build("copyfrom-windows-insensitive"), build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("foo", "hello world"), + )).Assert(c, icmd.Expected{ ExitCode: 0, Out: "hello world", - } - result := buildImage("copyfrom-windows-insensitive", withExternalBuildContext(ctx)) - result.Assert(c, exp) + }) +} + +// #33176 +func (s *DockerSuite) TestBuildCopyFromResetScratch(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerfile := ` + FROM busybox + WORKDIR /foo/bar + FROM scratch + ENV FOO=bar + ` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + ) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + + res := cli.InspectCmd(c, "build1", cli.Format(".Config.WorkingDir")).Combined() + c.Assert(strings.TrimSpace(res), checker.Equals, "") } func (s *DockerSuite) TestBuildIntermediateTarget(c *check.C) { @@ -6231,19 +6223,17 @@ func (s *DockerSuite) 
TestBuildIntermediateTarget(c *check.C) { FROM busybox CMD ["/dist"] ` - ctx := fakeContext(c, dockerfile, map[string]string{ - "Dockerfile": dockerfile, - }) + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile)) defer ctx.Close() - result := buildImage("build1", withExternalBuildContext(ctx), + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx), cli.WithFlags("--target", "build-env")) - result.Assert(c, icmd.Success) - res := inspectFieldJSON(c, "build1", "Config.Cmd") - c.Assert(res, checker.Equals, `["/dev"]`) + //res := inspectFieldJSON(c, "build1", "Config.Cmd") + res := cli.InspectCmd(c, "build1", cli.Format("json .Config.Cmd")).Combined() + c.Assert(strings.TrimSpace(res), checker.Equals, `["/dev"]`) - result = buildImage("build1", withExternalBuildContext(ctx), + result := cli.Docker(cli.Build("build1"), build.WithExternalBuildContext(ctx), cli.WithFlags("--target", "nosuchtarget")) result.Assert(c, icmd.Expected{ ExitCode: 1, @@ -6288,13 +6278,13 @@ func (s *DockerSuite) TestBuildWindowsUser(c *check.C) { // Note 27545 was reverted in 28505, but a new fix was added subsequently in 28514. func (s *DockerSuite) TestBuildCopyFileDotWithWorkdir(c *check.C) { name := "testbuildcopyfiledotwithworkdir" - buildImageSuccessfully(c, name, withBuildContext(c, - withFile("Dockerfile", `FROM busybox + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox WORKDIR /foo COPY file . 
RUN ["cat", "/foo/file"] `), - withFile("file", "content"), + build.WithFile("file", "content"), )) } @@ -6365,23 +6355,23 @@ func (s *DockerSuite) TestBuildLineErrorOnBuild(c *check.C) { } // FIXME(vdemeester) should be a unit test -func (s *DockerSuite) TestBuildLineErrorUknownInstruction(c *check.C) { +func (s *DockerSuite) TestBuildLineErrorUnknownInstruction(c *check.C) { name := "test_build_line_error_unknown_instruction" - buildImage(name, build.WithDockerfile(`FROM busybox + cli.Docker(cli.Build(name), build.WithDockerfile(`FROM busybox RUN echo hello world NOINSTRUCTION echo ba RUN echo hello ERROR `)).Assert(c, icmd.Expected{ ExitCode: 1, - Err: "Dockerfile parse error line 3: Unknown instruction: NOINSTRUCTION", + Err: "Dockerfile parse error line 3: unknown instruction: NOINSTRUCTION", }) } // FIXME(vdemeester) should be a unit test func (s *DockerSuite) TestBuildLineErrorWithEmptyLines(c *check.C) { name := "test_build_line_error_with_empty_lines" - buildImage(name, build.WithDockerfile(` + cli.Docker(cli.Build(name), build.WithDockerfile(` FROM busybox RUN echo hello world @@ -6391,21 +6381,21 @@ func (s *DockerSuite) TestBuildLineErrorWithEmptyLines(c *check.C) { CMD ["/bin/init"] `)).Assert(c, icmd.Expected{ ExitCode: 1, - Err: "Dockerfile parse error line 6: Unknown instruction: NOINSTRUCTION", + Err: "Dockerfile parse error line 6: unknown instruction: NOINSTRUCTION", }) } // FIXME(vdemeester) should be a unit test func (s *DockerSuite) TestBuildLineErrorWithComments(c *check.C) { name := "test_build_line_error_with_comments" - buildImage(name, build.WithDockerfile(`FROM busybox + cli.Docker(cli.Build(name), build.WithDockerfile(`FROM busybox # This will print hello world # and then ba RUN echo hello world NOINSTRUCTION echo ba `)).Assert(c, icmd.Expected{ ExitCode: 1, - Err: "Dockerfile parse error line 5: Unknown instruction: NOINSTRUCTION", + Err: "Dockerfile parse error line 5: unknown instruction: NOINSTRUCTION", }) } @@ -6423,3 +6413,78 @@ 
CMD echo foo out, _ := dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", "build2") c.Assert(strings.TrimSpace(out), checker.Equals, `["/bin/sh","-c","echo foo"]`) } + +func (s *DockerSuite) TestBuildIidFile(c *check.C) { + tmpDir, err := ioutil.TempDir("", "TestBuildIidFile") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + tmpIidFile := filepath.Join(tmpDir, "iid") + + name := "testbuildiidfile" + // Use a Dockerfile with multiple stages to ensure we get the last one + cli.BuildCmd(c, name, + build.WithDockerfile(`FROM `+minimalBaseImage()+` AS stage1 +ENV FOO FOO +FROM `+minimalBaseImage()+` +ENV BAR BAZ`), + cli.WithFlags("--iidfile", tmpIidFile)) + + id, err := ioutil.ReadFile(tmpIidFile) + c.Assert(err, check.IsNil) + d, err := digest.Parse(string(id)) + c.Assert(err, check.IsNil) + c.Assert(d.String(), checker.Equals, getIDByName(c, name)) +} + +func (s *DockerSuite) TestBuildIidFileCleanupOnFail(c *check.C) { + tmpDir, err := ioutil.TempDir("", "TestBuildIidFileCleanupOnFail") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + tmpIidFile := filepath.Join(tmpDir, "iid") + + err = ioutil.WriteFile(tmpIidFile, []byte("Dummy"), 0666) + c.Assert(err, check.IsNil) + + cli.Docker(cli.Build("testbuildiidfilecleanuponfail"), + build.WithDockerfile(`FROM `+minimalBaseImage()+` + RUN /non/existing/command`), + cli.WithFlags("--iidfile", tmpIidFile)).Assert(c, icmd.Expected{ + ExitCode: 1, + }) + _, err = os.Stat(tmpIidFile) + c.Assert(err, check.NotNil) + c.Assert(os.IsNotExist(err), check.Equals, true) +} + +func (s *DockerSuite) TestBuildIidFileSquash(c *check.C) { + testRequires(c, ExperimentalDaemon) + tmpDir, err := ioutil.TempDir("", "TestBuildIidFileSquash") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + tmpIidFile := filepath.Join(tmpDir, "iidsquash") + + name := "testbuildiidfilesquash" + // Use a Dockerfile with multiple stages to ensure we get the last one + cli.BuildCmd(c, name, + // This 
could be minimalBaseImage except + // https://github.com/moby/moby/issues/33823 requires + // `touch` to workaround. + build.WithDockerfile(`FROM busybox +ENV FOO FOO +ENV BAR BAR +RUN touch /foop +`), + cli.WithFlags("--iidfile", tmpIidFile, "--squash")) + + id, err := ioutil.ReadFile(tmpIidFile) + c.Assert(err, check.IsNil) + d, err := digest.Parse(string(id)) + c.Assert(err, check.IsNil) + c.Assert(d.String(), checker.Equals, getIDByName(c, name)) +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_build_unix_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_build_unix_test.go index d56f73a92..11c682325 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_build_unix_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_build_unix_test.go @@ -16,6 +16,8 @@ import ( "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/cli/build/fakecontext" "github.com/docker/docker/pkg/testutil" icmd "github.com/docker/docker/pkg/testutil/cmd" "github.com/docker/go-units" @@ -26,10 +28,10 @@ func (s *DockerSuite) TestBuildResourceConstraintsAreUsed(c *check.C) { testRequires(c, cpuCfsQuota) name := "testbuildresourceconstraints" - ctx := fakeContext(c, ` + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(` FROM hello-world:frozen RUN ["/hello"] - `, map[string]string{}) + `)) cli.Docker( cli.Args("build", "--no-cache", "--rm=false", "--memory=64m", "--memory-swap=-1", "--cpuset-cpus=0", "--cpuset-mems=0", "--cpu-shares=100", "--cpu-quota=8000", "--ulimit", "nofile=42", "-t", name, "."), cli.InDir(ctx.Dir), @@ -85,7 +87,7 @@ func (s *DockerSuite) TestBuildAddChangeOwnership(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildaddown" - ctx := func() *FakeContext { + ctx := func() *fakecontext.Fake { dockerfile := ` FROM busybox ADD 
foo /bar/ @@ -108,12 +110,12 @@ func (s *DockerSuite) TestBuildAddChangeOwnership(c *check.C) { if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { c.Fatalf("failed to open destination dockerfile: %v", err) } - return fakeContextFromDir(tmpDir) + return fakecontext.New(c, tmpDir) }() defer ctx.Close() - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) } // Test that an infinite sleep during a build is killed if the client disconnects. @@ -134,7 +136,7 @@ func (s *DockerSuite) TestBuildCancellationKillsSleep(c *check.C) { defer observer.Stop() // (Note: one year, will never finish) - ctx := fakeContext(c, "FROM busybox\nRUN sleep 31536000", nil) + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile("FROM busybox\nRUN sleep 31536000")) defer ctx.Close() buildCmd := exec.Command(dockerBinary, "build", "-t", name, ".") diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_by_digest_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_by_digest_test.go index e1db70ba2..c7115c88c 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_by_digest_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_by_digest_test.go @@ -12,6 +12,7 @@ import ( "github.com/docker/distribution/manifest/schema2" "github.com/docker/docker/api/types" "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" "github.com/docker/docker/integration-cli/cli/build" "github.com/docker/docker/pkg/stringutils" "github.com/go-check/check" @@ -35,24 +36,20 @@ func setupImageWithTag(c *check.C, tag string) (digest.Digest, error) { // new file is committed because this layer is used for detecting malicious // changes. if this was committed as empty layer it would be skipped on pull // and malicious changes would never be detected. 
- dockerCmd(c, "run", "-e", "digest=1", "--name", containerName, "busybox", "touch", "anewfile") + cli.DockerCmd(c, "run", "-e", "digest=1", "--name", containerName, "busybox", "touch", "anewfile") // tag the image to upload it to the private registry repoAndTag := repoName + ":" + tag - out, _, err := dockerCmdWithError("commit", containerName, repoAndTag) - c.Assert(err, checker.IsNil, check.Commentf("image tagging failed: %s", out)) + cli.DockerCmd(c, "commit", containerName, repoAndTag) // delete the container as we don't need it any more - err = deleteContainer(containerName) - c.Assert(err, checker.IsNil) + cli.DockerCmd(c, "rm", "-fv", containerName) // push the image - out, _, err = dockerCmdWithError("push", repoAndTag) - c.Assert(err, checker.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out)) + out := cli.DockerCmd(c, "push", repoAndTag).Combined() // delete our local repo that we previously tagged - rmiout, _, err := dockerCmdWithError("rmi", repoAndTag) - c.Assert(err, checker.IsNil, check.Commentf("error deleting images prior to real test: %s", rmiout)) + cli.DockerCmd(c, "rmi", repoAndTag) matches := pushDigestRegex.FindStringSubmatch(out) c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from push output: %s", out)) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_commit_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_commit_test.go index bedc358f5..b054c79c3 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_commit_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_commit_test.go @@ -40,7 +40,6 @@ func (s *DockerSuite) TestCommitWithoutPause(c *check.C) { //test commit a paused container should not unpause it after commit func (s *DockerSuite) TestCommitPausedContainer(c *check.C) { testRequires(c, DaemonIsLinux) - defer unpauseAllContainers(c) out, _ := dockerCmd(c, "run", "-i", "-d", 
"busybox") cleanedContainerID := strings.TrimSpace(out) @@ -55,9 +54,9 @@ func (s *DockerSuite) TestCommitPausedContainer(c *check.C) { } func (s *DockerSuite) TestCommitNewFile(c *check.C) { - dockerCmd(c, "run", "--name", "commiter", "busybox", "/bin/sh", "-c", "echo koye > /foo") + dockerCmd(c, "run", "--name", "committer", "busybox", "/bin/sh", "-c", "echo koye > /foo") - imageID, _ := dockerCmd(c, "commit", "commiter") + imageID, _ := dockerCmd(c, "commit", "committer") imageID = strings.TrimSpace(imageID) out, _ := dockerCmd(c, "run", imageID, "cat", "/foo") diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_config_create_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_config_create_test.go new file mode 100644 index 000000000..b82325487 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_config_create_test.go @@ -0,0 +1,131 @@ +// +build !windows + +package main + +import ( + "io/ioutil" + "os" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestConfigCreate(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + config := d.GetConfig(c, id) + c.Assert(config.Spec.Name, checker.Equals, testName) +} + +func (s *DockerSwarmSuite) TestConfigCreateWithLabels(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + Labels: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", 
id)) + + config := d.GetConfig(c, id) + c.Assert(config.Spec.Name, checker.Equals, testName) + c.Assert(len(config.Spec.Labels), checker.Equals, 2) + c.Assert(config.Spec.Labels["key1"], checker.Equals, "value1") + c.Assert(config.Spec.Labels["key2"], checker.Equals, "value2") +} + +// Test case for 28884 +func (s *DockerSwarmSuite) TestConfigCreateResolve(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: name, + }, + Data: []byte("foo"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + fake := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: id, + }, + Data: []byte("fake foo"), + }) + c.Assert(fake, checker.Not(checker.Equals), "", check.Commentf("configs: %s", fake)) + + out, err := d.Cmd("config", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + c.Assert(out, checker.Contains, fake) + + out, err = d.Cmd("config", "rm", id) + c.Assert(out, checker.Contains, id) + + // Fake one will remain + out, err = d.Cmd("config", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Contains, fake) + + // Remove based on name prefix of the fake one + // (which is the same as the ID of foo one) should not work + // as search is only done based on: + // - Full ID + // - Full Name + // - Partial ID (prefix) + out, err = d.Cmd("config", "rm", id[:5]) + c.Assert(out, checker.Not(checker.Contains), id) + out, err = d.Cmd("config", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Contains, fake) + + // Remove based on ID prefix of the fake one should succeed + out, err = d.Cmd("config", "rm", fake[:5]) + c.Assert(out, checker.Contains, fake[:5]) + out, err = d.Cmd("config", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, 
checker.Not(checker.Contains), name) + c.Assert(out, checker.Not(checker.Contains), id) + c.Assert(out, checker.Not(checker.Contains), fake) +} + +func (s *DockerSwarmSuite) TestConfigCreateWithFile(c *check.C) { + d := s.AddDaemon(c, true, true) + + testFile, err := ioutil.TempFile("", "configCreateTest") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) + defer os.Remove(testFile.Name()) + + testData := "TESTINGDATA" + _, err = testFile.Write([]byte(testData)) + c.Assert(err, checker.IsNil, check.Commentf("failed to write to temporary file")) + + testName := "test_config" + out, err := d.Cmd("config", "create", testName, testFile.Name()) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "", check.Commentf(out)) + + id := strings.TrimSpace(out) + config := d.GetConfig(c, id) + c.Assert(config.Spec.Name, checker.Equals, testName) +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_config_inspect_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_config_inspect_test.go new file mode 100644 index 000000000..ba4e80f07 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_config_inspect_test.go @@ -0,0 +1,68 @@ +// +build !windows + +package main + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestConfigInspect(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + config := d.GetConfig(c, id) + c.Assert(config.Spec.Name, checker.Equals, testName) + + out, err := d.Cmd("config", "inspect", testName) + c.Assert(err, checker.IsNil, 
check.Commentf(out)) + + var configs []swarm.Config + c.Assert(json.Unmarshal([]byte(out), &configs), checker.IsNil) + c.Assert(configs, checker.HasLen, 1) +} + +func (s *DockerSwarmSuite) TestConfigInspectMultiple(c *check.C) { + d := s.AddDaemon(c, true, true) + + testNames := []string{ + "test0", + "test1", + } + for _, n := range testNames { + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: n, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + config := d.GetConfig(c, id) + c.Assert(config.Spec.Name, checker.Equals, n) + + } + + args := []string{ + "config", + "inspect", + } + args = append(args, testNames...) + out, err := d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var configs []swarm.Config + c.Assert(json.Unmarshal([]byte(out), &configs), checker.IsNil) + c.Assert(configs, checker.HasLen, 2) +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_config_ls_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_config_ls_test.go new file mode 100644 index 000000000..5c0701261 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_config_ls_test.go @@ -0,0 +1,125 @@ +// +build !windows + +package main + +import ( + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestConfigList(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName0 := "test0" + testName1 := "test1" + + // create config test0 + id0 := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName0, + Labels: map[string]string{"type": "test"}, + }, + Data: []byte("TESTINGDATA0"), + }) + c.Assert(id0, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id0)) + + config := d.GetConfig(c, id0) + c.Assert(config.Spec.Name, 
checker.Equals, testName0) + + // create config test1 + id1 := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName1, + Labels: map[string]string{"type": "production"}, + }, + Data: []byte("TESTINGDATA1"), + }) + c.Assert(id1, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id1)) + + config = d.GetConfig(c, id1) + c.Assert(config.Spec.Name, checker.Equals, testName1) + + // test by command `docker config ls` + out, err := d.Cmd("config", "ls") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Contains, testName0) + c.Assert(strings.TrimSpace(out), checker.Contains, testName1) + + // test filter by name `docker config ls --filter name=xxx` + args := []string{ + "config", + "ls", + "--filter", + "name=test0", + } + out, err = d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Contains, testName0) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), testName1) + + // test filter by id `docker config ls --filter id=xxx` + args = []string{ + "config", + "ls", + "--filter", + "id=" + id1, + } + out, err = d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), testName0) + c.Assert(strings.TrimSpace(out), checker.Contains, testName1) + + // test filter by label `docker config ls --filter label=xxx` + args = []string{ + "config", + "ls", + "--filter", + "label=type", + } + out, err = d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Contains, testName0) + c.Assert(strings.TrimSpace(out), checker.Contains, testName1) + + args = []string{ + "config", + "ls", + "--filter", + "label=type=test", + } + out, err = d.Cmd(args...) 
+ c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Contains, testName0) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), testName1) + + args = []string{ + "config", + "ls", + "--filter", + "label=type=production", + } + out, err = d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), testName0) + c.Assert(strings.TrimSpace(out), checker.Contains, testName1) + + // test invalid filter `docker config ls --filter noexisttype=xxx` + args = []string{ + "config", + "ls", + "--filter", + "noexisttype=test0", + } + out, err = d.Cmd(args...) + c.Assert(err, checker.NotNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Contains, "Error response from daemon: Invalid filter 'noexisttype'") +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_config_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_config_test.go index efe2ea761..46fe456bd 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_config_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_config_test.go @@ -9,7 +9,6 @@ import ( "runtime" "github.com/docker/docker/api" - "github.com/docker/docker/dockerversion" "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/pkg/homedir" icmd "github.com/docker/docker/pkg/testutil/cmd" @@ -59,7 +58,7 @@ func (s *DockerSuite) TestConfigHTTPHeader(c *check.C) { c.Assert(headers["User-Agent"], checker.NotNil, check.Commentf("Missing User-Agent")) - c.Assert(headers["User-Agent"][0], checker.Equals, "Docker-Client/"+dockerversion.Version+" ("+runtime.GOOS+")", check.Commentf("Badly formatted User-Agent,out:%v", result.Combined())) + c.Assert(headers["User-Agent"][0], checker.Equals, "Docker-Client/"+os.Getenv("DOCKER_CLI_VERSION")+" ("+runtime.GOOS+")", check.Commentf("Badly formatted User-Agent,out:%v", 
result.Combined())) c.Assert(headers["Myheader"], checker.NotNil) c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("Missing/bad header,out:%v", result.Combined())) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_from_container_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_from_container_test.go index 30cacf42b..116f24610 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_from_container_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_from_container_test.go @@ -29,7 +29,7 @@ func (s *DockerSuite) TestCpFromErrSrcNotExists(c *check.C) { tmpDir := getTestDir(c, "test-cp-from-err-src-not-exists") defer os.RemoveAll(tmpDir) - err := runDockerCp(c, containerCpPath(containerID, "file1"), tmpDir) + err := runDockerCp(c, containerCpPath(containerID, "file1"), tmpDir, nil) c.Assert(err, checker.NotNil) c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) @@ -44,7 +44,7 @@ func (s *DockerSuite) TestCpFromErrSrcNotDir(c *check.C) { tmpDir := getTestDir(c, "test-cp-from-err-src-not-dir") defer os.RemoveAll(tmpDir) - err := runDockerCp(c, containerCpPathTrailingSep(containerID, "file1"), tmpDir) + err := runDockerCp(c, containerCpPathTrailingSep(containerID, "file1"), tmpDir, nil) c.Assert(err, checker.NotNil) c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) @@ -65,7 +65,7 @@ func (s *DockerSuite) TestCpFromErrDstParentNotExists(c *check.C) { srcPath := containerCpPath(containerID, "/file1") dstPath := cpPath(tmpDir, "notExists", "file1") - err := runDockerCp(c, srcPath, dstPath) + err := runDockerCp(c, srcPath, dstPath, nil) c.Assert(err, checker.NotNil) c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) @@ -73,7 +73,7 @@ func (s *DockerSuite) 
TestCpFromErrDstParentNotExists(c *check.C) { // Try with a directory source. srcPath = containerCpPath(containerID, "/dir1") - err = runDockerCp(c, srcPath, dstPath) + err = runDockerCp(c, srcPath, dstPath, nil) c.Assert(err, checker.NotNil) c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) @@ -94,7 +94,7 @@ func (s *DockerSuite) TestCpFromErrDstNotDir(c *check.C) { srcPath := containerCpPath(containerID, "/file1") dstPath := cpPathTrailingSep(tmpDir, "file1") - err := runDockerCp(c, srcPath, dstPath) + err := runDockerCp(c, srcPath, dstPath, nil) c.Assert(err, checker.NotNil) c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) @@ -102,7 +102,7 @@ func (s *DockerSuite) TestCpFromErrDstNotDir(c *check.C) { // Try with a directory source. srcPath = containerCpPath(containerID, "/dir1") - err = runDockerCp(c, srcPath, dstPath) + err = runDockerCp(c, srcPath, dstPath, nil) c.Assert(err, checker.NotNil) c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) @@ -124,7 +124,7 @@ func (s *DockerSuite) TestCpFromSymlinkDestination(c *check.C) { srcPath := containerCpPath(containerID, "/file2") dstPath := cpPath(tmpDir, "symlinkToFile1") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) // The symlink should not have been modified. c.Assert(symlinkTargetEquals(c, dstPath, "file1"), checker.IsNil) @@ -136,7 +136,7 @@ func (s *DockerSuite) TestCpFromSymlinkDestination(c *check.C) { // should copy the file into the symlink target directory. dstPath = cpPath(tmpDir, "symlinkToDir1") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) // The symlink should not have been modified. 
c.Assert(symlinkTargetEquals(c, dstPath, "dir1"), checker.IsNil) @@ -149,7 +149,7 @@ func (s *DockerSuite) TestCpFromSymlinkDestination(c *check.C) { // the contents of the source file. dstPath = cpPath(tmpDir, "brokenSymlinkToFileX") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) // The symlink should not have been modified. c.Assert(symlinkTargetEquals(c, dstPath, "fileX"), checker.IsNil) @@ -163,7 +163,7 @@ func (s *DockerSuite) TestCpFromSymlinkDestination(c *check.C) { srcPath = containerCpPath(containerID, "/dir2") dstPath = cpPath(tmpDir, "symlinkToDir1") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) // The symlink should not have been modified. c.Assert(symlinkTargetEquals(c, dstPath, "dir1"), checker.IsNil) @@ -177,7 +177,7 @@ func (s *DockerSuite) TestCpFromSymlinkDestination(c *check.C) { // should not modify the symlink. dstPath = cpPath(tmpDir, "brokenSymlinkToDirX") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) // The symlink should not have been modified. 
c.Assert(symlinkTargetEquals(c, dstPath, "dirX"), checker.IsNil) @@ -217,7 +217,7 @@ func (s *DockerSuite) TestCpFromCaseA(c *check.C) { srcPath := containerCpPath(containerID, "/root/file1") dstPath := cpPath(tmpDir, "itWorks.txt") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) } @@ -235,7 +235,7 @@ func (s *DockerSuite) TestCpFromCaseB(c *check.C) { srcPath := containerCpPath(containerID, "/file1") dstDir := cpPathTrailingSep(tmpDir, "testDir") - err := runDockerCp(c, srcPath, dstDir) + err := runDockerCp(c, srcPath, dstDir, nil) c.Assert(err, checker.NotNil) c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExists error, but got %T: %s", err, err)) @@ -260,7 +260,7 @@ func (s *DockerSuite) TestCpFromCaseC(c *check.C) { // Ensure the local file starts with different content. c.Assert(fileContentEquals(c, dstPath, "file2\n"), checker.IsNil) - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) } @@ -285,7 +285,7 @@ func (s *DockerSuite) TestCpFromCaseD(c *check.C) { _, err := os.Stat(dstPath) c.Assert(os.IsNotExist(err), checker.True, check.Commentf("did not expect dstPath %q to exist", dstPath)) - c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstDir, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) @@ -299,7 +299,7 @@ func (s *DockerSuite) TestCpFromCaseD(c *check.C) { dstDir = cpPathTrailingSep(tmpDir, "dir1") - c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstDir, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) } @@ -319,7 +319,7 @@ func (s *DockerSuite) TestCpFromCaseE(c *check.C) { dstDir := 
cpPath(tmpDir, "testDir") dstPath := filepath.Join(dstDir, "file1-1") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) @@ -330,7 +330,7 @@ func (s *DockerSuite) TestCpFromCaseE(c *check.C) { dstDir = cpPathTrailingSep(tmpDir, "testDir") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) } @@ -351,7 +351,7 @@ func (s *DockerSuite) TestCpFromCaseF(c *check.C) { srcDir := containerCpPath(containerID, "/root/dir1") dstFile := cpPath(tmpDir, "file1") - err := runDockerCp(c, srcDir, dstFile) + err := runDockerCp(c, srcDir, dstFile, nil) c.Assert(err, checker.NotNil) c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) @@ -376,7 +376,7 @@ func (s *DockerSuite) TestCpFromCaseG(c *check.C) { resultDir := filepath.Join(dstDir, "dir1") dstPath := filepath.Join(resultDir, "file1-1") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) @@ -390,7 +390,7 @@ func (s *DockerSuite) TestCpFromCaseG(c *check.C) { dstDir = cpPathTrailingSep(tmpDir, "dir2") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) } @@ -410,7 +410,7 @@ func (s *DockerSuite) TestCpFromCaseH(c *check.C) { dstDir := cpPath(tmpDir, "testDir") dstPath := filepath.Join(dstDir, "file1-1") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) @@ -421,7 +421,7 @@ func (s 
*DockerSuite) TestCpFromCaseH(c *check.C) { dstDir = cpPathTrailingSep(tmpDir, "testDir") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) } @@ -443,7 +443,7 @@ func (s *DockerSuite) TestCpFromCaseI(c *check.C) { srcDir := containerCpPathTrailingSep(containerID, "/root/dir1") + "." dstFile := cpPath(tmpDir, "file1") - err := runDockerCp(c, srcDir, dstFile) + err := runDockerCp(c, srcDir, dstFile, nil) c.Assert(err, checker.NotNil) c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) @@ -468,7 +468,7 @@ func (s *DockerSuite) TestCpFromCaseJ(c *check.C) { dstDir := cpPath(tmpDir, "dir2") dstPath := filepath.Join(dstDir, "file1-1") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) @@ -482,7 +482,7 @@ func (s *DockerSuite) TestCpFromCaseJ(c *check.C) { dstDir = cpPathTrailingSep(tmpDir, "dir2") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) } diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_test.go index 80dc56289..f7ed45919 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_test.go @@ -28,7 +28,7 @@ const ( // Ensure that an all-local path case returns an error. 
func (s *DockerSuite) TestCpLocalOnly(c *check.C) { - err := runDockerCp(c, "foo", "bar") + err := runDockerCp(c, "foo", "bar", nil) c.Assert(err, checker.NotNil) c.Assert(err.Error(), checker.Contains, "must specify at least one container source") diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_test.go index 30e152dbe..97e9aa123 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_test.go @@ -31,7 +31,7 @@ func (s *DockerSuite) TestCpToErrSrcNotExists(c *check.C) { srcPath := cpPath(tmpDir, "file1") dstPath := containerCpPath(containerID, "file1") - err := runDockerCp(c, srcPath, dstPath) + err := runDockerCp(c, srcPath, dstPath, nil) c.Assert(err, checker.NotNil) c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) @@ -50,7 +50,7 @@ func (s *DockerSuite) TestCpToErrSrcNotDir(c *check.C) { srcPath := cpPathTrailingSep(tmpDir, "file1") dstPath := containerCpPath(containerID, "testDir") - err := runDockerCp(c, srcPath, dstPath) + err := runDockerCp(c, srcPath, dstPath, nil) c.Assert(err, checker.NotNil) c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) @@ -71,7 +71,7 @@ func (s *DockerSuite) TestCpToErrDstParentNotExists(c *check.C) { srcPath := cpPath(tmpDir, "file1") dstPath := containerCpPath(containerID, "/notExists", "file1") - err := runDockerCp(c, srcPath, dstPath) + err := runDockerCp(c, srcPath, dstPath, nil) c.Assert(err, checker.NotNil) c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) @@ -79,7 +79,7 @@ func (s *DockerSuite) TestCpToErrDstParentNotExists(c *check.C) { // Try with a directory source. 
srcPath = cpPath(tmpDir, "dir1") - err = runDockerCp(c, srcPath, dstPath) + err = runDockerCp(c, srcPath, dstPath, nil) c.Assert(err, checker.NotNil) c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) @@ -104,7 +104,7 @@ func (s *DockerSuite) TestCpToErrDstNotDir(c *check.C) { // The client should encounter an error trying to stat the destination // and then be unable to copy since the destination is asserted to be a // directory but does not exist. - err := runDockerCp(c, srcPath, dstPath) + err := runDockerCp(c, srcPath, dstPath, nil) c.Assert(err, checker.NotNil) c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExist error, but got %T: %s", err, err)) @@ -116,7 +116,7 @@ func (s *DockerSuite) TestCpToErrDstNotDir(c *check.C) { // then decide to extract to the parent directory instead with a rebased // name in the source archive, but this directory would overwrite the // existing file with the same name. - err = runDockerCp(c, srcPath, dstPath) + err = runDockerCp(c, srcPath, dstPath, nil) c.Assert(err, checker.NotNil) c.Assert(isCannotOverwriteNonDirWithDir(err), checker.True, check.Commentf("expected CannotOverwriteNonDirWithDir error, but got %T: %s", err, err)) @@ -144,7 +144,7 @@ func (s *DockerSuite) TestCpToSymlinkDestination(c *check.C) { srcPath := cpPath(testVol, "file2") dstPath := containerCpPath(containerID, "/vol2/symlinkToFile1") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) // The symlink should not have been modified. c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToFile1"), "file1"), checker.IsNil) @@ -156,7 +156,7 @@ func (s *DockerSuite) TestCpToSymlinkDestination(c *check.C) { // This should copy the file into the symlink target directory. 
dstPath = containerCpPath(containerID, "/vol2/symlinkToDir1") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) // The symlink should not have been modified. c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToDir1"), "dir1"), checker.IsNil) @@ -169,7 +169,7 @@ func (s *DockerSuite) TestCpToSymlinkDestination(c *check.C) { // contents of the source file. dstPath = containerCpPath(containerID, "/vol2/brokenSymlinkToFileX") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) // The symlink should not have been modified. c.Assert(symlinkTargetEquals(c, cpPath(testVol, "brokenSymlinkToFileX"), "fileX"), checker.IsNil) @@ -183,7 +183,7 @@ func (s *DockerSuite) TestCpToSymlinkDestination(c *check.C) { srcPath = cpPath(testVol, "/dir2") dstPath = containerCpPath(containerID, "/vol2/symlinkToDir1") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) // The symlink should not have been modified. c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToDir1"), "dir1"), checker.IsNil) @@ -197,7 +197,7 @@ func (s *DockerSuite) TestCpToSymlinkDestination(c *check.C) { // should not modify the symlink. dstPath = containerCpPath(containerID, "/vol2/brokenSymlinkToDirX") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) // The symlink should not have been modified. 
c.Assert(symlinkTargetEquals(c, cpPath(testVol, "brokenSymlinkToDirX"), "dirX"), checker.IsNil) @@ -238,7 +238,7 @@ func (s *DockerSuite) TestCpToCaseA(c *check.C) { srcPath := cpPath(tmpDir, "file1") dstPath := containerCpPath(containerID, "/root/itWorks.txt") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) } @@ -259,7 +259,7 @@ func (s *DockerSuite) TestCpToCaseB(c *check.C) { srcPath := cpPath(tmpDir, "file1") dstDir := containerCpPathTrailingSep(containerID, "testDir") - err := runDockerCp(c, srcPath, dstDir) + err := runDockerCp(c, srcPath, dstDir, nil) c.Assert(err, checker.NotNil) c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExists error, but got %T: %s", err, err)) @@ -285,7 +285,7 @@ func (s *DockerSuite) TestCpToCaseC(c *check.C) { // Ensure the container's file starts with the original content. c.Assert(containerStartOutputEquals(c, containerID, "file2\n"), checker.IsNil) - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) // Should now contain file1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) @@ -312,7 +312,7 @@ func (s *DockerSuite) TestCpToCaseD(c *check.C) { // Ensure that dstPath doesn't exist. c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) - c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstDir, nil), checker.IsNil) // Should now contain file1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) @@ -330,7 +330,7 @@ func (s *DockerSuite) TestCpToCaseD(c *check.C) { // Ensure that dstPath doesn't exist. 
c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) - c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstDir, nil), checker.IsNil) // Should now contain file1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) @@ -353,7 +353,7 @@ func (s *DockerSuite) TestCpToCaseE(c *check.C) { srcDir := cpPath(tmpDir, "dir1") dstDir := containerCpPath(containerID, "testDir") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) // Should now contain file1-1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) @@ -367,7 +367,7 @@ func (s *DockerSuite) TestCpToCaseE(c *check.C) { dstDir = containerCpPathTrailingSep(containerID, "testDir") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) // Should now contain file1-1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) @@ -389,7 +389,7 @@ func (s *DockerSuite) TestCpToCaseF(c *check.C) { srcDir := cpPath(tmpDir, "dir1") dstFile := containerCpPath(containerID, "/root/file1") - err := runDockerCp(c, srcDir, dstFile) + err := runDockerCp(c, srcDir, dstFile, nil) c.Assert(err, checker.NotNil) c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) @@ -416,7 +416,7 @@ func (s *DockerSuite) TestCpToCaseG(c *check.C) { // Ensure that dstPath doesn't exist. c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) // Should now contain file1-1's contents. 
c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) @@ -434,7 +434,7 @@ func (s *DockerSuite) TestCpToCaseG(c *check.C) { // Ensure that dstPath doesn't exist. c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) // Should now contain file1-1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) @@ -457,7 +457,7 @@ func (s *DockerSuite) TestCpToCaseH(c *check.C) { srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." dstDir := containerCpPath(containerID, "testDir") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) // Should now contain file1-1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) @@ -471,7 +471,7 @@ func (s *DockerSuite) TestCpToCaseH(c *check.C) { dstDir = containerCpPathTrailingSep(containerID, "testDir") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) // Should now contain file1-1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) @@ -494,7 +494,7 @@ func (s *DockerSuite) TestCpToCaseI(c *check.C) { srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." dstFile := containerCpPath(containerID, "/root/file1") - err := runDockerCp(c, srcDir, dstFile) + err := runDockerCp(c, srcDir, dstFile, nil) c.Assert(err, checker.NotNil) c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) @@ -522,7 +522,7 @@ func (s *DockerSuite) TestCpToCaseJ(c *check.C) { // Ensure that dstPath doesn't exist. 
c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) // Should now contain file1-1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) @@ -539,7 +539,7 @@ func (s *DockerSuite) TestCpToCaseJ(c *check.C) { // Ensure that dstPath doesn't exist. c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) // Should now contain file1-1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) @@ -563,7 +563,7 @@ func (s *DockerSuite) TestCpToErrReadOnlyRootfs(c *check.C) { srcPath := cpPath(tmpDir, "file1") dstPath := containerCpPath(containerID, "/root/shouldNotExist") - err := runDockerCp(c, srcPath, dstPath) + err := runDockerCp(c, srcPath, dstPath, nil) c.Assert(err, checker.NotNil) c.Assert(isCpCannotCopyReadOnly(err), checker.True, check.Commentf("expected ErrContainerRootfsReadonly error, but got %T: %s", err, err)) @@ -590,7 +590,7 @@ func (s *DockerSuite) TestCpToErrReadOnlyVolume(c *check.C) { srcPath := cpPath(tmpDir, "file1") dstPath := containerCpPath(containerID, "/vol_ro/shouldNotExist") - err := runDockerCp(c, srcPath, dstPath) + err := runDockerCp(c, srcPath, dstPath, nil) c.Assert(err, checker.NotNil) c.Assert(isCpCannotCopyReadOnly(err), checker.True, check.Commentf("expected ErrVolumeReadonly error, but got %T: %s", err, err)) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_unix_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_unix_test.go index e369d80e1..fa55b6ee2 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_unix_test.go +++ 
b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_unix_test.go @@ -14,6 +14,29 @@ import ( "github.com/go-check/check" ) +func (s *DockerSuite) TestCpToContainerWithPermissions(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + tmpDir := getTestDir(c, "test-cp-to-host-with-permissions") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + containerName := "permtest" + + _, exc := dockerCmd(c, "create", "--name", containerName, "debian:jessie", "/bin/bash", "-c", "stat -c '%u %g %a' /permdirtest /permdirtest/permtest") + c.Assert(exc, checker.Equals, 0) + defer dockerCmd(c, "rm", "-f", containerName) + + srcPath := cpPath(tmpDir, "permdirtest") + dstPath := containerCpPath(containerName, "/") + c.Assert(runDockerCp(c, srcPath, dstPath, []string{"-a"}), checker.IsNil) + + out, err := startContainerGetOutput(c, containerName) + c.Assert(err, checker.IsNil, check.Commentf("output: %v", out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "2 2 700\n65534 65534 400", check.Commentf("output: %v", out)) +} + // Check ownership is root, both in non-userns and userns enabled modes func (s *DockerSuite) TestCpCheckDestOwnership(c *check.C) { testRequires(c, DaemonIsLinux, SameHostDaemon) @@ -29,7 +52,7 @@ func (s *DockerSuite) TestCpCheckDestOwnership(c *check.C) { srcPath := cpPath(tmpDir, "file1") dstPath := containerCpPath(containerID, "/tmpvol", "file1") - err := runDockerCp(c, srcPath, dstPath) + err := runDockerCp(c, srcPath, dstPath, nil) c.Assert(err, checker.IsNil) stat, err := system.Stat(filepath.Join(tmpVolDir, "file1")) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils_test.go similarity index 78% rename from fn/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils.go rename to fn/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils_test.go index 
3c51ece1e..48aff9061 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils_test.go @@ -7,6 +7,7 @@ import ( "os" "os/exec" "path/filepath" + "runtime" "strings" "github.com/docker/docker/integration-cli/checker" @@ -26,6 +27,9 @@ type fileData struct { filetype fileType path string contents string + uid int + gid int + mode int } func (fd fileData) creationCommand() string { @@ -55,31 +59,33 @@ func mkFilesCommand(fds []fileData) string { } var defaultFileData = []fileData{ - {ftRegular, "file1", "file1"}, - {ftRegular, "file2", "file2"}, - {ftRegular, "file3", "file3"}, - {ftRegular, "file4", "file4"}, - {ftRegular, "file5", "file5"}, - {ftRegular, "file6", "file6"}, - {ftRegular, "file7", "file7"}, - {ftDir, "dir1", ""}, - {ftRegular, "dir1/file1-1", "file1-1"}, - {ftRegular, "dir1/file1-2", "file1-2"}, - {ftDir, "dir2", ""}, - {ftRegular, "dir2/file2-1", "file2-1"}, - {ftRegular, "dir2/file2-2", "file2-2"}, - {ftDir, "dir3", ""}, - {ftRegular, "dir3/file3-1", "file3-1"}, - {ftRegular, "dir3/file3-2", "file3-2"}, - {ftDir, "dir4", ""}, - {ftRegular, "dir4/file3-1", "file4-1"}, - {ftRegular, "dir4/file3-2", "file4-2"}, - {ftDir, "dir5", ""}, - {ftSymlink, "symlinkToFile1", "file1"}, - {ftSymlink, "symlinkToDir1", "dir1"}, - {ftSymlink, "brokenSymlinkToFileX", "fileX"}, - {ftSymlink, "brokenSymlinkToDirX", "dirX"}, - {ftSymlink, "symlinkToAbsDir", "/root"}, + {ftRegular, "file1", "file1", 0, 0, 0666}, + {ftRegular, "file2", "file2", 0, 0, 0666}, + {ftRegular, "file3", "file3", 0, 0, 0666}, + {ftRegular, "file4", "file4", 0, 0, 0666}, + {ftRegular, "file5", "file5", 0, 0, 0666}, + {ftRegular, "file6", "file6", 0, 0, 0666}, + {ftRegular, "file7", "file7", 0, 0, 0666}, + {ftDir, "dir1", "", 0, 0, 0777}, + {ftRegular, "dir1/file1-1", "file1-1", 0, 0, 0666}, + {ftRegular, "dir1/file1-2", "file1-2", 0, 0, 0666}, + {ftDir, "dir2", "", 0, 0, 0666}, + {ftRegular, 
"dir2/file2-1", "file2-1", 0, 0, 0666}, + {ftRegular, "dir2/file2-2", "file2-2", 0, 0, 0666}, + {ftDir, "dir3", "", 0, 0, 0666}, + {ftRegular, "dir3/file3-1", "file3-1", 0, 0, 0666}, + {ftRegular, "dir3/file3-2", "file3-2", 0, 0, 0666}, + {ftDir, "dir4", "", 0, 0, 0666}, + {ftRegular, "dir4/file3-1", "file4-1", 0, 0, 0666}, + {ftRegular, "dir4/file3-2", "file4-2", 0, 0, 0666}, + {ftDir, "dir5", "", 0, 0, 0666}, + {ftSymlink, "symlinkToFile1", "file1", 0, 0, 0666}, + {ftSymlink, "symlinkToDir1", "dir1", 0, 0, 0666}, + {ftSymlink, "brokenSymlinkToFileX", "fileX", 0, 0, 0666}, + {ftSymlink, "brokenSymlinkToDirX", "dirX", 0, 0, 0666}, + {ftSymlink, "symlinkToAbsDir", "/root", 0, 0, 0666}, + {ftDir, "permdirtest", "", 2, 2, 0700}, + {ftRegular, "permdirtest/permtest", "perm_test", 65534, 65534, 0400}, } func defaultMkContentCommand() string { @@ -91,12 +97,16 @@ func makeTestContentInDir(c *check.C, dir string) { path := filepath.Join(dir, filepath.FromSlash(fd.path)) switch fd.filetype { case ftRegular: - c.Assert(ioutil.WriteFile(path, []byte(fd.contents+"\n"), os.FileMode(0666)), checker.IsNil) + c.Assert(ioutil.WriteFile(path, []byte(fd.contents+"\n"), os.FileMode(fd.mode)), checker.IsNil) case ftDir: - c.Assert(os.Mkdir(path, os.FileMode(0777)), checker.IsNil) + c.Assert(os.Mkdir(path, os.FileMode(fd.mode)), checker.IsNil) case ftSymlink: c.Assert(os.Symlink(fd.contents, path), checker.IsNil) } + + if fd.filetype != ftSymlink && runtime.GOOS != "windows" { + c.Assert(os.Chown(path, fd.uid, fd.gid), checker.IsNil) + } } } @@ -178,10 +188,16 @@ func containerCpPathTrailingSep(containerID string, pathElements ...string) stri return fmt.Sprintf("%s/", containerCpPath(containerID, pathElements...)) } -func runDockerCp(c *check.C, src, dst string) (err error) { - c.Logf("running `docker cp %s %s`", src, dst) +func runDockerCp(c *check.C, src, dst string, params []string) (err error) { + c.Logf("running `docker cp %s %s %s`", strings.Join(params, " "), src, dst) - args := 
[]string{"cp", src, dst} + args := []string{"cp"} + + for _, param := range params { + args = append(args, param) + } + + args = append(args, src, dst) out, _, err := runCommandWithOutput(exec.Command(dockerBinary, args...)) if err != nil { diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_create_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_create_test.go index 46ff4634a..d4eb985a3 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_create_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_create_test.go @@ -10,9 +10,10 @@ import ( "time" "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/cli/build/fakecontext" "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/testutil" icmd "github.com/docker/docker/pkg/testutil/cmd" "github.com/docker/go-connections/nat" "github.com/go-check/check" @@ -293,24 +294,23 @@ func (s *DockerTrustSuite) TestTrustedCreate(c *check.C) { repoName := s.setupTrustedImage(c, "trusted-create") // Try create - icmd.RunCmd(icmd.Command(dockerBinary, "create", repoName), trustedCmd).Assert(c, SuccessTagging) - - dockerCmd(c, "rmi", repoName) + cli.Docker(cli.Args("create", repoName), trustedCmd).Assert(c, SuccessTagging) + cli.DockerCmd(c, "rmi", repoName) // Try untrusted create to ensure we pushed the tag to the registry - icmd.RunCmd(icmd.Command(dockerBinary, "create", "--disable-content-trust=true", repoName), trustedCmd).Assert(c, SuccessDownloadedOnStderr) + cli.Docker(cli.Args("create", "--disable-content-trust=true", repoName)).Assert(c, SuccessDownloadedOnStderr) } func (s *DockerTrustSuite) TestUntrustedCreate(c *check.C) { repoName := fmt.Sprintf("%v/dockercliuntrusted/createtest", privateRegistryURL) withTagName := fmt.Sprintf("%s:latest", repoName) // tag the image and 
upload it to the private registry - dockerCmd(c, "tag", "busybox", withTagName) - dockerCmd(c, "push", withTagName) - dockerCmd(c, "rmi", withTagName) + cli.DockerCmd(c, "tag", "busybox", withTagName) + cli.DockerCmd(c, "push", withTagName) + cli.DockerCmd(c, "rmi", withTagName) // Try trusted create on untrusted tag - icmd.RunCmd(icmd.Command(dockerBinary, "create", withTagName), trustedCmd).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("create", withTagName), trustedCmd).Assert(c, icmd.Expected{ ExitCode: 1, Err: fmt.Sprintf("does not have trust data for %s", repoName), }) @@ -320,36 +320,10 @@ func (s *DockerTrustSuite) TestTrustedIsolatedCreate(c *check.C) { repoName := s.setupTrustedImage(c, "trusted-isolated-create") // Try create - icmd.RunCmd(icmd.Command(dockerBinary, "--config", "/tmp/docker-isolated-create", "create", repoName), trustedCmd).Assert(c, SuccessTagging) + cli.Docker(cli.Args("--config", "/tmp/docker-isolated-create", "create", repoName), trustedCmd).Assert(c, SuccessTagging) defer os.RemoveAll("/tmp/docker-isolated-create") - dockerCmd(c, "rmi", repoName) -} - -func (s *DockerTrustSuite) TestCreateWhenCertExpired(c *check.C) { - c.Skip("Currently changes system time, causing instability") - repoName := s.setupTrustedImage(c, "trusted-create-expired") - - // Certificates have 10 years of expiration - elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11) - - testutil.RunAtDifferentDate(elevenYearsFromNow, func() { - // Try create - icmd.RunCmd(icmd.Cmd{ - Command: []string{dockerBinary, "create", repoName}, - }, trustedCmd).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: "could not validate the path to a trusted root", - }) - }) - - testutil.RunAtDifferentDate(elevenYearsFromNow, func() { - // Try create - result := icmd.RunCmd(icmd.Command(dockerBinary, "create", "--disable-content-trust", repoName), trustedCmd) - c.Assert(result.Error, check.Not(check.IsNil)) - c.Assert(string(result.Combined()), checker.Contains, "Status: 
Downloaded", check.Commentf("Missing expected output on trusted create in the distant future:\n%s", result.Combined())) - - }) + cli.DockerCmd(c, "rmi", repoName) } func (s *DockerTrustSuite) TestTrustedCreateFromBadTrustServer(c *check.C) { @@ -358,16 +332,13 @@ func (s *DockerTrustSuite) TestTrustedCreateFromBadTrustServer(c *check.C) { c.Assert(err, check.IsNil) // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - icmd.RunCmd(icmd.Command(dockerBinary, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - - dockerCmd(c, "rmi", repoName) + cli.DockerCmd(c, "tag", "busybox", repoName) + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.DockerCmd(c, "rmi", repoName) // Try create - icmd.RunCmd(icmd.Command(dockerBinary, "create", repoName), trustedCmd).Assert(c, SuccessTagging) - - dockerCmd(c, "rmi", repoName) + cli.Docker(cli.Args("create", repoName), trustedCmd).Assert(c, SuccessTagging) + cli.DockerCmd(c, "rmi", repoName) // Kill the notary server, start a new "evil" one. s.not.Close() @@ -376,13 +347,13 @@ func (s *DockerTrustSuite) TestTrustedCreateFromBadTrustServer(c *check.C) { // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. // tag an image and upload it to the private registry - dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) + cli.DockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) // Push up to the new server - icmd.RunCmd(icmd.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.Docker(cli.Args("--config", evilLocalConfigDir, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) // Now, try creating with the original client from this new trust server. This should fail because the new root is invalid. 
- icmd.RunCmd(icmd.Command(dockerBinary, "create", repoName), trustedCmd).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("create", repoName), trustedCmd).Assert(c, icmd.Expected{ ExitCode: 1, Err: "could not rotate trust to a new trusted root", }) @@ -438,19 +409,21 @@ RUN chmod 755 /entrypoint.sh ENTRYPOINT ["/entrypoint.sh"] CMD echo foobar` - ctx := fakeContext(c, dockerfile, map[string]string{ - "entrypoint.sh": `#!/bin/sh + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "entrypoint.sh": `#!/bin/sh echo "I am an entrypoint" exec "$@"`, - }) + })) defer ctx.Close() - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) - out, _ := dockerCmd(c, "create", "--entrypoint=", name, "echo", "foo") + out := cli.DockerCmd(c, "create", "--entrypoint=", name, "echo", "foo").Combined() id := strings.TrimSpace(out) c.Assert(id, check.Not(check.Equals), "") - out, _ = dockerCmd(c, "start", "-a", id) + out = cli.DockerCmd(c, "start", "-a", id).Combined() c.Assert(strings.TrimSpace(out), check.Equals, "foo") } diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_test.go index 0e7cb27c4..5954c57aa 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_test.go @@ -5,6 +5,7 @@ package main import ( "bufio" "bytes" + "context" "encoding/json" "fmt" "io" @@ -21,9 +22,17 @@ import ( "syscall" "time" + "crypto/tls" + "crypto/x509" + + "github.com/cloudflare/cfssl/helpers" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/integration-cli/cli" "github.com/docker/docker/integration-cli/daemon" + 
"github.com/docker/docker/opts" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/testutil" @@ -535,32 +544,6 @@ func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *check.C) { } } -func (s *DockerDaemonSuite) TestDaemonKeyMigration(c *check.C) { - // TODO: skip or update for Windows daemon - os.Remove("/etc/docker/key.json") - k1, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - c.Fatalf("Error generating private key: %s", err) - } - if err := os.MkdirAll(filepath.Join(os.Getenv("HOME"), ".docker"), 0755); err != nil { - c.Fatalf("Error creating .docker directory: %s", err) - } - if err := libtrust.SaveKey(filepath.Join(os.Getenv("HOME"), ".docker", "key.json"), k1); err != nil { - c.Fatalf("Error saving private key: %s", err) - } - - s.d.Start(c) - s.d.Stop(c) - - k2, err := libtrust.LoadKeyFile("/etc/docker/key.json") - if err != nil { - c.Fatalf("Error opening key file") - } - if k1.KeyID() != k2.KeyID() { - c.Fatalf("Key not migrated") - } -} - // GH#11320 - verify that the daemon exits on failure properly // Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means // to get a daemon init failure; no other tests for -b/--bip conflict are therefore required @@ -986,7 +969,7 @@ func (s *DockerDaemonSuite) TestDaemonUlimitDefaults(c *check.C) { c.Fatalf("expected `ulimit -n` to be `42`, got: %s", nofile) } if nproc != "2048" { - c.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc) + c.Fatalf("expected `ulimit -p` to be 2048, got: %s", nproc) } // Now restart daemon with a new default @@ -1008,7 +991,7 @@ func (s *DockerDaemonSuite) TestDaemonUlimitDefaults(c *check.C) { c.Fatalf("expected `ulimit -n` to be `43`, got: %s", nofile) } if nproc != "2048" { - c.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc) + c.Fatalf("expected `ulimit -p` to be 2048, got: %s", nproc) } } @@ -1429,7 +1412,7 @@ func (s *DockerDaemonSuite) 
TestDaemonRestartWithSocketAsVolume(c *check.C) { } // os.Kill should kill daemon ungracefully, leaving behind container mounts. -// A subsequent daemon restart shoud clean up said mounts. +// A subsequent daemon restart should clean up said mounts. func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonAndContainerKill(c *check.C) { d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ Experimental: testEnv.ExperimentalDaemon(), @@ -1713,7 +1696,7 @@ func (s *DockerDaemonSuite) TestDaemonStartWithoutHost(c *check.C) { } // FIXME(vdemeester) Use a new daemon instance instead of the Suite one -func (s *DockerDaemonSuite) TestDaemonStartWithDefalutTLSHost(c *check.C) { +func (s *DockerDaemonSuite) TestDaemonStartWithDefaultTLSHost(c *check.C) { s.d.UseDefaultTLSHost = true defer func() { s.d.UseDefaultTLSHost = false @@ -1743,6 +1726,33 @@ func (s *DockerDaemonSuite) TestDaemonStartWithDefalutTLSHost(c *check.C) { if !strings.Contains(out, "Server") { c.Fatalf("docker version should return information of server side") } + + // ensure when connecting to the server that only a single acceptable CA is requested + contents, err := ioutil.ReadFile("fixtures/https/ca.pem") + c.Assert(err, checker.IsNil) + rootCert, err := helpers.ParseCertificatePEM(contents) + c.Assert(err, checker.IsNil) + rootPool := x509.NewCertPool() + rootPool.AddCert(rootCert) + + var certRequestInfo *tls.CertificateRequestInfo + conn, err := tls.Dial("tcp", fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort), &tls.Config{ + RootCAs: rootPool, + GetClientCertificate: func(cri *tls.CertificateRequestInfo) (*tls.Certificate, error) { + certRequestInfo = cri + cert, err := tls.LoadX509KeyPair("fixtures/https/client-cert.pem", "fixtures/https/client-key.pem") + if err != nil { + return nil, err + } + return &cert, nil + }, + }) + c.Assert(err, checker.IsNil) + conn.Close() + + c.Assert(certRequestInfo, checker.NotNil) + c.Assert(certRequestInfo.AcceptableCAs, checker.HasLen, 1) 
+ c.Assert(certRequestInfo.AcceptableCAs[0], checker.DeepEquals, rootCert.RawSubject) } func (s *DockerDaemonSuite) TestBridgeIPIsExcludedFromAllocatorPool(c *check.C) { @@ -1783,7 +1793,7 @@ func (s *DockerDaemonSuite) TestDaemonNoSpaceLeftOnDeviceError(c *check.C) { // create a 2MiB image and mount it as graph root // Why in a container? Because `mount` sometimes behaves weirdly and often fails outright on this test in debian:jessie (which is what the test suite runs under if run from the Makefile) - dockerCmd(c, "run", "--rm", "-v", testDir+":/test", "busybox", "sh", "-c", "dd of=/test/testfs.img bs=1M seek=2 count=0") + dockerCmd(c, "run", "--rm", "-v", testDir+":/test", "busybox", "sh", "-c", "dd of=/test/testfs.img bs=1M seek=3 count=0") icmd.RunCommand("mkfs.ext4", "-F", filepath.Join(testDir, "testfs.img")).Assert(c, icmd.Success) result := icmd.RunCommand("losetup", "-f", "--show", filepath.Join(testDir, "testfs.img")) @@ -2678,7 +2688,7 @@ func (s *DockerDaemonSuite) TestDaemonBackcompatPre17Volumes(c *check.C) { `) configPath := filepath.Join(d.Root, "containers", id, "config.v2.json") - err = ioutil.WriteFile(configPath, config, 600) + c.Assert(ioutil.WriteFile(configPath, config, 600), checker.IsNil) d.Start(c) out, err = d.Cmd("inspect", "--type=container", "--format={{ json .Mounts }}", id) @@ -2804,11 +2814,15 @@ func (s *DockerDaemonSuite) TestExecWithUserAfterLiveRestore(c *check.C) { testRequires(c, DaemonIsLinux) s.d.StartWithBusybox(c, "--live-restore") - out, err := s.d.Cmd("run", "-d", "--name=top", "busybox", "sh", "-c", "addgroup -S test && adduser -S -G test test -D -s /bin/sh && top") + out, err := s.d.Cmd("run", "-d", "--name=top", "busybox", "sh", "-c", "addgroup -S test && adduser -S -G test test -D -s /bin/sh && touch /adduser_end && top") c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) s.d.WaitRun("top") + // Wait for shell command to be completed + _, err = s.d.Cmd("exec", "top", "sh", "-c", `for i in $(seq 1 5); do if 
[ -e /adduser_end ]; then rm -f /adduser_end && break; else sleep 1 && false; fi; done`) + c.Assert(err, check.IsNil, check.Commentf("Timeout waiting for shell command to be completed")) + out1, err := s.d.Cmd("exec", "-u", "test", "top", "id") // uid=100(test) gid=101(test) groups=101(test) c.Assert(err, check.IsNil, check.Commentf("Output: %s", out1)) @@ -2818,7 +2832,7 @@ func (s *DockerDaemonSuite) TestExecWithUserAfterLiveRestore(c *check.C) { out2, err := s.d.Cmd("exec", "-u", "test", "top", "id") c.Assert(err, check.IsNil, check.Commentf("Output: %s", out2)) - c.Assert(out1, check.Equals, out2, check.Commentf("Output: before restart '%s', after restart '%s'", out1, out2)) + c.Assert(out2, check.Equals, out1, check.Commentf("Output: before restart '%s', after restart '%s'", out1, out2)) out, err = s.d.Cmd("stop", "top") c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) @@ -2970,3 +2984,45 @@ func (s *DockerDaemonSuite) TestShmSizeReload(c *check.C) { c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) c.Assert(strings.TrimSpace(out), check.Equals, fmt.Sprintf("%v", size)) } + +// TestFailedPluginRemove makes sure that a failed plugin remove does not block +// the daemon from starting +func (s *DockerDaemonSuite) TestFailedPluginRemove(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, SameHostDaemon) + d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{}) + d.Start(c) + cli, err := client.NewClient(d.Sock(), api.DefaultVersion, nil, nil) + c.Assert(err, checker.IsNil) + + ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second) + defer cancel() + + name := "test-plugin-rm-fail" + out, err := cli.PluginInstall(ctx, name, types.PluginInstallOptions{ + Disabled: true, + AcceptAllPermissions: true, + RemoteRef: "cpuguy83/docker-logdriver-test", + }) + c.Assert(err, checker.IsNil) + defer out.Close() + io.Copy(ioutil.Discard, out) + + ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second) + 
defer cancel() + p, _, err := cli.PluginInspectWithRaw(ctx, name) + c.Assert(err, checker.IsNil) + + // simulate a bad/partial removal by removing the plugin config. + configPath := filepath.Join(d.Root, "plugins", p.ID, "config.json") + c.Assert(os.Remove(configPath), checker.IsNil) + + d.Restart(c) + ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + _, err = cli.Ping(ctx) + c.Assert(err, checker.IsNil) + + _, _, err = cli.PluginInspectWithRaw(ctx, name) + // plugin should be gone since the config.json is gone + c.Assert(err, checker.NotNil) +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_diff_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_diff_test.go index b41cc896d..3e95a7378 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_diff_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_diff_test.go @@ -5,13 +5,14 @@ import ( "time" "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" "github.com/go-check/check" ) // ensure that an added file shows up in docker diff func (s *DockerSuite) TestDiffFilenameShownInOutput(c *check.C) { containerCmd := `mkdir /foo; echo xyzzy > /foo/bar` - out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", containerCmd) + out := cli.DockerCmd(c, "run", "-d", "busybox", "sh", "-c", containerCmd).Combined() // Wait for it to exit as cannot diff a running container on Windows, and // it will take a few seconds to exit. 
Also there's no way in Windows to @@ -20,13 +21,12 @@ func (s *DockerSuite) TestDiffFilenameShownInOutput(c *check.C) { containerID := strings.TrimSpace(out) lookingFor := "A /foo/bar" if testEnv.DaemonPlatform() == "windows" { - err := waitExited(containerID, 60*time.Second) - c.Assert(err, check.IsNil) + cli.WaitExited(c, containerID, 60*time.Second) lookingFor = "C Files/foo/bar" } cleanCID := strings.TrimSpace(out) - out, _ = dockerCmd(c, "diff", cleanCID) + out = cli.DockerCmd(c, "diff", cleanCID).Combined() found := false for _, line := range strings.Split(out, "\n") { diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_events_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_events_test.go index a7014e9b3..0bbc98684 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_events_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_events_test.go @@ -15,6 +15,7 @@ import ( eventtypes "github.com/docker/docker/api/types/events" eventstestutils "github.com/docker/docker/daemon/events/testutils" "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" "github.com/docker/docker/integration-cli/cli/build" "github.com/docker/docker/integration-cli/request" "github.com/docker/docker/pkg/testutil" @@ -118,7 +119,7 @@ func (s *DockerSuite) TestEventsLimit(c *check.C) { out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)) events := strings.Split(out, "\n") nEvents := len(events) - 1 - c.Assert(nEvents, checker.Equals, 64, check.Commentf("events should be limited to 64, but received %d", nEvents)) + c.Assert(nEvents, checker.Equals, 256, check.Commentf("events should be limited to 256, but received %d", nEvents)) } func (s *DockerSuite) TestEventsContainerEvents(c *check.C) { @@ -451,16 +452,16 @@ func (s *DockerSuite) TestEventsCommit(c *check.C) { // Problematic on Windows as cannot commit a running container 
testRequires(c, DaemonIsLinux) - out, _ := runSleepingContainer(c) + out := runSleepingContainer(c) cID := strings.TrimSpace(out) - c.Assert(waitRun(cID), checker.IsNil) + cli.WaitRun(c, cID) - dockerCmd(c, "commit", "-m", "test", cID) - dockerCmd(c, "stop", cID) - c.Assert(waitExited(cID, 5*time.Second), checker.IsNil) + cli.DockerCmd(c, "commit", "-m", "test", cID) + cli.DockerCmd(c, "stop", cID) + cli.WaitExited(c, cID, 5*time.Second) until := daemonUnixTime(c) - out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until) + out = cli.DockerCmd(c, "events", "-f", "container="+cID, "--until="+until).Combined() c.Assert(out, checker.Contains, "commit", check.Commentf("Missing 'commit' log event")) } @@ -494,7 +495,7 @@ func (s *DockerSuite) TestEventsCopy(c *check.C) { } func (s *DockerSuite) TestEventsResize(c *check.C) { - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") cID := strings.TrimSpace(out) c.Assert(waitRun(cID), checker.IsNil) @@ -514,9 +515,9 @@ func (s *DockerSuite) TestEventsAttach(c *check.C) { // TODO Windows CI: Figure out why this test fails intermittently (TP5). 
testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-di", "busybox", "cat") + out := cli.DockerCmd(c, "run", "-di", "busybox", "cat").Combined() cID := strings.TrimSpace(out) - c.Assert(waitRun(cID), checker.IsNil) + cli.WaitRun(c, cID) cmd := exec.Command(dockerBinary, "attach", cID) stdin, err := cmd.StdinPipe() @@ -537,11 +538,11 @@ func (s *DockerSuite) TestEventsAttach(c *check.C) { c.Assert(stdin.Close(), checker.IsNil) - dockerCmd(c, "kill", cID) - c.Assert(waitExited(cID, 5*time.Second), checker.IsNil) + cli.DockerCmd(c, "kill", cID) + cli.WaitExited(c, cID, 5*time.Second) until := daemonUnixTime(c) - out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until) + out = cli.DockerCmd(c, "events", "-f", "container="+cID, "--until="+until).Combined() c.Assert(out, checker.Contains, "attach", check.Commentf("Missing 'attach' log event")) } @@ -560,7 +561,7 @@ func (s *DockerSuite) TestEventsTop(c *check.C) { // Problematic on Windows as Windows does not support top testRequires(c, DaemonIsLinux) - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") cID := strings.TrimSpace(out) c.Assert(waitRun(cID), checker.IsNil) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_events_unix_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_events_unix_test.go index 4d5777d12..fe053aa59 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_events_unix_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_events_unix_test.go @@ -428,7 +428,7 @@ func (s *DockerDaemonSuite) TestDaemonEvents(c *check.C) { out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c)) c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s (cluster-advertise=, cluster-store=, cluster-store-opts={}, debug=true, default-runtime=runc, default-shm-size=67108864, insecure-registries=[], labels=[\"bar=foo\"], 
live-restore=false, max-concurrent-downloads=1, max-concurrent-uploads=5, name=%s, registry-mirrors=[], runtimes=runc:{docker-runc []}, shutdown-timeout=10)", daemonID, daemonName)) + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s (allow-nondistributable-artifacts=[], cluster-advertise=, cluster-store=, cluster-store-opts={}, debug=true, default-runtime=runc, default-shm-size=67108864, insecure-registries=[], labels=[\"bar=foo\"], live-restore=false, max-concurrent-downloads=1, max-concurrent-uploads=5, name=%s, registry-mirrors=[], runtimes=runc:{docker-runc []}, shutdown-timeout=10)", daemonID, daemonName)) } func (s *DockerDaemonSuite) TestDaemonEventsWithFilters(c *check.C) { diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_test.go index 4cad5722b..be228ab2d 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_test.go @@ -16,6 +16,7 @@ import ( "time" "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" "github.com/docker/docker/integration-cli/cli/build" "github.com/docker/docker/integration-cli/request" icmd "github.com/docker/docker/pkg/testutil/cmd" @@ -70,7 +71,7 @@ func (s *DockerSuite) TestExecInteractive(c *check.C) { } func (s *DockerSuite) TestExecAfterContainerRestart(c *check.C) { - out, _ := runSleepingContainer(c) + out := runSleepingContainer(c) cleanedContainerID := strings.TrimSpace(out) c.Assert(waitRun(cleanedContainerID), check.IsNil) dockerCmd(c, "restart", cleanedContainerID) @@ -137,13 +138,12 @@ func (s *DockerSuite) TestExecExitStatus(c *check.C) { func (s *DockerSuite) TestExecPausedContainer(c *check.C) { testRequires(c, IsPausable) - defer unpauseAllContainers(c) - out, _ := runSleepingContainer(c, "-d", "--name", "testing") + out := runSleepingContainer(c, 
"-d", "--name", "testing") ContainerID := strings.TrimSpace(out) dockerCmd(c, "pause", "testing") - out, _, err := dockerCmdWithError("exec", "-i", "-t", ContainerID, "echo", "hello") + out, _, err := dockerCmdWithError("exec", ContainerID, "echo", "hello") c.Assert(err, checker.NotNil, check.Commentf("container should fail to exec new command if it is paused")) expected := ContainerID + " is paused, unpause the container before exec" @@ -305,7 +305,7 @@ func (s *DockerSuite) TestExecCgroup(c *check.C) { } func (s *DockerSuite) TestExecInspectID(c *check.C) { - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") id := strings.TrimSuffix(out, "\n") out = inspectField(c, id, "ExecIDs") @@ -389,7 +389,10 @@ func (s *DockerSuite) TestRunMutableNetworkFiles(c *check.C) { // Not applicable on Windows to Windows CI. testRequires(c, SameHostDaemon, DaemonIsLinux) for _, fn := range []string{"resolv.conf", "hosts"} { - deleteAllContainers(c) + containers := cli.DockerCmd(c, "ps", "-q", "-a").Combined() + if containers != "" { + cli.DockerCmd(c, append([]string{"rm", "-fv"}, strings.Split(strings.TrimSpace(containers), "\n")...)...) 
+ } content := runCommandAndReadContainerFile(c, fn, dockerBinary, "run", "-d", "--name", "c1", "busybox", "sh", "-c", fmt.Sprintf("echo success >/etc/%s && top", fn)) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_external_graphdriver_unix_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_external_graphdriver_unix_test.go index d736c4bdd..16023c9a7 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_external_graphdriver_unix_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_external_graphdriver_unix_test.go @@ -111,7 +111,7 @@ func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ex } respond := func(w http.ResponseWriter, data interface{}) { - w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json") + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") switch t := data.(type) { case error: fmt.Fprintln(w, fmt.Sprintf(`{"Err": %q}`, t.Error())) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_external_volume_driver_unix_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_external_volume_driver_unix_test.go index d6d1999c2..5fe417c2c 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_external_volume_driver_unix_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_external_volume_driver_unix_test.go @@ -616,3 +616,18 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnmountOnMountFail(c out, _ = s.d.Cmd("run", "-w", "/foo", "-v", "testumount:/foo", "busybox", "true") c.Assert(s.ec.unmounts, checker.Equals, 0, check.Commentf(out)) } + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnmountOnCp(c *check.C) { + s.d.StartWithBusybox(c) + s.d.Cmd("volume", "create", "-d", "test-external-volume-driver", "--name=test") + + out, _ := s.d.Cmd("run", "-d", "--name=test", "-v", "test:/foo", "busybox", 
"/bin/sh", "-c", "touch /test && top") + c.Assert(s.ec.mounts, checker.Equals, 1, check.Commentf(out)) + + out, _ = s.d.Cmd("cp", "test:/test", "/tmp/test") + c.Assert(s.ec.mounts, checker.Equals, 2, check.Commentf(out)) + c.Assert(s.ec.unmounts, checker.Equals, 1, check.Commentf(out)) + + out, _ = s.d.Cmd("kill", "test") + c.Assert(s.ec.unmounts, checker.Equals, 2, check.Commentf(out)) +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_health_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_health_test.go index 3e9d048f8..0f78a41d8 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_health_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_health_test.go @@ -139,3 +139,26 @@ func (s *DockerSuite) TestHealth(c *check.C) { c.Check(out, checker.Equals, "[CMD cat /my status]\n") } + +// Github #33021 +func (s *DockerSuite) TestUnsetEnvVarHealthCheck(c *check.C) { + testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows + + imageName := "testhealth" + buildImageSuccessfully(c, imageName, build.WithDockerfile(`FROM busybox +HEALTHCHECK --interval=1s --timeout=5s --retries=5 CMD /bin/sh -c "sleep 1" +ENTRYPOINT /bin/sh -c "sleep 600"`)) + + name := "env_test_health" + // No health status before starting + dockerCmd(c, "run", "-d", "--name", name, "-e", "FOO", imageName) + defer func() { + dockerCmd(c, "rm", "-f", name) + dockerCmd(c, "rmi", imageName) + }() + + // Start + dockerCmd(c, "start", name) + waitForHealthStatus(c, name, "starting", "healthy") + +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_images_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_images_test.go index d463db450..dccbe1262 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_images_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_images_test.go @@ -100,7 +100,7 @@ func (s *DockerSuite) 
TestImagesFilterLabelMatch(c *check.C) { } // Regression : #15659 -func (s *DockerSuite) TestImagesFilterLabelWithCommit(c *check.C) { +func (s *DockerSuite) TestCommitWithFilterLabel(c *check.C) { // Create a container dockerCmd(c, "run", "--name", "bar", "busybox", "/bin/sh") // Commit with labels "using changes" diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_info_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_info_test.go index 5eb2f0f42..d75974dfc 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_info_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_info_test.go @@ -146,7 +146,7 @@ func (s *DockerSuite) TestInfoDisplaysRunningContainers(c *check.C) { func (s *DockerSuite) TestInfoDisplaysPausedContainers(c *check.C) { testRequires(c, IsPausable) - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") cleanedContainerID := strings.TrimSpace(out) dockerCmd(c, "pause", cleanedContainerID) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_inspect_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_inspect_test.go index 330cfc9a1..96e2ee451 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_inspect_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_inspect_test.go @@ -53,10 +53,7 @@ func (s *DockerSuite) TestInspectDefault(c *check.C) { } func (s *DockerSuite) TestInspectStatus(c *check.C) { - if testEnv.DaemonPlatform() != "windows" { - defer unpauseAllContainers(c) - } - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") out = strings.TrimSpace(out) inspectOut := inspectField(c, out, "State.Status") diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_kill_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_kill_test.go index db3696357..3273ecf1f 100644 --- 
a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_kill_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_kill_test.go @@ -7,45 +7,48 @@ import ( "time" "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" "github.com/docker/docker/integration-cli/request" + icmd "github.com/docker/docker/pkg/testutil/cmd" "github.com/go-check/check" ) func (s *DockerSuite) TestKillContainer(c *check.C) { - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") cleanedContainerID := strings.TrimSpace(out) - c.Assert(waitRun(cleanedContainerID), check.IsNil) + cli.WaitRun(c, cleanedContainerID) - dockerCmd(c, "kill", cleanedContainerID) - c.Assert(waitExited(cleanedContainerID, 10*time.Second), check.IsNil) + cli.DockerCmd(c, "kill", cleanedContainerID) + cli.WaitExited(c, cleanedContainerID, 10*time.Second) - out, _ = dockerCmd(c, "ps", "-q") + out = cli.DockerCmd(c, "ps", "-q").Combined() c.Assert(out, checker.Not(checker.Contains), cleanedContainerID, check.Commentf("killed container is still running")) } func (s *DockerSuite) TestKillOffStoppedContainer(c *check.C) { - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") cleanedContainerID := strings.TrimSpace(out) - dockerCmd(c, "stop", cleanedContainerID) - c.Assert(waitExited(cleanedContainerID, 10*time.Second), check.IsNil) + cli.DockerCmd(c, "stop", cleanedContainerID) + cli.WaitExited(c, cleanedContainerID, 10*time.Second) - _, _, err := dockerCmdWithError("kill", "-s", "30", cleanedContainerID) - c.Assert(err, check.Not(check.IsNil), check.Commentf("Container %s is not running", cleanedContainerID)) + cli.Docker(cli.Args("kill", "-s", "30", cleanedContainerID)).Assert(c, icmd.Expected{ + ExitCode: 1, + }) } func (s *DockerSuite) TestKillDifferentUserContainer(c *check.C) { // TODO Windows: Windows does not yet support -u (Feb 2016). 
testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-u", "daemon", "-d", "busybox", "top") + out := cli.DockerCmd(c, "run", "-u", "daemon", "-d", "busybox", "top").Combined() cleanedContainerID := strings.TrimSpace(out) - c.Assert(waitRun(cleanedContainerID), check.IsNil) + cli.WaitRun(c, cleanedContainerID) - dockerCmd(c, "kill", cleanedContainerID) - c.Assert(waitExited(cleanedContainerID, 10*time.Second), check.IsNil) + cli.DockerCmd(c, "kill", cleanedContainerID) + cli.WaitExited(c, cleanedContainerID, 10*time.Second) - out, _ = dockerCmd(c, "ps", "-q") + out = cli.DockerCmd(c, "ps", "-q").Combined() c.Assert(out, checker.Not(checker.Contains), cleanedContainerID, check.Commentf("killed container is still running")) } @@ -69,38 +72,38 @@ func (s *DockerSuite) TestKillWithSignal(c *check.C) { func (s *DockerSuite) TestKillWithStopSignalWithSameSignalShouldDisableRestartPolicy(c *check.C) { // Cannot port to Windows - does not support signals int the same way as Linux does testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "--stop-signal=TERM", "--restart=always", "busybox", "top") + out := cli.DockerCmd(c, "run", "-d", "--stop-signal=TERM", "--restart=always", "busybox", "top").Combined() cid := strings.TrimSpace(out) - c.Assert(waitRun(cid), check.IsNil) + cli.WaitRun(c, cid) // Let docker send a TERM signal to the container // It will kill the process and disable the restart policy - dockerCmd(c, "kill", "-s", "TERM", cid) - c.Assert(waitExited(cid, 10*time.Second), check.IsNil) + cli.DockerCmd(c, "kill", "-s", "TERM", cid) + cli.WaitExited(c, cid, 10*time.Second) - out, _ = dockerCmd(c, "ps", "-q") + out = cli.DockerCmd(c, "ps", "-q").Combined() c.Assert(out, checker.Not(checker.Contains), cid, check.Commentf("killed container is still running")) } func (s *DockerSuite) TestKillWithStopSignalWithDifferentSignalShouldKeepRestartPolicy(c *check.C) { // Cannot port to Windows - does not support signals int the same way as Linux does 
testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "--stop-signal=CONT", "--restart=always", "busybox", "top") + out := cli.DockerCmd(c, "run", "-d", "--stop-signal=CONT", "--restart=always", "busybox", "top").Combined() cid := strings.TrimSpace(out) - c.Assert(waitRun(cid), check.IsNil) + cli.WaitRun(c, cid) // Let docker send a TERM signal to the container // It will kill the process, but not disable the restart policy - dockerCmd(c, "kill", "-s", "TERM", cid) - c.Assert(waitRestart(cid, 10*time.Second), check.IsNil) + cli.DockerCmd(c, "kill", "-s", "TERM", cid) + cli.WaitRestart(c, cid, 10*time.Second) // Restart policy should still be in place, so it should be still running - c.Assert(waitRun(cid), check.IsNil) + cli.WaitRun(c, cid) } // FIXME(vdemeester) should be a unit test func (s *DockerSuite) TestKillWithInvalidSignal(c *check.C) { - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") cid := strings.TrimSpace(out) c.Assert(waitRun(cid), check.IsNil) @@ -111,7 +114,7 @@ func (s *DockerSuite) TestKillWithInvalidSignal(c *check.C) { running := inspectField(c, cid, "State.Running") c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after an invalid signal")) - out, _ = runSleepingContainer(c, "-d") + out = runSleepingContainer(c, "-d") cid = strings.TrimSpace(out) c.Assert(waitRun(cid), check.IsNil) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_login_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_login_test.go index f80503576..cb261bed8 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_login_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_login_test.go @@ -16,7 +16,7 @@ func (s *DockerSuite) TestLoginWithoutTTY(c *check.C) { // run the command and block until it's done err := cmd.Run() - c.Assert(err, checker.NotNil) //"Expected non nil err when loginning in & TTY 
not available" + c.Assert(err, checker.NotNil) //"Expected non nil err when logging in & TTY not available" } func (s *DockerRegistryAuthHtpasswdSuite) TestLoginToPrivateRegistry(c *check.C) { @@ -28,17 +28,3 @@ func (s *DockerRegistryAuthHtpasswdSuite) TestLoginToPrivateRegistry(c *check.C) // now it's fine dockerCmd(c, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) } - -func (s *DockerRegistryAuthHtpasswdSuite) TestLoginToPrivateRegistryDeprecatedEmailFlag(c *check.C) { - // Test to make sure login still works with the deprecated -e and --email flags - // wrong credentials - out, _, err := dockerCmdWithError("login", "-u", s.reg.Username(), "-p", "WRONGPASSWORD", "-e", s.reg.Email(), privateRegistryURL) - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "401 Unauthorized") - - // now it's fine - // -e flag - dockerCmd(c, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), "-e", s.reg.Email(), privateRegistryURL) - // --email flag - dockerCmd(c, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), "--email", s.reg.Email(), privateRegistryURL) -} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_logout_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_logout_test.go index 49ee1f786..5076ceba0 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_logout_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_logout_test.go @@ -13,6 +13,10 @@ import ( ) func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithExternalAuth(c *check.C) { + + // @TODO TestLogoutWithExternalAuth expects docker to fall back to a v1 registry, so has to be updated for v17.12, when v1 registries are no longer supported + s.d.StartWithBusybox(c, "--disable-legacy-registry=false") + osPath := os.Getenv("PATH") defer os.Setenv("PATH", osPath) @@ -28,6 +32,7 @@ func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithExternalAuth(c 
*check.C) tmp, err := ioutil.TempDir("", "integration-cli-") c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmp) externalAuthConfig := `{ "credsStore": "shell-test" }` @@ -35,24 +40,27 @@ func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithExternalAuth(c *check.C) err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) c.Assert(err, checker.IsNil) - dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) + _, err = s.d.Cmd("--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) + c.Assert(err, checker.IsNil) b, err := ioutil.ReadFile(configPath) c.Assert(err, checker.IsNil) c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") c.Assert(string(b), checker.Contains, privateRegistryURL) - dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) - dockerCmd(c, "--config", tmp, "push", repoName) - - dockerCmd(c, "--config", tmp, "logout", privateRegistryURL) + _, err = s.d.Cmd("--config", tmp, "tag", "busybox", repoName) + c.Assert(err, checker.IsNil) + _, err = s.d.Cmd("--config", tmp, "push", repoName) + c.Assert(err, checker.IsNil) + _, err = s.d.Cmd("--config", tmp, "logout", privateRegistryURL) + c.Assert(err, checker.IsNil) b, err = ioutil.ReadFile(configPath) c.Assert(err, checker.IsNil) c.Assert(string(b), checker.Not(checker.Contains), privateRegistryURL) // check I cannot pull anymore - out, _, err := dockerCmdWithError("--config", tmp, "pull", repoName) + out, err := s.d.Cmd("--config", tmp, "pull", repoName) c.Assert(err, check.NotNil, check.Commentf(out)) c.Assert(out, checker.Contains, "Error: image dockercli/busybox:authtest not found") } diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_nat_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_nat_test.go index 7d040c86b..bb6eca13b 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_nat_test.go +++ 
b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_nat_test.go @@ -7,18 +7,23 @@ import ( "strings" "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" "github.com/go-check/check" ) func startServerContainer(c *check.C, msg string, port int) string { name := "server" cmd := []string{ + "run", + "--name", + name, "-d", "-p", fmt.Sprintf("%d:%d", port, port), "busybox", "sh", "-c", fmt.Sprintf("echo %q | nc -lp %d", msg, port), } - c.Assert(waitForContainer(name, cmd...), check.IsNil) + cli.DockerCmd(c, cmd...) + cli.WaitRun(c, name) return name } diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_netmode_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_netmode_test.go index 7e63e9bf9..deb8f6916 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_netmode_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_netmode_test.go @@ -44,7 +44,7 @@ func (s *DockerSuite) TestNetHostname(c *check.C) { c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkHostname.Error()) out, _ = dockerCmdWithFail(c, "run", "--net=container", "busybox", "ps") - c.Assert(out, checker.Contains, "--net: invalid net mode: invalid container format container:") + c.Assert(out, checker.Contains, "Invalid network mode: invalid container format container:") out, _ = dockerCmdWithFail(c, "run", "--net=weird", "busybox", "ps") c.Assert(out, checker.Contains, "network weird not found") diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_network_unix_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_network_unix_test.go index f95dcad4d..28581bf50 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_network_unix_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_network_unix_test.go @@ -18,6 +18,7 @@ import ( "github.com/docker/docker/api/types" 
"github.com/docker/docker/api/types/versions/v1p20" "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/pkg/stringid" icmd "github.com/docker/docker/pkg/testutil/cmd" @@ -333,7 +334,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkCreateHostBind(c *check.C) { dockerCmd(c, "network", "create", "--subnet=192.168.10.0/24", "--gateway=192.168.10.1", "-o", "com.docker.network.bridge.host_binding_ipv4=192.168.10.1", "testbind") assertNwIsAvailable(c, "testbind") - out, _ := runSleepingContainer(c, "--net=testbind", "-p", "5000:5000") + out := runSleepingContainer(c, "--net=testbind", "-p", "5000:5000") id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) out, _ = dockerCmd(c, "ps") @@ -1150,7 +1151,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkHostModeUngracefulDaemonRestart(c out, err := s.d.Cmd("run", "-d", "--name", cName, "--net=host", "--restart=always", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) - // verfiy container has finished starting before killing daemon + // verify container has finished starting before killing daemon err = s.d.WaitRun(cName) c.Assert(err, checker.IsNil) } @@ -1797,18 +1798,16 @@ func (s *DockerNetworkSuite) TestConntrackFlowsLeak(c *check.C) { testRequires(c, IsAmd64, DaemonIsLinux, Network) // Create a new network - dockerCmd(c, "network", "create", "--subnet=192.168.10.0/24", "--gateway=192.168.10.1", "-o", "com.docker.network.bridge.host_binding_ipv4=192.168.10.1", "testbind") + cli.DockerCmd(c, "network", "create", "--subnet=192.168.10.0/24", "--gateway=192.168.10.1", "-o", "com.docker.network.bridge.host_binding_ipv4=192.168.10.1", "testbind") assertNwIsAvailable(c, "testbind") // Launch the server, this will remain listening on an exposed port and reply to any request in a ping/pong fashion cmd := "while true; do echo hello | nc -w 1 -lu 8080; done" - _, _, err := 
dockerCmdWithError("run", "-d", "--name", "server", "--net", "testbind", "-p", "8080:8080/udp", "appropriate/nc", "sh", "-c", cmd) - c.Assert(err, check.IsNil) + cli.DockerCmd(c, "run", "-d", "--name", "server", "--net", "testbind", "-p", "8080:8080/udp", "appropriate/nc", "sh", "-c", cmd) // Launch a container client, here the objective is to create a flow that is natted in order to expose the bug cmd = "echo world | nc -q 1 -u 192.168.10.1 8080" - _, _, err = dockerCmdWithError("run", "-d", "--name", "client", "--net=host", "appropriate/nc", "sh", "-c", cmd) - c.Assert(err, check.IsNil) + cli.DockerCmd(c, "run", "-d", "--name", "client", "--net=host", "appropriate/nc", "sh", "-c", cmd) // Get all the flows using netlink flows, err := netlink.ConntrackTableList(netlink.ConntrackTable, syscall.AF_INET) @@ -1826,8 +1825,7 @@ func (s *DockerNetworkSuite) TestConntrackFlowsLeak(c *check.C) { c.Assert(flowMatch, checker.Equals, 1) // Now delete the server, this will trigger the conntrack cleanup - err = deleteContainer("server") - c.Assert(err, checker.IsNil) + cli.DockerCmd(c, "rm", "-fv", "server") // Fetch again all the flows and validate that there is no server flow in the conntrack laying around flows, err = netlink.ConntrackTableList(netlink.ConntrackTable, syscall.AF_INET) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_pause_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_pause_test.go index 2ff5329ee..5822329f2 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_pause_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_pause_test.go @@ -4,23 +4,25 @@ import ( "strings" "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" "github.com/go-check/check" ) func (s *DockerSuite) TestPause(c *check.C) { testRequires(c, IsPausable) - defer unpauseAllContainers(c) name := "testeventpause" runSleepingContainer(c, "-d", "--name", 
name) - dockerCmd(c, "pause", name) - pausedContainers := getPausedContainers(c) + cli.DockerCmd(c, "pause", name) + pausedContainers := strings.Fields( + cli.DockerCmd(c, "ps", "-f", "status=paused", "-q", "-a").Combined(), + ) c.Assert(len(pausedContainers), checker.Equals, 1) - dockerCmd(c, "unpause", name) + cli.DockerCmd(c, "unpause", name) - out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)) + out := cli.DockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)).Combined() events := strings.Split(strings.TrimSpace(out), "\n") actions := eventActionsByIDAndType(c, events, name, "container") @@ -30,7 +32,6 @@ func (s *DockerSuite) TestPause(c *check.C) { func (s *DockerSuite) TestPauseMultipleContainers(c *check.C) { testRequires(c, IsPausable) - defer unpauseAllContainers(c) containers := []string{ "testpausewithmorecontainers1", @@ -39,13 +40,15 @@ func (s *DockerSuite) TestPauseMultipleContainers(c *check.C) { for _, name := range containers { runSleepingContainer(c, "-d", "--name", name) } - dockerCmd(c, append([]string{"pause"}, containers...)...) - pausedContainers := getPausedContainers(c) + cli.DockerCmd(c, append([]string{"pause"}, containers...)...) + pausedContainers := strings.Fields( + cli.DockerCmd(c, "ps", "-f", "status=paused", "-q", "-a").Combined(), + ) c.Assert(len(pausedContainers), checker.Equals, len(containers)) - dockerCmd(c, append([]string{"unpause"}, containers...)...) + cli.DockerCmd(c, append([]string{"unpause"}, containers...)...) 
- out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)) + out := cli.DockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)).Combined() events := strings.Split(strings.TrimSpace(out), "\n") for _, name := range containers { diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_logdriver_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_logdriver_test.go index c5029e252..d74256656 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_logdriver_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_logdriver_test.go @@ -1,9 +1,13 @@ package main import ( + "encoding/json" + "net/http" "strings" + "github.com/docker/docker/api/types" "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" "github.com/go-check/check" ) @@ -25,3 +29,21 @@ func (s *DockerSuite) TestPluginLogDriver(c *check.C) { dockerCmd(c, "plugin", "disable", pluginName) dockerCmd(c, "plugin", "rm", pluginName) } + +// Make sure log drivers are listed in info, and v2 plugins are not. 
+func (s *DockerSuite) TestPluginLogDriverInfoList(c *check.C) { + testRequires(c, IsAmd64, DaemonIsLinux) + pluginName := "cpuguy83/docker-logdriver-test" + + dockerCmd(c, "plugin", "install", pluginName) + status, body, err := request.SockRequest("GET", "/info", nil, daemonHost()) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + var info types.Info + err = json.Unmarshal(body, &info) + c.Assert(err, checker.IsNil) + drivers := strings.Join(info.Plugins.Log, " ") + c.Assert(drivers, checker.Contains, "json-file") + c.Assert(drivers, checker.Not(checker.Contains), pluginName) +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_test.go index 0daa490c9..e1fcaf2c3 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_test.go @@ -3,11 +3,14 @@ package main import ( "fmt" "io/ioutil" + "net/http" "os" "path/filepath" "strings" "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/daemon" icmd "github.com/docker/docker/pkg/testutil/cmd" "github.com/go-check/check" ) @@ -304,31 +307,26 @@ func (s *DockerTrustSuite) TestPluginTrustedInstall(c *check.C) { trustedName := s.setupTrustedplugin(c, pNameWithTag, "trusted-plugin-install") - icmd.RunCmd(icmd.Command(dockerBinary, "plugin", "install", "--grant-all-permissions", trustedName), trustedCmd).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("plugin", "install", "--grant-all-permissions", trustedName), trustedCmd).Assert(c, icmd.Expected{ Out: trustedName, }) - out, _, err := dockerCmdWithError("plugin", "ls") - c.Assert(err, checker.IsNil) + out := cli.DockerCmd(c, "plugin", "ls").Combined() c.Assert(out, checker.Contains, "true") - out, _, err = dockerCmdWithError("plugin", 
"disable", trustedName) - c.Assert(err, checker.IsNil) + out = cli.DockerCmd(c, "plugin", "disable", trustedName).Combined() c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) - out, _, err = dockerCmdWithError("plugin", "enable", trustedName) - c.Assert(err, checker.IsNil) + out = cli.DockerCmd(c, "plugin", "enable", trustedName).Combined() c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) - out, _, err = dockerCmdWithError("plugin", "rm", "-f", trustedName) - c.Assert(err, checker.IsNil) + out = cli.DockerCmd(c, "plugin", "rm", "-f", trustedName).Combined() c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) // Try untrusted pull to ensure we pushed the tag to the registry - icmd.RunCmd(icmd.Command(dockerBinary, "plugin", "install", "--disable-content-trust=true", "--grant-all-permissions", trustedName), trustedCmd).Assert(c, SuccessDownloaded) + cli.Docker(cli.Args("plugin", "install", "--disable-content-trust=true", "--grant-all-permissions", trustedName), trustedCmd).Assert(c, SuccessDownloaded) - out, _, err = dockerCmdWithError("plugin", "ls") - c.Assert(err, checker.IsNil) + out = cli.DockerCmd(c, "plugin", "ls").Combined() c.Assert(out, checker.Contains, "true") } @@ -338,12 +336,12 @@ func (s *DockerTrustSuite) TestPluginUntrustedInstall(c *check.C) { pluginName := fmt.Sprintf("%v/dockercliuntrusted/plugintest:latest", privateRegistryURL) // install locally and push to private registry - dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--alias", pluginName, pNameWithTag) - dockerCmd(c, "plugin", "push", pluginName) - dockerCmd(c, "plugin", "rm", "-f", pluginName) + cli.DockerCmd(c, "plugin", "install", "--grant-all-permissions", "--alias", pluginName, pNameWithTag) + cli.DockerCmd(c, "plugin", "push", pluginName) + cli.DockerCmd(c, "plugin", "rm", "-f", pluginName) // Try trusted install on untrusted plugin - icmd.RunCmd(icmd.Command(dockerBinary, "plugin", "install", "--grant-all-permissions", 
pluginName), trustedCmd).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("plugin", "install", "--grant-all-permissions", pluginName), trustedCmd).Assert(c, icmd.Expected{ ExitCode: 1, Err: "Error: remote trust data does not exist", }) @@ -459,3 +457,24 @@ func (s *DockerSuite) TestPluginUpgrade(c *check.C) { dockerCmd(c, "volume", "inspect", "bananas") dockerCmd(c, "run", "--rm", "-v", "bananas:/apple", "busybox", "sh", "-c", "ls -lh /apple/core") } + +func (s *DockerSuite) TestPluginMetricsCollector(c *check.C) { + testRequires(c, DaemonIsLinux, Network, SameHostDaemon, IsAmd64) + d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{}) + d.Start(c) + defer d.Stop(c) + + name := "cpuguy83/docker-metrics-plugin-test:latest" + r := cli.Docker(cli.Args("plugin", "install", "--grant-all-permissions", name), cli.Daemon(d)) + c.Assert(r.Error, checker.IsNil, check.Commentf(r.Combined())) + + // plugin lisens on localhost:19393 and proxies the metrics + resp, err := http.Get("http://localhost:19393/metrics") + c.Assert(err, checker.IsNil) + defer resp.Body.Close() + + b, err := ioutil.ReadAll(resp.Body) + c.Assert(err, checker.IsNil) + // check that a known metric is there... don't expect this metric to change over time.. 
probably safe + c.Assert(string(b), checker.Contains, "container_actions") +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_prune_unix_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_prune_unix_test.go index f7c2063fd..bea4f4fbd 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_prune_unix_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_prune_unix_test.go @@ -11,6 +11,7 @@ import ( "time" "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" "github.com/docker/docker/integration-cli/daemon" "github.com/go-check/check" ) @@ -45,7 +46,8 @@ func (s *DockerSwarmSuite) TestPruneNetwork(c *check.C) { serviceName := "testprunesvc" replicas := 1 - out, err := d.Cmd("service", "create", "--name", serviceName, + out, err := d.Cmd("service", "create", "--no-resolve-image", + "--name", serviceName, "--replicas", strconv.Itoa(replicas), "--network", "n3", "busybox", "top") @@ -96,41 +98,41 @@ func (s *DockerDaemonSuite) TestPruneImageDangling(c *check.C) { } func (s *DockerSuite) TestPruneContainerUntil(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "busybox") + out := cli.DockerCmd(c, "run", "-d", "busybox").Combined() id1 := strings.TrimSpace(out) - c.Assert(waitExited(id1, 5*time.Second), checker.IsNil) + cli.WaitExited(c, id1, 5*time.Second) until := daemonUnixTime(c) - out, _ = dockerCmd(c, "run", "-d", "busybox") + out = cli.DockerCmd(c, "run", "-d", "busybox").Combined() id2 := strings.TrimSpace(out) - c.Assert(waitExited(id2, 5*time.Second), checker.IsNil) + cli.WaitExited(c, id2, 5*time.Second) - out, _ = dockerCmd(c, "container", "prune", "--force", "--filter", "until="+until) + out = cli.DockerCmd(c, "container", "prune", "--force", "--filter", "until="+until).Combined() c.Assert(strings.TrimSpace(out), checker.Contains, id1) c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) - out, _ = 
dockerCmd(c, "ps", "-a", "-q", "--no-trunc") + out = cli.DockerCmd(c, "ps", "-a", "-q", "--no-trunc").Combined() c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id1) c.Assert(strings.TrimSpace(out), checker.Contains, id2) } func (s *DockerSuite) TestPruneContainerLabel(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "--label", "foo", "busybox") + out := cli.DockerCmd(c, "run", "-d", "--label", "foo", "busybox").Combined() id1 := strings.TrimSpace(out) - c.Assert(waitExited(id1, 5*time.Second), checker.IsNil) + cli.WaitExited(c, id1, 5*time.Second) - out, _ = dockerCmd(c, "run", "-d", "--label", "bar", "busybox") + out = cli.DockerCmd(c, "run", "-d", "--label", "bar", "busybox").Combined() id2 := strings.TrimSpace(out) - c.Assert(waitExited(id2, 5*time.Second), checker.IsNil) + cli.WaitExited(c, id2, 5*time.Second) - out, _ = dockerCmd(c, "run", "-d", "busybox") + out = cli.DockerCmd(c, "run", "-d", "busybox").Combined() id3 := strings.TrimSpace(out) - c.Assert(waitExited(id3, 5*time.Second), checker.IsNil) + cli.WaitExited(c, id3, 5*time.Second) - out, _ = dockerCmd(c, "run", "-d", "--label", "foobar", "busybox") + out = cli.DockerCmd(c, "run", "-d", "--label", "foobar", "busybox").Combined() id4 := strings.TrimSpace(out) - c.Assert(waitExited(id4, 5*time.Second), checker.IsNil) + cli.WaitExited(c, id4, 5*time.Second) // Add a config file of label=foobar, that will have no impact if cli is label!=foobar config := `{"pruneFilters": ["label=foobar"]}` @@ -141,35 +143,35 @@ func (s *DockerSuite) TestPruneContainerLabel(c *check.C) { c.Assert(err, checker.IsNil) // With config.json only, prune based on label=foobar - out, _ = dockerCmd(c, "--config", d, "container", "prune", "--force") + out = cli.DockerCmd(c, "--config", d, "container", "prune", "--force").Combined() c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id1) c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) c.Assert(strings.TrimSpace(out), 
checker.Not(checker.Contains), id3) c.Assert(strings.TrimSpace(out), checker.Contains, id4) - out, _ = dockerCmd(c, "container", "prune", "--force", "--filter", "label=foo") + out = cli.DockerCmd(c, "container", "prune", "--force", "--filter", "label=foo").Combined() c.Assert(strings.TrimSpace(out), checker.Contains, id1) c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id3) - out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc") + out = cli.DockerCmd(c, "ps", "-a", "-q", "--no-trunc").Combined() c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id1) c.Assert(strings.TrimSpace(out), checker.Contains, id2) c.Assert(strings.TrimSpace(out), checker.Contains, id3) - out, _ = dockerCmd(c, "container", "prune", "--force", "--filter", "label!=bar") + out = cli.DockerCmd(c, "container", "prune", "--force", "--filter", "label!=bar").Combined() c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) c.Assert(strings.TrimSpace(out), checker.Contains, id3) - out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc") + out = cli.DockerCmd(c, "ps", "-a", "-q", "--no-trunc").Combined() c.Assert(strings.TrimSpace(out), checker.Contains, id2) c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id3) // With config.json label=foobar and CLI label!=foobar, CLI label!=foobar supersede - out, _ = dockerCmd(c, "--config", d, "container", "prune", "--force", "--filter", "label!=foobar") + out = cli.DockerCmd(c, "--config", d, "container", "prune", "--force", "--filter", "label!=foobar").Combined() c.Assert(strings.TrimSpace(out), checker.Contains, id2) - out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc") + out = cli.DockerCmd(c, "ps", "-a", "-q", "--no-trunc").Combined() c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) } diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_ps_test.go 
b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_ps_test.go index 510fce78b..98a20f426 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_ps_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_ps_test.go @@ -19,17 +19,17 @@ import ( ) func (s *DockerSuite) TestPsListContainersBase(c *check.C) { - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") firstID := strings.TrimSpace(out) - out, _ = runSleepingContainer(c, "-d") + out = runSleepingContainer(c, "-d") secondID := strings.TrimSpace(out) // not long running out, _ = dockerCmd(c, "run", "-d", "busybox", "true") thirdID := strings.TrimSpace(out) - out, _ = runSleepingContainer(c, "-d") + out = runSleepingContainer(c, "-d") fourthID := strings.TrimSpace(out) // make sure the second is running @@ -228,42 +228,42 @@ func (s *DockerSuite) TestPsListContainersFilterStatus(c *check.C) { func (s *DockerSuite) TestPsListContainersFilterHealth(c *check.C) { // Test legacy no health check - out, _ := runSleepingContainer(c, "--name=none_legacy") + out := runSleepingContainer(c, "--name=none_legacy") containerID := strings.TrimSpace(out) - waitForContainer(containerID) + cli.WaitRun(c, containerID) - out, _ = dockerCmd(c, "ps", "-q", "-l", "--no-trunc", "--filter=health=none") + out = cli.DockerCmd(c, "ps", "-q", "-l", "--no-trunc", "--filter=health=none").Combined() containerOut := strings.TrimSpace(out) c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected id %s, got %s for legacy none filter, output: %q", containerID, containerOut, out)) // Test no health check specified explicitly - out, _ = runSleepingContainer(c, "--name=none", "--no-healthcheck") + out = runSleepingContainer(c, "--name=none", "--no-healthcheck") containerID = strings.TrimSpace(out) - waitForContainer(containerID) + cli.WaitRun(c, containerID) - out, _ = dockerCmd(c, "ps", "-q", "-l", "--no-trunc", "--filter=health=none") + out = 
cli.DockerCmd(c, "ps", "-q", "-l", "--no-trunc", "--filter=health=none").Combined() containerOut = strings.TrimSpace(out) c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected id %s, got %s for none filter, output: %q", containerID, containerOut, out)) // Test failing health check - out, _ = runSleepingContainer(c, "--name=failing_container", "--health-cmd=exit 1", "--health-interval=1s") + out = runSleepingContainer(c, "--name=failing_container", "--health-cmd=exit 1", "--health-interval=1s") containerID = strings.TrimSpace(out) waitForHealthStatus(c, "failing_container", "starting", "unhealthy") - out, _ = dockerCmd(c, "ps", "-q", "--no-trunc", "--filter=health=unhealthy") + out = cli.DockerCmd(c, "ps", "-q", "--no-trunc", "--filter=health=unhealthy").Combined() containerOut = strings.TrimSpace(out) c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected containerID %s, got %s for unhealthy filter, output: %q", containerID, containerOut, out)) // Check passing healthcheck - out, _ = runSleepingContainer(c, "--name=passing_container", "--health-cmd=exit 0", "--health-interval=1s") + out = runSleepingContainer(c, "--name=passing_container", "--health-cmd=exit 0", "--health-interval=1s") containerID = strings.TrimSpace(out) waitForHealthStatus(c, "passing_container", "starting", "healthy") - out, _ = dockerCmd(c, "ps", "-q", "--no-trunc", "--filter=health=healthy") + out = cli.DockerCmd(c, "ps", "-q", "--no-trunc", "--filter=health=healthy").Combined() containerOut = strings.TrimSpace(out) c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected containerID %s, got %s for healthy filter, output: %q", containerID, containerOut, out)) } @@ -473,11 +473,11 @@ func (s *DockerSuite) TestPsRightTagName(c *check.C) { dockerCmd(c, "tag", "busybox", tag) var id1 string - out, _ := runSleepingContainer(c) + out := runSleepingContainer(c) id1 = strings.TrimSpace(string(out)) var id2 string - out, _ = 
runSleepingContainerInImage(c, tag) + out = runSleepingContainerInImage(c, tag) id2 = strings.TrimSpace(string(out)) var imageID string @@ -485,7 +485,7 @@ func (s *DockerSuite) TestPsRightTagName(c *check.C) { imageID = strings.TrimSpace(string(out)) var id3 string - out, _ = runSleepingContainerInImage(c, imageID) + out = runSleepingContainerInImage(c, imageID) id3 = strings.TrimSpace(string(out)) out, _ = dockerCmd(c, "ps", "--no-trunc") @@ -638,7 +638,7 @@ func (s *DockerSuite) TestPsDefaultFormatAndQuiet(c *check.C) { err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) c.Assert(err, checker.IsNil) - out, _ := runSleepingContainer(c, "--name=test") + out := runSleepingContainer(c, "--name=test") id := strings.TrimSpace(out) out, _ = dockerCmd(c, "--config", d, "ps", "-q") @@ -746,7 +746,7 @@ func (s *DockerSuite) TestPsShowMounts(c *check.C) { fields = strings.Fields(lines[1]) c.Assert(fields, checker.HasLen, 2) - annonymounsVolumeID := fields[1] + anonymousVolumeID := fields[1] fields = strings.Fields(lines[2]) c.Assert(fields[1], checker.Equals, "ps-volume-test") @@ -771,7 +771,7 @@ func (s *DockerSuite) TestPsShowMounts(c *check.C) { c.Assert(lines, checker.HasLen, 2) fields = strings.Fields(lines[0]) - c.Assert(fields[1], checker.Equals, annonymounsVolumeID) + c.Assert(fields[1], checker.Equals, anonymousVolumeID) fields = strings.Fields(lines[1]) c.Assert(fields[1], checker.Equals, "ps-volume-test") @@ -898,35 +898,25 @@ func (s *DockerSuite) TestPsListContainersFilterNetwork(c *check.C) { func (s *DockerSuite) TestPsByOrder(c *check.C) { name1 := "xyz-abc" - out, err := runSleepingContainer(c, "--name", name1) - c.Assert(err, checker.NotNil) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + out := runSleepingContainer(c, "--name", name1) container1 := strings.TrimSpace(out) name2 := "xyz-123" - out, err = runSleepingContainer(c, "--name", name2) - c.Assert(err, checker.NotNil) - 
c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + out = runSleepingContainer(c, "--name", name2) container2 := strings.TrimSpace(out) name3 := "789-abc" - out, err = runSleepingContainer(c, "--name", name3) - c.Assert(err, checker.NotNil) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + out = runSleepingContainer(c, "--name", name3) name4 := "789-123" - out, err = runSleepingContainer(c, "--name", name4) - c.Assert(err, checker.NotNil) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + out = runSleepingContainer(c, "--name", name4) // Run multiple time should have the same result - out, err = dockerCmd(c, "ps", "--no-trunc", "-q", "-f", "name=xyz") - c.Assert(err, checker.NotNil) + out = cli.DockerCmd(c, "ps", "--no-trunc", "-q", "-f", "name=xyz").Combined() c.Assert(strings.TrimSpace(out), checker.Equals, fmt.Sprintf("%s\n%s", container2, container1)) // Run multiple time should have the same result - out, err = dockerCmd(c, "ps", "--no-trunc", "-q", "-f", "name=xyz") - c.Assert(err, checker.NotNil) + out = cli.DockerCmd(c, "ps", "--no-trunc", "-q", "-f", "name=xyz").Combined() c.Assert(strings.TrimSpace(out), checker.Equals, fmt.Sprintf("%s\n%s", container2, container1)) } diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_test.go index 0b1be6cd9..fd91edb81 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_test.go @@ -98,11 +98,11 @@ func (s *DockerHubPullSuite) TestPullNonExistingImage(c *check.C) { for record := range recordChan { if len(record.option) == 0 { c.Assert(record.err, checker.NotNil, check.Commentf("expected non-zero exit status when pulling non-existing image: %s", record.out)) - c.Assert(record.out, checker.Contains, fmt.Sprintf("repository %s not found: does not exist or no 
pull access", record.e.repo), check.Commentf("expected image not found error messages")) + c.Assert(record.out, checker.Contains, fmt.Sprintf("pull access denied for %s, repository does not exist or may require 'docker login'", record.e.repo), check.Commentf("expected image not found error messages")) } else { // pull -a on a nonexistent registry should fall back as well c.Assert(record.err, checker.NotNil, check.Commentf("expected non-zero exit status when pulling non-existing image: %s", record.out)) - c.Assert(record.out, checker.Contains, fmt.Sprintf("repository %s not found", record.e.repo), check.Commentf("expected image not found error messages")) + c.Assert(record.out, checker.Contains, fmt.Sprintf("pull access denied for %s, repository does not exist or may require 'docker login'", record.e.repo), check.Commentf("expected image not found error messages")) c.Assert(record.out, checker.Not(checker.Contains), "unauthorized", check.Commentf(`message should not contain "unauthorized"`)) } } @@ -258,10 +258,13 @@ func (s *DockerHubPullSuite) TestPullClientDisconnect(c *check.C) { } func (s *DockerRegistryAuthHtpasswdSuite) TestPullNoCredentialsNotFound(c *check.C) { + // @TODO TestPullNoCredentialsNotFound expects docker to fall back to a v1 registry, so has to be updated for v17.12, when v1 registries are no longer supported + s.d.StartWithBusybox(c, "--disable-legacy-registry=false") + // we don't care about the actual image, we just want to see image not found // because that means v2 call returned 401 and we fell back to v1 which usually // gives a 404 (in this case the test registry doesn't handle v1 at all) - out, _, err := dockerCmdWithError("pull", privateRegistryURL+"/busybox") + out, err := s.d.Cmd("pull", privateRegistryURL+"/busybox") c.Assert(err, check.NotNil, check.Commentf(out)) c.Assert(out, checker.Contains, "Error: image busybox:latest not found") } diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_trusted_test.go 
b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_trusted_test.go index 59674852b..d9628d971 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_trusted_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_trusted_test.go @@ -3,11 +3,10 @@ package main import ( "fmt" "io/ioutil" - "time" "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" "github.com/docker/docker/integration-cli/cli/build" - "github.com/docker/docker/pkg/testutil" icmd "github.com/docker/docker/pkg/testutil/cmd" "github.com/go-check/check" ) @@ -16,61 +15,36 @@ func (s *DockerTrustSuite) TestTrustedPull(c *check.C) { repoName := s.setupTrustedImage(c, "trusted-pull") // Try pull - icmd.RunCmd(icmd.Command(dockerBinary, "pull", repoName), trustedCmd).Assert(c, SuccessTagging) + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, SuccessTagging) - dockerCmd(c, "rmi", repoName) + cli.DockerCmd(c, "rmi", repoName) // Try untrusted pull to ensure we pushed the tag to the registry - icmd.RunCmd(icmd.Command(dockerBinary, "pull", "--disable-content-trust=true", repoName), trustedCmd).Assert(c, SuccessDownloaded) + cli.Docker(cli.Args("pull", "--disable-content-trust=true", repoName), trustedCmd).Assert(c, SuccessDownloaded) } func (s *DockerTrustSuite) TestTrustedIsolatedPull(c *check.C) { repoName := s.setupTrustedImage(c, "trusted-isolated-pull") // Try pull (run from isolated directory without trust information) - icmd.RunCmd(icmd.Command(dockerBinary, "--config", "/tmp/docker-isolated", "pull", repoName), trustedCmd).Assert(c, SuccessTagging) + cli.Docker(cli.Args("--config", "/tmp/docker-isolated", "pull", repoName), trustedCmd).Assert(c, SuccessTagging) - dockerCmd(c, "rmi", repoName) + cli.DockerCmd(c, "rmi", repoName) } func (s *DockerTrustSuite) TestUntrustedPull(c *check.C) { repoName := fmt.Sprintf("%v/dockercliuntrusted/pulltest:latest", privateRegistryURL) // 
tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - dockerCmd(c, "push", repoName) - dockerCmd(c, "rmi", repoName) + cli.DockerCmd(c, "tag", "busybox", repoName) + cli.DockerCmd(c, "push", repoName) + cli.DockerCmd(c, "rmi", repoName) // Try trusted pull on untrusted tag - icmd.RunCmd(icmd.Command(dockerBinary, "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, icmd.Expected{ ExitCode: 1, Err: "Error: remote trust data does not exist", }) } -func (s *DockerTrustSuite) TestPullWhenCertExpired(c *check.C) { - c.Skip("Currently changes system time, causing instability") - repoName := s.setupTrustedImage(c, "trusted-cert-expired") - - // Certificates have 10 years of expiration - elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11) - - testutil.RunAtDifferentDate(elevenYearsFromNow, func() { - // Try pull - icmd.RunCmd(icmd.Cmd{ - Command: []string{dockerBinary, "pull", repoName}, - }, trustedCmd).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: "could not validate the path to a trusted root", - }) - }) - - testutil.RunAtDifferentDate(elevenYearsFromNow, func() { - // Try pull - icmd.RunCmd(icmd.Cmd{ - Command: []string{dockerBinary, "pull", "--disable-content-trust", repoName}, - }, trustedCmd).Assert(c, SuccessDownloaded) - }) -} - func (s *DockerTrustSuite) TestTrustedPullFromBadTrustServer(c *check.C) { repoName := fmt.Sprintf("%v/dockerclievilpull/trusted:latest", privateRegistryURL) evilLocalConfigDir, err := ioutil.TempDir("", "evil-local-config-dir") @@ -79,14 +53,14 @@ func (s *DockerTrustSuite) TestTrustedPullFromBadTrustServer(c *check.C) { } // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) + cli.DockerCmd(c, "tag", "busybox", repoName) - icmd.RunCmd(icmd.Command(dockerBinary, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - dockerCmd(c, "rmi", repoName) + 
cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.DockerCmd(c, "rmi", repoName) // Try pull - icmd.RunCmd(icmd.Command(dockerBinary, "pull", repoName), trustedCmd).Assert(c, SuccessTagging) - dockerCmd(c, "rmi", repoName) + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, SuccessTagging) + cli.DockerCmd(c, "rmi", repoName) // Kill the notary server, start a new "evil" one. s.not.Close() @@ -96,71 +70,46 @@ func (s *DockerTrustSuite) TestTrustedPullFromBadTrustServer(c *check.C) { // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. // tag an image and upload it to the private registry - dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) + cli.DockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) // Push up to the new server - icmd.RunCmd(icmd.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.Docker(cli.Args("--config", evilLocalConfigDir, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) // Now, try pulling with the original client from this new trust server. This should fail because the new root is invalid. 
- icmd.RunCmd(icmd.Command(dockerBinary, "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, icmd.Expected{ ExitCode: 1, Err: "could not rotate trust to a new trusted root", }) } -func (s *DockerTrustSuite) TestTrustedPullWithExpiredSnapshot(c *check.C) { - c.Skip("Currently changes system time, causing instability") - repoName := fmt.Sprintf("%v/dockercliexpiredtimestamppull/trusted:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - // Push with default passphrases - icmd.RunCmd(icmd.Command(dockerBinary, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - dockerCmd(c, "rmi", repoName) - - // Snapshots last for three years. This should be expired - fourYearsLater := time.Now().Add(time.Hour * 24 * 365 * 4) - - testutil.RunAtDifferentDate(fourYearsLater, func() { - // Try pull - icmd.RunCmd(icmd.Cmd{ - Command: []string{dockerBinary, "pull", repoName}, - }, trustedCmd).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: "repository out-of-date", - }) - }) -} - func (s *DockerTrustSuite) TestTrustedOfflinePull(c *check.C) { repoName := s.setupTrustedImage(c, "trusted-offline-pull") - icmd.RunCmd(icmd.Command(dockerBinary, "pull", repoName), trustedCmdWithServer("https://invalidnotaryserver")).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("pull", repoName), trustedCmdWithServer("https://invalidnotaryserver")).Assert(c, icmd.Expected{ ExitCode: 1, Err: "error contacting notary server", }) // Do valid trusted pull to warm cache - icmd.RunCmd(icmd.Command(dockerBinary, "pull", repoName), trustedCmd).Assert(c, SuccessTagging) - dockerCmd(c, "rmi", repoName) + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, SuccessTagging) + cli.DockerCmd(c, "rmi", repoName) // Try pull again with invalid notary server, should use cache - icmd.RunCmd(icmd.Command(dockerBinary, "pull", repoName), 
trustedCmdWithServer("https://invalidnotaryserver")).Assert(c, SuccessTagging) + cli.Docker(cli.Args("pull", repoName), trustedCmdWithServer("https://invalidnotaryserver")).Assert(c, SuccessTagging) } func (s *DockerTrustSuite) TestTrustedPullDelete(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, "trusted-pull-delete") // tag the image and upload it to the private registry - buildImageSuccessfully(c, repoName, build.WithDockerfile(` + cli.BuildCmd(c, repoName, build.WithDockerfile(` FROM busybox CMD echo trustedpulldelete `)) - icmd.RunCmd(icmd.Command(dockerBinary, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - dockerCmd(c, "rmi", repoName) + cli.DockerCmd(c, "rmi", repoName) // Try pull - result := icmd.RunCmd(icmd.Command(dockerBinary, "pull", repoName), trustedCmd) - result.Assert(c, icmd.Success) + result := cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, icmd.Success) matches := digestRegex.FindStringSubmatch(result.Combined()) c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", result.Combined())) @@ -174,7 +123,7 @@ func (s *DockerTrustSuite) TestTrustedPullDelete(c *check.C) { c.Assert(byDigestID, checker.Equals, imageID) // rmi of tag should also remove the digest reference - dockerCmd(c, "rmi", repoName) + cli.DockerCmd(c, "rmi", repoName) _, err := inspectFieldWithError(imageByDigest, "Id") c.Assert(err, checker.NotNil, check.Commentf("digest reference should have been removed")) @@ -189,12 +138,12 @@ func (s *DockerTrustSuite) TestTrustedPullReadsFromReleasesRole(c *check.C) { targetName := fmt.Sprintf("%s:latest", repoName) // Push with targets first, initializing the repo - dockerCmd(c, "tag", "busybox", targetName) - icmd.RunCmd(icmd.Command(dockerBinary, "push", targetName), trustedCmd).Assert(c, icmd.Success) + cli.DockerCmd(c, "tag", 
"busybox", targetName) + cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, icmd.Success) s.assertTargetInRoles(c, repoName, "latest", "targets") // Try pull, check we retrieve from targets role - icmd.RunCmd(icmd.Command(dockerBinary, "-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ Err: "retrieving target for targets role", }) @@ -205,21 +154,21 @@ func (s *DockerTrustSuite) TestTrustedPullReadsFromReleasesRole(c *check.C) { // try a pull, check that we can still pull because we can still read the // old tag in the targets role - icmd.RunCmd(icmd.Command(dockerBinary, "-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ Err: "retrieving target for targets role", }) // try a pull -a, check that it succeeds because we can still pull from the // targets role - icmd.RunCmd(icmd.Command(dockerBinary, "-D", "pull", "-a", repoName), trustedCmd).Assert(c, icmd.Success) + cli.Docker(cli.Args("-D", "pull", "-a", repoName), trustedCmd).Assert(c, icmd.Success) // Push, should sign with targets/releases - dockerCmd(c, "tag", "busybox", targetName) - icmd.RunCmd(icmd.Command(dockerBinary, "push", targetName), trustedCmd).Assert(c, icmd.Success) + cli.DockerCmd(c, "tag", "busybox", targetName) + cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, icmd.Success) s.assertTargetInRoles(c, repoName, "latest", "targets", "targets/releases") // Try pull, check we retrieve from targets/releases role - icmd.RunCmd(icmd.Command(dockerBinary, "-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ Err: "retrieving target for targets/releases role", }) @@ -228,12 +177,12 @@ func (s *DockerTrustSuite) TestTrustedPullReadsFromReleasesRole(c *check.C) { s.notaryImportKey(c, repoName, 
"targets/other", s.not.keys[1].Private) s.notaryPublish(c, repoName) - dockerCmd(c, "tag", "busybox", targetName) - icmd.RunCmd(icmd.Command(dockerBinary, "push", targetName), trustedCmd).Assert(c, icmd.Success) + cli.DockerCmd(c, "tag", "busybox", targetName) + cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, icmd.Success) s.assertTargetInRoles(c, repoName, "latest", "targets", "targets/releases", "targets/other") // Try pull, check we retrieve from targets/releases role - icmd.RunCmd(icmd.Command(dockerBinary, "-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ Err: "retrieving target for targets/releases role", }) } @@ -251,22 +200,22 @@ func (s *DockerTrustSuite) TestTrustedPullIgnoresOtherDelegationRoles(c *check.C s.notaryPublish(c, repoName) // Push should write to the delegation role, not targets - dockerCmd(c, "tag", "busybox", targetName) - icmd.RunCmd(icmd.Command(dockerBinary, "push", targetName), trustedCmd).Assert(c, icmd.Success) + cli.DockerCmd(c, "tag", "busybox", targetName) + cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, icmd.Success) s.assertTargetInRoles(c, repoName, "latest", "targets/other") s.assertTargetNotInRoles(c, repoName, "latest", "targets") // Try pull - we should fail, since pull will only pull from the targets/releases // role or the targets role - dockerCmd(c, "tag", "busybox", targetName) - icmd.RunCmd(icmd.Command(dockerBinary, "-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + cli.DockerCmd(c, "tag", "busybox", targetName) + cli.Docker(cli.Args("-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ ExitCode: 1, Err: "No trust data for", }) // try a pull -a: we should fail since pull will only pull from the targets/releases // role or the targets role - icmd.RunCmd(icmd.Command(dockerBinary, "-D", "pull", "-a", repoName), trustedCmd).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("-D", 
"pull", "-a", repoName), trustedCmd).Assert(c, icmd.Expected{ ExitCode: 1, Err: "No trusted tags for", }) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_push_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_push_test.go index 9fae636df..2ae206df7 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_push_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_push_test.go @@ -10,13 +10,12 @@ import ( "path/filepath" "strings" "sync" - "time" "github.com/docker/distribution/reference" - cliconfig "github.com/docker/docker/cli/config" + "github.com/docker/docker/cli/config" "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" "github.com/docker/docker/integration-cli/cli/build" - "github.com/docker/docker/pkg/testutil" icmd "github.com/docker/docker/pkg/testutil/cmd" "github.com/go-check/check" ) @@ -285,17 +284,17 @@ func (s *DockerSchema1RegistrySuite) TestCrossRepositoryLayerPushNotSupported(c func (s *DockerTrustSuite) TestTrustedPush(c *check.C) { repoName := fmt.Sprintf("%v/dockerclitrusted/pushtest:latest", privateRegistryURL) // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) + cli.DockerCmd(c, "tag", "busybox", repoName) - icmd.RunCmd(icmd.Command(dockerBinary, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) // Try pull after push - icmd.RunCmd(icmd.Command(dockerBinary, "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, icmd.Expected{ Out: "Status: Image is up to date", }) // Assert that we rotated the snapshot key to the server by checking our local keystore - contents, err := ioutil.ReadDir(filepath.Join(cliconfig.Dir(), "trust/private/tuf_keys", privateRegistryURL, "dockerclitrusted/pushtest")) + 
contents, err := ioutil.ReadDir(filepath.Join(config.Dir(), "trust/private/tuf_keys", privateRegistryURL, "dockerclitrusted/pushtest")) c.Assert(err, check.IsNil, check.Commentf("Unable to read local tuf key files")) // Check that we only have 1 key (targets key) c.Assert(contents, checker.HasLen, 1) @@ -304,12 +303,12 @@ func (s *DockerTrustSuite) TestTrustedPush(c *check.C) { func (s *DockerTrustSuite) TestTrustedPushWithEnvPasswords(c *check.C) { repoName := fmt.Sprintf("%v/dockerclienv/trusted:latest", privateRegistryURL) // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) + cli.DockerCmd(c, "tag", "busybox", repoName) - icmd.RunCmd(icmd.Command(dockerBinary, "push", repoName), trustedCmdWithPassphrases("12345678", "12345678")).Assert(c, SuccessSigningAndPushing) + cli.Docker(cli.Args("push", repoName), trustedCmdWithPassphrases("12345678", "12345678")).Assert(c, SuccessSigningAndPushing) // Try pull after push - icmd.RunCmd(icmd.Command(dockerBinary, "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, icmd.Expected{ Out: "Status: Image is up to date", }) } @@ -317,10 +316,10 @@ func (s *DockerTrustSuite) TestTrustedPushWithEnvPasswords(c *check.C) { func (s *DockerTrustSuite) TestTrustedPushWithFailingServer(c *check.C) { repoName := fmt.Sprintf("%v/dockerclitrusted/failingserver:latest", privateRegistryURL) // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) + cli.DockerCmd(c, "tag", "busybox", repoName) // Using a name that doesn't resolve to an address makes this test faster - icmd.RunCmd(icmd.Command(dockerBinary, "push", repoName), trustedCmdWithServer("https://server.invalid:81/")).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("push", repoName), trustedCmdWithServer("https://server.invalid:81/")).Assert(c, icmd.Expected{ ExitCode: 1, Err: "error contacting notary server", }) @@ -329,9 +328,9 @@ 
func (s *DockerTrustSuite) TestTrustedPushWithFailingServer(c *check.C) { func (s *DockerTrustSuite) TestTrustedPushWithoutServerAndUntrusted(c *check.C) { repoName := fmt.Sprintf("%v/dockerclitrusted/trustedandnot:latest", privateRegistryURL) // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) + cli.DockerCmd(c, "tag", "busybox", repoName) - result := icmd.RunCmd(icmd.Command(dockerBinary, "push", "--disable-content-trust", repoName), trustedCmdWithServer("https://server.invalid:81/")) + result := cli.Docker(cli.Args("push", "--disable-content-trust", repoName), trustedCmdWithServer("https://server.invalid:81/")) result.Assert(c, icmd.Success) c.Assert(result.Combined(), check.Not(checker.Contains), "Error establishing connection to notary repository", check.Commentf("Missing expected output on trusted push with --disable-content-trust:")) } @@ -339,13 +338,13 @@ func (s *DockerTrustSuite) TestTrustedPushWithoutServerAndUntrusted(c *check.C) func (s *DockerTrustSuite) TestTrustedPushWithExistingTag(c *check.C) { repoName := fmt.Sprintf("%v/dockerclitag/trusted:latest", privateRegistryURL) // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - dockerCmd(c, "push", repoName) + cli.DockerCmd(c, "tag", "busybox", repoName) + cli.DockerCmd(c, "push", repoName) - icmd.RunCmd(icmd.Command(dockerBinary, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) // Try pull after push - icmd.RunCmd(icmd.Command(dockerBinary, "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, icmd.Expected{ Out: "Status: Image is up to date", }) } @@ -353,76 +352,34 @@ func (s *DockerTrustSuite) TestTrustedPushWithExistingTag(c *check.C) { func (s *DockerTrustSuite) TestTrustedPushWithExistingSignedTag(c *check.C) { repoName := 
fmt.Sprintf("%v/dockerclipushpush/trusted:latest", privateRegistryURL) // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) + cli.DockerCmd(c, "tag", "busybox", repoName) // Do a trusted push - icmd.RunCmd(icmd.Command(dockerBinary, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) // Do another trusted push - icmd.RunCmd(icmd.Command(dockerBinary, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - dockerCmd(c, "rmi", repoName) + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.DockerCmd(c, "rmi", repoName) // Try pull to ensure the double push did not break our ability to pull - icmd.RunCmd(icmd.Command(dockerBinary, "pull", repoName), trustedCmd).Assert(c, SuccessDownloaded) + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, SuccessDownloaded) } func (s *DockerTrustSuite) TestTrustedPushWithIncorrectPassphraseForNonRoot(c *check.C) { repoName := fmt.Sprintf("%v/dockercliincorretpwd/trusted:latest", privateRegistryURL) // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) + cli.DockerCmd(c, "tag", "busybox", repoName) // Push with default passphrases - icmd.RunCmd(icmd.Command(dockerBinary, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) // Push with wrong passphrases - icmd.RunCmd(icmd.Command(dockerBinary, "push", repoName), trustedCmdWithPassphrases("12345678", "87654321")).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("push", repoName), trustedCmdWithPassphrases("12345678", "87654321")).Assert(c, icmd.Expected{ ExitCode: 1, Err: "could not find necessary signing keys", }) } -func (s *DockerTrustSuite) TestTrustedPushWithExpiredSnapshot(c *check.C) { - c.Skip("Currently changes system time, 
causing instability") - repoName := fmt.Sprintf("%v/dockercliexpiredsnapshot/trusted:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - // Push with default passphrases - icmd.RunCmd(icmd.Command(dockerBinary, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - - // Snapshots last for three years. This should be expired - fourYearsLater := time.Now().Add(time.Hour * 24 * 365 * 4) - - testutil.RunAtDifferentDate(fourYearsLater, func() { - // Push with wrong passphrases - icmd.RunCmd(icmd.Cmd{ - Command: []string{dockerBinary, "push", repoName}, - }, trustedCmd).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: "repository out-of-date", - }) - }) -} - -func (s *DockerTrustSuite) TestTrustedPushWithExpiredTimestamp(c *check.C) { - c.Skip("Currently changes system time, causing instability") - repoName := fmt.Sprintf("%v/dockercliexpiredtimestamppush/trusted:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - // Push with default passphrases - icmd.RunCmd(icmd.Command(dockerBinary, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - - // The timestamps expire in two weeks. 
Lets check three - threeWeeksLater := time.Now().Add(time.Hour * 24 * 21) - - // Should succeed because the server transparently re-signs one - testutil.RunAtDifferentDate(threeWeeksLater, func() { - icmd.RunCmd(icmd.Command(dockerBinary, "push", repoName), - trustedCmd).Assert(c, SuccessSigningAndPushing) - }) -} - func (s *DockerTrustSuite) TestTrustedPushWithReleasesDelegationOnly(c *check.C) { testRequires(c, NotaryHosting) repoName := fmt.Sprintf("%v/dockerclireleasedelegationinitfirst/trusted", privateRegistryURL) @@ -434,17 +391,17 @@ func (s *DockerTrustSuite) TestTrustedPushWithReleasesDelegationOnly(c *check.C) s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", targetName) + cli.DockerCmd(c, "tag", "busybox", targetName) - icmd.RunCmd(icmd.Command(dockerBinary, "push", targetName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, SuccessSigningAndPushing) // check to make sure that the target has been added to targets/releases and not targets s.assertTargetInRoles(c, repoName, "latest", "targets/releases") s.assertTargetNotInRoles(c, repoName, "latest", "targets") // Try pull after push - os.RemoveAll(filepath.Join(cliconfig.Dir(), "trust")) + os.RemoveAll(filepath.Join(config.Dir(), "trust")) - icmd.RunCmd(icmd.Command(dockerBinary, "pull", targetName), trustedCmd).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("pull", targetName), trustedCmd).Assert(c, icmd.Expected{ Out: "Status: Image is up to date", }) } @@ -468,9 +425,9 @@ func (s *DockerTrustSuite) TestTrustedPushSignsAllFirstLevelRolesWeHaveKeysFor(c s.notaryPublish(c, repoName) // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", targetName) + cli.DockerCmd(c, "tag", "busybox", targetName) - icmd.RunCmd(icmd.Command(dockerBinary, "push", targetName), trustedCmd).Assert(c, 
SuccessSigningAndPushing) + cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, SuccessSigningAndPushing) // check to make sure that the target has been added to targets/role1 and targets/role2, and // not targets (because there are delegations) or targets/role3 (due to missing key) or @@ -479,10 +436,10 @@ func (s *DockerTrustSuite) TestTrustedPushSignsAllFirstLevelRolesWeHaveKeysFor(c s.assertTargetNotInRoles(c, repoName, "latest", "targets") // Try pull after push - os.RemoveAll(filepath.Join(cliconfig.Dir(), "trust")) + os.RemoveAll(filepath.Join(config.Dir(), "trust")) // pull should fail because none of these are the releases role - icmd.RunCmd(icmd.Command(dockerBinary, "pull", targetName), trustedCmd).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("pull", targetName), trustedCmd).Assert(c, icmd.Expected{ ExitCode: 1, }) } @@ -504,9 +461,9 @@ func (s *DockerTrustSuite) TestTrustedPushSignsForRolesWithKeysAndValidPaths(c * s.notaryPublish(c, repoName) // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", targetName) + cli.DockerCmd(c, "tag", "busybox", targetName) - icmd.RunCmd(icmd.Command(dockerBinary, "push", targetName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, SuccessSigningAndPushing) // check to make sure that the target has been added to targets/role1 and targets/role4, and // not targets (because there are delegations) or targets/role2 (due to path restrictions) or @@ -515,10 +472,10 @@ func (s *DockerTrustSuite) TestTrustedPushSignsForRolesWithKeysAndValidPaths(c * s.assertTargetNotInRoles(c, repoName, "latest", "targets") // Try pull after push - os.RemoveAll(filepath.Join(cliconfig.Dir(), "trust")) + os.RemoveAll(filepath.Join(config.Dir(), "trust")) // pull should fail because none of these are the releases role - icmd.RunCmd(icmd.Command(dockerBinary, "pull", targetName), trustedCmd).Assert(c, icmd.Expected{ + 
cli.Docker(cli.Args("pull", targetName), trustedCmd).Assert(c, icmd.Expected{ ExitCode: 1, }) } @@ -534,9 +491,9 @@ func (s *DockerTrustSuite) TestTrustedPushDoesntSignTargetsIfDelegationsExist(c // do not import any delegations key // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", targetName) + cli.DockerCmd(c, "tag", "busybox", targetName) - icmd.RunCmd(icmd.Command(dockerBinary, "push", targetName), trustedCmd).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, icmd.Expected{ ExitCode: 1, Err: "no valid signing keys", }) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_registry_user_agent_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_registry_user_agent_test.go index 5e1c00aa8..6cbe6e7e6 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_registry_user_agent_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_registry_user_agent_test.go @@ -2,7 +2,9 @@ package main import ( "fmt" + "io/ioutil" "net/http" + "os" "regexp" "github.com/docker/docker/integration-cli/registry" @@ -47,9 +49,14 @@ func regexpCheckUA(c *check.C, ua string) { c.Assert(bMatchUpstreamUA, check.Equals, true, check.Commentf("(Upstream) Docker Client User-Agent malformed")) } +// registerUserAgentHandler registers a handler for the `/v2/*` endpoint. +// Note that a 404 is returned to prevent the client to proceed. +// We are only checking if the client sent a valid User Agent string along +// with the request. 
func registerUserAgentHandler(reg *registry.Mock, result *string) { reg.RegisterHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(404) + w.Write([]byte(`{"errors":[{"code": "UNSUPPORTED","message": "this is a mock registry"}]}`)) var ua string for k, v := range r.Header { if k == "User-Agent" { @@ -64,61 +71,33 @@ func registerUserAgentHandler(reg *registry.Mock, result *string) { // a registry, the registry should see a User-Agent string of the form // [docker engine UA] UpstreamClientSTREAM-CLIENT([client UA]) func (s *DockerRegistrySuite) TestUserAgentPassThrough(c *check.C) { - var ( - buildUA string - pullUA string - pushUA string - loginUA string - ) + var ua string - buildReg, err := registry.NewMock(c) - defer buildReg.Close() + reg, err := registry.NewMock(c) + defer reg.Close() c.Assert(err, check.IsNil) - registerUserAgentHandler(buildReg, &buildUA) - buildRepoName := fmt.Sprintf("%s/busybox", buildReg.URL()) + registerUserAgentHandler(reg, &ua) + repoName := fmt.Sprintf("%s/busybox", reg.URL()) - pullReg, err := registry.NewMock(c) - defer pullReg.Close() + s.d.StartWithBusybox(c, "--insecure-registry", reg.URL()) + + tmp, err := ioutil.TempDir("", "integration-cli-") c.Assert(err, check.IsNil) - registerUserAgentHandler(pullReg, &pullUA) - pullRepoName := fmt.Sprintf("%s/busybox", pullReg.URL()) + defer os.RemoveAll(tmp) - pushReg, err := registry.NewMock(c) - defer pushReg.Close() - c.Assert(err, check.IsNil) - registerUserAgentHandler(pushReg, &pushUA) - pushRepoName := fmt.Sprintf("%s/busybox", pushReg.URL()) - - loginReg, err := registry.NewMock(c) - defer loginReg.Close() - c.Assert(err, check.IsNil) - registerUserAgentHandler(loginReg, &loginUA) - - s.d.Start(c, - "--insecure-registry", buildReg.URL(), - "--insecure-registry", pullReg.URL(), - "--insecure-registry", pushReg.URL(), - "--insecure-registry", loginReg.URL(), - "--disable-legacy-registry=true") - - dockerfileName, cleanup1, err := makefile(fmt.Sprintf("FROM 
%s", buildRepoName)) + dockerfile, err := makefile(tmp, fmt.Sprintf("FROM %s", repoName)) c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) - defer cleanup1() - s.d.Cmd("build", "--file", dockerfileName, ".") - regexpCheckUA(c, buildUA) - s.d.Cmd("login", "-u", "richard", "-p", "testtest", "-e", "testuser@testdomain.com", loginReg.URL()) - regexpCheckUA(c, loginUA) + s.d.Cmd("build", "--file", dockerfile, tmp) + regexpCheckUA(c, ua) - s.d.Cmd("pull", pullRepoName) - regexpCheckUA(c, pullUA) + s.d.Cmd("login", "-u", "richard", "-p", "testtest", reg.URL()) + regexpCheckUA(c, ua) - dockerfileName, cleanup2, err := makefile(`FROM scratch - ENV foo bar`) - c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) - defer cleanup2() - s.d.Cmd("build", "-t", pushRepoName, "--file", dockerfileName, ".") + s.d.Cmd("pull", repoName) + regexpCheckUA(c, ua) - s.d.Cmd("push", pushRepoName) - regexpCheckUA(c, pushUA) + s.d.Cmd("tag", "busybox", repoName) + s.d.Cmd("push", repoName) + regexpCheckUA(c, ua) } diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_rename_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_rename_test.go index 895545f75..ea430227d 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_rename_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_rename_test.go @@ -36,7 +36,7 @@ func (s *DockerSuite) TestRenameRunningContainer(c *check.C) { } func (s *DockerSuite) TestRenameRunningContainerAndReuse(c *check.C) { - out, _ := runSleepingContainer(c, "--name", "first_name") + out := runSleepingContainer(c, "--name", "first_name") c.Assert(waitRun("first_name"), check.IsNil) newName := "new_name" @@ -46,7 +46,7 @@ func (s *DockerSuite) TestRenameRunningContainerAndReuse(c *check.C) { name := inspectField(c, ContainerID, "Name") c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container")) - out, _ = 
runSleepingContainer(c, "--name", "first_name") + out = runSleepingContainer(c, "--name", "first_name") c.Assert(waitRun("first_name"), check.IsNil) newContainerID := strings.TrimSpace(out) name = inspectField(c, newContainerID, "Name") @@ -113,7 +113,7 @@ func (s *DockerSuite) TestRenameAnonymousContainer(c *check.C) { } func (s *DockerSuite) TestRenameContainerWithSameName(c *check.C) { - out, _ := runSleepingContainer(c, "--name", "old") + out := runSleepingContainer(c, "--name", "old") ContainerID := strings.TrimSpace(out) out, _, err := dockerCmdWithError("rename", "old", "old") diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_restart_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_restart_test.go index 57c1323a9..cf6b135ed 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_restart_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_restart_test.go @@ -52,7 +52,7 @@ func (s *DockerSuite) TestRestartRunningContainer(c *check.C) { // Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819. 
func (s *DockerSuite) TestRestartWithVolumes(c *check.C) { prefix, slash := getPrefixAndSlashFromDaemonPlatform() - out, _ := runSleepingContainer(c, "-d", "-v", prefix+slash+"test") + out := runSleepingContainer(c, "-d", "-v", prefix+slash+"test") cleanedContainerID := strings.TrimSpace(out) out, err := inspectFilter(cleanedContainerID, "len .Mounts") @@ -166,7 +166,7 @@ func (s *DockerSuite) TestRestartContainerwithGoodContainer(c *check.C) { func (s *DockerSuite) TestRestartContainerSuccess(c *check.C) { testRequires(c, SameHostDaemon) - out, _ := runSleepingContainer(c, "-d", "--restart=always") + out := runSleepingContainer(c, "-d", "--restart=always") id := strings.TrimSpace(out) c.Assert(waitRun(id), check.IsNil) @@ -235,7 +235,7 @@ func (s *DockerSuite) TestRestartWithPolicyUserDefinedNetwork(c *check.C) { func (s *DockerSuite) TestRestartPolicyAfterRestart(c *check.C) { testRequires(c, SameHostDaemon) - out, _ := runSleepingContainer(c, "-d", "--restart=always") + out := runSleepingContainer(c, "-d", "--restart=always") id := strings.TrimSpace(out) c.Assert(waitRun(id), check.IsNil) @@ -294,7 +294,7 @@ func (s *DockerSuite) TestRestartContainerwithRestartPolicy(c *check.C) { } func (s *DockerSuite) TestRestartAutoRemoveContainer(c *check.C) { - out, _ := runSleepingContainer(c, "--rm") + out := runSleepingContainer(c, "--rm") id := strings.TrimSpace(string(out)) dockerCmd(c, "restart", id) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_rmi_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_rmi_test.go index ed342e439..afbc4c2fa 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_rmi_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_rmi_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" "github.com/docker/docker/integration-cli/cli/build" 
"github.com/docker/docker/pkg/stringid" icmd "github.com/docker/docker/pkg/testutil/cmd" @@ -62,84 +63,78 @@ func (s *DockerSuite) TestRmiTag(c *check.C) { } func (s *DockerSuite) TestRmiImgIDMultipleTag(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-one'") - + out := cli.DockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-one'").Combined() containerID := strings.TrimSpace(out) // Wait for it to exit as cannot commit a running container on Windows, and // it will take a few seconds to exit if testEnv.DaemonPlatform() == "windows" { - err := waitExited(containerID, 60*time.Second) - c.Assert(err, check.IsNil) + cli.WaitExited(c, containerID, 60*time.Second) } - dockerCmd(c, "commit", containerID, "busybox-one") + cli.DockerCmd(c, "commit", containerID, "busybox-one") - imagesBefore, _ := dockerCmd(c, "images", "-a") - dockerCmd(c, "tag", "busybox-one", "busybox-one:tag1") - dockerCmd(c, "tag", "busybox-one", "busybox-one:tag2") + imagesBefore := cli.DockerCmd(c, "images", "-a").Combined() + cli.DockerCmd(c, "tag", "busybox-one", "busybox-one:tag1") + cli.DockerCmd(c, "tag", "busybox-one", "busybox-one:tag2") - imagesAfter, _ := dockerCmd(c, "images", "-a") + imagesAfter := cli.DockerCmd(c, "images", "-a").Combined() // tag busybox to create 2 more images with same imageID c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+2, check.Commentf("docker images shows: %q\n", imagesAfter)) imgID := inspectField(c, "busybox-one:tag1", "Id") // run a container with the image - out, _ = runSleepingContainerInImage(c, "busybox-one") - + out = runSleepingContainerInImage(c, "busybox-one") containerID = strings.TrimSpace(out) // first checkout without force it fails - out, _, err := dockerCmdWithError("rmi", imgID) - expected := fmt.Sprintf("conflict: unable to delete %s (cannot be forced) - image is being used by running container %s", stringid.TruncateID(imgID), 
stringid.TruncateID(containerID)) // rmi tagged in multiple repos should have failed without force - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, expected) + cli.Docker(cli.Args("rmi", imgID)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: fmt.Sprintf("conflict: unable to delete %s (cannot be forced) - image is being used by running container %s", stringid.TruncateID(imgID), stringid.TruncateID(containerID)), + }) - dockerCmd(c, "stop", containerID) - dockerCmd(c, "rmi", "-f", imgID) + cli.DockerCmd(c, "stop", containerID) + cli.DockerCmd(c, "rmi", "-f", imgID) - imagesAfter, _ = dockerCmd(c, "images", "-a") + imagesAfter = cli.DockerCmd(c, "images", "-a").Combined() // rmi -f failed, image still exists c.Assert(imagesAfter, checker.Not(checker.Contains), imgID[:12], check.Commentf("ImageID:%q; ImagesAfter: %q", imgID, imagesAfter)) } func (s *DockerSuite) TestRmiImgIDForce(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-test'") - + out := cli.DockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-test'").Combined() containerID := strings.TrimSpace(out) // Wait for it to exit as cannot commit a running container on Windows, and // it will take a few seconds to exit if testEnv.DaemonPlatform() == "windows" { - err := waitExited(containerID, 60*time.Second) - c.Assert(err, check.IsNil) + cli.WaitExited(c, containerID, 60*time.Second) } - dockerCmd(c, "commit", containerID, "busybox-test") + cli.DockerCmd(c, "commit", containerID, "busybox-test") - imagesBefore, _ := dockerCmd(c, "images", "-a") - dockerCmd(c, "tag", "busybox-test", "utest:tag1") - dockerCmd(c, "tag", "busybox-test", "utest:tag2") - dockerCmd(c, "tag", "busybox-test", "utest/docker:tag3") - dockerCmd(c, "tag", "busybox-test", "utest:5000/docker:tag4") + imagesBefore := cli.DockerCmd(c, "images", "-a").Combined() + cli.DockerCmd(c, "tag", "busybox-test", "utest:tag1") + cli.DockerCmd(c, "tag", "busybox-test", 
"utest:tag2") + cli.DockerCmd(c, "tag", "busybox-test", "utest/docker:tag3") + cli.DockerCmd(c, "tag", "busybox-test", "utest:5000/docker:tag4") { - imagesAfter, _ := dockerCmd(c, "images", "-a") + imagesAfter := cli.DockerCmd(c, "images", "-a").Combined() c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+4, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) } imgID := inspectField(c, "busybox-test", "Id") // first checkout without force it fails - out, _, err := dockerCmdWithError("rmi", imgID) - // rmi tagged in multiple repos should have failed without force - c.Assert(err, checker.NotNil) - // rmi tagged in multiple repos should have failed without force - c.Assert(out, checker.Contains, "(must be forced) - image is referenced in multiple repositories", check.Commentf("out: %s; err: %v;", out, err)) + cli.Docker(cli.Args("rmi", imgID)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "(must be forced) - image is referenced in multiple repositories", + }) - dockerCmd(c, "rmi", "-f", imgID) + cli.DockerCmd(c, "rmi", "-f", imgID) { - imagesAfter, _ := dockerCmd(c, "images", "-a") + imagesAfter := cli.DockerCmd(c, "images", "-a").Combined() // rmi failed, image still exists c.Assert(imagesAfter, checker.Not(checker.Contains), imgID[:12]) } diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go index e903e1a93..544cfdf9a 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go @@ -24,6 +24,7 @@ import ( "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/integration-cli/cli" "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/cli/build/fakecontext" "github.com/docker/docker/pkg/mount" 
"github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/stringutils" @@ -2024,18 +2025,16 @@ func (s *DockerSuite) TestRunDeallocatePortOnMissingIptablesRule(c *check.C) { // TODO Windows. Network settings are not propagated back to inspect. testRequires(c, SameHostDaemon, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top") + out := cli.DockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top").Combined() id := strings.TrimSpace(out) ip := inspectField(c, id, "NetworkSettings.Networks.bridge.IPAddress") icmd.RunCommand("iptables", "-D", "DOCKER", "-d", fmt.Sprintf("%s/32", ip), "!", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-m", "tcp", "--dport", "23", "-j", "ACCEPT").Assert(c, icmd.Success) - if err := deleteContainer(id); err != nil { - c.Fatal(err) - } + cli.DockerCmd(c, "rm", "-fv", id) - dockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top") + cli.DockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top") } func (s *DockerSuite) TestRunPortInUse(c *check.C) { @@ -2816,12 +2815,11 @@ func (s *DockerSuite) TestRunVolumesFromRestartAfterRemoved(c *check.C) { // run container with --rm should remove container if exit code != 0 func (s *DockerSuite) TestRunContainerWithRmFlagExitCodeNotEqualToZero(c *check.C) { name := "flowers" - out, _, err := dockerCmdWithError("run", "--name", name, "--rm", "busybox", "ls", "/notexists") - if err == nil { - c.Fatal("Expected docker run to fail", out, err) - } + cli.Docker(cli.Args("run", "--name", name, "--rm", "busybox", "ls", "/notexists")).Assert(c, icmd.Expected{ + ExitCode: 1, + }) - out = getAllContainers(c) + out := cli.DockerCmd(c, "ps", "-q", "-a").Combined() if out != "" { c.Fatal("Expected not to have containers", out) } @@ -2829,12 +2827,10 @@ func (s *DockerSuite) TestRunContainerWithRmFlagExitCodeNotEqualToZero(c *check. 
func (s *DockerSuite) TestRunContainerWithRmFlagCannotStartContainer(c *check.C) { name := "sparkles" - out, _, err := dockerCmdWithError("run", "--name", name, "--rm", "busybox", "commandNotFound") - if err == nil { - c.Fatal("Expected docker run to fail", out, err) - } - - out = getAllContainers(c) + cli.Docker(cli.Args("run", "--name", name, "--rm", "busybox", "commandNotFound")).Assert(c, icmd.Expected{ + ExitCode: 127, + }) + out := cli.DockerCmd(c, "ps", "-q", "-a").Combined() if out != "" { c.Fatal("Expected not to have containers", out) } @@ -3169,11 +3165,11 @@ func (s *DockerTrustSuite) TestTrustedRun(c *check.C) { repoName := s.setupTrustedImage(c, "trusted-run") // Try run - icmd.RunCmd(icmd.Command(dockerBinary, "run", repoName), trustedCmd).Assert(c, SuccessTagging) - dockerCmd(c, "rmi", repoName) + cli.Docker(cli.Args("run", repoName), trustedCmd).Assert(c, SuccessTagging) + cli.DockerCmd(c, "rmi", repoName) // Try untrusted run to ensure we pushed the tag to the registry - icmd.RunCmd(icmd.Command(dockerBinary, "run", "--disable-content-trust=true", repoName), trustedCmd).Assert(c, SuccessDownloadedOnStderr) + cli.Docker(cli.Args("run", "--disable-content-trust=true", repoName), trustedCmd).Assert(c, SuccessDownloadedOnStderr) } func (s *DockerTrustSuite) TestUntrustedRun(c *check.C) { @@ -3181,44 +3177,17 @@ func (s *DockerTrustSuite) TestUntrustedRun(c *check.C) { testRequires(c, DaemonIsLinux) repoName := fmt.Sprintf("%v/dockercliuntrusted/runtest:latest", privateRegistryURL) // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - dockerCmd(c, "push", repoName) - dockerCmd(c, "rmi", repoName) + cli.DockerCmd(c, "tag", "busybox", repoName) + cli.DockerCmd(c, "push", repoName) + cli.DockerCmd(c, "rmi", repoName) // Try trusted run on untrusted tag - icmd.RunCmd(icmd.Command(dockerBinary, "run", repoName), trustedCmd).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("run", repoName), trustedCmd).Assert(c, 
icmd.Expected{ ExitCode: 125, Err: "does not have trust data for", }) } -func (s *DockerTrustSuite) TestRunWhenCertExpired(c *check.C) { - // Windows does not support this functionality - testRequires(c, DaemonIsLinux) - c.Skip("Currently changes system time, causing instability") - repoName := s.setupTrustedImage(c, "trusted-run-expired") - - // Certificates have 10 years of expiration - elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11) - - testutil.RunAtDifferentDate(elevenYearsFromNow, func() { - // Try run - icmd.RunCmd(icmd.Cmd{ - Command: []string{dockerBinary, "run", repoName}, - }, trustedCmd).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: "could not validate the path to a trusted root", - }) - }) - - testutil.RunAtDifferentDate(elevenYearsFromNow, func() { - // Try run - icmd.RunCmd(icmd.Cmd{ - Command: []string{dockerBinary, "run", "--disable-content-trust", repoName}, - }, trustedCmd).Assert(c, SuccessDownloaded) - }) -} - func (s *DockerTrustSuite) TestTrustedRunFromBadTrustServer(c *check.C) { // Windows does not support this functionality testRequires(c, DaemonIsLinux) @@ -3229,14 +3198,14 @@ func (s *DockerTrustSuite) TestTrustedRunFromBadTrustServer(c *check.C) { } // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) + cli.DockerCmd(c, "tag", "busybox", repoName) - icmd.RunCmd(icmd.Command(dockerBinary, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - dockerCmd(c, "rmi", repoName) + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.DockerCmd(c, "rmi", repoName) // Try run - icmd.RunCmd(icmd.Command(dockerBinary, "run", repoName), trustedCmd).Assert(c, SuccessTagging) - dockerCmd(c, "rmi", repoName) + cli.Docker(cli.Args("run", repoName), trustedCmd).Assert(c, SuccessTagging) + cli.DockerCmd(c, "rmi", repoName) // Kill the notary server, start a new "evil" one. 
s.not.Close() @@ -3247,13 +3216,13 @@ func (s *DockerTrustSuite) TestTrustedRunFromBadTrustServer(c *check.C) { // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. // tag an image and upload it to the private registry - dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) + cli.DockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) // Push up to the new server - icmd.RunCmd(icmd.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.Docker(cli.Args("--config", evilLocalConfigDir, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) // Now, try running with the original client from this new trust server. This should fail because the new root is invalid. - icmd.RunCmd(icmd.Command(dockerBinary, "run", repoName), trustedCmd).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("run", repoName), trustedCmd).Assert(c, icmd.Expected{ ExitCode: 125, Err: "could not rotate trust to a new trusted root", }) @@ -4174,22 +4143,25 @@ RUN chmod 755 /entrypoint.sh ENTRYPOINT ["/entrypoint.sh"] CMD echo foobar` - ctx := fakeContext(c, dockerfile, map[string]string{ - "entrypoint.sh": `#!/bin/sh + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "entrypoint.sh": `#!/bin/sh echo "I am an entrypoint" exec "$@"`, - }) + })) defer ctx.Close() - buildImageSuccessfully(c, name, withExternalBuildContext(ctx)) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) - out, _ := dockerCmd(c, "run", "--entrypoint=", "-t", name, "echo", "foo") + out := cli.DockerCmd(c, "run", "--entrypoint=", "-t", name, "echo", "foo").Combined() c.Assert(strings.TrimSpace(out), check.Equals, "foo") // CMD will be reset as well (the same as setting a custom entrypoint) - _, _, err := dockerCmdWithError("run", "--entrypoint=", "-t", name) - c.Assert(err, check.NotNil) - 
c.Assert(err.Error(), checker.Contains, "No command specified") + cli.Docker(cli.Args("run", "--entrypoint=", "-t", name)).Assert(c, icmd.Expected{ + ExitCode: 125, + Err: "No command specified", + }) } func (s *DockerDaemonSuite) TestRunWithUlimitAndDaemonDefault(c *check.C) { @@ -4261,10 +4233,9 @@ func (s *DockerSuite) TestRunCredentialSpecWellFormed(c *check.C) { func (s *DockerSuite) TestRunServicingContainer(c *check.C) { testRequires(c, DaemonIsWindows, SameHostDaemon) - out, _ := dockerCmd(c, "run", "-d", testEnv.MinimalBaseImage(), "cmd", "/c", "mkdir c:\\programdata\\Microsoft\\Windows\\ContainerUpdates\\000_000_d99f45d0-ffc8-4af7-bd9c-ea6a62e035c9_200 && sc control cexecsvc 255") + out := cli.DockerCmd(c, "run", "-d", testEnv.MinimalBaseImage(), "cmd", "/c", "mkdir c:\\programdata\\Microsoft\\Windows\\ContainerUpdates\\000_000_d99f45d0-ffc8-4af7-bd9c-ea6a62e035c9_200 && sc control cexecsvc 255").Combined() containerID := strings.TrimSpace(out) - err := waitExited(containerID, 60*time.Second) - c.Assert(err, checker.IsNil) + cli.WaitExited(c, containerID, 60*time.Second) result := icmd.RunCommand("powershell", "echo", `(Get-WinEvent -ProviderName "Microsoft-Windows-Hyper-V-Compute" -FilterXPath 'Event[System[EventID=2010]]' -MaxEvents 1).Message`) result.Assert(c, icmd.Success) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_run_unix_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_run_unix_test.go index bb5913a12..b3d1b0721 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_run_unix_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_run_unix_test.go @@ -1015,18 +1015,6 @@ func (s *DockerSuite) TestRunSeccompProfileDenyUnshareUserns(c *check.C) { }) } -// TestRunSeccompProfileDenyUnusualSocketFamilies checks that rarely used socket families such as Appletalk are blocked by the default profile -func (s *DockerSuite) 
TestRunSeccompProfileDenyUnusualSocketFamilies(c *check.C) { - testRequires(c, SameHostDaemon, seccompEnabled) - ensureSyscallTest(c) - - runCmd := exec.Command(dockerBinary, "run", "syscall-test", "appletalk-test") - _, _, err := runCommandWithOutput(runCmd) - if err != nil { - c.Fatal("expected opening appletalk socket family to fail") - } -} - // TestRunSeccompProfileDenyCloneUserns checks that 'docker run syscall-test' // with a the default seccomp profile exits with operation not permitted. func (s *DockerSuite) TestRunSeccompProfileDenyCloneUserns(c *check.C) { diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_unix_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_unix_test.go index 0443400f5..deb061682 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_unix_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_unix_test.go @@ -67,7 +67,7 @@ func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *check.C) { n, err := pty.Read(buf) c.Assert(err, check.IsNil) //could not read tty output - c.Assert(string(buf[:n]), checker.Contains, "Cowardly refusing", check.Commentf("help output is not being yielded")) + c.Assert(string(buf[:n]), checker.Contains, "cowardly refusing", check.Commentf("help output is not being yielded")) } func (s *DockerSuite) TestSaveAndLoadWithProgressBar(c *check.C) { diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_service_create_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_service_create_test.go index 1ff9b482c..6fc92c237 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_service_create_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_service_create_test.go @@ -5,6 +5,7 @@ package main import ( "encoding/json" "fmt" + "path/filepath" "strings" "github.com/docker/docker/api/types" @@ -16,7 +17,7 @@ import ( func 
(s *DockerSwarmSuite) TestServiceCreateMountVolume(c *check.C) { d := s.AddDaemon(c, true, true) - out, err := d.Cmd("service", "create", "--detach=true", "--mount", "type=volume,source=foo,target=/foo,volume-nocopy", "busybox", "top") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--mount", "type=volume,source=foo,target=/foo,volume-nocopy", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) id := strings.TrimSpace(out) @@ -75,7 +76,7 @@ func (s *DockerSwarmSuite) TestServiceCreateWithSecretSimple(c *check.C) { }) c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) - out, err := d.Cmd("service", "create", "--name", serviceName, "--secret", testName, "busybox", "top") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--secret", testName, "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) @@ -90,23 +91,41 @@ func (s *DockerSwarmSuite) TestServiceCreateWithSecretSimple(c *check.C) { c.Assert(refs[0].File.Name, checker.Equals, testName) c.Assert(refs[0].File.UID, checker.Equals, "0") c.Assert(refs[0].File.GID, checker.Equals, "0") + + out, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + d.DeleteSecret(c, testName) } -func (s *DockerSwarmSuite) TestServiceCreateWithSecretSourceTarget(c *check.C) { +func (s *DockerSwarmSuite) TestServiceCreateWithSecretSourceTargetPaths(c *check.C) { d := s.AddDaemon(c, true, true) - serviceName := "test-service-secret" - testName := "test_secret" - id := d.CreateSecret(c, swarm.SecretSpec{ - Annotations: swarm.Annotations{ - Name: testName, - }, - Data: []byte("TESTINGDATA"), - }) - c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) - testTarget := "testing" + testPaths := map[string]string{ + "app": 
"/etc/secret", + "test_secret": "test_secret", + "relative_secret": "relative/secret", + "escapes_in_container": "../secret", + } - out, err := d.Cmd("service", "create", "--name", serviceName, "--secret", fmt.Sprintf("source=%s,target=%s", testName, testTarget), "busybox", "top") + var secretFlags []string + + for testName, testTarget := range testPaths { + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA " + testName + " " + testTarget), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secretFlags = append(secretFlags, "--secret", fmt.Sprintf("source=%s,target=%s", testName, testTarget)) + } + + serviceName := "svc" + serviceCmd := []string{"service", "create", "--no-resolve-image", "--name", serviceName} + serviceCmd = append(serviceCmd, secretFlags...) + serviceCmd = append(serviceCmd, "busybox", "top") + out, err := d.Cmd(serviceCmd...) c.Assert(err, checker.IsNil, check.Commentf(out)) out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) @@ -114,16 +133,234 @@ func (s *DockerSwarmSuite) TestServiceCreateWithSecretSourceTarget(c *check.C) { var refs []swarm.SecretReference c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, len(testPaths)) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, serviceName) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + for testName, testTarget := 
range testPaths { + path := testTarget + if !filepath.IsAbs(path) { + path = filepath.Join("/run/secrets", path) + } + out, err := d.Cmd("exec", task.Status.ContainerStatus.ContainerID, "cat", path) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "TESTINGDATA "+testName+" "+testTarget) + } + + out, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestServiceCreateWithSecretReferencedTwice(c *check.C) { + d := s.AddDaemon(c, true, true) + + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: "mysecret", + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + serviceName := "svc" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--secret", "source=mysecret,target=target1", "--secret", "source=mysecret,target=target2", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.SecretReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 2) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, serviceName) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + for _, target := range []string{"target1", "target2"} { + c.Assert(err, checker.IsNil, 
check.Commentf(out)) + path := filepath.Join("/run/secrets", target) + out, err := d.Cmd("exec", task.Status.ContainerStatus.ContainerID, "cat", path) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "TESTINGDATA") + } + + out, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestServiceCreateWithConfigSimple(c *check.C) { + d := s.AddDaemon(c, true, true) + + serviceName := "test-service-config" + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--config", testName, "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.ConfigReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) c.Assert(refs, checker.HasLen, 1) - c.Assert(refs[0].SecretName, checker.Equals, testName) + c.Assert(refs[0].ConfigName, checker.Equals, testName) c.Assert(refs[0].File, checker.Not(checker.IsNil)) - c.Assert(refs[0].File.Name, checker.Equals, testTarget) + c.Assert(refs[0].File.Name, checker.Equals, testName) + c.Assert(refs[0].File.UID, checker.Equals, "0") + c.Assert(refs[0].File.GID, checker.Equals, "0") + + out, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + d.DeleteConfig(c, testName) +} + +func (s *DockerSwarmSuite) TestServiceCreateWithConfigSourceTargetPaths(c *check.C) { + d := s.AddDaemon(c, true, true) + + testPaths := map[string]string{ + "app": "/etc/config", + "test_config": "test_config", + "relative_config": "relative/config", + } + + var 
configFlags []string + + for testName, testTarget := range testPaths { + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA " + testName + " " + testTarget), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + configFlags = append(configFlags, "--config", fmt.Sprintf("source=%s,target=%s", testName, testTarget)) + } + + serviceName := "svc" + serviceCmd := []string{"service", "create", "--no-resolve-image", "--name", serviceName} + serviceCmd = append(serviceCmd, configFlags...) + serviceCmd = append(serviceCmd, "busybox", "top") + out, err := d.Cmd(serviceCmd...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.ConfigReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, len(testPaths)) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, serviceName) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + for testName, testTarget := range testPaths { + path := testTarget + if !filepath.IsAbs(path) { + path = filepath.Join("/", path) + } + out, err := d.Cmd("exec", task.Status.ContainerStatus.ContainerID, "cat", path) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "TESTINGDATA "+testName+" "+testTarget) + } + + out, err = d.Cmd("service", "rm", serviceName) + 
c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestServiceCreateWithConfigReferencedTwice(c *check.C) { + d := s.AddDaemon(c, true, true) + + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: "myconfig", + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + serviceName := "svc" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--config", "source=myconfig,target=target1", "--config", "source=myconfig,target=target2", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.ConfigReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 2) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, serviceName) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + for _, target := range []string{"target1", "target2"} { + c.Assert(err, checker.IsNil, check.Commentf(out)) + path := filepath.Join("/", target) + out, err := d.Cmd("exec", task.Status.ContainerStatus.ContainerID, "cat", path) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "TESTINGDATA") + } + + out, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil, check.Commentf(out)) } func (s *DockerSwarmSuite) 
TestServiceCreateMountTmpfs(c *check.C) { d := s.AddDaemon(c, true, true) - out, err := d.Cmd("service", "create", "--detach=true", "--mount", "type=tmpfs,target=/foo,tmpfs-size=1MB", "busybox", "sh", "-c", "mount | grep foo; tail -f /dev/null") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--mount", "type=tmpfs,target=/foo,tmpfs-size=1MB", "busybox", "sh", "-c", "mount | grep foo; tail -f /dev/null") c.Assert(err, checker.IsNil, check.Commentf(out)) id := strings.TrimSpace(out) @@ -173,3 +410,38 @@ func (s *DockerSwarmSuite) TestServiceCreateMountTmpfs(c *check.C) { c.Assert(strings.TrimSpace(out), checker.HasPrefix, "tmpfs on /foo type tmpfs") c.Assert(strings.TrimSpace(out), checker.Contains, "size=1024k") } + +func (s *DockerSwarmSuite) TestServiceCreateWithNetworkAlias(c *check.C) { + d := s.AddDaemon(c, true, true) + out, err := d.Cmd("network", "create", "--scope=swarm", "test_swarm_br") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--network=name=test_swarm_br,alias=srv_alias", "--name=alias_tst_container", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, id) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + // check container alias config + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .NetworkSettings.Networks.test_swarm_br.Aliases}}", 
task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Make sure the only alias seen is the container-id + var aliases []string + c.Assert(json.Unmarshal([]byte(out), &aliases), checker.IsNil) + c.Assert(aliases, checker.HasLen, 1) + + c.Assert(task.Status.ContainerStatus.ContainerID, checker.Contains, aliases[0]) +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_service_health_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_service_health_test.go index 9aa619897..789838545 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_service_health_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_service_health_test.go @@ -31,7 +31,7 @@ func (s *DockerSwarmSuite) TestServiceHealthRun(c *check.C) { c.Check(err, check.IsNil) serviceName := "healthServiceRun" - out, err := d.Cmd("service", "create", "--detach=true", "--name", serviceName, imageName, "top") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--name", serviceName, imageName, "top") c.Assert(err, checker.IsNil, check.Commentf(out)) id := strings.TrimSpace(out) @@ -92,7 +92,7 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) { c.Check(err, check.IsNil) serviceName := "healthServiceStart" - out, err := d.Cmd("service", "create", "--detach=true", "--name", serviceName, imageName, "top") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--name", serviceName, imageName, "top") c.Assert(err, checker.IsNil, check.Commentf(out)) id := strings.TrimSpace(out) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_service_logs_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_service_logs_test.go index 340cbc035..d2ce36def 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_service_logs_test.go +++ 
b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_service_logs_test.go @@ -31,7 +31,7 @@ func (s *DockerSwarmSuite) TestServiceLogs(c *check.C) { } for name, message := range services { - out, err := d.Cmd("service", "create", "--name", name, "busybox", + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", fmt.Sprintf("echo %s; tail -f /dev/null", message)) c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") @@ -55,8 +55,14 @@ func (s *DockerSwarmSuite) TestServiceLogs(c *check.C) { // output. func countLogLines(d *daemon.Swarm, name string) func(*check.C) (interface{}, check.CommentInterface) { return func(c *check.C) (interface{}, check.CommentInterface) { - result := icmd.RunCmd(d.Command("service", "logs", "-t", name)) + result := icmd.RunCmd(d.Command("service", "logs", "-t", "--raw", name)) result.Assert(c, icmd.Expected{}) + // if this returns an emptystring, trying to split it later will return + // an array containing emptystring. a valid log line will NEVER be + // emptystring because we ask for the timestamp. 
+ if result.Stdout() == "" { + return 0, check.Commentf("Empty stdout") + } lines := strings.Split(strings.TrimSpace(result.Stdout()), "\n") return len(lines), check.Commentf("output, %q", string(result.Stdout())) } @@ -68,7 +74,7 @@ func (s *DockerSwarmSuite) TestServiceLogsCompleteness(c *check.C) { name := "TestServiceLogsCompleteness" // make a service that prints 6 lines - out, err := d.Cmd("service", "create", "--name", name, "busybox", "sh", "-c", "for line in $(seq 0 5); do echo log test $line; done; sleep 100000") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "for line in $(seq 0 5); do echo log test $line; done; sleep 100000") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") @@ -95,7 +101,7 @@ func (s *DockerSwarmSuite) TestServiceLogsTail(c *check.C) { name := "TestServiceLogsTail" // make a service that prints 6 lines - out, err := d.Cmd("service", "create", "--name", name, "busybox", "sh", "-c", "for line in $(seq 1 6); do echo log test $line; done; sleep 100000") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "for line in $(seq 1 6); do echo log test $line; done; sleep 100000") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") @@ -119,7 +125,7 @@ func (s *DockerSwarmSuite) TestServiceLogsSince(c *check.C) { name := "TestServiceLogsSince" - out, err := d.Cmd("service", "create", "--name", name, "busybox", "sh", "-c", "for i in $(seq 1 3); do sleep .1; echo log$i; done; sleep 10000000") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "for i in $(seq 1 3); do sleep .1; echo log$i; done; sleep 10000000") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) @@ -153,7 
+159,7 @@ func (s *DockerSwarmSuite) TestServiceLogsFollow(c *check.C) { name := "TestServiceLogsFollow" - out, err := d.Cmd("service", "create", "--name", name, "busybox", "sh", "-c", "while true; do echo log test; sleep 0.1; done") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "while true; do echo log test; sleep 0.1; done") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") @@ -201,12 +207,12 @@ func (s *DockerSwarmSuite) TestServiceLogsTaskLogs(c *check.C) { result := icmd.RunCmd(d.Command( // create a service with the name - "service", "create", "--name", name, + "service", "create", "--no-resolve-image", "--name", name, // which has some number of replicas fmt.Sprintf("--replicas=%v", replicas), // which has this the task id as an environment variable templated in "--env", "TASK={{.Task.ID}}", - // and runs this command to print exaclty 6 logs lines + // and runs this command to print exactly 6 logs lines "busybox", "sh", "-c", "for line in $(seq 0 5); do echo $TASK log test $line; done; sleep 100000", )) result.Assert(c, icmd.Expected{}) @@ -253,7 +259,7 @@ func (s *DockerSwarmSuite) TestServiceLogsTTY(c *check.C) { result := icmd.RunCmd(d.Command( // create a service - "service", "create", + "service", "create", "--no-resolve-image", // name it $name "--name", name, // use a TTY @@ -277,9 +283,105 @@ func (s *DockerSwarmSuite) TestServiceLogsTTY(c *check.C) { // and make sure we have all the log lines waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 2) - cmd := d.Command("service", "logs", name) + cmd := d.Command("service", "logs", "--raw", name) result = icmd.RunCmd(cmd) // for some reason there is carriage return in the output. i think this is // just expected. 
c.Assert(result, icmd.Matches, icmd.Expected{Out: "out\r\nerr\r\n"}) } + +func (s *DockerSwarmSuite) TestServiceLogsNoHangDeletedContainer(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "TestServiceLogsNoHangDeletedContainer" + + result := icmd.RunCmd(d.Command( + // create a service + "service", "create", "--no-resolve-image", + // name it $name + "--name", name, + // busybox image, shell string + "busybox", "sh", "-c", + // echo to stdout and stderr + "while true; do echo line; sleep 2; done", + )) + + // confirm that the command succeeded + c.Assert(result, icmd.Matches, icmd.Expected{}) + // get the service id + id := strings.TrimSpace(result.Stdout()) + c.Assert(id, checker.Not(checker.Equals), "") + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + // and make sure we have all the log lines + waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 2) + + // now find and nuke the container + result = icmd.RunCmd(d.Command("ps", "-q")) + containerID := strings.TrimSpace(result.Stdout()) + c.Assert(containerID, checker.Not(checker.Equals), "") + result = icmd.RunCmd(d.Command("stop", containerID)) + c.Assert(result, icmd.Matches, icmd.Expected{Out: containerID}) + result = icmd.RunCmd(d.Command("rm", containerID)) + c.Assert(result, icmd.Matches, icmd.Expected{Out: containerID}) + + // run logs. use tail 2 to make sure we don't try to get a bunch of logs + // somehow and slow down execution time + cmd := d.Command("service", "logs", "--tail", "2", id) + // start the command and then wait for it to finish with a 3 second timeout + result = icmd.StartCmd(cmd) + result = icmd.WaitOnCmd(3*time.Second, result) + + // then, assert that the result matches expected. 
if the command timed out, + // if the command is timed out, result.Timeout will be true, but the + // Expected defaults to false + c.Assert(result, icmd.Matches, icmd.Expected{}) +} + +func (s *DockerSwarmSuite) TestServiceLogsDetails(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "TestServiceLogsDetails" + + result := icmd.RunCmd(d.Command( + // create a service + "service", "create", "--no-resolve-image", + // name it $name + "--name", name, + // add an environment variable + "--env", "asdf=test1", + // add a log driver (without explicitly setting a driver, log-opt doesn't work) + "--log-driver", "json-file", + // add a log option to print the environment variable + "--log-opt", "env=asdf", + // busybox image, shell string + "busybox", "sh", "-c", + // make a log line + "echo LogLine; while true; do sleep 1; done;", + )) + + result.Assert(c, icmd.Expected{}) + id := strings.TrimSpace(result.Stdout()) + c.Assert(id, checker.Not(checker.Equals), "") + + // make sure task has been deployed + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + // and make sure we have all the log lines + waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 1) + + // First, test without pretty printing + // call service logs with details. set raw to skip pretty printing + result = icmd.RunCmd(d.Command("service", "logs", "--raw", "--details", name)) + // in this case, we should get details and we should get log message, but + // there will also be context as details (which will fall after the detail + // we inserted in alphabetical order + c.Assert(result, icmd.Matches, icmd.Expected{Out: "asdf=test1"}) + c.Assert(result, icmd.Matches, icmd.Expected{Out: "LogLine"}) + + // call service logs with details. this time, don't pass raw + result = icmd.RunCmd(d.Command("service", "logs", "--details", id)) + // in this case, we should get details space logmessage as well. 
the context + // is part of the pretty part of the logline + c.Assert(result, icmd.Matches, icmd.Expected{Out: "asdf=test1 LogLine"}) +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_service_scale_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_service_scale_test.go index c96fa5cf8..8fb84fed8 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_service_scale_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_service_scale_test.go @@ -14,11 +14,11 @@ func (s *DockerSwarmSuite) TestServiceScale(c *check.C) { d := s.AddDaemon(c, true, true) service1Name := "TestService1" - service1Args := append([]string{"service", "create", "--name", service1Name, defaultSleepImage}, sleepCommandForDaemonPlatform()...) + service1Args := append([]string{"service", "create", "--no-resolve-image", "--name", service1Name, defaultSleepImage}, sleepCommandForDaemonPlatform()...) // global mode service2Name := "TestService2" - service2Args := append([]string{"service", "create", "--name", service2Name, "--mode=global", defaultSleepImage}, sleepCommandForDaemonPlatform()...) + service2Args := append([]string{"service", "create", "--no-resolve-image", "--name", service2Name, "--mode=global", defaultSleepImage}, sleepCommandForDaemonPlatform()...) // Create services out, err := d.Cmd(service1Args...) 
diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_service_update_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_service_update_test.go index 1480d13e5..086ae773e 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_service_update_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_service_update_test.go @@ -15,7 +15,7 @@ func (s *DockerSwarmSuite) TestServiceUpdatePort(c *check.C) { d := s.AddDaemon(c, true, true) serviceName := "TestServiceUpdatePort" - serviceArgs := append([]string{"service", "create", "--name", serviceName, "-p", "8080:8081", defaultSleepImage}, sleepCommandForDaemonPlatform()...) + serviceArgs := append([]string{"service", "create", "--no-resolve-image", "--name", serviceName, "-p", "8080:8081", defaultSleepImage}, sleepCommandForDaemonPlatform()...) // Create a service with a port mapping of 8080:8081. out, err := d.Cmd(serviceArgs...) @@ -48,7 +48,7 @@ func (s *DockerSwarmSuite) TestServiceUpdatePort(c *check.C) { func (s *DockerSwarmSuite) TestServiceUpdateLabel(c *check.C) { d := s.AddDaemon(c, true, true) - out, err := d.Cmd("service", "create", "--name=test", "busybox", "top") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name=test", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) service := d.GetService(c, "test") c.Assert(service.Spec.Labels, checker.HasLen, 0) @@ -100,7 +100,7 @@ func (s *DockerSwarmSuite) TestServiceUpdateSecrets(c *check.C) { testTarget := "testing" serviceName := "test" - out, err := d.Cmd("service", "create", "--name", serviceName, "busybox", "top") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) // add secret @@ -128,3 +128,45 @@ func (s *DockerSwarmSuite) TestServiceUpdateSecrets(c *check.C) { c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) c.Assert(refs, 
checker.HasLen, 0) } + +func (s *DockerSwarmSuite) TestServiceUpdateConfigs(c *check.C) { + d := s.AddDaemon(c, true, true) + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + testTarget := "/testing" + serviceName := "test" + + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // add config + out, err = d.CmdRetryOutOfSequence("service", "update", "test", "--config-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget)) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.ConfigReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 1) + + c.Assert(refs[0].ConfigName, checker.Equals, testName) + c.Assert(refs[0].File, checker.Not(checker.IsNil)) + c.Assert(refs[0].File.Name, checker.Equals, testTarget) + + // remove + out, err = d.CmdRetryOutOfSequence("service", "update", "test", "--config-rm", testName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName) + c.Assert(err, checker.IsNil) + + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 0) +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_stack_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_stack_test.go index d754a2c77..91fe4d75c 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_stack_test.go +++ 
b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_stack_test.go @@ -15,6 +15,17 @@ import ( "github.com/go-check/check" ) +var cleanSpaces = func(s string) string { + lines := strings.Split(s, "\n") + for i, line := range lines { + spaceIx := strings.Index(line, " ") + if spaceIx > 0 { + lines[i] = line[:spaceIx+1] + strings.TrimLeft(line[spaceIx:], " ") + } + } + return strings.Join(lines, "\n") +} + func (s *DockerSwarmSuite) TestStackRemoveUnknown(c *check.C) { d := s.AddDaemon(c, true, true) @@ -59,13 +70,13 @@ func (s *DockerSwarmSuite) TestStackDeployComposeFile(c *check.C) { out, err = d.Cmd("stack", "ls") c.Assert(err, checker.IsNil) - c.Assert(out, check.Equals, "NAME SERVICES\n"+"testdeploy 2\n") + c.Assert(cleanSpaces(out), check.Equals, "NAME SERVICES\n"+"testdeploy 2\n") out, err = d.Cmd("stack", "rm", testStackName) c.Assert(err, checker.IsNil) out, err = d.Cmd("stack", "ls") c.Assert(err, checker.IsNil) - c.Assert(out, check.Equals, "NAME SERVICES\n") + c.Assert(cleanSpaces(out), check.Equals, "NAME SERVICES\n") } func (s *DockerSwarmSuite) TestStackDeployWithSecretsTwice(c *check.C) { @@ -180,7 +191,7 @@ func (s *DockerSwarmSuite) TestStackDeployWithDAB(c *check.C) { stackArgs = []string{"stack", "ls"} out, err = d.Cmd(stackArgs...) c.Assert(err, checker.IsNil) - c.Assert(out, check.Equals, "NAME SERVICES\n"+"test 2\n") + c.Assert(cleanSpaces(out), check.Equals, "NAME SERVICES\n"+"test 2\n") // rm stackArgs = []string{"stack", "rm", testStackName} out, err = d.Cmd(stackArgs...) @@ -191,5 +202,5 @@ func (s *DockerSwarmSuite) TestStackDeployWithDAB(c *check.C) { stackArgs = []string{"stack", "ls"} out, err = d.Cmd(stackArgs...) 
c.Assert(err, checker.IsNil) - c.Assert(out, check.Equals, "NAME SERVICES\n") + c.Assert(cleanSpaces(out), check.Equals, "NAME SERVICES\n") } diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_start_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_start_test.go index a77f7a465..2dd5fdf5f 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_start_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_start_test.go @@ -94,7 +94,6 @@ func (s *DockerSuite) TestStartRecordError(c *check.C) { func (s *DockerSuite) TestStartPausedContainer(c *check.C) { // Windows does not support pausing containers testRequires(c, IsPausable) - defer unpauseAllContainers(c) runSleepingContainer(c, "-d", "--name", "testing") @@ -160,7 +159,7 @@ func (s *DockerSuite) TestStartAttachMultipleContainers(c *check.C) { // err shouldn't be nil because start will fail c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) // output does not correspond to what was expected - c.Assert(out, checker.Contains, "You cannot start and attach multiple containers at once.") + c.Assert(out, checker.Contains, "you cannot start and attach multiple containers at once") } // confirm the state of all the containers be stopped @@ -174,17 +173,15 @@ func (s *DockerSuite) TestStartAttachMultipleContainers(c *check.C) { // Test case for #23716 func (s *DockerSuite) TestStartAttachWithRename(c *check.C) { testRequires(c, DaemonIsLinux) - dockerCmd(c, "create", "-t", "--name", "before", "busybox") + cli.DockerCmd(c, "create", "-t", "--name", "before", "busybox") go func() { - c.Assert(waitRun("before"), checker.IsNil) - dockerCmd(c, "rename", "before", "after") - dockerCmd(c, "stop", "--time=2", "after") + cli.WaitRun(c, "before") + cli.DockerCmd(c, "rename", "before", "after") + cli.DockerCmd(c, "stop", "--time=2", "after") }() // FIXME(vdemeester) the intent is not clear and potentially racey - result := 
icmd.RunCommand(dockerBinary, "start", "-a", "before") - result.Assert(c, icmd.Expected{ + result := cli.Docker(cli.Args("start", "-a", "before")).Assert(c, icmd.Expected{ ExitCode: 137, - Error: "exit status 137", }) c.Assert(result.Stderr(), checker.Not(checker.Contains), "No such container") } diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_stats_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_stats_test.go index d1dbd7332..9d40ce028 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_stats_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_stats_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" "github.com/go-check/check" ) @@ -146,7 +147,7 @@ func (s *DockerSuite) TestStatsAllNewContainersAdded(c *check.C) { } }() - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") c.Assert(waitRun(strings.TrimSpace(out)), check.IsNil) id <- strings.TrimSpace(out)[:12] @@ -162,17 +163,17 @@ func (s *DockerSuite) TestStatsFormatAll(c *check.C) { // Windows does not support stats testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "-d", "--name=RunningOne", "busybox", "top") - c.Assert(waitRun("RunningOne"), check.IsNil) - dockerCmd(c, "run", "-d", "--name=ExitedOne", "busybox", "top") - dockerCmd(c, "stop", "ExitedOne") - c.Assert(waitExited("ExitedOne", 5*time.Second), check.IsNil) + cli.DockerCmd(c, "run", "-d", "--name=RunningOne", "busybox", "top") + cli.WaitRun(c, "RunningOne") + cli.DockerCmd(c, "run", "-d", "--name=ExitedOne", "busybox", "top") + cli.DockerCmd(c, "stop", "ExitedOne") + cli.WaitExited(c, "ExitedOne", 5*time.Second) - out, _ := dockerCmd(c, "stats", "--no-stream", "--format", "{{.Name}}") + out := cli.DockerCmd(c, "stats", "--no-stream", "--format", "{{.Name}}").Combined() c.Assert(out, checker.Contains, "RunningOne") c.Assert(out, 
checker.Not(checker.Contains), "ExitedOne") - out, _ = dockerCmd(c, "stats", "--all", "--no-stream", "--format", "{{.Name}}") + out = cli.DockerCmd(c, "stats", "--all", "--no-stream", "--format", "{{.Name}}").Combined() c.Assert(out, checker.Contains, "RunningOne") c.Assert(out, checker.Contains, "ExitedOne") } diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go index f6b26ab5f..8b57c5dd2 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go @@ -23,6 +23,7 @@ import ( "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/pkg/testutil" icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/docker/docker/pkg/testutil/tempfile" "github.com/docker/libnetwork/driverapi" "github.com/docker/libnetwork/ipamapi" remoteipam "github.com/docker/libnetwork/ipams/remote/api" @@ -53,11 +54,29 @@ func (s *DockerSwarmSuite) TestSwarmUpdate(c *check.C) { c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) // passing an external CA (this is without starting a root rotation) does not fail - out, err = d.Cmd("swarm", "update", "--external-ca", "protocol=cfssl,url=https://something.org") - c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + cli.Docker(cli.Args("swarm", "update", "--external-ca", "protocol=cfssl,url=https://something.org", + "--external-ca", "protocol=cfssl,url=https://somethingelse.org,cacert=fixtures/https/ca.pem"), + cli.Daemon(d.Daemon)).Assert(c, icmd.Success) + + expected, err := ioutil.ReadFile("fixtures/https/ca.pem") + c.Assert(err, checker.IsNil) spec = getSpec() - c.Assert(spec.CAConfig.ExternalCAs, checker.HasLen, 1) + c.Assert(spec.CAConfig.ExternalCAs, checker.HasLen, 2) + c.Assert(spec.CAConfig.ExternalCAs[0].CACert, checker.Equals, "") + 
c.Assert(spec.CAConfig.ExternalCAs[1].CACert, checker.Equals, string(expected)) + + // passing an invalid external CA fails + tempFile := tempfile.NewTempFile(c, "testfile", "fakecert") + defer tempFile.Remove() + + result := cli.Docker(cli.Args("swarm", "update", + "--external-ca", fmt.Sprintf("protocol=cfssl,url=https://something.org,cacert=%s", tempFile.Name())), + cli.Daemon(d.Daemon)) + result.Assert(c, icmd.Expected{ + ExitCode: 125, + Err: "must be in PEM format", + }) } func (s *DockerSwarmSuite) TestSwarmInit(c *check.C) { @@ -68,17 +87,34 @@ func (s *DockerSwarmSuite) TestSwarmInit(c *check.C) { return sw.Spec } + // passing an invalid external CA fails + tempFile := tempfile.NewTempFile(c, "testfile", "fakecert") + defer tempFile.Remove() + + result := cli.Docker(cli.Args("swarm", "init", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s", + "--external-ca", fmt.Sprintf("protocol=cfssl,url=https://somethingelse.org,cacert=%s", tempFile.Name())), + cli.Daemon(d.Daemon)) + result.Assert(c, icmd.Expected{ + ExitCode: 125, + Err: "must be in PEM format", + }) + cli.Docker(cli.Args("swarm", "init", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s", - "--external-ca", "protocol=cfssl,url=https://something.org"), + "--external-ca", "protocol=cfssl,url=https://something.org", + "--external-ca", "protocol=cfssl,url=https://somethingelse.org,cacert=fixtures/https/ca.pem"), cli.Daemon(d.Daemon)).Assert(c, icmd.Success) + expected, err := ioutil.ReadFile("fixtures/https/ca.pem") + c.Assert(err, checker.IsNil) + spec := getSpec() c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, 11*time.Second) - c.Assert(spec.CAConfig.ExternalCAs, checker.HasLen, 1) + c.Assert(spec.CAConfig.ExternalCAs, checker.HasLen, 2) + c.Assert(spec.CAConfig.ExternalCAs[0].CACert, checker.Equals, "") + c.Assert(spec.CAConfig.ExternalCAs[1].CACert, checker.Equals, string(expected)) c.Assert(d.Leave(true), 
checker.IsNil) - time.Sleep(500 * time.Millisecond) // https://github.com/docker/swarmkit/issues/1421 cli.Docker(cli.Args("swarm", "init"), cli.Daemon(d.Daemon)).Assert(c, icmd.Success) spec = getSpec() @@ -133,7 +169,7 @@ func (s *DockerSwarmSuite) TestSwarmIncompatibleDaemon(c *check.C) { func (s *DockerSwarmSuite) TestSwarmServiceTemplatingHostname(c *check.C) { d := s.AddDaemon(c, true, true) - out, err := d.Cmd("service", "create", "--name", "test", "--hostname", "{{.Service.Name}}-{{.Task.Slot}}", "busybox", "top") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "test", "--hostname", "{{.Service.Name}}-{{.Task.Slot}}", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) // make sure task has been deployed. @@ -152,15 +188,15 @@ func (s *DockerSwarmSuite) TestSwarmServiceListFilter(c *check.C) { name1 := "redis-cluster-md5" name2 := "redis-cluster" name3 := "other-cluster" - out, err := d.Cmd("service", "create", "--name", name1, "busybox", "top") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name1, "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - out, err = d.Cmd("service", "create", "--name", name2, "busybox", "top") + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", name2, "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - out, err = d.Cmd("service", "create", "--name", name3, "busybox", "top") + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", name3, "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") @@ -210,7 +246,7 @@ func (s *DockerSwarmSuite) TestSwarmNodeTaskListFilter(c *check.C) { d := s.AddDaemon(c, true, true) name := "redis-cluster-md5" - out, err := d.Cmd("service", "create", "--name", name, "--replicas=3", "busybox", "top") + out, err := 
d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--replicas=3", "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") @@ -237,7 +273,7 @@ func (s *DockerSwarmSuite) TestSwarmPublishAdd(c *check.C) { d := s.AddDaemon(c, true, true) name := "top" - out, err := d.Cmd("service", "create", "--name", name, "--label", "x=y", "busybox", "top") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--label", "x=y", "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") @@ -259,7 +295,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceWithGroup(c *check.C) { d := s.AddDaemon(c, true, true) name := "top" - out, err := d.Cmd("service", "create", "--name", name, "--user", "root:root", "--group", "wheel", "--group", "audio", "--group", "staff", "--group", "777", "busybox", "top") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--user", "root:root", "--group", "wheel", "--group", "audio", "--group", "staff", "--group", "777", "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") @@ -412,7 +448,7 @@ func (s *DockerSwarmSuite) TestOverlayAttachableReleaseResourcesOnFailure(c *che out, err = d.Cmd("run", "-d", "--network", "ovnet", "--name", "c1", "--ip", "10.10.9.33", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) - // Attempt to attach another contianer with same IP, must fail + // Attempt to attach another container with same IP, must fail _, err = d.Cmd("run", "-d", "--network", "ovnet", "--name", "c2", "--ip", "10.10.9.33", "busybox", "top") c.Assert(err, checker.NotNil) @@ -445,7 +481,7 @@ func (s *DockerSwarmSuite) TestSwarmIngressNetwork(c *check.C) { c.Assert(strings.TrimSpace(out), checker.Contains, "is already present") // It cannot be removed if it is being used - out, err = d.Cmd("service", "create", 
"--name", "srv1", "-p", "9000:8000", "busybox", "top") + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", "srv1", "-p", "9000:8000", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) out, _, err = testutil.RunCommandPipelineWithOutput( exec.Command("echo", "Y"), @@ -464,7 +500,7 @@ func (s *DockerSwarmSuite) TestSwarmIngressNetwork(c *check.C) { c.Assert(err, checker.IsNil, check.Commentf(out)) // A service which needs the ingress network cannot be created if no ingress is present - out, err = d.Cmd("service", "create", "--name", "srv2", "-p", "500:500", "busybox", "top") + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", "srv2", "-p", "500:500", "busybox", "top") c.Assert(err, checker.NotNil) c.Assert(strings.TrimSpace(out), checker.Contains, "no ingress network is present") @@ -474,7 +510,7 @@ func (s *DockerSwarmSuite) TestSwarmIngressNetwork(c *check.C) { c.Assert(strings.TrimSpace(out), checker.Contains, "no ingress network is present") // But services which do not need routing mesh can be created regardless - out, err = d.Cmd("service", "create", "--name", "srv3", "--endpoint-mode", "dnsrr", "busybox", "top") + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", "srv3", "--endpoint-mode", "dnsrr", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) } @@ -492,7 +528,7 @@ func (s *DockerSwarmSuite) TestSwarmCreateServiceWithNoIngressNetwork(c *check.C // Make sure nothing panics because ingress network is missing out, err = d.Cmd("network", "create", "-d", "overlay", "another-network") c.Assert(err, checker.IsNil, check.Commentf(out)) - out, err = d.Cmd("service", "create", "--name", "srv4", "--network", "another-network", "busybox", "top") + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", "srv4", "--network", "another-network", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) } @@ -502,7 +538,7 @@ func (s *DockerSwarmSuite) 
TestSwarmTaskListFilter(c *check.C) { d := s.AddDaemon(c, true, true) name := "redis-cluster-md5" - out, err := d.Cmd("service", "create", "--name", name, "--replicas=3", "busybox", "top") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--replicas=3", "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") @@ -536,7 +572,7 @@ func (s *DockerSwarmSuite) TestSwarmTaskListFilter(c *check.C) { c.Assert(out, checker.Not(checker.Contains), name+".3") name = "redis-cluster-sha1" - out, err = d.Cmd("service", "create", "--name", name, "--mode=global", "busybox", "top") + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--mode=global", "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") @@ -565,7 +601,7 @@ func (s *DockerSwarmSuite) TestPsListContainersFilterIsTask(c *check.C) { bareID := strings.TrimSpace(out)[:12] // Create a service name := "busybox-top" - out, err = d.Cmd("service", "create", "--name", name, "busybox", "top") + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") @@ -784,7 +820,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceEnvFile(c *check.C) { c.Assert(err, checker.IsNil) name := "worker" - out, err := d.Cmd("service", "create", "--env-file", path, "--env", "VAR1=B", "--env", "VAR1=C", "--env", "VAR2=", "--env", "VAR2", "--name", name, "busybox", "top") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--env-file", path, "--env", "VAR1=B", "--env", "VAR1=C", "--env", "VAR2=", "--env", "VAR2", "--name", name, "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") @@ -803,7 +839,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceTTY(c *check.C) { // Without --tty 
expectedOutput := "none" - out, err := d.Cmd("service", "create", "--name", name, "busybox", "sh", "-c", ttyCheck) + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", ttyCheck) c.Assert(err, checker.IsNil) // Make sure task has been deployed. @@ -826,7 +862,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceTTY(c *check.C) { // With --tty expectedOutput = "TTY" - out, err = d.Cmd("service", "create", "--name", name, "--tty", "busybox", "sh", "-c", ttyCheck) + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--tty", "busybox", "sh", "-c", ttyCheck) c.Assert(err, checker.IsNil) // Make sure task has been deployed. @@ -847,7 +883,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceTTYUpdate(c *check.C) { // Create a service name := "top" - _, err := d.Cmd("service", "create", "--name", name, "busybox", "top") + _, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "top") c.Assert(err, checker.IsNil) // Make sure task has been deployed. @@ -882,7 +918,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceNetworkUpdate(c *check.C) { // Create a service name := "top" - result = icmd.RunCmd(d.Command("service", "create", "--network", "foo", "--network", "bar", "--name", name, "busybox", "top")) + result = icmd.RunCmd(d.Command("service", "create", "--no-resolve-image", "--network", "foo", "--network", "bar", "--name", name, "busybox", "top")) result.Assert(c, icmd.Success) // Make sure task has been deployed. 
@@ -909,7 +945,7 @@ func (s *DockerSwarmSuite) TestDNSConfig(c *check.C) { // Create a service name := "top" - _, err := d.Cmd("service", "create", "--name", name, "--dns=1.2.3.4", "--dns-search=example.com", "--dns-option=timeout:3", "busybox", "top") + _, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--dns=1.2.3.4", "--dns-search=example.com", "--dns-option=timeout:3", "busybox", "top") c.Assert(err, checker.IsNil) // Make sure task has been deployed. @@ -936,7 +972,7 @@ func (s *DockerSwarmSuite) TestDNSConfigUpdate(c *check.C) { // Create a service name := "top" - _, err := d.Cmd("service", "create", "--name", name, "busybox", "top") + _, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "top") c.Assert(err, checker.IsNil) // Make sure task has been deployed. @@ -1455,7 +1491,7 @@ func (s *DockerSwarmSuite) TestExtraHosts(c *check.C) { // Create a service name := "top" - _, err := d.Cmd("service", "create", "--name", name, "--host=example.com:1.2.3.4", "busybox", "top") + _, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--host=example.com:1.2.3.4", "busybox", "top") c.Assert(err, checker.IsNil) // Make sure task has been deployed. 
@@ -1498,7 +1534,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceInspectPretty(c *check.C) { d := s.AddDaemon(c, true, true) name := "top" - out, err := d.Cmd("service", "create", "--name", name, "--limit-cpu=0.5", "busybox", "top") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--limit-cpu=0.5", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) expectedOutput := ` @@ -1521,7 +1557,7 @@ func (s *DockerSwarmSuite) TestSwarmNetworkIPAMOptions(c *check.C) { c.Assert(err, checker.IsNil, check.Commentf(out)) c.Assert(strings.TrimSpace(out), checker.Equals, "map[foo:bar]") - out, err = d.Cmd("service", "create", "--network=foo", "--name", "top", "busybox", "top") + out, err = d.Cmd("service", "create", "--no-resolve-image", "--network=foo", "--name", "top", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) // make sure task has been deployed. @@ -1539,8 +1575,7 @@ func (s *DockerTrustedSwarmSuite) TestTrustedServiceCreate(c *check.C) { repoName := s.trustSuite.setupTrustedImage(c, "trusted-pull") name := "trusted" - serviceCmd := d.Command("-D", "service", "create", "--name", name, repoName, "top") - icmd.RunCmd(serviceCmd, trustedCmd).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("-D", "service", "create", "--no-resolve-image", "--name", name, repoName, "top"), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{ Err: "resolved image tag to", }) @@ -1552,13 +1587,12 @@ func (s *DockerTrustedSwarmSuite) TestTrustedServiceCreate(c *check.C) { repoName = fmt.Sprintf("%v/untrustedservicecreate/createtest:latest", privateRegistryURL) // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - dockerCmd(c, "push", repoName) - dockerCmd(c, "rmi", repoName) + cli.DockerCmd(c, "tag", "busybox", repoName) + cli.DockerCmd(c, "push", repoName) + cli.DockerCmd(c, "rmi", repoName) name = "untrusted" - serviceCmd = d.Command("service", "create", "--name", name, repoName, 
"top") - icmd.RunCmd(serviceCmd, trustedCmd).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("service", "create", "--no-resolve-image", "--name", name, repoName, "top"), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{ ExitCode: 1, Err: "Error: remote trust data does not exist", }) @@ -1576,34 +1610,31 @@ func (s *DockerTrustedSwarmSuite) TestTrustedServiceUpdate(c *check.C) { name := "myservice" // Create a service without content trust - _, err := d.Cmd("service", "create", "--name", name, repoName, "top") - c.Assert(err, checker.IsNil) + cli.Docker(cli.Args("service", "create", "--no-resolve-image", "--name", name, repoName, "top"), cli.Daemon(d.Daemon)).Assert(c, icmd.Success) - out, err := d.Cmd("service", "inspect", "--pretty", name) - c.Assert(err, checker.IsNil, check.Commentf(out)) + result := cli.Docker(cli.Args("service", "inspect", "--pretty", name), cli.Daemon(d.Daemon)) + c.Assert(result.Error, checker.IsNil, check.Commentf(result.Combined())) // Daemon won't insert the digest because this is disabled by // DOCKER_SERVICE_PREFER_OFFLINE_IMAGE. - c.Assert(out, check.Not(checker.Contains), repoName+"@", check.Commentf(out)) + c.Assert(result.Combined(), check.Not(checker.Contains), repoName+"@", check.Commentf(result.Combined())) - serviceCmd := d.Command("-D", "service", "update", "--image", repoName, name) - icmd.RunCmd(serviceCmd, trustedCmd).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("-D", "service", "update", "--no-resolve-image", "--image", repoName, name), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{ Err: "resolved image tag to", }) - out, err = d.Cmd("service", "inspect", "--pretty", name) - c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(out, checker.Contains, repoName+"@", check.Commentf(out)) + cli.Docker(cli.Args("service", "inspect", "--pretty", name), cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{ + Out: repoName + "@", + }) // Try trusted service update on an untrusted tag. 
repoName = fmt.Sprintf("%v/untrustedservicecreate/createtest:latest", privateRegistryURL) // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - dockerCmd(c, "push", repoName) - dockerCmd(c, "rmi", repoName) + cli.DockerCmd(c, "tag", "busybox", repoName) + cli.DockerCmd(c, "push", repoName) + cli.DockerCmd(c, "rmi", repoName) - serviceCmd = d.Command("service", "update", "--image", repoName, name) - icmd.RunCmd(serviceCmd, trustedCmd).Assert(c, icmd.Expected{ + cli.Docker(cli.Args("service", "update", "--no-resolve-image", "--image", repoName, name), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{ ExitCode: 1, Err: "Error: remote trust data does not exist", }) @@ -1660,13 +1691,13 @@ func (s *DockerSwarmSuite) TestSwarmServicePsMultipleServiceIDs(c *check.C) { d := s.AddDaemon(c, true, true) name1 := "top1" - out, err := d.Cmd("service", "create", "--detach=true", "--name", name1, "--replicas=3", "busybox", "top") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--name", name1, "--replicas=3", "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") id1 := strings.TrimSpace(out) name2 := "top2" - out, err = d.Cmd("service", "create", "--detach=true", "--name", name2, "--replicas=3", "busybox", "top") + out, err = d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--name", name2, "--replicas=3", "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") id2 := strings.TrimSpace(out) @@ -1729,7 +1760,7 @@ func (s *DockerSwarmSuite) TestSwarmServicePsMultipleServiceIDs(c *check.C) { func (s *DockerSwarmSuite) TestSwarmPublishDuplicatePorts(c *check.C) { d := s.AddDaemon(c, true, true) - out, err := d.Cmd("service", "create", "--detach=true", "--publish", "5005:80", "--publish", "5006:80", "--publish", "80", "--publish", "80", "busybox", "top") + out, err := 
d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--publish", "5005:80", "--publish", "5006:80", "--publish", "80", "--publish", "80", "busybox", "top") c.Assert(err, check.IsNil, check.Commentf(out)) id := strings.TrimSpace(out) @@ -1789,7 +1820,7 @@ func (s *DockerSwarmSuite) TestSwarmReadonlyRootfs(c *check.C) { d := s.AddDaemon(c, true, true) - out, err := d.Cmd("service", "create", "--name", "top", "--read-only", "busybox", "top") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "top", "--read-only", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) // make sure task has been deployed. @@ -1856,7 +1887,7 @@ func (s *DockerSwarmSuite) TestNetworkInspectWithDuplicateNames(c *check.C) { out, err = d.Cmd("network", "rm", n2.ID) c.Assert(err, checker.IsNil, check.Commentf(out)) - // Dupliates with name but with different driver + // Duplicates with name but with different driver networkCreateRequest.NetworkCreate.Driver = "overlay" status, body, err = d.SockRequest("POST", "/networks/create", networkCreateRequest) @@ -1884,7 +1915,7 @@ func (s *DockerSwarmSuite) TestSwarmStopSignal(c *check.C) { d := s.AddDaemon(c, true, true) - out, err := d.Cmd("service", "create", "--name", "top", "--stop-signal=SIGHUP", "busybox", "top") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "top", "--stop-signal=SIGHUP", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) // make sure task has been deployed. 
@@ -1910,11 +1941,11 @@ func (s *DockerSwarmSuite) TestSwarmStopSignal(c *check.C) { func (s *DockerSwarmSuite) TestSwarmServiceLsFilterMode(c *check.C) { d := s.AddDaemon(c, true, true) - out, err := d.Cmd("service", "create", "--name", "top1", "busybox", "top") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "top1", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - out, err = d.Cmd("service", "create", "--name", "top2", "--mode=global", "busybox", "top") + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", "top2", "--mode=global", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") @@ -1937,3 +1968,238 @@ func (s *DockerSwarmSuite) TestSwarmServiceLsFilterMode(c *check.C) { c.Assert(out, checker.Contains, "top1") c.Assert(out, checker.Not(checker.Contains), "top2") } + +func (s *DockerSwarmSuite) TestSwarmInitUnspecifiedDataPathAddr(c *check.C) { + d := s.AddDaemon(c, false, false) + + out, err := d.Cmd("swarm", "init", "--data-path-addr", "0.0.0.0") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "data path address must be a non-zero IP") + + out, err = d.Cmd("swarm", "init", "--data-path-addr", "0.0.0.0:2000") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "data path address must be a non-zero IP") +} + +func (s *DockerSwarmSuite) TestSwarmJoinLeave(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("swarm", "join-token", "-q", "worker") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + token := strings.TrimSpace(out) + + // Verify that back to back join/leave does not cause panics + d1 := s.AddDaemon(c, false, false) + for i := 0; i < 10; i++ { + out, err = d1.Cmd("swarm", "join", "--token", token, d.ListenAddr) + c.Assert(err, checker.IsNil) + 
c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + _, err = d1.Cmd("swarm", "leave") + c.Assert(err, checker.IsNil) + } +} + +const defaultRetryCount = 10 + +func waitForEvent(c *check.C, d *daemon.Swarm, since string, filter string, event string, retry int) string { + if retry < 1 { + c.Fatalf("retry count %d is invalid. It should be no less than 1", retry) + return "" + } + var out string + for i := 0; i < retry; i++ { + until := daemonUnixTime(c) + var err error + if len(filter) > 0 { + out, err = d.Cmd("events", "--since", since, "--until", until, filter) + } else { + out, err = d.Cmd("events", "--since", since, "--until", until) + } + c.Assert(err, checker.IsNil, check.Commentf(out)) + if strings.Contains(out, event) { + return strings.TrimSpace(out) + } + // no need to sleep after last retry + if i < retry-1 { + time.Sleep(200 * time.Millisecond) + } + } + c.Fatalf("docker events output '%s' doesn't contain event '%s'", out, event) + return "" +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsSource(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, false) + + // create a network + out, err := d1.Cmd("network", "create", "--attachable", "-d", "overlay", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + networkID := strings.TrimSpace(out) + c.Assert(networkID, checker.Not(checker.Equals), "") + + // d1, d2 are managers that can get swarm events + waitForEvent(c, d1, "0", "-f scope=swarm", "network create "+networkID, defaultRetryCount) + waitForEvent(c, d2, "0", "-f scope=swarm", "network create "+networkID, defaultRetryCount) + + // d3 is a worker, not able to get cluster events + out = waitForEvent(c, d3, "0", "-f scope=swarm", "", 1) + c.Assert(out, checker.Not(checker.Contains), "network create ") +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsScope(c *check.C) { + d := s.AddDaemon(c, true, true) + + // create a service + out, err := d.Cmd("service", 
"create", "--no-resolve-image", "--name", "test", "--detach=false", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + serviceID := strings.Split(out, "\n")[0] + + // scope swarm filters cluster events + out = waitForEvent(c, d, "0", "-f scope=swarm", "service create "+serviceID, defaultRetryCount) + c.Assert(out, checker.Not(checker.Contains), "container create ") + + // all events are returned if scope is not specified + waitForEvent(c, d, "0", "", "service create "+serviceID, 1) + waitForEvent(c, d, "0", "", "container create ", defaultRetryCount) + + // scope local only shows non-cluster events + out = waitForEvent(c, d, "0", "-f scope=local", "container create ", 1) + c.Assert(out, checker.Not(checker.Contains), "service create ") +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsType(c *check.C) { + d := s.AddDaemon(c, true, true) + + // create a service + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "test", "--detach=false", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + serviceID := strings.Split(out, "\n")[0] + + // create a network + out, err = d.Cmd("network", "create", "--attachable", "-d", "overlay", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + networkID := strings.TrimSpace(out) + c.Assert(networkID, checker.Not(checker.Equals), "") + + // filter by service + out = waitForEvent(c, d, "0", "-f type=service", "service create "+serviceID, defaultRetryCount) + c.Assert(out, checker.Not(checker.Contains), "network create") + + // filter by network + out = waitForEvent(c, d, "0", "-f type=network", "network create "+networkID, defaultRetryCount) + c.Assert(out, checker.Not(checker.Contains), "service create") +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsService(c *check.C) { + d := s.AddDaemon(c, true, true) + + // create a service + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "test", "--detach=false", "busybox", "top") + 
c.Assert(err, checker.IsNil, check.Commentf(out)) + serviceID := strings.Split(out, "\n")[0] + + // validate service create event + waitForEvent(c, d, "0", "-f scope=swarm", "service create "+serviceID, defaultRetryCount) + + t1 := daemonUnixTime(c) + out, err = d.Cmd("service", "update", "--force", "--detach=false", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // wait for service update start + out = waitForEvent(c, d, t1, "-f scope=swarm", "service update "+serviceID, defaultRetryCount) + c.Assert(out, checker.Contains, "updatestate.new=updating") + + // allow service update complete. This is a service with 1 instance + time.Sleep(400 * time.Millisecond) + out = waitForEvent(c, d, t1, "-f scope=swarm", "service update "+serviceID, defaultRetryCount) + c.Assert(out, checker.Contains, "updatestate.new=completed, updatestate.old=updating") + + // scale service + t2 := daemonUnixTime(c) + out, err = d.Cmd("service", "scale", "test=3") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out = waitForEvent(c, d, t2, "-f scope=swarm", "service update "+serviceID, defaultRetryCount) + c.Assert(out, checker.Contains, "replicas.new=3, replicas.old=1") + + // remove service + t3 := daemonUnixTime(c) + out, err = d.Cmd("service", "rm", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + waitForEvent(c, d, t3, "-f scope=swarm", "service remove "+serviceID, defaultRetryCount) +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsNode(c *check.C) { + d1 := s.AddDaemon(c, true, true) + s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, true) + + d3ID := d3.NodeID + waitForEvent(c, d1, "0", "-f scope=swarm", "node create "+d3ID, defaultRetryCount) + + t1 := daemonUnixTime(c) + out, err := d1.Cmd("node", "update", "--availability=pause", d3ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // filter by type + out = waitForEvent(c, d1, t1, "-f type=node", "node update "+d3ID, defaultRetryCount) + c.Assert(out, checker.Contains, 
"availability.new=pause, availability.old=active") + + t2 := daemonUnixTime(c) + out, err = d1.Cmd("node", "demote", d3ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + waitForEvent(c, d1, t2, "-f type=node", "node update "+d3ID, defaultRetryCount) + + t3 := daemonUnixTime(c) + out, err = d1.Cmd("node", "rm", "-f", d3ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // filter by scope + waitForEvent(c, d1, t3, "-f scope=swarm", "node remove "+d3ID, defaultRetryCount) +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsNetwork(c *check.C) { + d := s.AddDaemon(c, true, true) + + // create a network + out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + networkID := strings.TrimSpace(out) + + waitForEvent(c, d, "0", "-f scope=swarm", "network create "+networkID, defaultRetryCount) + + // remove network + t1 := daemonUnixTime(c) + out, err = d.Cmd("network", "rm", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // filtered by network + waitForEvent(c, d, t1, "-f type=network", "network remove "+networkID, defaultRetryCount) +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsSecret(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + waitForEvent(c, d, "0", "-f scope=swarm", "secret create "+id, defaultRetryCount) + + t1 := daemonUnixTime(c) + d.DeleteSecret(c, id) + // filtered by secret + waitForEvent(c, d, t1, "-f type=secret", "secret remove "+id, defaultRetryCount) +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_unix_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_unix_test.go index be508a19c..cffabcc2a 100644 --- 
a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_unix_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_unix_test.go @@ -15,7 +15,7 @@ import ( func (s *DockerSwarmSuite) TestSwarmVolumePlugin(c *check.C) { d := s.AddDaemon(c, true, true) - out, err := d.Cmd("service", "create", "--mount", "type=volume,source=my-volume,destination=/foo,volume-driver=customvolumedriver", "--name", "top", "busybox", "top") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--mount", "type=volume,source=my-volume,destination=/foo,volume-driver=customvolumedriver", "--name", "top", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) // Make sure task stays pending before plugin is available @@ -74,7 +74,7 @@ func (s *DockerSwarmSuite) TestSwarmNetworkPluginV2(c *check.C) { // create a global service to ensure that both nodes will have an instance serviceName := "my-service" - _, err = d1.Cmd("service", "create", "--name", serviceName, "--mode=global", "--network", networkName, "busybox", "top") + _, err = d1.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--mode=global", "--network", networkName, "busybox", "top") c.Assert(err, checker.IsNil) // wait for tasks ready @@ -96,7 +96,7 @@ func (s *DockerSwarmSuite) TestSwarmNetworkPluginV2(c *check.C) { image := "busybox" // create a new global service again. 
- _, err = d1.Cmd("service", "create", "--name", serviceName, "--mode=global", "--network", networkName, image, "top") + _, err = d1.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--mode=global", "--network", networkName, image, "top") c.Assert(err, checker.IsNil) waitAndAssert(c, defaultReconciliationTimeout, d1.CheckRunningTaskImages, checker.DeepEquals, diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_top_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_top_test.go index c310a3c7a..ea32fc672 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_top_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_top_test.go @@ -9,7 +9,7 @@ import ( ) func (s *DockerSuite) TestTopMultipleArgs(c *check.C) { - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") cleanedContainerID := strings.TrimSpace(out) var expected icmd.Expected @@ -24,7 +24,7 @@ func (s *DockerSuite) TestTopMultipleArgs(c *check.C) { } func (s *DockerSuite) TestTopNonPrivileged(c *check.C) { - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") cleanedContainerID := strings.TrimSpace(out) out1, _ := dockerCmd(c, "top", cleanedContainerID) @@ -49,7 +49,7 @@ func (s *DockerSuite) TestTopNonPrivileged(c *check.C) { // very different to Linux in this regard. 
func (s *DockerSuite) TestTopWindowsCoreProcesses(c *check.C) { testRequires(c, DaemonIsWindows) - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") cleanedContainerID := strings.TrimSpace(out) out1, _ := dockerCmd(c, "top", cleanedContainerID) lookingFor := []string{"smss.exe", "csrss.exe", "wininit.exe", "services.exe", "lsass.exe", "CExecSvc.exe"} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_update_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_update_test.go index d2c65b8b9..c898690c5 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_update_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_update_test.go @@ -5,11 +5,13 @@ import ( "time" "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + icmd "github.com/docker/docker/pkg/testutil/cmd" "github.com/go-check/check" ) func (s *DockerSuite) TestUpdateRestartPolicy(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "sh", "-c", "sleep 1 && false") + out := cli.DockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "sh", "-c", "sleep 1 && false").Combined() timeout := 60 * time.Second if testEnv.DaemonPlatform() == "windows" { timeout = 180 * time.Second @@ -18,10 +20,9 @@ func (s *DockerSuite) TestUpdateRestartPolicy(c *check.C) { id := strings.TrimSpace(string(out)) // update restart policy to on-failure:5 - dockerCmd(c, "update", "--restart=on-failure:5", id) + cli.DockerCmd(c, "update", "--restart=on-failure:5", id) - err := waitExited(id, timeout) - c.Assert(err, checker.IsNil) + cli.WaitExited(c, id, timeout) count := inspectField(c, id, "RestartCount") c.Assert(count, checker.Equals, "5") @@ -31,11 +32,12 @@ func (s *DockerSuite) TestUpdateRestartPolicy(c *check.C) { } func (s *DockerSuite) TestUpdateRestartWithAutoRemoveFlag(c *check.C) { - out, _ := runSleepingContainer(c, "--rm") 
+ out := runSleepingContainer(c, "--rm") id := strings.TrimSpace(out) // update restart policy for an AutoRemove container - out, _, err := dockerCmdWithError("update", "--restart=always", id) - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, "Restart policy cannot be updated because AutoRemove is enabled for the container") + cli.Docker(cli.Args("update", "--restart=always", id)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Restart policy cannot be updated because AutoRemove is enabled for the container", + }) } diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_v2_only_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_v2_only_test.go index 77974c4ef..b82cdbde1 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_v2_only_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_v2_only_test.go @@ -10,31 +10,19 @@ import ( "github.com/go-check/check" ) -func makefile(contents string) (string, func(), error) { - cleanup := func() { - - } - - f, err := ioutil.TempFile(".", "tmp") +func makefile(path string, contents string) (string, error) { + f, err := ioutil.TempFile(path, "tmp") if err != nil { - return "", cleanup, err + return "", err } err = ioutil.WriteFile(f.Name(), []byte(contents), os.ModePerm) if err != nil { - return "", cleanup, err + return "", err } - - cleanup = func() { - err := os.Remove(f.Name()) - if err != nil { - fmt.Println("Error removing tmpfile") - } - } - return f.Name(), cleanup, nil - + return f.Name(), nil } -// TestV2Only ensures that a daemon in v2-only mode does not +// TestV2Only ensures that a daemon by default does not // attempt to contact any v1 registry endpoints. 
func (s *DockerRegistrySuite) TestV2Only(c *check.C) { reg, err := registry.NewMock(c) @@ -51,22 +39,25 @@ func (s *DockerRegistrySuite) TestV2Only(c *check.C) { repoName := fmt.Sprintf("%s/busybox", reg.URL()) - s.d.Start(c, "--insecure-registry", reg.URL(), "--disable-legacy-registry=true") + s.d.Start(c, "--insecure-registry", reg.URL()) - dockerfileName, cleanup, err := makefile(fmt.Sprintf("FROM %s/busybox", reg.URL())) + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmp) + + dockerfileName, err := makefile(tmp, fmt.Sprintf("FROM %s/busybox", reg.URL())) c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) - defer cleanup() - s.d.Cmd("build", "--file", dockerfileName, ".") + s.d.Cmd("build", "--file", dockerfileName, tmp) s.d.Cmd("run", repoName) - s.d.Cmd("login", "-u", "richard", "-p", "testtest", "-e", "testuser@testdomain.com", reg.URL()) + s.d.Cmd("login", "-u", "richard", "-p", "testtest", reg.URL()) s.d.Cmd("tag", "busybox", repoName) s.d.Cmd("push", repoName) s.d.Cmd("pull", repoName) } -// TestV1 starts a daemon in 'normal' mode +// TestV1 starts a daemon with legacy registries enabled // and ensure v1 endpoints are hit for the following operations: // login, push, pull, build & run func (s *DockerRegistrySuite) TestV1(c *check.C) { @@ -102,11 +93,14 @@ func (s *DockerRegistrySuite) TestV1(c *check.C) { s.d.Start(c, "--insecure-registry", reg.URL(), "--disable-legacy-registry=false") - dockerfileName, cleanup, err := makefile(fmt.Sprintf("FROM %s/busybox", reg.URL())) - c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) - defer cleanup() + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmp) - s.d.Cmd("build", "--file", dockerfileName, ".") + dockerfileName, err := makefile(tmp, fmt.Sprintf("FROM %s/busybox", reg.URL())) + c.Assert(err, check.IsNil, check.Commentf("Unable to create test 
dockerfile")) + + s.d.Cmd("build", "--file", dockerfileName, tmp) c.Assert(v1Repo, check.Equals, 1, check.Commentf("Expected v1 repository access after build")) repoName := fmt.Sprintf("%s/busybox", reg.URL()) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_volume_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_volume_test.go index e8c837c9c..e0bf7cafe 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_volume_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_cli_volume_test.go @@ -34,7 +34,7 @@ func (s *DockerSuite) TestVolumeCLICreate(c *check.C) { func (s *DockerSuite) TestVolumeCLIInspect(c *check.C) { c.Assert( - exec.Command(dockerBinary, "volume", "inspect", "doesntexist").Run(), + exec.Command(dockerBinary, "volume", "inspect", "doesnotexist").Run(), check.Not(check.IsNil), check.Commentf("volume inspect should error on non-existent volume"), ) @@ -54,10 +54,10 @@ func (s *DockerSuite) TestVolumeCLIInspectMulti(c *check.C) { dockerCmd(c, "volume", "create", "test2") dockerCmd(c, "volume", "create", "test3") - result := dockerCmdWithResult("volume", "inspect", "--format={{ .Name }}", "test1", "test2", "doesntexist", "test3") + result := dockerCmdWithResult("volume", "inspect", "--format={{ .Name }}", "test1", "test2", "doesnotexist", "test3") c.Assert(result, icmd.Matches, icmd.Expected{ ExitCode: 1, - Err: "No such volume: doesntexist", + Err: "No such volume: doesnotexist", }) out := result.Stdout() @@ -185,7 +185,7 @@ func (s *DockerSuite) TestVolumeCLILsFilterDangling(c *check.C) { out, _ = dockerCmd(c, "volume", "ls", "--filter", "name=testisin") c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) - c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("execpeted volume 'testisinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected 
volume 'testisinuse1' in output")) c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) } @@ -234,7 +234,7 @@ func (s *DockerSuite) TestVolumeCLIRm(c *check.C) { dockerCmd(c, "volume", "rm", volumeID) c.Assert( - exec.Command("volume", "rm", "doesntexist").Run(), + exec.Command("volume", "rm", "doesnotexist").Run(), check.Not(check.IsNil), check.Commentf("volume rm should fail with non-existent volume"), ) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_experimental_network_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_experimental_network_test.go index 1240392ca..f352050d3 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_experimental_network_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_experimental_network_test.go @@ -155,7 +155,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkMacvlanMultiSubnet(c *check.C) { _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", strings.TrimSpace(ip)) c.Assert(err, check.IsNil) // verify ipv6 connectivity to the explicit --ipv6 address second to first - c.Skip("Temporarily skipping while invesitigating sporadic v6 CI issues") + c.Skip("Temporarily skipping while investigating sporadic v6 CI issues") _, _, err = dockerCmdWithError("exec", "second", "ping6", "-c", "1", strings.TrimSpace(ip6)) c.Assert(err, check.IsNil) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/docker_utils_test.go b/fn/vendor/github.com/docker/docker/integration-cli/docker_utils_test.go index 547aa181e..1488c93b4 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/docker_utils_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/docker_utils_test.go @@ -1,18 +1,13 @@ package main import ( - "bytes" "encoding/json" "errors" "fmt" "io" "io/ioutil" - "net" "net/http" - "net/http/httptest" - "net/url" "os" - "os/exec" "path" "path/filepath" "strconv" @@ -22,11 
+17,9 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/integration-cli/cli" - "github.com/docker/docker/integration-cli/cli/build" "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/integration-cli/registry" "github.com/docker/docker/integration-cli/request" - "github.com/docker/docker/pkg/stringutils" icmd "github.com/docker/docker/pkg/testutil/cmd" "github.com/go-check/check" ) @@ -36,44 +29,6 @@ func daemonHost() string { return request.DaemonHost() } -// FIXME(vdemeester) move this away are remove ignoreNoSuchContainer bool -func deleteContainer(container ...string) error { - return icmd.RunCommand(dockerBinary, append([]string{"rm", "-fv"}, container...)...).Compare(icmd.Success) -} - -func getAllContainers(c *check.C) string { - result := icmd.RunCommand(dockerBinary, "ps", "-q", "-a") - result.Assert(c, icmd.Success) - return result.Combined() -} - -// Deprecated -func deleteAllContainers(c *check.C) { - containers := getAllContainers(c) - if containers != "" { - err := deleteContainer(strings.Split(strings.TrimSpace(containers), "\n")...) 
- c.Assert(err, checker.IsNil) - } -} - -func getPausedContainers(c *check.C) []string { - result := icmd.RunCommand(dockerBinary, "ps", "-f", "status=paused", "-q", "-a") - result.Assert(c, icmd.Success) - return strings.Fields(result.Combined()) -} - -func unpauseContainer(c *check.C, container string) { - dockerCmd(c, "unpause", container) -} - -// Deprecated -func unpauseAllContainers(c *check.C) { - containers := getPausedContainers(c) - for _, value := range containers { - unpauseContainer(c, value) - } -} - func deleteImages(images ...string) error { args := []string{dockerBinary, "rmi", "-f"} return icmd.RunCmd(icmd.Cmd{Command: append(args, images...)}).Error @@ -124,212 +79,6 @@ func getContainerCount(c *check.C) int { return 0 } -// FakeContext creates directories that can be used as a build context -type FakeContext struct { - Dir string -} - -// Add a file at a path, creating directories where necessary -func (f *FakeContext) Add(file, content string) error { - return f.addFile(file, []byte(content)) -} - -func (f *FakeContext) addFile(file string, content []byte) error { - fp := filepath.Join(f.Dir, filepath.FromSlash(file)) - dirpath := filepath.Dir(fp) - if dirpath != "." 
{ - if err := os.MkdirAll(dirpath, 0755); err != nil { - return err - } - } - return ioutil.WriteFile(fp, content, 0644) - -} - -// Delete a file at a path -func (f *FakeContext) Delete(file string) error { - fp := filepath.Join(f.Dir, filepath.FromSlash(file)) - return os.RemoveAll(fp) -} - -// Close deletes the context -func (f *FakeContext) Close() error { - return os.RemoveAll(f.Dir) -} - -func fakeContextFromNewTempDir(c *check.C) *FakeContext { - tmp, err := ioutil.TempDir("", "fake-context") - c.Assert(err, checker.IsNil) - if err := os.Chmod(tmp, 0755); err != nil { - c.Fatal(err) - } - return fakeContextFromDir(tmp) -} - -func fakeContextFromDir(dir string) *FakeContext { - return &FakeContext{dir} -} - -func fakeContextWithFiles(c *check.C, files map[string]string) *FakeContext { - ctx := fakeContextFromNewTempDir(c) - for file, content := range files { - if err := ctx.Add(file, content); err != nil { - ctx.Close() - c.Fatal(err) - } - } - return ctx -} - -func fakeContextAddDockerfile(c *check.C, ctx *FakeContext, dockerfile string) { - if err := ctx.Add("Dockerfile", dockerfile); err != nil { - ctx.Close() - c.Fatal(err) - } -} - -func fakeContext(c *check.C, dockerfile string, files map[string]string) *FakeContext { - ctx := fakeContextWithFiles(c, files) - fakeContextAddDockerfile(c, ctx, dockerfile) - return ctx -} - -// FakeStorage is a static file server. It might be running locally or remotely -// on test host. 
-type FakeStorage interface { - Close() error - URL() string - CtxDir() string -} - -func fakeBinaryStorage(c *check.C, archives map[string]*bytes.Buffer) FakeStorage { - ctx := fakeContextFromNewTempDir(c) - for name, content := range archives { - if err := ctx.addFile(name, content.Bytes()); err != nil { - c.Fatal(err) - } - } - return fakeStorageWithContext(c, ctx) -} - -// fakeStorage returns either a local or remote (at daemon machine) file server -func fakeStorage(c *check.C, files map[string]string) FakeStorage { - ctx := fakeContextWithFiles(c, files) - return fakeStorageWithContext(c, ctx) -} - -// fakeStorageWithContext returns either a local or remote (at daemon machine) file server -func fakeStorageWithContext(c *check.C, ctx *FakeContext) FakeStorage { - if testEnv.LocalDaemon() { - return newLocalFakeStorage(c, ctx) - } - return newRemoteFileServer(c, ctx) -} - -// localFileStorage is a file storage on the running machine -type localFileStorage struct { - *FakeContext - *httptest.Server -} - -func (s *localFileStorage) URL() string { - return s.Server.URL -} - -func (s *localFileStorage) CtxDir() string { - return s.FakeContext.Dir -} - -func (s *localFileStorage) Close() error { - defer s.Server.Close() - return s.FakeContext.Close() -} - -func newLocalFakeStorage(c *check.C, ctx *FakeContext) *localFileStorage { - handler := http.FileServer(http.Dir(ctx.Dir)) - server := httptest.NewServer(handler) - return &localFileStorage{ - FakeContext: ctx, - Server: server, - } -} - -// remoteFileServer is a containerized static file server started on the remote -// testing machine to be used in URL-accepting docker build functionality. -type remoteFileServer struct { - host string // hostname/port web server is listening to on docker host e.g. 
0.0.0.0:43712 - container string - image string - ctx *FakeContext -} - -func (f *remoteFileServer) URL() string { - u := url.URL{ - Scheme: "http", - Host: f.host} - return u.String() -} - -func (f *remoteFileServer) CtxDir() string { - return f.ctx.Dir -} - -func (f *remoteFileServer) Close() error { - defer func() { - if f.ctx != nil { - f.ctx.Close() - } - if f.image != "" { - deleteImages(f.image) - } - }() - if f.container == "" { - return nil - } - return deleteContainer(f.container) -} - -func newRemoteFileServer(c *check.C, ctx *FakeContext) *remoteFileServer { - var ( - image = fmt.Sprintf("fileserver-img-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10))) - container = fmt.Sprintf("fileserver-cnt-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10))) - ) - - ensureHTTPServerImage(c) - - // Build the image - fakeContextAddDockerfile(c, ctx, `FROM httpserver -COPY . /static`) - buildImageSuccessfully(c, image, build.WithoutCache, withExternalBuildContext(ctx)) - - // Start the container - dockerCmd(c, "run", "-d", "-P", "--name", container, image) - - // Find out the system assigned port - out, _ := dockerCmd(c, "port", container, "80/tcp") - fileserverHostPort := strings.Trim(out, "\n") - _, port, err := net.SplitHostPort(fileserverHostPort) - if err != nil { - c.Fatalf("unable to parse file server host:port: %v", err) - } - - dockerHostURL, err := url.Parse(daemonHost()) - if err != nil { - c.Fatalf("unable to parse daemon host URL: %v", err) - } - - host, _, err := net.SplitHostPort(dockerHostURL.Host) - if err != nil { - c.Fatalf("unable to parse docker daemon host:port: %v", err) - } - - return &remoteFileServer{ - container: container, - image: image, - host: fmt.Sprintf("%s:%s", host, port), - ctx: ctx} -} - func inspectFieldAndUnmarshall(c *check.C, name, field string, output interface{}) { str := inspectFieldJSON(c, name, field) err := json.Unmarshal([]byte(str), output) @@ -452,145 +201,12 @@ func buildImage(name 
string, cmdOperators ...cli.CmdOperator) *icmd.Result { return cli.Docker(cli.Build(name), cmdOperators...) } -func withExternalBuildContext(ctx *FakeContext) func(*icmd.Cmd) func() { - return func(cmd *icmd.Cmd) func() { - cmd.Dir = ctx.Dir - cmd.Command = append(cmd.Command, ".") - return nil - } -} - -func withBuildContext(c *check.C, contextOperators ...func(*FakeContext) error) func(*icmd.Cmd) func() { - ctx := fakeContextFromNewTempDir(c) - for _, op := range contextOperators { - if err := op(ctx); err != nil { - c.Fatal(err) - } - } - return func(cmd *icmd.Cmd) func() { - cmd.Dir = ctx.Dir - cmd.Command = append(cmd.Command, ".") - return closeBuildContext(c, ctx) - } -} - -func withFile(name, content string) func(*FakeContext) error { - return func(ctx *FakeContext) error { - return ctx.Add(name, content) - } -} - -func closeBuildContext(c *check.C, ctx *FakeContext) func() { - return func() { - if err := ctx.Close(); err != nil { - c.Fatal(err) - } - } -} - +// Deprecated: use trustedcmd func trustedBuild(cmd *icmd.Cmd) func() { trustedCmd(cmd) return nil } -type gitServer interface { - URL() string - Close() error -} - -type localGitServer struct { - *httptest.Server -} - -func (r *localGitServer) Close() error { - r.Server.Close() - return nil -} - -func (r *localGitServer) URL() string { - return r.Server.URL -} - -type fakeGit struct { - root string - server gitServer - RepoURL string -} - -func (g *fakeGit) Close() { - g.server.Close() - os.RemoveAll(g.root) -} - -func newFakeGit(c *check.C, name string, files map[string]string, enforceLocalServer bool) *fakeGit { - ctx := fakeContextWithFiles(c, files) - defer ctx.Close() - curdir, err := os.Getwd() - if err != nil { - c.Fatal(err) - } - defer os.Chdir(curdir) - - if output, err := exec.Command("git", "init", ctx.Dir).CombinedOutput(); err != nil { - c.Fatalf("error trying to init repo: %s (%s)", err, output) - } - err = os.Chdir(ctx.Dir) - if err != nil { - c.Fatal(err) - } - if output, err := 
exec.Command("git", "config", "user.name", "Fake User").CombinedOutput(); err != nil { - c.Fatalf("error trying to set 'user.name': %s (%s)", err, output) - } - if output, err := exec.Command("git", "config", "user.email", "fake.user@example.com").CombinedOutput(); err != nil { - c.Fatalf("error trying to set 'user.email': %s (%s)", err, output) - } - if output, err := exec.Command("git", "add", "*").CombinedOutput(); err != nil { - c.Fatalf("error trying to add files to repo: %s (%s)", err, output) - } - if output, err := exec.Command("git", "commit", "-a", "-m", "Initial commit").CombinedOutput(); err != nil { - c.Fatalf("error trying to commit to repo: %s (%s)", err, output) - } - - root, err := ioutil.TempDir("", "docker-test-git-repo") - if err != nil { - c.Fatal(err) - } - repoPath := filepath.Join(root, name+".git") - if output, err := exec.Command("git", "clone", "--bare", ctx.Dir, repoPath).CombinedOutput(); err != nil { - os.RemoveAll(root) - c.Fatalf("error trying to clone --bare: %s (%s)", err, output) - } - err = os.Chdir(repoPath) - if err != nil { - os.RemoveAll(root) - c.Fatal(err) - } - if output, err := exec.Command("git", "update-server-info").CombinedOutput(); err != nil { - os.RemoveAll(root) - c.Fatalf("error trying to git update-server-info: %s (%s)", err, output) - } - err = os.Chdir(curdir) - if err != nil { - os.RemoveAll(root) - c.Fatal(err) - } - - var server gitServer - if !enforceLocalServer { - // use fakeStorage server, which might be local or remote (at test daemon) - server = fakeStorageWithContext(c, fakeContextFromDir(root)) - } else { - // always start a local http server on CLI test machine - httpServer := httptest.NewServer(http.FileServer(http.Dir(root))) - server = &localGitServer{httpServer} - } - return &fakeGit{ - root: root, - server: server, - RepoURL: fmt.Sprintf("%s/%s.git", server.URL(), name), - } -} - // Write `content` to the file at path `dst`, creating it if necessary, // as well as any missing directories. 
// The file is truncated if it already exists. @@ -740,38 +356,21 @@ func createTmpFile(c *check.C, content string) string { return filename } -func waitForContainer(contID string, args ...string) error { - args = append([]string{dockerBinary, "run", "--name", contID}, args...) - result := icmd.RunCmd(icmd.Cmd{Command: args}) - if result.Error != nil { - return result.Error - } - return waitRun(contID) -} - -// waitRestart will wait for the specified container to restart once -func waitRestart(contID string, duration time.Duration) error { - return waitInspect(contID, "{{.RestartCount}}", "1", duration) -} - // waitRun will wait for the specified container to be running, maximum 5 seconds. +// Deprecated: use cli.WaitFor func waitRun(contID string) error { return waitInspect(contID, "{{.State.Running}}", "true", 5*time.Second) } -// waitExited will wait for the specified container to state exit, subject -// to a maximum time limit in seconds supplied by the caller -func waitExited(contID string, duration time.Duration) error { - return waitInspect(contID, "{{.State.Status}}", "exited", duration) -} - // waitInspect will wait for the specified container to have the specified string // in the inspect output. It will wait until the specified timeout (in seconds) // is reached. +// Deprecated: use cli.WaitFor func waitInspect(name, expr, expected string, timeout time.Duration) error { return waitInspectWithArgs(name, expr, expected, timeout) } +// Deprecated: use cli.WaitFor func waitInspectWithArgs(name, expr, expected string, timeout time.Duration, arg ...string) error { return daemon.WaitInspectWithArgs(dockerBinary, name, expr, expected, timeout, arg...) } @@ -786,18 +385,18 @@ func getInspectBody(c *check.C, version, id string) []byte { // Run a long running idle task in a background container using the // system-specific default image and command. 
-func runSleepingContainer(c *check.C, extraArgs ...string) (string, int) { +func runSleepingContainer(c *check.C, extraArgs ...string) string { return runSleepingContainerInImage(c, defaultSleepImage, extraArgs...) } // Run a long running idle task in a background container using the specified // image and the system-specific command. -func runSleepingContainerInImage(c *check.C, image string, extraArgs ...string) (string, int) { +func runSleepingContainerInImage(c *check.C, image string, extraArgs ...string) string { args := []string{"run", "-d"} args = append(args, extraArgs...) args = append(args, image) args = append(args, sleepCommandForDaemonPlatform()...) - return dockerCmd(c, args...) + return cli.DockerCmd(c, args...).Combined() } // minimalBaseImage returns the name of the minimal base image for the current diff --git a/fn/vendor/github.com/docker/docker/integration-cli/environment/clean.go b/fn/vendor/github.com/docker/docker/integration-cli/environment/clean.go index b27838337..809baa7b5 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/environment/clean.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/environment/clean.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "net/http" + "regexp" "strings" "github.com/docker/docker/api/types" @@ -50,14 +51,17 @@ func getPausedContainers(t testingT, dockerBinary string) []string { return strings.Fields(result.Combined()) } +var alreadyExists = regexp.MustCompile(`Error response from daemon: removal of container (\w+) is already in progress`) + func deleteAllContainers(t testingT, dockerBinary string) { containers := getAllContainers(t, dockerBinary) if len(containers) > 0 { result := icmd.RunCommand(dockerBinary, append([]string{"rm", "-fv"}, containers...)...) if result.Error != nil { // If the error is "No such container: ..." this means the container doesn't exists anymore, - // we can safely ignore that one. 
- if strings.Contains(result.Stderr(), "No such container") { + // or if it is "... removal of container ... is already in progress" it will be removed eventually. + // We can safely ignore those. + if strings.Contains(result.Stderr(), "No such container") || alreadyExists.MatchString(result.Stderr()) { return } t.Fatalf("error removing containers %v : %v (%s)", containers, result.Error, result.Combined()) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/environment/environment.go b/fn/vendor/github.com/docker/docker/integration-cli/environment/environment.go index 7d2d04450..a8a104590 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/environment/environment.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/environment/environment.go @@ -16,11 +16,19 @@ import ( "golang.org/x/net/context" ) -const ( - // DefaultDockerBinary is the name of the docker binary - DefaultDockerBinary = "docker" +var ( + // DefaultClientBinary is the name of the docker binary + DefaultClientBinary = os.Getenv("TEST_CLIENT_BINARY") ) +func init() { + if DefaultClientBinary == "" { + // TODO: to be removed once we no longer depend on the docker cli for integration tests + //panic("TEST_CLIENT_BINARY must be set") + DefaultClientBinary = "docker" + } +} + // Execution holds informations about the test execution environment. 
type Execution struct { daemonPlatform string @@ -99,11 +107,7 @@ func New() (*Execution, error) { } } - var dockerBinary = DefaultDockerBinary - if dockerBin := os.Getenv("DOCKER_BINARY"); dockerBin != "" { - dockerBinary = dockerBin - } - dockerBinary, err = exec.LookPath(dockerBinary) + dockerBinary, err := exec.LookPath(DefaultClientBinary) if err != nil { return nil, err } diff --git a/fn/vendor/github.com/docker/docker/integration-cli/fixtures/plugin/basic/basic.go b/fn/vendor/github.com/docker/docker/integration-cli/fixtures/plugin/basic/basic.go new file mode 100644 index 000000000..892272826 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/integration-cli/fixtures/plugin/basic/basic.go @@ -0,0 +1,34 @@ +package main + +import ( + "fmt" + "net" + "net/http" + "os" + "path/filepath" +) + +func main() { + p, err := filepath.Abs(filepath.Join("run", "docker", "plugins")) + if err != nil { + panic(err) + } + if err := os.MkdirAll(p, 0755); err != nil { + panic(err) + } + l, err := net.Listen("unix", filepath.Join(p, "basic.sock")) + if err != nil { + panic(err) + } + + mux := http.NewServeMux() + server := http.Server{ + Addr: l.Addr().String(), + Handler: http.NewServeMux(), + } + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1.1+json") + fmt.Println(w, `{"Implements": ["dummy"]}`) + }) + server.Serve(l) +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/fixtures/plugin/plugin.go b/fn/vendor/github.com/docker/docker/integration-cli/fixtures/plugin/plugin.go new file mode 100644 index 000000000..1be616973 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/integration-cli/fixtures/plugin/plugin.go @@ -0,0 +1,183 @@ +package plugin + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/libcontainerd" + 
"github.com/docker/docker/pkg/archive" + "github.com/docker/docker/plugin" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// CreateOpt is is passed used to change the defualt plugin config before +// creating it +type CreateOpt func(*Config) + +// Config wraps types.PluginConfig to provide some extra state for options +// extra customizations on the plugin details, such as using a custom binary to +// create the plugin with. +type Config struct { + *types.PluginConfig + binPath string +} + +// WithBinary is a CreateOpt to set an custom binary to create the plugin with. +// This binary must be statically compiled. +func WithBinary(bin string) CreateOpt { + return func(cfg *Config) { + cfg.binPath = bin + } +} + +// CreateClient is the interface used for `BuildPlugin` to interact with the +// daemon. +type CreateClient interface { + PluginCreate(context.Context, io.Reader, types.PluginCreateOptions) error +} + +// Create creates a new plugin with the specified name +func Create(ctx context.Context, c CreateClient, name string, opts ...CreateOpt) error { + tmpDir, err := ioutil.TempDir("", "create-test-plugin") + if err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + tar, err := makePluginBundle(tmpDir, opts...) + if err != nil { + return err + } + defer tar.Close() + + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + return c.PluginCreate(ctx, tar, types.PluginCreateOptions{RepoName: name}) +} + +// TODO(@cpuguy83): we really shouldn't have to do this... +// The manager panics on init when `Executor` is not set. +type dummyExecutor struct{} + +func (dummyExecutor) Client(libcontainerd.Backend) (libcontainerd.Client, error) { return nil, nil } +func (dummyExecutor) Cleanup() {} +func (dummyExecutor) UpdateOptions(...libcontainerd.RemoteOption) error { return nil } + +// CreateInRegistry makes a plugin (locally) and pushes it to a registry. 
+// This does not use a dockerd instance to create or push the plugin. +// If you just want to create a plugin in some daemon, use `Create`. +// +// This can be useful when testing plugins on swarm where you don't really want +// the plugin to exist on any of the daemons (immediately) and there needs to be +// some way to distribute the plugin. +func CreateInRegistry(ctx context.Context, repo string, auth *types.AuthConfig, opts ...CreateOpt) error { + tmpDir, err := ioutil.TempDir("", "create-test-plugin-local") + if err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + inPath := filepath.Join(tmpDir, "plugin") + if err := os.MkdirAll(inPath, 0755); err != nil { + return errors.Wrap(err, "error creating plugin root") + } + + tar, err := makePluginBundle(inPath, opts...) + if err != nil { + return err + } + defer tar.Close() + + managerConfig := plugin.ManagerConfig{ + Store: plugin.NewStore(), + RegistryService: registry.NewService(registry.ServiceOptions{V2Only: true}), + Root: filepath.Join(tmpDir, "root"), + ExecRoot: "/run/docker", // manager init fails if not set + Executor: dummyExecutor{}, + LogPluginEvent: func(id, name, action string) {}, // panics when not set + } + manager, err := plugin.NewManager(managerConfig) + if err != nil { + return errors.Wrap(err, "error creating plugin manager") + } + + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + if err := manager.CreateFromContext(ctx, tar, &types.PluginCreateOptions{RepoName: repo}); err != nil { + return err + } + + if auth == nil { + auth = &types.AuthConfig{} + } + err = manager.Push(ctx, repo, nil, auth, ioutil.Discard) + return errors.Wrap(err, "error pushing plugin") +} + +func makePluginBundle(inPath string, opts ...CreateOpt) (io.ReadCloser, error) { + p := &types.PluginConfig{ + Interface: types.PluginConfigInterface{ + Socket: "basic.sock", + Types: []types.PluginInterfaceType{{Capability: "docker.dummy/1.0"}}, + }, + Entrypoint: []string{"/basic"}, + } + cfg 
:= &Config{ + PluginConfig: p, + } + for _, o := range opts { + o(cfg) + } + if cfg.binPath == "" { + binPath, err := ensureBasicPluginBin() + if err != nil { + return nil, err + } + cfg.binPath = binPath + } + + configJSON, err := json.Marshal(p) + if err != nil { + return nil, err + } + if err := ioutil.WriteFile(filepath.Join(inPath, "config.json"), configJSON, 0644); err != nil { + return nil, err + } + if err := os.MkdirAll(filepath.Join(inPath, "rootfs", filepath.Dir(p.Entrypoint[0])), 0755); err != nil { + return nil, errors.Wrap(err, "error creating plugin rootfs dir") + } + if err := archive.NewDefaultArchiver().CopyFileWithTar(cfg.binPath, filepath.Join(inPath, "rootfs", p.Entrypoint[0])); err != nil { + return nil, errors.Wrap(err, "error copying plugin binary to rootfs path") + } + tar, err := archive.Tar(inPath, archive.Uncompressed) + return tar, errors.Wrap(err, "error making plugin archive") +} + +func ensureBasicPluginBin() (string, error) { + name := "docker-basic-plugin" + p, err := exec.LookPath(name) + if err == nil { + return p, nil + } + + goBin, err := exec.LookPath("go") + if err != nil { + return "", err + } + installPath := filepath.Join(os.Getenv("GOPATH"), "bin", name) + cmd := exec.Command(goBin, "build", "-o", installPath, "./"+filepath.Join("fixtures", "plugin", "basic")) + cmd.Env = append(cmd.Env, "CGO_ENABLED=0") + if out, err := cmd.CombinedOutput(); err != nil { + return "", errors.Wrapf(err, "error building basic plugin bin: %s", string(out)) + } + return installPath, nil +} diff --git a/fn/vendor/github.com/docker/docker/integration-cli/fixtures_linux_daemon_test.go b/fn/vendor/github.com/docker/docker/integration-cli/fixtures_linux_daemon_test.go index 496851470..895f976a1 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/fixtures_linux_daemon_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/fixtures_linux_daemon_test.go @@ -60,7 +60,7 @@ func ensureSyscallTest(c *check.C) { gcc, err := 
exec.LookPath("gcc") c.Assert(err, checker.IsNil, check.Commentf("could not find gcc")) - tests := []string{"userns", "ns", "acct", "setuid", "setgid", "socket", "raw", "appletalk"} + tests := []string{"userns", "ns", "acct", "setuid", "setgid", "socket", "raw"} for _, test := range tests { out, err := exec.Command(gcc, "-g", "-Wall", "-static", fmt.Sprintf("../contrib/syscall-test/%s.c", test), "-o", fmt.Sprintf("%s/%s-test", tmp, test)).CombinedOutput() c.Assert(err, checker.IsNil, check.Commentf(string(out))) diff --git a/fn/vendor/github.com/docker/docker/integration-cli/request/request.go b/fn/vendor/github.com/docker/docker/integration-cli/request/request.go index cb0e39953..6f2bf650d 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/request/request.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/request/request.go @@ -17,6 +17,7 @@ import ( "strings" "time" + "github.com/docker/docker/api" dclient "github.com/docker/docker/client" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/ioutils" @@ -100,7 +101,7 @@ func DoOnHost(host, endpoint string, modifiers ...func(*http.Request) error) (*h if err != nil { return nil, nil, err } - client, err := NewClient(host) + client, err := NewHTTPClient(host) if err != nil { return nil, nil, err } @@ -140,8 +141,8 @@ func New(host, endpoint string, modifiers ...func(*http.Request) error) (*http.R return req, nil } -// NewClient creates an http client for the specific host -func NewClient(host string) (*http.Client, error) { +// NewHTTPClient creates an http client for the specific host +func NewHTTPClient(host string) (*http.Client, error) { // FIXME(vdemeester) 10*time.Second timeout of SockRequest… ? 
proto, addr, _, err := dclient.ParseHost(host) if err != nil { @@ -163,6 +164,16 @@ func NewClient(host string) (*http.Client, error) { }, err } +// NewClient returns a new Docker API client +func NewClient() (dclient.APIClient, error) { + host := DaemonHost() + httpClient, err := NewHTTPClient(host) + if err != nil { + return nil, err + } + return dclient.NewClient(host, api.DefaultVersion, httpClient, nil) +} + // FIXME(vdemeester) httputil.ClientConn is deprecated, use http.Client instead (closer to actual client) // Deprecated: Use New instead of NewRequestClient // Deprecated: use request.Do (or Get, Delete, Post) instead @@ -217,13 +228,14 @@ func SockRequestRaw(method, endpoint string, data io.Reader, ct, daemon string, } resp, err := client.Do(req) + if err != nil { + client.Close() + return resp, nil, err + } body := ioutils.NewReadCloserWrapper(resp.Body, func() error { defer resp.Body.Close() return client.Close() }) - if err != nil { - client.Close() - } return resp, body, err } diff --git a/fn/vendor/github.com/docker/docker/integration-cli/trust_server_test.go b/fn/vendor/github.com/docker/docker/integration-cli/trust_server_test.go index ced1f43f5..e3f0674cf 100644 --- a/fn/vendor/github.com/docker/docker/integration-cli/trust_server_test.go +++ b/fn/vendor/github.com/docker/docker/integration-cli/trust_server_test.go @@ -13,6 +13,7 @@ import ( cliconfig "github.com/docker/docker/cli/config" "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" icmd "github.com/docker/docker/pkg/testutil/cmd" "github.com/docker/go-connections/tlsconfig" "github.com/go-check/check" @@ -47,14 +48,6 @@ var SuccessDownloaded = icmd.Expected{ Out: "Status: Downloaded", } -var SuccessTaggingOnStderr = icmd.Expected{ - Err: "Tagging", -} - -var SuccessSigningAndPushingOnStderr = icmd.Expected{ - Err: "Signing and pushing trust metadata", -} - var SuccessDownloadedOnStderr = icmd.Expected{ Err: "Status: Downloaded", } @@ -190,27 
+183,24 @@ func (t *testNotary) Close() { os.RemoveAll(t.dir) } -// Deprecated: used trustedCmd instead -func trustedExecCmd(cmd *exec.Cmd) { +func trustedCmd(cmd *icmd.Cmd) func() { pwd := "12345678" cmd.Env = append(cmd.Env, trustEnv(notaryURL, pwd, pwd)...) + return nil } -func trustedCmd(cmd *icmd.Cmd) { - pwd := "12345678" - cmd.Env = append(cmd.Env, trustEnv(notaryURL, pwd, pwd)...) -} - -func trustedCmdWithServer(server string) func(*icmd.Cmd) { - return func(cmd *icmd.Cmd) { +func trustedCmdWithServer(server string) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { pwd := "12345678" cmd.Env = append(cmd.Env, trustEnv(server, pwd, pwd)...) + return nil } } -func trustedCmdWithPassphrases(rootPwd, repositoryPwd string) func(*icmd.Cmd) { - return func(cmd *icmd.Cmd) { +func trustedCmdWithPassphrases(rootPwd, repositoryPwd string) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { cmd.Env = append(cmd.Env, trustEnv(notaryURL, rootPwd, repositoryPwd)...) + return nil } } @@ -227,28 +217,18 @@ func trustEnv(server, rootPwd, repositoryPwd string) []string { func (s *DockerTrustSuite) setupTrustedImage(c *check.C, name string) string { repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, name) // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - icmd.RunCmd(icmd.Command(dockerBinary, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - - if out, status := dockerCmd(c, "rmi", repoName); status != 0 { - c.Fatalf("Error removing image %q\n%s", repoName, out) - } - + cli.DockerCmd(c, "tag", "busybox", repoName) + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.DockerCmd(c, "rmi", repoName) return repoName } func (s *DockerTrustSuite) setupTrustedplugin(c *check.C, source, name string) string { repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, name) // tag the image and upload it to the private 
registry - dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--alias", repoName, source) - - icmd.RunCmd(icmd.Command(dockerBinary, "plugin", "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - - if out, status := dockerCmd(c, "plugin", "rm", "-f", repoName); status != 0 { - c.Fatalf("Error removing plugin %q\n%s", repoName, out) - } - + cli.DockerCmd(c, "plugin", "install", "--grant-all-permissions", "--alias", repoName, source) + cli.Docker(cli.Args("plugin", "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.DockerCmd(c, "plugin", "rm", "-f", repoName) return repoName } diff --git a/fn/vendor/github.com/docker/docker/keys/launchpad-ppa-zfs.asc b/fn/vendor/github.com/docker/docker/keys/launchpad-ppa-zfs.asc deleted file mode 100644 index 1c5b4deb6..000000000 --- a/fn/vendor/github.com/docker/docker/keys/launchpad-ppa-zfs.asc +++ /dev/null @@ -1,13 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1 - -mI0ETjjRQwEEAN1t7LdXiXEDucAXemaXZphLeDSmUE2gHxj/b+Gqt1wRaCMAE1NU -rLOqTDNq8XPi4ZSp8Rr8R8jVupmKlt446ESGOadUO0AAjFyYe+YwZ65uYa69536k -T+PhcFepWm8YgJL1skn0u+qpHzMJLvLB6iyAP8fP5C19wjiY8TtpSEtLABEBAAG0 -JkxhdW5jaHBhZCBQUEEgZm9yIE5hdGl2ZSBaRlMgZm9yIExpbnV4iLgEEwECACIF -Ak440UMCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEBGWuoH2sPxh32cD -/2uniH9nyAKYI3/6X29pmRXcsuf1J+ZYqEnUIWT41ZBvNJHkbMiSgNC0lUvW4miq -LgHZrft2X3D1fUP6djnueTnFG/Rs/uVRCMU32YjmxW92nZc6StfNt35LT7CUd9xV -/6e3h5klln/xUsimOm9BcHglUXF7n8U39qw9JGV2sheo -=qkiU ------END PGP PUBLIC KEY BLOCK----- diff --git a/fn/vendor/github.com/docker/docker/layer/empty.go b/fn/vendor/github.com/docker/docker/layer/empty.go index 3b6ffc82f..cf04aa12f 100644 --- a/fn/vendor/github.com/docker/docker/layer/empty.go +++ b/fn/vendor/github.com/docker/docker/layer/empty.go @@ -54,3 +54,12 @@ func (el *emptyLayer) DiffSize() (size int64, err error) { func (el *emptyLayer) Metadata() (map[string]string, error) { return make(map[string]string), nil } + +func (el *emptyLayer) 
Platform() Platform { + return "" +} + +// IsEmpty returns true if the layer is an EmptyLayer +func IsEmpty(diffID DiffID) bool { + return diffID == DigestSHA256EmptyTar +} diff --git a/fn/vendor/github.com/docker/docker/layer/filestore.go b/fn/vendor/github.com/docker/docker/layer/filestore.go index 7ea418cd5..533f45481 100644 --- a/fn/vendor/github.com/docker/docker/layer/filestore.go +++ b/fn/vendor/github.com/docker/docker/layer/filestore.go @@ -226,6 +226,7 @@ func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, erro } f, err := gzip.NewReader(fz) if err != nil { + fz.Close() return nil, err } diff --git a/fn/vendor/github.com/docker/docker/layer/filestore_unix.go b/fn/vendor/github.com/docker/docker/layer/filestore_unix.go new file mode 100644 index 000000000..fe8a4f8b2 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/layer/filestore_unix.go @@ -0,0 +1,13 @@ +// +build !windows + +package layer + +// SetPlatform writes the "platform" file to the layer filestore +func (fm *fileMetadataTransaction) SetPlatform(platform Platform) error { + return nil +} + +// GetPlatform reads the "platform" file from the layer filestore +func (fms *fileMetadataStore) GetPlatform(layer ChainID) (Platform, error) { + return "", nil +} diff --git a/fn/vendor/github.com/docker/docker/layer/filestore_windows.go b/fn/vendor/github.com/docker/docker/layer/filestore_windows.go new file mode 100644 index 000000000..066456d8d --- /dev/null +++ b/fn/vendor/github.com/docker/docker/layer/filestore_windows.go @@ -0,0 +1,35 @@ +package layer + +import ( + "fmt" + "io/ioutil" + "os" + "strings" +) + +// SetPlatform writes the "platform" file to the layer filestore +func (fm *fileMetadataTransaction) SetPlatform(platform Platform) error { + if platform == "" { + return nil + } + return fm.ws.WriteFile("platform", []byte(platform), 0644) +} + +// GetPlatform reads the "platform" file from the layer filestore +func (fms *fileMetadataStore) GetPlatform(layer ChainID) 
(Platform, error) { + contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "platform")) + if err != nil { + // For backwards compatibility, the platform file may not exist. Default to "windows" if missing. + if os.IsNotExist(err) { + return "windows", nil + } + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if content != "windows" && content != "linux" { + return "", fmt.Errorf("invalid platform value: %s", content) + } + + return Platform(content), nil +} diff --git a/fn/vendor/github.com/docker/docker/layer/layer.go b/fn/vendor/github.com/docker/docker/layer/layer.go index 7b993ee4a..b3480a0cc 100644 --- a/fn/vendor/github.com/docker/docker/layer/layer.go +++ b/fn/vendor/github.com/docker/docker/layer/layer.go @@ -64,6 +64,14 @@ func (id ChainID) String() string { return string(id) } +// Platform is the platform of a layer +type Platform string + +// String returns a string rendition of layers target platform +func (id Platform) String() string { + return string(id) +} + // DiffID is the hash of an individual layer tar. type DiffID digest.Digest @@ -99,6 +107,9 @@ type Layer interface { // Parent returns the next layer in the layer chain. Parent() Layer + // Platform returns the platform of the layer + Platform() Platform + // Size returns the size of the entire layer chain. The size // is calculated from the total size of all files in the layers. Size() (int64, error) @@ -179,7 +190,7 @@ type CreateRWLayerOpts struct { // Store represents a backend for managing both // read-only and read-write layers. type Store interface { - Register(io.Reader, ChainID) (Layer, error) + Register(io.Reader, ChainID, Platform) (Layer, error) Get(ChainID) (Layer, error) Map() map[ChainID]Layer Release(Layer) ([]Metadata, error) @@ -197,7 +208,7 @@ type Store interface { // DescribableStore represents a layer store capable of storing // descriptors for layers. 
type DescribableStore interface { - RegisterWithDescriptor(io.Reader, ChainID, distribution.Descriptor) (Layer, error) + RegisterWithDescriptor(io.Reader, ChainID, Platform, distribution.Descriptor) (Layer, error) } // MetadataTransaction represents functions for setting layer metadata @@ -208,6 +219,7 @@ type MetadataTransaction interface { SetDiffID(DiffID) error SetCacheID(string) error SetDescriptor(distribution.Descriptor) error + SetPlatform(Platform) error TarSplitWriter(compressInput bool) (io.WriteCloser, error) Commit(ChainID) error @@ -228,6 +240,7 @@ type MetadataStore interface { GetDiffID(ChainID) (DiffID, error) GetCacheID(ChainID) (string, error) GetDescriptor(ChainID) (distribution.Descriptor, error) + GetPlatform(ChainID) (Platform, error) TarSplitReader(ChainID) (io.ReadCloser, error) SetMountID(string, string) error diff --git a/fn/vendor/github.com/docker/docker/layer/layer_store.go b/fn/vendor/github.com/docker/docker/layer/layer_store.go index 5caa5d41f..75ac1e4f4 100644 --- a/fn/vendor/github.com/docker/docker/layer/layer_store.go +++ b/fn/vendor/github.com/docker/docker/layer/layer_store.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "io/ioutil" + "strings" "sync" "github.com/Sirupsen/logrus" @@ -13,6 +14,7 @@ import ( "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" "github.com/opencontainers/go-digest" "github.com/vbatts/tar-split/tar/asm" "github.com/vbatts/tar-split/tar/storage" @@ -36,6 +38,8 @@ type layerStore struct { mountL sync.Mutex useTarSplit bool + + platform string } // StoreOptions are the options used to create a new Store instance @@ -44,10 +48,10 @@ type StoreOptions struct { MetadataStorePathTemplate string GraphDriver string GraphDriverOptions []string - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap + IDMappings *idtools.IDMappings PluginGetter plugingetter.PluginGetter ExperimentalEnabled bool + Platform string 
} // NewStoreFromOptions creates a new Store instance @@ -55,8 +59,8 @@ func NewStoreFromOptions(options StoreOptions) (Store, error) { driver, err := graphdriver.New(options.GraphDriver, options.PluginGetter, graphdriver.Options{ Root: options.StorePath, DriverOptions: options.GraphDriverOptions, - UIDMaps: options.UIDMaps, - GIDMaps: options.GIDMaps, + UIDMaps: options.IDMappings.UIDs(), + GIDMaps: options.IDMappings.GIDs(), ExperimentalEnabled: options.ExperimentalEnabled, }) if err != nil { @@ -69,13 +73,13 @@ func NewStoreFromOptions(options StoreOptions) (Store, error) { return nil, err } - return NewStoreFromGraphDriver(fms, driver) + return NewStoreFromGraphDriver(fms, driver, options.Platform) } // NewStoreFromGraphDriver creates a new Store instance using the provided // metadata store and graph driver. The metadata store will be used to restore // the Store. -func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver) (Store, error) { +func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver, platform string) (Store, error) { caps := graphdriver.Capabilities{} if capDriver, ok := driver.(graphdriver.CapabilityDriver); ok { caps = capDriver.Capabilities() @@ -87,6 +91,7 @@ func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver) (St layerMap: map[ChainID]*roLayer{}, mounts: map[string]*mountedLayer{}, useTarSplit: !caps.ReproducesExactDiffs, + platform: platform, } ids, mounts, err := store.List() @@ -145,6 +150,11 @@ func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) { return nil, fmt.Errorf("failed to get descriptor for %s: %s", layer, err) } + platform, err := ls.store.GetPlatform(layer) + if err != nil { + return nil, fmt.Errorf("failed to get platform for %s: %s", layer, err) + } + cl = &roLayer{ chainID: layer, diffID: diff, @@ -153,6 +163,7 @@ func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) { layerStore: ls, references: map[Layer]struct{}{}, descriptor: 
descriptor, + platform: platform, } if parent != "" { @@ -248,17 +259,25 @@ func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent stri return nil } -func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) { - return ls.registerWithDescriptor(ts, parent, distribution.Descriptor{}) +func (ls *layerStore) Register(ts io.Reader, parent ChainID, platform Platform) (Layer, error) { + return ls.registerWithDescriptor(ts, parent, platform, distribution.Descriptor{}) } -func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { +func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, platform Platform, descriptor distribution.Descriptor) (Layer, error) { // err is used to hold the error which will always trigger // cleanup of creates sources but may not be an error returned // to the caller (already exists). var err error var pid string var p *roLayer + + // Integrity check - ensure we are creating something for the correct platform + if system.LCOWSupported() { + if strings.ToLower(ls.platform) != strings.ToLower(string(platform)) { + return nil, fmt.Errorf("cannot create entry for platform %q in layer store for platform %q", platform, ls.platform) + } + } + if string(parent) != "" { p = ls.get(parent) if p == nil { @@ -287,6 +306,7 @@ func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descr layerStore: ls, references: map[Layer]struct{}{}, descriptor: descriptor, + platform: platform, } if err = ls.driver.Create(layer.cacheID, pid, nil); err != nil { @@ -389,7 +409,6 @@ func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error { if err != nil { return err } - err = ls.store.Remove(layer.chainID) if err != nil { return err @@ -519,7 +538,6 @@ func (ls *layerStore) CreateRWLayer(name string, parent ChainID, opts *CreateRWL if err = ls.driver.CreateReadWrite(m.mountID, pid, createOpts); err != nil { return 
nil, err } - if err = ls.saveMount(m); err != nil { return nil, err } diff --git a/fn/vendor/github.com/docker/docker/layer/layer_store_windows.go b/fn/vendor/github.com/docker/docker/layer/layer_store_windows.go index 1276a912c..ccbf6dd52 100644 --- a/fn/vendor/github.com/docker/docker/layer/layer_store_windows.go +++ b/fn/vendor/github.com/docker/docker/layer/layer_store_windows.go @@ -6,6 +6,6 @@ import ( "github.com/docker/distribution" ) -func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { - return ls.registerWithDescriptor(ts, parent, descriptor) +func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, platform Platform, descriptor distribution.Descriptor) (Layer, error) { + return ls.registerWithDescriptor(ts, parent, platform, descriptor) } diff --git a/fn/vendor/github.com/docker/docker/layer/layer_test.go b/fn/vendor/github.com/docker/docker/layer/layer_test.go index 5c4da4f9a..8ec5b4df5 100644 --- a/fn/vendor/github.com/docker/docker/layer/layer_test.go +++ b/fn/vendor/github.com/docker/docker/layer/layer_test.go @@ -20,7 +20,8 @@ import ( func init() { graphdriver.ApplyUncompressedLayer = archive.UnpackLayer - vfs.CopyWithTar = archive.CopyWithTar + defaultArchiver := archive.NewDefaultArchiver() + vfs.CopyWithTar = defaultArchiver.CopyWithTar } func newVFSGraphDriver(td string) (graphdriver.Driver, error) { @@ -70,7 +71,7 @@ func newTestStore(t *testing.T) (Store, string, func()) { if err != nil { t.Fatal(err) } - ls, err := NewStoreFromGraphDriver(fms, graph) + ls, err := NewStoreFromGraphDriver(fms, graph, runtime.GOOS) if err != nil { t.Fatal(err) } @@ -105,7 +106,7 @@ func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) { } defer ts.Close() - layer, err := ls.Register(ts, parent) + layer, err := ls.Register(ts, parent, Platform(runtime.GOOS)) if err != nil { return nil, err } @@ -403,7 +404,7 @@ func TestStoreRestore(t 
*testing.T) { t.Fatal(err) } - ls2, err := NewStoreFromGraphDriver(ls.(*layerStore).store, ls.(*layerStore).driver) + ls2, err := NewStoreFromGraphDriver(ls.(*layerStore).store, ls.(*layerStore).driver, runtime.GOOS) if err != nil { t.Fatal(err) } @@ -498,7 +499,7 @@ func TestTarStreamStability(t *testing.T) { t.Fatal(err) } - layer1, err := ls.Register(bytes.NewReader(tar1), "") + layer1, err := ls.Register(bytes.NewReader(tar1), "", Platform(runtime.GOOS)) if err != nil { t.Fatal(err) } @@ -517,7 +518,7 @@ func TestTarStreamStability(t *testing.T) { t.Fatal(err) } - layer2, err := ls.Register(bytes.NewReader(tar2), layer1.ChainID()) + layer2, err := ls.Register(bytes.NewReader(tar2), layer1.ChainID(), Platform(runtime.GOOS)) if err != nil { t.Fatal(err) } @@ -685,12 +686,12 @@ func TestRegisterExistingLayer(t *testing.T) { t.Fatal(err) } - layer2a, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID()) + layer2a, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID(), Platform(runtime.GOOS)) if err != nil { t.Fatal(err) } - layer2b, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID()) + layer2b, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID(), Platform(runtime.GOOS)) if err != nil { t.Fatal(err) } @@ -725,12 +726,12 @@ func TestTarStreamVerification(t *testing.T) { t.Fatal(err) } - layer1, err := ls.Register(bytes.NewReader(tar1), "") + layer1, err := ls.Register(bytes.NewReader(tar1), "", Platform(runtime.GOOS)) if err != nil { t.Fatal(err) } - layer2, err := ls.Register(bytes.NewReader(tar2), "") + layer2, err := ls.Register(bytes.NewReader(tar2), "", Platform(runtime.GOOS)) if err != nil { t.Fatal(err) } diff --git a/fn/vendor/github.com/docker/docker/layer/migration_test.go b/fn/vendor/github.com/docker/docker/layer/migration_test.go index fe3b40c20..7364e6cdc 100644 --- a/fn/vendor/github.com/docker/docker/layer/migration_test.go +++ b/fn/vendor/github.com/docker/docker/layer/migration_test.go @@ -94,7 +94,7 @@ func 
TestLayerMigration(t *testing.T) { if err != nil { t.Fatal(err) } - ls, err := NewStoreFromGraphDriver(fms, graph) + ls, err := NewStoreFromGraphDriver(fms, graph, runtime.GOOS) if err != nil { t.Fatal(err) } @@ -110,14 +110,14 @@ func TestLayerMigration(t *testing.T) { t.Fatal(err) } - layer1b, err := ls.Register(bytes.NewReader(tar1), "") + layer1b, err := ls.Register(bytes.NewReader(tar1), "", Platform(runtime.GOOS)) if err != nil { t.Fatal(err) } assertReferences(t, layer1a, layer1b) // Attempt register, should be same - layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID()) + layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID(), Platform(runtime.GOOS)) if err != nil { t.Fatal(err) } @@ -222,7 +222,7 @@ func TestLayerMigrationNoTarsplit(t *testing.T) { if err != nil { t.Fatal(err) } - ls, err := NewStoreFromGraphDriver(fms, graph) + ls, err := NewStoreFromGraphDriver(fms, graph, runtime.GOOS) if err != nil { t.Fatal(err) } @@ -238,7 +238,7 @@ func TestLayerMigrationNoTarsplit(t *testing.T) { t.Fatal(err) } - layer1b, err := ls.Register(bytes.NewReader(tar1), "") + layer1b, err := ls.Register(bytes.NewReader(tar1), "", Platform(runtime.GOOS)) if err != nil { t.Fatal(err) } @@ -246,7 +246,7 @@ func TestLayerMigrationNoTarsplit(t *testing.T) { assertReferences(t, layer1a, layer1b) // Attempt register, should be same - layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID()) + layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID(), Platform(runtime.GOOS)) if err != nil { t.Fatal(err) } diff --git a/fn/vendor/github.com/docker/docker/layer/ro_layer.go b/fn/vendor/github.com/docker/docker/layer/ro_layer.go index 8b4cf8f0d..e03d78b4d 100644 --- a/fn/vendor/github.com/docker/docker/layer/ro_layer.go +++ b/fn/vendor/github.com/docker/docker/layer/ro_layer.go @@ -16,6 +16,7 @@ type roLayer struct { size int64 layerStore *layerStore descriptor distribution.Descriptor + platform Platform referenceCount int 
references map[Layer]struct{} @@ -142,6 +143,9 @@ func storeLayer(tx MetadataTransaction, layer *roLayer) error { return err } } + if err := tx.SetPlatform(layer.platform); err != nil { + return err + } return nil } diff --git a/fn/vendor/github.com/docker/docker/layer/ro_layer_unix.go b/fn/vendor/github.com/docker/docker/layer/ro_layer_unix.go new file mode 100644 index 000000000..1b36856f9 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/layer/ro_layer_unix.go @@ -0,0 +1,7 @@ +// +build !windows + +package layer + +func (rl *roLayer) Platform() Platform { + return "" +} diff --git a/fn/vendor/github.com/docker/docker/layer/ro_layer_windows.go b/fn/vendor/github.com/docker/docker/layer/ro_layer_windows.go index 32bd7182a..6679bdfe8 100644 --- a/fn/vendor/github.com/docker/docker/layer/ro_layer_windows.go +++ b/fn/vendor/github.com/docker/docker/layer/ro_layer_windows.go @@ -7,3 +7,10 @@ var _ distribution.Describable = &roLayer{} func (rl *roLayer) Descriptor() distribution.Descriptor { return rl.descriptor } + +func (rl *roLayer) Platform() Platform { + if rl.platform == "" { + return "windows" + } + return rl.platform +} diff --git a/fn/vendor/github.com/docker/docker/libcontainerd/client_linux.go b/fn/vendor/github.com/docker/docker/libcontainerd/client_linux.go index 93367a479..a6986b520 100644 --- a/fn/vendor/github.com/docker/docker/libcontainerd/client_linux.go +++ b/fn/vendor/github.com/docker/docker/libcontainerd/client_linux.go @@ -9,7 +9,8 @@ import ( "time" "github.com/Sirupsen/logrus" - containerd "github.com/docker/containerd/api/grpc/types" + containerd "github.com/containerd/containerd/api/grpc/types" + containerd_runtime_types "github.com/containerd/containerd/runtime" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/mount" "github.com/golang/protobuf/ptypes" @@ -74,7 +75,10 @@ func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendly } } if specp.Capabilities != nil { - sp.Capabilities = 
specp.Capabilities + sp.Capabilities.Bounding = specp.Capabilities + sp.Capabilities.Effective = specp.Capabilities + sp.Capabilities.Inheritable = specp.Capabilities + sp.Capabilities.Permitted = specp.Capabilities } p := container.newProcess(processFriendlyName) @@ -94,7 +98,7 @@ func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendly Stdin: p.fifo(syscall.Stdin), Stdout: p.fifo(syscall.Stdout), Stderr: p.fifo(syscall.Stderr), - Capabilities: sp.Capabilities, + Capabilities: sp.Capabilities.Effective, ApparmorProfile: sp.ApparmorProfile, SelinuxLabel: sp.SelinuxLabel, NoNewPrivileges: sp.NoNewPrivileges, @@ -464,7 +468,7 @@ func (clnt *client) Restore(containerID string, attachStdio StdioCallback, optio cont, err := clnt.getContainerdContainer(containerID) // Get its last event ev, eerr := clnt.getContainerLastEvent(containerID) - if err != nil || cont.Status == "Stopped" { + if err != nil || containerd_runtime_types.State(cont.Status) == containerd_runtime_types.Stopped { if err != nil { logrus.Warnf("libcontainerd: failed to retrieve container %s state: %v", containerID, err) } diff --git a/fn/vendor/github.com/docker/docker/libcontainerd/client_unix.go b/fn/vendor/github.com/docker/docker/libcontainerd/client_unix.go index 21e8fea66..6dbf3af06 100644 --- a/fn/vendor/github.com/docker/docker/libcontainerd/client_unix.go +++ b/fn/vendor/github.com/docker/docker/libcontainerd/client_unix.go @@ -11,7 +11,7 @@ import ( "sync" "github.com/Sirupsen/logrus" - containerd "github.com/docker/containerd/api/grpc/types" + containerd "github.com/containerd/containerd/api/grpc/types" "github.com/docker/docker/pkg/idtools" specs "github.com/opencontainers/runtime-spec/specs-go" "golang.org/x/net/context" @@ -34,7 +34,7 @@ func (clnt *client) prepareBundleDir(uid, gid int) (string, error) { } if os.IsNotExist(err) || fi.Mode()&1 == 0 { p = fmt.Sprintf("%s.%d.%d", p, uid, gid) - if err := idtools.MkdirAs(p, 0700, uid, gid); err != nil && 
!os.IsExist(err) { + if err := idtools.MkdirAndChown(p, 0700, idtools.IDPair{uid, gid}); err != nil && !os.IsExist(err) { return "", err } } @@ -71,7 +71,7 @@ func (clnt *client) Create(containerID string, checkpoint string, checkpointDir } }() - if err := idtools.MkdirAllAs(container.dir, 0700, uid, gid); err != nil && !os.IsExist(err) { + if err := idtools.MkdirAllAndChown(container.dir, 0700, idtools.IDPair{uid, gid}); err != nil && !os.IsExist(err) { return err } @@ -83,8 +83,7 @@ func (clnt *client) Create(containerID string, checkpoint string, checkpointDir if err := json.NewEncoder(f).Encode(spec); err != nil { return err } - - return container.start(checkpoint, checkpointDir, attachStdio) + return container.start(&spec, checkpoint, checkpointDir, attachStdio) } func (clnt *client) Signal(containerID string, sig int) error { diff --git a/fn/vendor/github.com/docker/docker/libcontainerd/client_windows.go b/fn/vendor/github.com/docker/docker/libcontainerd/client_windows.go index 3d59e944f..455e8e5e6 100644 --- a/fn/vendor/github.com/docker/docker/libcontainerd/client_windows.go +++ b/fn/vendor/github.com/docker/docker/libcontainerd/client_windows.go @@ -1,6 +1,7 @@ package libcontainerd import ( + "encoding/json" "errors" "fmt" "io" @@ -49,7 +50,6 @@ const defaultOwner = "docker" // | VolumePath | \\?\\Volume{GUIDa} | | // | LayerFolderPath | %root%\windowsfilter\containerID | %root%\windowsfilter\containerID (servicing only) | // | Layers[] | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID | -// | SandboxPath | | %root%\windowsfilter | // | HvRuntime | | ImagePath=%root%\BaseLayerID\UtilityVM | // +-----------------+--------------------------------------------+---------------------------------------------------+ // @@ -59,7 +59,6 @@ const defaultOwner = "docker" // "SystemType": "Container", // "Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776", // "Owner": "docker", -// "IsDummy": false, // 
"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}", // "IgnoreFlushesDuringBoot": true, // "LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776", @@ -80,7 +79,6 @@ const defaultOwner = "docker" // "SystemType": "Container", // "Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d", // "Owner": "docker", -// "IsDummy": false, // "IgnoreFlushesDuringBoot": true, // "Layers": [{ // "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526", @@ -88,7 +86,6 @@ const defaultOwner = "docker" // }], // "HostName": "475c2c58933b", // "MappedDirectories": [], -// "SandboxPath": "C:\\\\control\\\\windowsfilter", // "HvPartition": true, // "EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"], // "DNSSearchList": "a.com,b.com,c.com", @@ -100,8 +97,17 @@ const defaultOwner = "docker" func (clnt *client) Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error { clnt.lock(containerID) defer clnt.unlock(containerID) - logrus.Debugln("libcontainerd: client.Create() with spec", spec) + if b, err := json.Marshal(spec); err == nil { + logrus.Debugln("libcontainerd: client.Create() with spec", string(b)) + } + osName := spec.Platform.OS + if osName == "windows" { + return clnt.createWindows(containerID, checkpoint, checkpointDir, spec, attachStdio, options...) + } + return clnt.createLinux(containerID, checkpoint, checkpointDir, spec, attachStdio, options...) 
+} +func (clnt *client) createWindows(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error { configuration := &hcsshim.ContainerConfig{ SystemType: "Container", Name: containerID, @@ -128,8 +134,8 @@ func (clnt *client) Create(containerID string, checkpoint string, checkpointDir if spec.Windows.Resources.CPU.Shares != nil { configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares) } - if spec.Windows.Resources.CPU.Percent != nil { - configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Percent) * 100 // ProcessorMaximum is a value between 1 and 10000 + if spec.Windows.Resources.CPU.Maximum != nil { + configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum) } } if spec.Windows.Resources.Memory != nil { @@ -159,7 +165,6 @@ func (clnt *client) Create(containerID string, checkpoint string, checkpointDir } if h, ok := option.(*HyperVIsolationOption); ok { configuration.HvPartition = h.IsHyperV - configuration.SandboxPath = h.SandboxPath continue } if l, ok := option.(*LayerOption); ok { @@ -270,17 +275,115 @@ func (clnt *client) Create(containerID string, checkpoint string, checkpointDir // Call start, and if it fails, delete the container from our // internal structure, start will keep HCS in sync by deleting the // container there. 
- logrus.Debugf("libcontainerd: Create() id=%s, Calling start()", containerID) + logrus.Debugf("libcontainerd: createWindows() id=%s, Calling start()", containerID) if err := container.start(attachStdio); err != nil { clnt.deleteContainer(containerID) return err } - logrus.Debugf("libcontainerd: Create() id=%s completed successfully", containerID) + logrus.Debugf("libcontainerd: createWindows() id=%s completed successfully", containerID) return nil } +func (clnt *client) createLinux(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error { + logrus.Debugf("libcontainerd: createLinux(): containerId %s ", containerID) + + // TODO @jhowardmsft LCOW Support: This needs to be configurable, not hard-coded. + // However, good-enough for the LCOW bring-up. + configuration := &hcsshim.ContainerConfig{ + HvPartition: true, + Name: containerID, + SystemType: "container", + ContainerType: "linux", + Owner: defaultOwner, + TerminateOnLastHandleClosed: true, + HvRuntime: &hcsshim.HvRuntime{ + ImagePath: `c:\Program Files\Linux Containers`, + LinuxKernelFile: `bootx64.efi`, + LinuxInitrdFile: `initrd.img`, + }, + } + + var layerOpt *LayerOption + for _, option := range options { + if l, ok := option.(*LayerOption); ok { + layerOpt = l + } + } + + // We must have a layer option with at least one path + if layerOpt == nil || layerOpt.LayerPaths == nil { + return fmt.Errorf("no layer option or paths were supplied to the runtime") + } + + // LayerFolderPath (writeable layer) + Layers (Guid + path) + configuration.LayerFolderPath = layerOpt.LayerFolderPath + for _, layerPath := range layerOpt.LayerPaths { + _, filename := filepath.Split(layerPath) + g, err := hcsshim.NameToGuid(filename) + if err != nil { + return err + } + configuration.Layers = append(configuration.Layers, hcsshim.Layer{ + ID: g.ToString(), + Path: filepath.Join(layerPath, "layer.vhd"), + }) + } + + for _, option := range options { + if 
n, ok := option.(*NetworkEndpointsOption); ok { + configuration.EndpointList = n.Endpoints + configuration.AllowUnqualifiedDNSQuery = n.AllowUnqualifiedDNSQuery + if n.DNSSearchList != nil { + configuration.DNSSearchList = strings.Join(n.DNSSearchList, ",") + } + configuration.NetworkSharedContainerName = n.NetworkSharedContainerID + break + } + } + + hcsContainer, err := hcsshim.CreateContainer(containerID, configuration) + if err != nil { + return err + } + + // Construct a container object for calling start on it. + container := &container{ + containerCommon: containerCommon{ + process: process{ + processCommon: processCommon{ + containerID: containerID, + client: clnt, + friendlyName: InitFriendlyName, + }, + }, + processes: make(map[string]*process), + }, + ociSpec: spec, + hcsContainer: hcsContainer, + } + + container.options = options + for _, option := range options { + if err := option.Apply(container); err != nil { + logrus.Errorf("libcontainerd: createLinux() %v", err) + } + } + + // Call start, and if it fails, delete the container from our + // internal structure, start will keep HCS in sync by deleting the + // container there. + logrus.Debugf("libcontainerd: createLinux() id=%s, Calling start()", containerID) + if err := container.start(attachStdio); err != nil { + clnt.deleteContainer(containerID) + return err + } + + logrus.Debugf("libcontainerd: createLinux() id=%s completed successfully", containerID) + return nil +} + // AddProcess is the handler for adding a process to an already running // container. It's called through docker exec. It returns the system pid of the // exec'd process. @@ -297,13 +400,15 @@ func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendly // create stdin, even if it's not used - it will be closed shortly. Stderr // is only created if it we're not -t. 
createProcessParms := hcsshim.ProcessConfig{ - EmulateConsole: procToAdd.Terminal, CreateStdInPipe: true, CreateStdOutPipe: true, CreateStdErrPipe: !procToAdd.Terminal, } - createProcessParms.ConsoleSize[0] = uint(procToAdd.ConsoleSize.Height) - createProcessParms.ConsoleSize[1] = uint(procToAdd.ConsoleSize.Width) + if procToAdd.Terminal { + createProcessParms.EmulateConsole = true + createProcessParms.ConsoleSize[0] = uint(procToAdd.ConsoleSize.Height) + createProcessParms.ConsoleSize[1] = uint(procToAdd.ConsoleSize.Width) + } // Take working directory from the process to add if it is defined, // otherwise take from the first process. @@ -315,7 +420,11 @@ func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendly // Configure the environment for the process createProcessParms.Environment = setupEnvironmentVariables(procToAdd.Env) - createProcessParms.CommandLine = strings.Join(procToAdd.Args, " ") + if container.ociSpec.Platform.OS == "windows" { + createProcessParms.CommandLine = strings.Join(procToAdd.Args, " ") + } else { + createProcessParms.CommandArgs = procToAdd.Args + } createProcessParms.User = procToAdd.User.Username logrus.Debugf("libcontainerd: commandLine: %s", createProcessParms.CommandLine) diff --git a/fn/vendor/github.com/docker/docker/libcontainerd/container_unix.go b/fn/vendor/github.com/docker/docker/libcontainerd/container_unix.go index 61bab145f..be1699943 100644 --- a/fn/vendor/github.com/docker/docker/libcontainerd/container_unix.go +++ b/fn/vendor/github.com/docker/docker/libcontainerd/container_unix.go @@ -13,7 +13,7 @@ import ( "time" "github.com/Sirupsen/logrus" - containerd "github.com/docker/containerd/api/grpc/types" + containerd "github.com/containerd/containerd/api/grpc/types" "github.com/docker/docker/pkg/ioutils" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/tonistiigi/fifo" @@ -90,12 +90,7 @@ func (ctr *container) spec() (*specs.Spec, error) { return &spec, nil } -func (ctr *container) 
start(checkpoint string, checkpointDir string, attachStdio StdioCallback) (err error) { - spec, err := ctr.spec() - if err != nil { - return nil - } - +func (ctr *container) start(spec *specs.Spec, checkpoint, checkpointDir string, attachStdio StdioCallback) (err error) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ready := make(chan struct{}) @@ -172,6 +167,7 @@ func (ctr *container) start(checkpoint string, checkpointDir string, attachStdio State: StateStart, Pid: ctr.systemPid, }}) + } func (ctr *container) newProcess(friendlyName string) *process { diff --git a/fn/vendor/github.com/docker/docker/libcontainerd/container_windows.go b/fn/vendor/github.com/docker/docker/libcontainerd/container_windows.go index 753d2f2f9..af3e0ef57 100644 --- a/fn/vendor/github.com/docker/docker/libcontainerd/container_windows.go +++ b/fn/vendor/github.com/docker/docker/libcontainerd/container_windows.go @@ -1,6 +1,7 @@ package libcontainerd import ( + "encoding/json" "fmt" "io" "io/ioutil" @@ -10,6 +11,7 @@ import ( "github.com/Microsoft/hcsshim" "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/system" "github.com/opencontainers/runtime-spec/specs-go" ) @@ -80,9 +82,23 @@ func (ctr *container) start(attachStdio StdioCallback) error { // Configure the environment for the process createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env) - createProcessParms.CommandLine = strings.Join(ctr.ociSpec.Process.Args, " ") + if ctr.ociSpec.Platform.OS == "windows" { + createProcessParms.CommandLine = strings.Join(ctr.ociSpec.Process.Args, " ") + } else { + createProcessParms.CommandArgs = ctr.ociSpec.Process.Args + } createProcessParms.User = ctr.ociSpec.Process.User.Username + // LCOW requires the raw OCI spec passed through HCS and onwards to GCS for the utility VM. 
+ if system.LCOWSupported() && ctr.ociSpec.Platform.OS == "linux" { + ociBuf, err := json.Marshal(ctr.ociSpec) + if err != nil { + return err + } + ociRaw := json.RawMessage(ociBuf) + createProcessParms.OCISpecification = &ociRaw + } + // Start the command running in the container. newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms) if err != nil { @@ -228,11 +244,14 @@ func (ctr *container) waitExit(process *process, isFirstProcessToStart bool) err if !isFirstProcessToStart { si.State = StateExitProcess } else { - updatePending, err := ctr.hcsContainer.HasPendingUpdates() - if err != nil { - logrus.Warnf("libcontainerd: HasPendingUpdates() failed (container may have been killed): %s", err) - } else { - si.UpdatePending = updatePending + // Pending updates is only applicable for WCOW + if ctr.ociSpec.Platform.OS == "windows" { + updatePending, err := ctr.hcsContainer.HasPendingUpdates() + if err != nil { + logrus.Warnf("libcontainerd: HasPendingUpdates() failed (container may have been killed): %s", err) + } else { + si.UpdatePending = updatePending + } } logrus.Debugf("libcontainerd: shutting down container %s", ctr.containerID) diff --git a/fn/vendor/github.com/docker/docker/libcontainerd/process_unix.go b/fn/vendor/github.com/docker/docker/libcontainerd/process_unix.go index 506fca6e1..3b54e325b 100644 --- a/fn/vendor/github.com/docker/docker/libcontainerd/process_unix.go +++ b/fn/vendor/github.com/docker/docker/libcontainerd/process_unix.go @@ -10,7 +10,7 @@ import ( goruntime "runtime" "strings" - containerd "github.com/docker/containerd/api/grpc/types" + containerd "github.com/containerd/containerd/api/grpc/types" "github.com/tonistiigi/fifo" "golang.org/x/net/context" "golang.org/x/sys/unix" diff --git a/fn/vendor/github.com/docker/docker/libcontainerd/queue_unix.go b/fn/vendor/github.com/docker/docker/libcontainerd/queue_unix.go index b848b9872..66765f75e 100644 --- a/fn/vendor/github.com/docker/docker/libcontainerd/queue_unix.go +++ 
b/fn/vendor/github.com/docker/docker/libcontainerd/queue_unix.go @@ -27,5 +27,11 @@ func (q *queue) append(id string, f func()) { } f() close(done) + + q.Lock() + if q.fns[id] == done { + delete(q.fns, id) + } + q.Unlock() }() } diff --git a/fn/vendor/github.com/docker/docker/libcontainerd/queue_unix_test.go b/fn/vendor/github.com/docker/docker/libcontainerd/queue_unix_test.go new file mode 100644 index 000000000..bb49a5d4c --- /dev/null +++ b/fn/vendor/github.com/docker/docker/libcontainerd/queue_unix_test.go @@ -0,0 +1,33 @@ +// +build linux solaris + +package libcontainerd + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestSerialization(t *testing.T) { + var ( + q queue + serialization = 1 + ) + + q.append("aaa", func() { + //simulate a long time task + time.Sleep(10 * time.Millisecond) + require.EqualValues(t, serialization, 1) + serialization = 2 + }) + q.append("aaa", func() { + require.EqualValues(t, serialization, 2) + serialization = 3 + }) + q.append("aaa", func() { + require.EqualValues(t, serialization, 3) + serialization = 4 + }) + time.Sleep(20 * time.Millisecond) +} diff --git a/fn/vendor/github.com/docker/docker/libcontainerd/remote_unix.go b/fn/vendor/github.com/docker/docker/libcontainerd/remote_unix.go index eebbc886c..a81a93cbd 100644 --- a/fn/vendor/github.com/docker/docker/libcontainerd/remote_unix.go +++ b/fn/vendor/github.com/docker/docker/libcontainerd/remote_unix.go @@ -19,7 +19,7 @@ import ( "time" "github.com/Sirupsen/logrus" - containerd "github.com/docker/containerd/api/grpc/types" + containerd "github.com/containerd/containerd/api/grpc/types" "github.com/docker/docker/pkg/locker" "github.com/docker/docker/pkg/system" "github.com/golang/protobuf/ptypes" @@ -49,7 +49,7 @@ type remote struct { stateDir string rpcAddr string startDaemon bool - closeManually bool + closedManually bool debugLog bool rpcConn *grpc.ClientConn clients []*client @@ -80,7 +80,7 @@ func New(stateDir string, options 
...RemoteOption) (_ Remote, err error) { } } - if err := system.MkdirAll(stateDir, 0700); err != nil { + if err := system.MkdirAll(stateDir, 0700, ""); err != nil { return nil, err } @@ -96,11 +96,13 @@ func New(stateDir string, options ...RemoteOption) (_ Remote, err error) { // don't output the grpc reconnect logging grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags)) - dialOpts := append([]grpc.DialOption{grpc.WithInsecure()}, + dialOpts := []grpc.DialOption{ + grpc.WithInsecure(), + grpc.WithBackoffMaxDelay(2 * time.Second), grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { return net.DialTimeout("unix", addr, timeout) }), - ) + } conn, err := grpc.Dial(r.rpcAddr, dialOpts...) if err != nil { return nil, fmt.Errorf("error connecting to containerd: %v", err) @@ -154,7 +156,7 @@ func (r *remote) handleConnectionChange() { logrus.Debugf("libcontainerd: containerd health check returned error: %v", err) if r.daemonPid != -1 { - if strings.Contains(err.Error(), "is closing") { + if r.closedManually { // Well, we asked for it to stop, just return return } @@ -180,7 +182,7 @@ func (r *remote) Cleanup() { if r.daemonPid == -1 { return } - r.closeManually = true + r.closedManually = true r.rpcConn.Close() // Ask the daemon to quit syscall.Kill(r.daemonPid, syscall.SIGTERM) @@ -280,10 +282,23 @@ func (r *remote) startEventsMonitor() error { er := &containerd.EventsRequest{ Timestamp: tsp, } - events, err := r.apiClient.Events(context.Background(), er, grpc.FailFast(false)) - if err != nil { - return err + + var events containerd.API_EventsClient + for { + events, err = r.apiClient.Events(context.Background(), er, grpc.FailFast(false)) + if err == nil { + break + } + logrus.Warnf("libcontainerd: failed to get events from containerd: %q", err) + + if r.closedManually { + // ignore error if grpc remote connection is closed manually + return nil + } + + <-time.After(100 * time.Millisecond) } + go r.handleEventStream(events) return nil } @@ 
-293,7 +308,7 @@ func (r *remote) handleEventStream(events containerd.API_EventsClient) { e, err := events.Recv() if err != nil { if grpc.ErrorDesc(err) == transport.ErrConnClosing.Desc && - r.closeManually { + r.closedManually { // ignore error if grpc remote connection is closed manually return } @@ -414,6 +429,18 @@ func (r *remote) runContainerdDaemon() error { if err := cmd.Start(); err != nil { return err } + + // unless strictly necessary, do not add anything in between here + // as the reaper goroutine below needs to kick in as soon as possible + // and any "return" from code paths added here will defeat the reaper + // process. + + r.daemonWaitCh = make(chan struct{}) + go func() { + cmd.Wait() + close(r.daemonWaitCh) + }() // Reap our child when needed + logrus.Infof("libcontainerd: new containerd process, pid: %d", cmd.Process.Pid) if err := setOOMScore(cmd.Process.Pid, r.oomScore); err != nil { system.KillProcess(cmd.Process.Pid) @@ -424,11 +451,6 @@ func (r *remote) runContainerdDaemon() error { return err } - r.daemonWaitCh = make(chan struct{}) - go func() { - cmd.Wait() - close(r.daemonWaitCh) - }() // Reap our child when needed r.daemonPid = cmd.Process.Pid return nil } diff --git a/fn/vendor/github.com/docker/docker/libcontainerd/types.go b/fn/vendor/github.com/docker/docker/libcontainerd/types.go index 3d981e337..c7ade6b18 100644 --- a/fn/vendor/github.com/docker/docker/libcontainerd/types.go +++ b/fn/vendor/github.com/docker/docker/libcontainerd/types.go @@ -3,7 +3,7 @@ package libcontainerd import ( "io" - containerd "github.com/docker/containerd/api/grpc/types" + containerd "github.com/containerd/containerd/api/grpc/types" "github.com/opencontainers/runtime-spec/specs-go" "golang.org/x/net/context" ) diff --git a/fn/vendor/github.com/docker/docker/libcontainerd/types_linux.go b/fn/vendor/github.com/docker/docker/libcontainerd/types_linux.go index cc2a17aec..4f0635835 100644 --- a/fn/vendor/github.com/docker/docker/libcontainerd/types_linux.go 
+++ b/fn/vendor/github.com/docker/docker/libcontainerd/types_linux.go @@ -1,7 +1,7 @@ package libcontainerd import ( - containerd "github.com/docker/containerd/api/grpc/types" + containerd "github.com/containerd/containerd/api/grpc/types" "github.com/opencontainers/runtime-spec/specs-go" ) @@ -21,7 +21,7 @@ type Process struct { // Capabilities are linux capabilities that are kept for the container. Capabilities []string `json:"capabilities,omitempty"` // Rlimits specifies rlimit options to apply to the process. - Rlimits []specs.Rlimit `json:"rlimits,omitempty"` + Rlimits []specs.LinuxRlimit `json:"rlimits,omitempty"` // ApparmorProfile specifies the apparmor profile for the container. ApparmorProfile *string `json:"apparmorProfile,omitempty"` // SelinuxLabel specifies the selinux context that the container process is run as. diff --git a/fn/vendor/github.com/docker/docker/libcontainerd/types_solaris.go b/fn/vendor/github.com/docker/docker/libcontainerd/types_solaris.go index dbafef669..2ab18eb0d 100644 --- a/fn/vendor/github.com/docker/docker/libcontainerd/types_solaris.go +++ b/fn/vendor/github.com/docker/docker/libcontainerd/types_solaris.go @@ -1,7 +1,7 @@ package libcontainerd import ( - containerd "github.com/docker/containerd/api/grpc/types" + containerd "github.com/containerd/containerd/api/grpc/types" "github.com/opencontainers/runtime-spec/specs-go" ) diff --git a/fn/vendor/github.com/docker/docker/libcontainerd/types_windows.go b/fn/vendor/github.com/docker/docker/libcontainerd/types_windows.go index cf4d640fa..317bfb020 100644 --- a/fn/vendor/github.com/docker/docker/libcontainerd/types_windows.go +++ b/fn/vendor/github.com/docker/docker/libcontainerd/types_windows.go @@ -39,10 +39,9 @@ type FlushOption struct { } // HyperVIsolationOption is a CreateOption that indicates whether the runtime -// should start the container as a Hyper-V container, and if so, the sandbox path. +// should start the container as a Hyper-V container. 
type HyperVIsolationOption struct { - IsHyperV bool - SandboxPath string `json:",omitempty"` + IsHyperV bool } // LayerOption is a CreateOption that indicates to the runtime the layer folder diff --git a/fn/vendor/github.com/docker/docker/libcontainerd/utils_linux.go b/fn/vendor/github.com/docker/docker/libcontainerd/utils_linux.go index 78828bcda..5fd5bf6de 100644 --- a/fn/vendor/github.com/docker/docker/libcontainerd/utils_linux.go +++ b/fn/vendor/github.com/docker/docker/libcontainerd/utils_linux.go @@ -3,7 +3,7 @@ package libcontainerd import ( "syscall" - containerd "github.com/docker/containerd/api/grpc/types" + containerd "github.com/containerd/containerd/api/grpc/types" "github.com/opencontainers/runtime-spec/specs-go" ) @@ -23,7 +23,7 @@ func getRootIDs(s specs.Spec) (int, int, error) { return uid, gid, nil } -func hostIDFromMap(id uint32, mp []specs.IDMapping) int { +func hostIDFromMap(id uint32, mp []specs.LinuxIDMapping) int { for _, m := range mp { if id >= m.ContainerID && id <= m.ContainerID+m.Size-1 { return int(m.HostID + id - m.ContainerID) @@ -42,7 +42,7 @@ func systemPid(ctr *containerd.Container) uint32 { return pid } -func convertRlimits(sr []specs.Rlimit) (cr []*containerd.Rlimit) { +func convertRlimits(sr []specs.LinuxRlimit) (cr []*containerd.Rlimit) { for _, r := range sr { cr = append(cr, &containerd.Rlimit{ Type: r.Type, diff --git a/fn/vendor/github.com/docker/docker/libcontainerd/utils_solaris.go b/fn/vendor/github.com/docker/docker/libcontainerd/utils_solaris.go index 49632b45e..10ae59980 100644 --- a/fn/vendor/github.com/docker/docker/libcontainerd/utils_solaris.go +++ b/fn/vendor/github.com/docker/docker/libcontainerd/utils_solaris.go @@ -3,7 +3,7 @@ package libcontainerd import ( "syscall" - containerd "github.com/docker/containerd/api/grpc/types" + containerd "github.com/containerd/containerd/api/grpc/types" "github.com/opencontainers/runtime-spec/specs-go" ) diff --git a/fn/vendor/github.com/docker/docker/man/Dockerfile 
b/fn/vendor/github.com/docker/docker/man/Dockerfile deleted file mode 100644 index 80e97ff01..000000000 --- a/fn/vendor/github.com/docker/docker/man/Dockerfile +++ /dev/null @@ -1,24 +0,0 @@ -FROM golang:1.7.5-alpine - -RUN apk add -U git bash curl gcc musl-dev make - -RUN mkdir -p /go/src /go/bin /go/pkg -RUN export GLIDE=v0.11.1; \ - export TARGET=/go/src/github.com/Masterminds; \ - mkdir -p ${TARGET} && \ - git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ - cd ${TARGET}/glide && \ - git checkout $GLIDE && \ - make build && \ - cp ./glide /usr/bin/glide && \ - cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* - -COPY glide.yaml /manvendor/ -COPY glide.lock /manvendor/ -WORKDIR /manvendor/ -RUN glide install && mv vendor src -ENV GOPATH=$GOPATH:/manvendor -RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man - -WORKDIR /go/src/github.com/docker/docker/ -ENTRYPOINT ["man/generate.sh"] diff --git a/fn/vendor/github.com/docker/docker/man/Dockerfile.5.md b/fn/vendor/github.com/docker/docker/man/Dockerfile.5.md deleted file mode 100644 index 5191b1930..000000000 --- a/fn/vendor/github.com/docker/docker/man/Dockerfile.5.md +++ /dev/null @@ -1,474 +0,0 @@ -% DOCKERFILE(5) Docker User Manuals -% Zac Dover -% May 2014 -# NAME - -Dockerfile - automate the steps of creating a Docker image - -# INTRODUCTION - -The **Dockerfile** is a configuration file that automates the steps of creating -a Docker image. It is similar to a Makefile. Docker reads instructions from the -**Dockerfile** to automate the steps otherwise performed manually to create an -image. To build an image, create a file called **Dockerfile**. - -The **Dockerfile** describes the steps taken to assemble the image. When the -**Dockerfile** has been created, call the `docker build` command, using the -path of directory that contains **Dockerfile** as the argument. 
- -# SYNOPSIS - -INSTRUCTION arguments - -For example: - - FROM image - -# DESCRIPTION - -A Dockerfile is a file that automates the steps of creating a Docker image. -A Dockerfile is similar to a Makefile. - -# USAGE - - docker build . - - -- Runs the steps and commits them, building a final image. - The path to the source repository defines where to find the context of the - build. The build is run by the Docker daemon, not the CLI. The whole - context must be transferred to the daemon. The Docker CLI reports - `"Sending build context to Docker daemon"` when the context is sent to the - daemon. - - ``` - docker build -t repository/tag . - ``` - - -- specifies a repository and tag at which to save the new image if the build - succeeds. The Docker daemon runs the steps one-by-one, committing the result - to a new image if necessary, before finally outputting the ID of the new - image. The Docker daemon automatically cleans up the context it is given. - - Docker re-uses intermediate images whenever possible. This significantly - accelerates the *docker build* process. - -# FORMAT - - `FROM image` - - `FROM image:tag` - - `FROM image@digest` - - -- The **FROM** instruction sets the base image for subsequent instructions. A - valid Dockerfile must have **FROM** as its first instruction. The image can be any - valid image. It is easy to start by pulling an image from the public - repositories. - - -- **FROM** must be the first non-comment instruction in Dockerfile. - - -- **FROM** may appear multiple times within a single Dockerfile in order to create - multiple images. Make a note of the last image ID output by the commit before - each new **FROM** command. - - -- If no tag is given to the **FROM** instruction, Docker applies the - `latest` tag. If the used tag does not exist, an error is returned. - - -- If no digest is given to the **FROM** instruction, Docker applies the - `latest` tag. If the used tag does not exist, an error is returned. 
- -**MAINTAINER** - -- **MAINTAINER** sets the Author field for the generated images. - Useful for providing users with an email or url for support. - -**RUN** - -- **RUN** has two forms: - - ``` - # the command is run in a shell - /bin/sh -c - RUN - - # Executable form - RUN ["executable", "param1", "param2"] - ``` - - - -- The **RUN** instruction executes any commands in a new layer on top of the current - image and commits the results. The committed image is used for the next step in - Dockerfile. - - -- Layering **RUN** instructions and generating commits conforms to the core - concepts of Docker where commits are cheap and containers can be created from - any point in the history of an image. This is similar to source control. The - exec form makes it possible to avoid shell string munging. The exec form makes - it possible to **RUN** commands using a base image that does not contain `/bin/sh`. - - Note that the exec form is parsed as a JSON array, which means that you must - use double-quotes (") around words not single-quotes ('). - -**CMD** - -- **CMD** has three forms: - - ``` - # Executable form - CMD ["executable", "param1", "param2"]` - - # Provide default arguments to ENTRYPOINT - CMD ["param1", "param2"]` - - # the command is run in a shell - /bin/sh -c - CMD command param1 param2 - ``` - - -- There should be only one **CMD** in a Dockerfile. If more than one **CMD** is listed, only - the last **CMD** takes effect. - The main purpose of a **CMD** is to provide defaults for an executing container. - These defaults may include an executable, or they can omit the executable. If - they omit the executable, an **ENTRYPOINT** must be specified. - When used in the shell or exec formats, the **CMD** instruction sets the command to - be executed when running the image. 
- If you use the shell form of the **CMD**, the `` executes in `/bin/sh -c`: - - Note that the exec form is parsed as a JSON array, which means that you must - use double-quotes (") around words not single-quotes ('). - - ``` - FROM ubuntu - CMD echo "This is a test." | wc - - ``` - - -- If you run **command** without a shell, then you must express the command as a - JSON array and give the full path to the executable. This array form is the - preferred form of **CMD**. All additional parameters must be individually expressed - as strings in the array: - - ``` - FROM ubuntu - CMD ["/usr/bin/wc","--help"] - ``` - - -- To make the container run the same executable every time, use **ENTRYPOINT** in - combination with **CMD**. - If the user specifies arguments to `docker run`, the specified commands - override the default in **CMD**. - Do not confuse **RUN** with **CMD**. **RUN** runs a command and commits the result. - **CMD** executes nothing at build time, but specifies the intended command for - the image. - -**LABEL** - -- `LABEL = [= ...]`or - ``` - LABEL [ ] - LABEL [ ] - ... - ``` - The **LABEL** instruction adds metadata to an image. A **LABEL** is a - key-value pair. To specify a **LABEL** without a value, simply use an empty - string. To include spaces within a **LABEL** value, use quotes and - backslashes as you would in command-line parsing. - - ``` - LABEL com.example.vendor="ACME Incorporated" - LABEL com.example.vendor "ACME Incorporated" - LABEL com.example.vendor.is-beta "" - LABEL com.example.vendor.is-beta= - LABEL com.example.vendor.is-beta="" - ``` - - An image can have more than one label. To specify multiple labels, separate - each key-value pair by a space. - - Labels are additive including `LABEL`s in `FROM` images. As the system - encounters and then applies a new label, new `key`s override any previous - labels with identical keys. - - To display an image's labels, use the `docker inspect` command. 
- -**EXPOSE** - -- `EXPOSE [...]` - The **EXPOSE** instruction informs Docker that the container listens on the - specified network ports at runtime. Docker uses this information to - interconnect containers using links and to set up port redirection on the host - system. - -**ENV** - -- `ENV ` - The **ENV** instruction sets the environment variable to - the value ``. This value is passed to all future - **RUN**, **ENTRYPOINT**, and **CMD** instructions. This is - functionally equivalent to prefixing the command with `=`. The - environment variables that are set with **ENV** persist when a container is run - from the resulting image. Use `docker inspect` to inspect these values, and - change them using `docker run --env =`. - - Note that setting "`ENV DEBIAN_FRONTEND noninteractive`" may cause - unintended consequences, because it will persist when the container is run - interactively, as with the following command: `docker run -t -i image bash` - -**ADD** - -- **ADD** has two forms: - - ``` - ADD - - # Required for paths with whitespace - ADD ["",... ""] - ``` - - The **ADD** instruction copies new files, directories - or remote file URLs to the filesystem of the container at path ``. - Multiple `` resources may be specified but if they are files or directories - then they must be relative to the source directory that is being built - (the context of the build). The `` is the absolute path, or path relative - to **WORKDIR**, into which the source is copied inside the target container. - If the `` argument is a local file in a recognized compression format - (tar, gzip, bzip2, etc) then it is unpacked at the specified `` in the - container's filesystem. Note that only local compressed files will be unpacked, - i.e., the URL download and archive unpacking features cannot be used together. - All new directories are created with mode 0755 and with the uid and gid of **0**. 
- -**COPY** - -- **COPY** has two forms: - - ``` - COPY - - # Required for paths with whitespace - COPY ["",... ""] - ``` - - The **COPY** instruction copies new files from `` and - adds them to the filesystem of the container at path . The `` must be - the path to a file or directory relative to the source directory that is - being built (the context of the build) or a remote file URL. The `` is an - absolute path, or a path relative to **WORKDIR**, into which the source will - be copied inside the target container. If you **COPY** an archive file it will - land in the container exactly as it appears in the build context without any - attempt to unpack it. All new files and directories are created with mode **0755** - and with the uid and gid of **0**. - -**ENTRYPOINT** - -- **ENTRYPOINT** has two forms: - - ``` - # executable form - ENTRYPOINT ["executable", "param1", "param2"]` - - # run command in a shell - /bin/sh -c - ENTRYPOINT command param1 param2 - ``` - - -- An **ENTRYPOINT** helps you configure a - container that can be run as an executable. When you specify an **ENTRYPOINT**, - the whole container runs as if it was only that executable. The **ENTRYPOINT** - instruction adds an entry command that is not overwritten when arguments are - passed to docker run. This is different from the behavior of **CMD**. This allows - arguments to be passed to the entrypoint, for instance `docker run -d` - passes the -d argument to the **ENTRYPOINT**. Specify parameters either in the - **ENTRYPOINT** JSON array (as in the preferred exec form above), or by using a **CMD** - statement. Parameters in the **ENTRYPOINT** are not overwritten by the docker run - arguments. Parameters specified via **CMD** are overwritten by docker run - arguments. 
Specify a plain string for the **ENTRYPOINT**, and it will execute in - `/bin/sh -c`, like a **CMD** instruction: - - ``` - FROM ubuntu - ENTRYPOINT wc -l - - ``` - - This means that the Dockerfile's image always takes stdin as input (that's - what "-" means), and prints the number of lines (that's what "-l" means). To - make this optional but default, use a **CMD**: - - ``` - FROM ubuntu - CMD ["-l", "-"] - ENTRYPOINT ["/usr/bin/wc"] - ``` - -**VOLUME** - -- `VOLUME ["/data"]` - The **VOLUME** instruction creates a mount point with the specified name and marks - it as holding externally-mounted volumes from the native host or from other - containers. - -**USER** - -- `USER daemon` - Sets the username or UID used for running subsequent commands. - - The **USER** instruction can optionally be used to set the group or GID. The - followings examples are all valid: - USER [user | user:group | uid | uid:gid | user:gid | uid:group ] - - Until the **USER** instruction is set, instructions will be run as root. The USER - instruction can be used any number of times in a Dockerfile, and will only affect - subsequent commands. - -**WORKDIR** - -- `WORKDIR /path/to/workdir` - The **WORKDIR** instruction sets the working directory for the **RUN**, **CMD**, - **ENTRYPOINT**, **COPY** and **ADD** Dockerfile commands that follow it. It can - be used multiple times in a single Dockerfile. Relative paths are defined - relative to the path of the previous **WORKDIR** instruction. For example: - - ``` - WORKDIR /a - WORKDIR b - WORKDIR c - RUN pwd - ``` - - In the above example, the output of the **pwd** command is **a/b/c**. - -**ARG** - -- ARG [=] - - The `ARG` instruction defines a variable that users can pass at build-time to - the builder with the `docker build` command using the `--build-arg - =` flag. If a user specifies a build argument that was not - defined in the Dockerfile, the build outputs a warning. 
- - ``` - [Warning] One or more build-args [foo] were not consumed - ``` - - The Dockerfile author can define a single variable by specifying `ARG` once or many - variables by specifying `ARG` more than once. For example, a valid Dockerfile: - - ``` - FROM busybox - ARG user1 - ARG buildno - ... - ``` - - A Dockerfile author may optionally specify a default value for an `ARG` instruction: - - ``` - FROM busybox - ARG user1=someuser - ARG buildno=1 - ... - ``` - - If an `ARG` value has a default and if there is no value passed at build-time, the - builder uses the default. - - An `ARG` variable definition comes into effect from the line on which it is - defined in the `Dockerfile` not from the argument's use on the command-line or - elsewhere. For example, consider this Dockerfile: - - ``` - 1 FROM busybox - 2 USER ${user:-some_user} - 3 ARG user - 4 USER $user - ... - ``` - A user builds this file by calling: - - ``` - $ docker build --build-arg user=what_user Dockerfile - ``` - - The `USER` at line 2 evaluates to `some_user` as the `user` variable is defined on the - subsequent line 3. The `USER` at line 4 evaluates to `what_user` as `user` is - defined and the `what_user` value was passed on the command line. Prior to its definition by an - `ARG` instruction, any use of a variable results in an empty string. - - > **Warning:** It is not recommended to use build-time variables for - > passing secrets like github keys, user credentials etc. Build-time variable - > values are visible to any user of the image with the `docker history` command. - - You can use an `ARG` or an `ENV` instruction to specify variables that are - available to the `RUN` instruction. Environment variables defined using the - `ENV` instruction always override an `ARG` instruction of the same name. Consider - this Dockerfile with an `ENV` and `ARG` instruction. 
- - ``` - 1 FROM ubuntu - 2 ARG CONT_IMG_VER - 3 ENV CONT_IMG_VER v1.0.0 - 4 RUN echo $CONT_IMG_VER - ``` - Then, assume this image is built with this command: - - ``` - $ docker build --build-arg CONT_IMG_VER=v2.0.1 Dockerfile - ``` - - In this case, the `RUN` instruction uses `v1.0.0` instead of the `ARG` setting - passed by the user:`v2.0.1` This behavior is similar to a shell - script where a locally scoped variable overrides the variables passed as - arguments or inherited from environment, from its point of definition. - - Using the example above but a different `ENV` specification you can create more - useful interactions between `ARG` and `ENV` instructions: - - ``` - 1 FROM ubuntu - 2 ARG CONT_IMG_VER - 3 ENV CONT_IMG_VER ${CONT_IMG_VER:-v1.0.0} - 4 RUN echo $CONT_IMG_VER - ``` - - Unlike an `ARG` instruction, `ENV` values are always persisted in the built - image. Consider a docker build without the --build-arg flag: - - ``` - $ docker build Dockerfile - ``` - - Using this Dockerfile example, `CONT_IMG_VER` is still persisted in the image but - its value would be `v1.0.0` as it is the default set in line 3 by the `ENV` instruction. - - The variable expansion technique in this example allows you to pass arguments - from the command line and persist them in the final image by leveraging the - `ENV` instruction. Variable expansion is only supported for [a limited set of - Dockerfile instructions.](#environment-replacement) - - Docker has a set of predefined `ARG` variables that you can use without a - corresponding `ARG` instruction in the Dockerfile. - - * `HTTP_PROXY` - * `http_proxy` - * `HTTPS_PROXY` - * `https_proxy` - * `FTP_PROXY` - * `ftp_proxy` - * `NO_PROXY` - * `no_proxy` - - To use these, simply pass them on the command line using the `--build-arg - =` flag. - -**ONBUILD** - -- `ONBUILD [INSTRUCTION]` - The **ONBUILD** instruction adds a trigger instruction to an image. 
The - trigger is executed at a later time, when the image is used as the base for - another build. Docker executes the trigger in the context of the downstream - build, as if the trigger existed immediately after the **FROM** instruction in - the downstream Dockerfile. - - You can register any build instruction as a trigger. A trigger is useful if - you are defining an image to use as a base for building other images. For - example, if you are defining an application build environment or a daemon that - is customized with a user-specific configuration. - - Consider an image intended as a reusable python application builder. It must - add application source code to a particular directory, and might need a build - script called after that. You can't just call **ADD** and **RUN** now, because - you don't yet have access to the application source code, and it is different - for each application build. - - -- Providing application developers with a boilerplate Dockerfile to copy-paste - into their application is inefficient, error-prone, and - difficult to update because it mixes with application-specific code. - The solution is to use **ONBUILD** to register instructions in advance, to - run later, during the next build stage. - -# HISTORY -*May 2014, Compiled by Zac Dover (zdover at redhat dot com) based on docker.com Dockerfile documentation. 
-*Feb 2015, updated by Brian Goff (cpuguy83@gmail.com) for readability -*Sept 2015, updated by Sally O'Malley (somalley@redhat.com) -*Oct 2016, updated by Addam Hardy (addam.hardy@gmail.com) diff --git a/fn/vendor/github.com/docker/docker/man/Dockerfile.aarch64 b/fn/vendor/github.com/docker/docker/man/Dockerfile.aarch64 deleted file mode 100644 index 8553d1f76..000000000 --- a/fn/vendor/github.com/docker/docker/man/Dockerfile.aarch64 +++ /dev/null @@ -1,34 +0,0 @@ -FROM aarch64/ubuntu:xenial - -RUN apt-get update && apt-get install -y git golang-go curl - -ENV GO_VERSION 1.7.5 -ENV GOARCH arm64 -ENV PATH /go/bin:/usr/src/go/bin:$PATH - -RUN mkdir /usr/src/go && \ - curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 && \ - cd /usr/src/go/src && \ - GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash - -RUN mkdir -p /go/src /go/bin /go/pkg -ENV GOPATH=/go -RUN export GLIDE=v0.11.1; \ - export TARGET=/go/src/github.com/Masterminds; \ - mkdir -p ${TARGET} && \ - git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ - cd ${TARGET}/glide && \ - git checkout $GLIDE && \ - make build && \ - cp ./glide /usr/bin/glide && \ - cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* - -COPY glide.yaml /manvendor/ -COPY glide.lock /manvendor/ -WORKDIR /manvendor/ -RUN glide install && mv vendor src -ENV GOPATH=$GOPATH:/manvendor -RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man - -WORKDIR /go/src/github.com/docker/docker/ -ENTRYPOINT ["man/generate.sh"] diff --git a/fn/vendor/github.com/docker/docker/man/Dockerfile.armhf b/fn/vendor/github.com/docker/docker/man/Dockerfile.armhf deleted file mode 100644 index e7ea49564..000000000 --- a/fn/vendor/github.com/docker/docker/man/Dockerfile.armhf +++ /dev/null @@ -1,43 +0,0 @@ -FROM armhf/debian:jessie - -# allow replacing httpredir or deb mirror -ARG APT_MIRROR=deb.debian.org -RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" 
/etc/apt/sources.list - -RUN apt-get update && apt-get install -y \ - git \ - bash \ - curl \ - gcc \ - make - -ENV GO_VERSION 1.7.5 -RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" \ - | tar -xzC /usr/local -ENV PATH /go/bin:/usr/local/go/bin:$PATH -ENV GOPATH /go - -# We're building for armhf, which is ARMv7, so let's be explicit about that -ENV GOARCH arm -ENV GOARM 7 - -RUN mkdir -p /go/src /go/bin /go/pkg -RUN export GLIDE=v0.11.1; \ - export TARGET=/go/src/github.com/Masterminds; \ - mkdir -p ${TARGET} && \ - git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ - cd ${TARGET}/glide && \ - git checkout $GLIDE && \ - make build && \ - cp ./glide /usr/bin/glide && \ - cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* - -COPY glide.yaml /manvendor/ -COPY glide.lock /manvendor/ -WORKDIR /manvendor/ -RUN glide install && mv vendor src -ENV GOPATH=$GOPATH:/manvendor -RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man - -WORKDIR /go/src/github.com/docker/docker/ -ENTRYPOINT ["man/generate.sh"] diff --git a/fn/vendor/github.com/docker/docker/man/Dockerfile.ppc64le b/fn/vendor/github.com/docker/docker/man/Dockerfile.ppc64le deleted file mode 100644 index fc96ca769..000000000 --- a/fn/vendor/github.com/docker/docker/man/Dockerfile.ppc64le +++ /dev/null @@ -1,35 +0,0 @@ -FROM ppc64le/ubuntu:xenial - -RUN apt-get update && apt-get install -y \ - curl \ - gcc \ - git \ - make \ - tar - -ENV GO_VERSION 1.7.5 -RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" \ - | tar -xzC /usr/local -ENV PATH /usr/local/go/bin:$PATH -ENV GOPATH=/go - -RUN mkdir -p /go/src /go/bin /go/pkg -RUN export GLIDE=v0.11.1; \ - export TARGET=/go/src/github.com/Masterminds; \ - mkdir -p ${TARGET} && \ - git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ - cd ${TARGET}/glide && \ - git checkout $GLIDE && \ - make build && \ - cp ./glide /usr/bin/glide && \ - cd / && rm -rf /go/src/* /go/bin/* 
/go/pkg/* - -COPY glide.yaml /manvendor/ -COPY glide.lock /manvendor/ -WORKDIR /manvendor/ -RUN glide install && mv vendor src -ENV GOPATH=$GOPATH:/manvendor -RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man - -WORKDIR /go/src/github.com/docker/docker/ -ENTRYPOINT ["man/generate.sh"] diff --git a/fn/vendor/github.com/docker/docker/man/Dockerfile.s390x b/fn/vendor/github.com/docker/docker/man/Dockerfile.s390x deleted file mode 100644 index d4bcf1da1..000000000 --- a/fn/vendor/github.com/docker/docker/man/Dockerfile.s390x +++ /dev/null @@ -1,35 +0,0 @@ -FROM s390x/ubuntu:xenial - -RUN apt-get update && apt-get install -y \ - curl \ - gcc \ - git \ - make \ - tar - -ENV GO_VERSION 1.7.5 -RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" \ - | tar -xzC /usr/local -ENV PATH /usr/local/go/bin:$PATH -ENV GOPATH=/go - -RUN mkdir -p /go/src /go/bin /go/pkg -RUN export GLIDE=v0.11.1; \ - export TARGET=/go/src/github.com/Masterminds; \ - mkdir -p ${TARGET} && \ - git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ - cd ${TARGET}/glide && \ - git checkout $GLIDE && \ - make build && \ - cp ./glide /usr/bin/glide && \ - cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* - -COPY glide.yaml /manvendor/ -COPY glide.lock /manvendor/ -WORKDIR /manvendor/ -RUN glide install && mv vendor src -ENV GOPATH=$GOPATH:/manvendor -RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man - -WORKDIR /go/src/github.com/docker/docker/ -ENTRYPOINT ["man/generate.sh"] diff --git a/fn/vendor/github.com/docker/docker/man/README.md b/fn/vendor/github.com/docker/docker/man/README.md deleted file mode 100644 index 82dac650f..000000000 --- a/fn/vendor/github.com/docker/docker/man/README.md +++ /dev/null @@ -1,15 +0,0 @@ -Docker Documentation -==================== - -This directory contains scripts for generating the man pages. Many of the man -pages are generated directly from the `spf13/cobra` `Command` definition. 
Some -legacy pages are still generated from the markdown files in this directory. -Do *not* edit the man pages in the man1 directory. Instead, update the -Cobra command or amend the Markdown files for legacy pages. - - -## Generate the man pages - -From within the project root directory run: - - make manpages diff --git a/fn/vendor/github.com/docker/docker/man/docker-build.1.md b/fn/vendor/github.com/docker/docker/man/docker-build.1.md deleted file mode 100644 index b650fc3aa..000000000 --- a/fn/vendor/github.com/docker/docker/man/docker-build.1.md +++ /dev/null @@ -1,347 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-build - Build an image from a Dockerfile - -# SYNOPSIS -**docker build** -[**--add-host**[=*[]*]] -[**--build-arg**[=*[]*]] -[**--cpu-shares**[=*0*]] -[**--cgroup-parent**[=*CGROUP-PARENT*]] -[**--help**] -[**-f**|**--file**[=*PATH/Dockerfile*]] -[**-squash**] *Experimental* -[**--force-rm**] -[**--isolation**[=*default*]] -[**--label**[=*[]*]] -[**--no-cache**] -[**--pull**] -[**--compress**] -[**-q**|**--quiet**] -[**--rm**[=*true*]] -[**-t**|**--tag**[=*[]*]] -[**-m**|**--memory**[=*MEMORY*]] -[**--memory-swap**[=*LIMIT*]] -[**--network**[=*"default"*]] -[**--shm-size**[=*SHM-SIZE*]] -[**--cpu-period**[=*0*]] -[**--cpu-quota**[=*0*]] -[**--cpuset-cpus**[=*CPUSET-CPUS*]] -[**--cpuset-mems**[=*CPUSET-MEMS*]] -[**--ulimit**[=*[]*]] -PATH | URL | - - -# DESCRIPTION -This will read the Dockerfile from the directory specified in **PATH**. -It also sends any other files and directories found in the current -directory to the Docker daemon. The contents of this directory would -be used by **ADD** commands found within the Dockerfile. - -Warning, this will send a lot of data to the Docker daemon depending -on the contents of the current directory. The build is run by the Docker -daemon, not by the CLI, so the whole context must be transferred to the daemon. 
-The Docker CLI reports "Sending build context to Docker daemon" when the context is sent to -the daemon. - -When the URL to a tarball archive or to a single Dockerfile is given, no context is sent from -the client to the Docker daemon. In this case, the Dockerfile at the root of the archive and -the rest of the archive will get used as the context of the build. When a Git repository is -set as the **URL**, the repository is cloned locally and then sent as the context. - -# OPTIONS -**-f**, **--file**=*PATH/Dockerfile* - Path to the Dockerfile to use. If the path is a relative path and you are - building from a local directory, then the path must be relative to that - directory. If you are building from a remote URL pointing to either a - tarball or a Git repository, then the path must be relative to the root of - the remote context. In all cases, the file must be within the build context. - The default is *Dockerfile*. - -**--squash**=*true*|*false* - **Experimental Only** - Once the image is built, squash the new layers into a new image with a single - new layer. Squashing does not destroy any existing image, rather it creates a new - image with the content of the squashed layers. This effectively makes it look - like all `Dockerfile` commands were created with a single layer. The build - cache is preserved with this method. - - **Note**: using this option means the new image will not be able to take - advantage of layer sharing with other images and may use significantly more - space. - - **Note**: using this option you may see significantly more space used due to - storing two copies of the image, one for the build cache with all the cache - layers in tact, and one for the squashed version. - -**--add-host**=[] - Add a custom host-to-IP mapping (host:ip) - - Add a line to /etc/hosts. The format is hostname:ip. The **--add-host** -option can be set multiple times. - -**--build-arg**=*variable* - name and value of a **buildarg**. 
- - For example, if you want to pass a value for `http_proxy`, use - `--build-arg=http_proxy="http://some.proxy.url"` - - Users pass these values at build-time. Docker uses the `buildargs` as the - environment context for command(s) run via the Dockerfile's `RUN` instruction - or for variable expansion in other Dockerfile instructions. This is not meant - for passing secret values. [Read more about the buildargs instruction](https://docs.docker.com/engine/reference/builder/#arg) - -**--force-rm**=*true*|*false* - Always remove intermediate containers, even after unsuccessful builds. The default is *false*. - -**--isolation**="*default*" - Isolation specifies the type of isolation technology used by containers. - -**--label**=*label* - Set metadata for an image - -**--no-cache**=*true*|*false* - Do not use cache when building the image. The default is *false*. - -**--help** - Print usage statement - -**--pull**=*true*|*false* - Always attempt to pull a newer version of the image. The default is *false*. - -**--compress**=*true*|*false* - Compress the build context using gzip. The default is *false*. - -**-q**, **--quiet**=*true*|*false* - Suppress the build output and print image ID on success. The default is *false*. - -**--rm**=*true*|*false* - Remove intermediate containers after a successful build. The default is *true*. - -**-t**, **--tag**="" - Repository names (and optionally with tags) to be applied to the resulting - image in case of success. Refer to **docker-tag(1)** for more information - about valid tag names. - -**-m**, **--memory**=*MEMORY* - Memory limit - -**--memory-swap**=*LIMIT* - A limit value equal to memory plus swap. Must be used with the **-m** -(**--memory**) flag. The swap `LIMIT` should always be larger than **-m** -(**--memory**) value. - - The format of `LIMIT` is `[]`. Unit can be `b` (bytes), -`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a -unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. 
- -**--network**=*bridge* - Set the networking mode for the RUN instructions during build. Supported standard - values are: `bridge`, `host`, `none` and `container:`. Any other value - is taken as a custom network's name or ID which this container should connect to. - -**--shm-size**=*SHM-SIZE* - Size of `/dev/shm`. The format is ``. `number` must be greater than `0`. - Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. - If you omit the size entirely, the system uses `64m`. - -**--cpu-shares**=*0* - CPU shares (relative weight). - - By default, all containers get the same proportion of CPU cycles. - CPU shares is a 'relative weight', relative to the default setting of 1024. - This default value is defined here: - ``` - cat /sys/fs/cgroup/cpu/cpu.shares - 1024 - ``` - You can change this proportion by adjusting the container's CPU share - weighting relative to the weighting of all other running containers. - - To modify the proportion from the default of 1024, use the **--cpu-shares** - flag to set the weighting to 2 or higher. - - Container CPU share Flag - {C0} 60% of CPU --cpu-shares=614 (614 is 60% of 1024) - {C1} 40% of CPU --cpu-shares=410 (410 is 40% of 1024) - - The proportion is only applied when CPU-intensive processes are running. - When tasks in one container are idle, the other containers can use the - left-over CPU time. The actual amount of CPU time used varies depending on - the number of containers running on the system. - - For example, consider three containers, where one has **--cpu-shares=1024** and - two others have **--cpu-shares=512**. When processes in all three - containers attempt to use 100% of CPU, the first container would receive - 50% of the total CPU time. If you add a fourth container with **--cpu-shares=1024**, - the first container only gets 33% of the CPU. The remaining containers - receive 16.5%, 16.5% and 33% of the CPU. 
- - - Container CPU share Flag CPU time - {C0} 100% --cpu-shares=1024 33% - {C1} 50% --cpu-shares=512 16.5% - {C2} 50% --cpu-shares=512 16.5% - {C4} 100% --cpu-shares=1024 33% - - - On a multi-core system, the shares of CPU time are distributed across the CPU - cores. Even if a container is limited to less than 100% of CPU time, it can - use 100% of each individual CPU core. - - For example, consider a system with more than three cores. If you start one - container **{C0}** with **--cpu-shares=512** running one process, and another container - **{C1}** with **--cpu-shares=1024** running two processes, this can result in the following - division of CPU shares: - - PID container CPU CPU share - 100 {C0} 0 100% of CPU0 - 101 {C1} 1 100% of CPU1 - 102 {C1} 2 100% of CPU2 - -**--cpu-period**=*0* - Limit the CPU CFS (Completely Fair Scheduler) period. - - Limit the container's CPU usage. This flag causes the kernel to restrict the - container's CPU usage to the period you specify. - -**--cpu-quota**=*0* - Limit the CPU CFS (Completely Fair Scheduler) quota. - - By default, containers run with the full CPU resource. This flag causes the -kernel to restrict the container's CPU usage to the quota you specify. - -**--cpuset-cpus**=*CPUSET-CPUS* - CPUs in which to allow execution (0-3, 0,1). - -**--cpuset-mems**=*CPUSET-MEMS* - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on - NUMA systems. - - For example, if you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` -to ensure the processes in your Docker container only use memory from the first -two memory nodes. - -**--cgroup-parent**=*CGROUP-PARENT* - Path to `cgroups` under which the container's `cgroup` are created. - - If the path is not absolute, the path is considered relative to the `cgroups` path of the init process. -Cgroups are created if they do not already exist. 
- -**--ulimit**=[] - Ulimit options - - For more information about `ulimit` see [Setting ulimits in a -container](https://docs.docker.com/engine/reference/commandline/run/#set-ulimits-in-container---ulimit) - -# EXAMPLES - -## Building an image using a Dockerfile located inside the current directory - -Docker images can be built using the build command and a Dockerfile: - - docker build . - -During the build process Docker creates intermediate images. In order to -keep them, you must explicitly set `--rm=false`. - - docker build --rm=false . - -A good practice is to make a sub-directory with a related name and create -the Dockerfile in that directory. For example, a directory called mongo may -contain a Dockerfile to create a Docker MongoDB image. Likewise, another -directory called httpd may be used to store Dockerfiles for Apache web -server images. - -It is also a good practice to add the files required for the image to the -sub-directory. These files will then be specified with the `COPY` or `ADD` -instructions in the `Dockerfile`. - -Note: If you include a tar file (a good practice), then Docker will -automatically extract the contents of the tar file specified within the `ADD` -instruction into the specified target. - -## Building an image and naming that image - -A good practice is to give a name to the image you are building. Note that -only a-z0-9-_. should be used for consistency. There are no hard rules here but it is best to give the names consideration. - -The **-t**/**--tag** flag is used to rename an image. Here are some examples: - -Though it is not a good practice, image names can be arbitrary: - - docker build -t myimage . - -A better approach is to provide a fully qualified and meaningful repository, -name, and tag (where the tag in this context means the qualifier after -the ":"). In this example we build a JBoss image for the Fedora repository -and give it the version 1.0: - - docker build -t fedora/jboss:1.0 . 
- -The next example is for the "whenry" user repository and uses Fedora and -JBoss and gives it the version 2.1 : - - docker build -t whenry/fedora-jboss:v2.1 . - -If you do not provide a version tag then Docker will assign `latest`: - - docker build -t whenry/fedora-jboss . - -When you list the images, the image above will have the tag `latest`. - -You can apply multiple tags to an image. For example, you can apply the `latest` -tag to a newly built image and add another tag that references a specific -version. -For example, to tag an image both as `whenry/fedora-jboss:latest` and -`whenry/fedora-jboss:v2.1`, use the following: - - docker build -t whenry/fedora-jboss:latest -t whenry/fedora-jboss:v2.1 . - -So renaming an image is arbitrary but consideration should be given to -a useful convention that makes sense for consumers and should also take -into account Docker community conventions. - - -## Building an image using a URL - -This will clone the specified GitHub repository from the URL and use it -as context. The Dockerfile at the root of the repository is used as -Dockerfile. This only works if the GitHub repository is a dedicated -repository. - - docker build github.com/scollier/purpletest - -Note: You can set an arbitrary Git repository via the `git://` scheme. - -## Building an image using a URL to a tarball'ed context - -This will send the URL itself to the Docker daemon. The daemon will fetch the -tarball archive, decompress it and use its contents as the build context. The -Dockerfile at the root of the archive and the rest of the archive will get used -as the context of the build. If you pass an **-f PATH/Dockerfile** option as well, -the system will look for that file inside the contents of the tarball. - - docker build -f dev/Dockerfile https://10.10.10.1/docker/context.tar.gz - -Note: supported compression formats are 'xz', 'bzip2', 'gzip' and 'identity' (no compression). 
- -## Specify isolation technology for container (--isolation) - -This option is useful in situations where you are running Docker containers on -Windows. The `--isolation=` option sets a container's isolation -technology. On Linux, the only supported is the `default` option which uses -Linux namespaces. On Microsoft Windows, you can specify these values: - -* `default`: Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. -* `process`: Namespace isolation only. -* `hyperv`: Hyper-V hypervisor partition-based isolation. - -Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. - -# HISTORY -March 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit -June 2015, updated by Sally O'Malley diff --git a/fn/vendor/github.com/docker/docker/man/docker-config-json.5.md b/fn/vendor/github.com/docker/docker/man/docker-config-json.5.md deleted file mode 100644 index 49987f08b..000000000 --- a/fn/vendor/github.com/docker/docker/man/docker-config-json.5.md +++ /dev/null @@ -1,72 +0,0 @@ -% CONFIG.JSON(5) Docker User Manuals -% Docker Community -% JANUARY 2016 -# NAME -HOME/.docker/config.json - Default Docker configuration file - -# INTRODUCTION - -By default, the Docker command line stores its configuration files in a -directory called `.docker` within your `$HOME` directory. Docker manages most of -the files in the configuration directory and you should not modify them. -However, you *can modify* the `config.json` file to control certain aspects of -how the `docker` command behaves. - -Currently, you can modify the `docker` command behavior using environment -variables or command-line options. You can also use options within -`config.json` to modify some of the same behavior. 
When using these -mechanisms, you must keep in mind the order of precedence among them. Command -line options override environment variables and environment variables override -properties you specify in a `config.json` file. - -The `config.json` file stores a JSON encoding of several properties: - -* The `HttpHeaders` property specifies a set of headers to include in all messages -sent from the Docker client to the daemon. Docker does not try to interpret or -understand these header; it simply puts them into the messages. Docker does not -allow these headers to change any headers it sets for itself. - -* The `psFormat` property specifies the default format for `docker ps` output. -When the `--format` flag is not provided with the `docker ps` command, -Docker's client uses this property. If this property is not set, the client -falls back to the default table format. For a list of supported formatting -directives, see **docker-ps(1)**. - -* The `detachKeys` property specifies the default key sequence which -detaches the container. When the `--detach-keys` flag is not provide -with the `docker attach`, `docker exec`, `docker run` or `docker -start`, Docker's client uses this property. If this property is not -set, the client falls back to the default sequence `ctrl-p,ctrl-q`. - - -* The `imagesFormat` property specifies the default format for `docker images` -output. When the `--format` flag is not provided with the `docker images` -command, Docker's client uses this property. If this property is not set, the -client falls back to the default table format. For a list of supported -formatting directives, see **docker-images(1)**. - -You can specify a different location for the configuration files via the -`DOCKER_CONFIG` environment variable or the `--config` command line option. 
If -both are specified, then the `--config` option overrides the `DOCKER_CONFIG` -environment variable: - - docker --config ~/testconfigs/ ps - -This command instructs Docker to use the configuration files in the -`~/testconfigs/` directory when running the `ps` command. - -## Examples - -Following is a sample `config.json` file: - - { - "HttpHeaders": { - "MyHeader": "MyValue" - }, - "psFormat": "table {{.ID}}\\t{{.Image}}\\t{{.Command}}\\t{{.Labels}}", - "imagesFormat": "table {{.ID}}\\t{{.Repository}}\\t{{.Tag}}\\t{{.CreatedAt}}", - "detachKeys": "ctrl-e,e" - } - -# HISTORY -January 2016, created by Moxiegirl diff --git a/fn/vendor/github.com/docker/docker/man/docker-run.1.md b/fn/vendor/github.com/docker/docker/man/docker-run.1.md deleted file mode 100644 index 2a743c134..000000000 --- a/fn/vendor/github.com/docker/docker/man/docker-run.1.md +++ /dev/null @@ -1,1108 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-run - Run a command in a new container - -# SYNOPSIS -**docker run** -[**-a**|**--attach**[=*[]*]] -[**--add-host**[=*[]*]] -[**--blkio-weight**[=*[BLKIO-WEIGHT]*]] -[**--blkio-weight-device**[=*[]*]] -[**--cpu-shares**[=*0*]] -[**--cap-add**[=*[]*]] -[**--cap-drop**[=*[]*]] -[**--cgroup-parent**[=*CGROUP-PATH*]] -[**--cidfile**[=*CIDFILE*]] -[**--cpu-count**[=*0*]] -[**--cpu-percent**[=*0*]] -[**--cpu-period**[=*0*]] -[**--cpu-quota**[=*0*]] -[**--cpu-rt-period**[=*0*]] -[**--cpu-rt-runtime**[=*0*]] -[**--cpus**[=*0.0*]] -[**--cpuset-cpus**[=*CPUSET-CPUS*]] -[**--cpuset-mems**[=*CPUSET-MEMS*]] -[**-d**|**--detach**] -[**--detach-keys**[=*[]*]] -[**--device**[=*[]*]] -[**--device-cgroup-rule**[=*[]*]] -[**--device-read-bps**[=*[]*]] -[**--device-read-iops**[=*[]*]] -[**--device-write-bps**[=*[]*]] -[**--device-write-iops**[=*[]*]] -[**--dns**[=*[]*]] -[**--dns-option**[=*[]*]] -[**--dns-search**[=*[]*]] -[**-e**|**--env**[=*[]*]] -[**--entrypoint**[=*ENTRYPOINT*]] -[**--env-file**[=*[]*]] -[**--expose**[=*[]*]] 
-[**--group-add**[=*[]*]] -[**-h**|**--hostname**[=*HOSTNAME*]] -[**--help**] -[**--init**] -[**-i**|**--interactive**] -[**--ip**[=*IPv4-ADDRESS*]] -[**--ip6**[=*IPv6-ADDRESS*]] -[**--ipc**[=*IPC*]] -[**--isolation**[=*default*]] -[**--kernel-memory**[=*KERNEL-MEMORY*]] -[**-l**|**--label**[=*[]*]] -[**--label-file**[=*[]*]] -[**--link**[=*[]*]] -[**--link-local-ip**[=*[]*]] -[**--log-driver**[=*[]*]] -[**--log-opt**[=*[]*]] -[**-m**|**--memory**[=*MEMORY*]] -[**--mac-address**[=*MAC-ADDRESS*]] -[**--memory-reservation**[=*MEMORY-RESERVATION*]] -[**--memory-swap**[=*LIMIT*]] -[**--memory-swappiness**[=*MEMORY-SWAPPINESS*]] -[**--mount**[=*[MOUNT]*]] -[**--name**[=*NAME*]] -[**--network-alias**[=*[]*]] -[**--network**[=*"bridge"*]] -[**--oom-kill-disable**] -[**--oom-score-adj**[=*0*]] -[**-P**|**--publish-all**] -[**-p**|**--publish**[=*[]*]] -[**--pid**[=*[PID]*]] -[**--userns**[=*[]*]] -[**--pids-limit**[=*PIDS_LIMIT*]] -[**--privileged**] -[**--read-only**] -[**--restart**[=*RESTART*]] -[**--rm**] -[**--security-opt**[=*[]*]] -[**--storage-opt**[=*[]*]] -[**--stop-signal**[=*SIGNAL*]] -[**--stop-timeout**[=*TIMEOUT*]] -[**--shm-size**[=*[]*]] -[**--sig-proxy**[=*true*]] -[**--sysctl**[=*[]*]] -[**-t**|**--tty**] -[**--tmpfs**[=*[CONTAINER-DIR[:]*]] -[**-u**|**--user**[=*USER*]] -[**--ulimit**[=*[]*]] -[**--uts**[=*[]*]] -[**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*]] -[**--volume-driver**[=*DRIVER*]] -[**--volumes-from**[=*[]*]] -[**-w**|**--workdir**[=*WORKDIR*]] -IMAGE [COMMAND] [ARG...] - -# DESCRIPTION - -Run a process in a new container. **docker run** starts a process with its own -file system, its own networking, and its own isolated process tree. The IMAGE -which starts the process may define defaults related to the process that will be -run in the container, the networking to expose, and more, but **docker run** -gives final control to the operator or administrator who starts the container -from the image. 
For that reason **docker run** has more options than any other -Docker command. - -If the IMAGE is not already loaded then **docker run** will pull the IMAGE, and -all image dependencies, from the repository in the same way running **docker -pull** IMAGE, before it starts the container from that image. - -# OPTIONS -**-a**, **--attach**=[] - Attach to STDIN, STDOUT or STDERR. - - In foreground mode (the default when **-d** -is not specified), **docker run** can start the process in the container -and attach the console to the process's standard input, output, and standard -error. It can even pretend to be a TTY (this is what most commandline -executables expect) and pass along signals. The **-a** option can be set for -each of stdin, stdout, and stderr. - -**--add-host**=[] - Add a custom host-to-IP mapping (host:ip) - - Add a line to /etc/hosts. The format is hostname:ip. The **--add-host** -option can be set multiple times. - -**--blkio-weight**=*0* - Block IO weight (relative weight) accepts a weight value between 10 and 1000. - -**--blkio-weight-device**=[] - Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`). - -**--cpu-shares**=*0* - CPU shares (relative weight) - - By default, all containers get the same proportion of CPU cycles. This proportion -can be modified by changing the container's CPU share weighting relative -to the weighting of all other running containers. - -To modify the proportion from the default of 1024, use the **--cpu-shares** -flag to set the weighting to 2 or higher. - -The proportion will only apply when CPU-intensive processes are running. -When tasks in one container are idle, other containers can use the -left-over CPU time. The actual amount of CPU time will vary depending on -the number of containers running on the system. - -For example, consider three containers, one has a cpu-share of 1024 and -two others have a cpu-share setting of 512. 
When processes in all three -containers attempt to use 100% of CPU, the first container would receive -50% of the total CPU time. If you add a fourth container with a cpu-share -of 1024, the first container only gets 33% of the CPU. The remaining containers -receive 16.5%, 16.5% and 33% of the CPU. - -On a multi-core system, the shares of CPU time are distributed over all CPU -cores. Even if a container is limited to less than 100% of CPU time, it can -use 100% of each individual CPU core. - -For example, consider a system with more than three cores. If you start one -container **{C0}** with **-c=512** running one process, and another container -**{C1}** with **-c=1024** running two processes, this can result in the following -division of CPU shares: - - PID container CPU CPU share - 100 {C0} 0 100% of CPU0 - 101 {C1} 1 100% of CPU1 - 102 {C1} 2 100% of CPU2 - -**--cap-add**=[] - Add Linux capabilities - -**--cap-drop**=[] - Drop Linux capabilities - -**--cgroup-parent**="" - Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. - -**--cidfile**="" - Write the container ID to the file - -**--cpu-count**=*0* - Limit the number of CPUs available for execution by the container. - - On Windows Server containers, this is approximated as a percentage of total CPU usage. - - On Windows Server containers, the processor resource controls are mutually exclusive, the order of precedence is CPUCount first, then CPUShares, and CPUPercent last. - -**--cpu-percent**=*0* - Limit the percentage of CPU available for execution by a container running on a Windows daemon. - - On Windows Server containers, the processor resource controls are mutually exclusive, the order of precedence is CPUCount first, then CPUShares, and CPUPercent last. 
- -**--cpu-period**=*0* - Limit the CPU CFS (Completely Fair Scheduler) period - - Limit the container's CPU usage. This flag tell the kernel to restrict the container's CPU usage to the period you specify. - -**--cpuset-cpus**="" - CPUs in which to allow execution (0-3, 0,1) - -**--cpuset-mems**="" - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. - - If you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` -then processes in your Docker container will only use memory from the first -two memory nodes. - -**--cpu-quota**=*0* - Limit the CPU CFS (Completely Fair Scheduler) quota - - Limit the container's CPU usage. By default, containers run with the full -CPU resource. This flag tell the kernel to restrict the container's CPU usage -to the quota you specify. - -**--cpu-rt-period**=0 - Limit the CPU real-time period in microseconds - - Limit the container's Real Time CPU usage. This flag tell the kernel to restrict the container's Real Time CPU usage to the period you specify. - -**--cpu-rt-runtime**=0 - Limit the CPU real-time runtime in microseconds - - Limit the containers Real Time CPU usage. This flag tells the kernel to limit the amount of time in a given CPU period Real Time tasks may consume. Ex: - Period of 1,000,000us and Runtime of 950,000us means that this container could consume 95% of available CPU and leave the remaining 5% to normal priority tasks. - - The sum of all runtimes across containers cannot exceed the amount allotted to the parent cgroup. - -**--cpus**=0.0 - Number of CPUs. The default is *0.0* which means no limit. - -**-d**, **--detach**=*true*|*false* - Detached mode: run the container in the background and print the new container ID. The default is *false*. - - At any time you can run **docker ps** in -the other shell to view a list of the running containers. You can reattach to a -detached container with **docker attach**. 
- - When attached in the tty mode, you can detach from the container (and leave it -running) using a configurable key sequence. The default sequence is `CTRL-p CTRL-q`. -You configure the key sequence using the **--detach-keys** option or a configuration file. -See **config-json(5)** for documentation on using a configuration file. - -**--detach-keys**="" - Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. - -**--device**=[] - Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) - -**--device-cgroup-rule**=[] - Add a rule to the cgroup allowed devices list. - - The rule is expected to be in the format specified in the Linux kernel documentation (Documentation/cgroup-v1/devices.txt): - - type: `a` (all), `c` (char) or `b` (block) - - major and minor: either a number or `*` for all - - permission: a composition of `r` (read), `w` (write) and `m` (mknod) - - Example: `c 1:3 mr`: allow for character device with major `1` and minor `3` to be created (`m`) and read (`r`) - -**--device-read-bps**=[] - Limit read rate from a device (e.g. --device-read-bps=/dev/sda:1mb) - -**--device-read-iops**=[] - Limit read rate from a device (e.g. --device-read-iops=/dev/sda:1000) - -**--device-write-bps**=[] - Limit write rate to a device (e.g. --device-write-bps=/dev/sda:1mb) - -**--device-write-iops**=[] - Limit write rate to a device (e.g. --device-write-iops=/dev/sda:1000) - -**--dns-search**=[] - Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain) - -**--dns-option**=[] - Set custom DNS options - -**--dns**=[] - Set custom DNS servers - - This option can be used to override the DNS -configuration passed to the container. Typically this is necessary when the -host DNS configuration is invalid for the container (e.g., 127.0.0.1). When this -is the case the **--dns** flags is necessary for every run. 
- -**-e**, **--env**=[] - Set environment variables - - This option allows you to specify arbitrary -environment variables that are available for the process that will be launched -inside of the container. - -**--entrypoint**="" - Overwrite the default ENTRYPOINT of the image - - This option allows you to overwrite the default entrypoint of the image that -is set in the Dockerfile. The ENTRYPOINT of an image is similar to a COMMAND -because it specifies what executable to run when the container starts, but it is -(purposely) more difficult to override. The ENTRYPOINT gives a container its -default nature or behavior, so that when you set an ENTRYPOINT you can run the -container as if it were that binary, complete with default options, and you can -pass in more options via the COMMAND. But, sometimes an operator may want to run -something else inside the container, so you can override the default ENTRYPOINT -at runtime by using a **--entrypoint** and a string to specify the new -ENTRYPOINT. - -**--env-file**=[] - Read in a line delimited file of environment variables - -**--expose**=[] - Expose a port, or a range of ports (e.g. --expose=3300-3310) informs Docker -that the container listens on the specified network ports at runtime. Docker -uses this information to interconnect containers using links and to set up port -redirection on the host system. - -**--group-add**=[] - Add additional groups to run as - -**-h**, **--hostname**="" - Container host name - - Sets the container host name that is available inside the container. - -**--help** - Print usage statement - -**--init** - Run an init inside the container that forwards signals and reaps processes - -**-i**, **--interactive**=*true*|*false* - Keep STDIN open even if not attached. The default is *false*. - - When set to true, keep stdin open even if not attached. The default is false. 
- -**--ip**="" - Sets the container's interface IPv4 address (e.g., 172.23.0.9) - - It can only be used in conjunction with **--network** for user-defined networks - -**--ip6**="" - Sets the container's interface IPv6 address (e.g., 2001:db8::1b99) - - It can only be used in conjunction with **--network** for user-defined networks - -**--ipc**="" - Default is to create a private IPC namespace (POSIX SysV IPC) for the container - 'container:': reuses another container shared memory, semaphores and message queues - 'host': use the host shared memory,semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. - -**--isolation**="*default*" - Isolation specifies the type of isolation technology used by containers. Note -that the default on Windows server is `process`, and the default on Windows client -is `hyperv`. Linux only supports `default`. - -**-l**, **--label**=[] - Set metadata on the container (e.g., --label com.example.key=value) - -**--kernel-memory**="" - Kernel memory limit (format: `[]`, where unit = b, k, m or g) - - Constrains the kernel memory available to a container. If a limit of 0 -is specified (not using `--kernel-memory`), the container's kernel memory -is not limited. If you specify a limit, it may be rounded up to a multiple -of the operating system's page size and the value can be very large, -millions of trillions. - -**--label-file**=[] - Read in a line delimited file of labels - -**--link**=[] - Add link to another container in the form of :alias or just -in which case the alias will match the name - - If the operator -uses **--link** when starting the new client container, then the client -container can access the exposed port via a private networking interface. Docker -will set some environment variables in the client container to help indicate -which interface and port to use. 
- -**--link-local-ip**=[] - Add one or more link-local IPv4/IPv6 addresses to the container's interface - -**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*" - Logging driver for the container. Default is defined by daemon `--log-driver` flag. - **Warning**: the `docker logs` command works only for the `json-file` and - `journald` logging drivers. - -**--log-opt**=[] - Logging driver specific options. - -**-m**, **--memory**="" - Memory limit (format: [], where unit = b, k, m or g) - - Allows you to constrain the memory available to a container. If the host -supports swap memory, then the **-m** memory setting can be larger than physical -RAM. If a limit of 0 is specified (not using **-m**), the container's memory is -not limited. The actual limit may be rounded up to a multiple of the operating -system's page size (the value would be very large, that's millions of trillions). - -**--memory-reservation**="" - Memory soft limit (format: [], where unit = b, k, m or g) - - After setting memory reservation, when the system detects memory contention -or low memory, containers are forced to restrict their consumption to their -reservation. So you should always set the value below **--memory**, otherwise the -hard limit will take precedence. By default, memory reservation will be the same -as memory limit. - -**--memory-swap**="LIMIT" - A limit value equal to memory plus swap. Must be used with the **-m** -(**--memory**) flag. The swap `LIMIT` should always be larger than **-m** -(**--memory**) value. By default, the swap `LIMIT` will be set to double -the value of --memory. - - The format of `LIMIT` is `[]`. Unit can be `b` (bytes), -`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a -unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. 
- -**--mac-address**="" - Container MAC address (e.g., 92:d0:c6:0a:29:33) - - Remember that the MAC address in an Ethernet network must be unique. -The IPv6 link-local address will be based on the device's MAC address -according to RFC4862. - -**--mount**=[*[type=TYPE[,TYPE-SPECIFIC-OPTIONS]]*] - Attach a filesystem mount to the container - - Current supported mount `TYPES` are `bind`, `volume`, and `tmpfs`. - - e.g. - - `type=bind,source=/path/on/host,destination=/path/in/container` - - `type=volume,source=my-volume,destination=/path/in/container,volume-label="color=red",volume-label="shape=round"` - - `type=tmpfs,tmpfs-size=512M,destination=/path/in/container` - - Common Options: - - * `src`, `source`: mount source spec for `bind` and `volume`. Mandatory for `bind`. - * `dst`, `destination`, `target`: mount destination spec. - * `ro`, `read-only`: `true` or `false` (default). - - Options specific to `bind`: - - * `bind-propagation`: `shared`, `slave`, `private`, `rshared`, `rslave`, or `rprivate`(default). See also `mount(2)`. - * `consistency`: `consistent`(default), `cached`, or `delegated`. Currently, only effective for Docker for Mac. - - Options specific to `volume`: - - * `volume-driver`: Name of the volume-driver plugin. - * `volume-label`: Custom metadata. - * `volume-nocopy`: `true`(default) or `false`. If set to `false`, the Engine copies existing files and directories under the mount-path into the volume, allowing the host to access them. - * `volume-opt`: specific to a given volume driver. - - Options specific to `tmpfs`: - - * `tmpfs-size`: Size of the tmpfs mount in bytes. Unlimited by default in Linux. - * `tmpfs-mode`: File mode of the tmpfs in octal. (e.g. `700` or `0700`.) Defaults to `1777` in Linux. 
- -**--name**="" - Assign a name to the container - - The operator can identify a container in three ways: - UUID long identifier (“f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778”) - UUID short identifier (“f78375b1c487”) - Name (“jonah”) - - The UUID identifiers come from the Docker daemon, and if a name is not assigned -to the container with **--name** then the daemon will also generate a random -string name. The name is useful when defining links (see **--link**) (or any -other place you need to identify a container). This works for both background -and foreground Docker containers. - -**--network**="*bridge*" - Set the Network mode for the container - 'bridge': create a network stack on the default Docker bridge - 'none': no networking - 'container:': reuse another container's network stack - 'host': use the Docker host network stack. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. - '|': connect to a user-defined network - -**--network-alias**=[] - Add network-scoped alias for the container - -**--oom-kill-disable**=*true*|*false* - Whether to disable OOM Killer for the container or not. - -**--oom-score-adj**="" - Tune the host's OOM preferences for containers (accepts -1000 to 1000) - -**-P**, **--publish-all**=*true*|*false* - Publish all exposed ports to random ports on the host interfaces. The default is *false*. - - When set to true publish all exposed ports to the host interfaces. The -default is false. If the operator uses -P (or -p) then Docker will make the -exposed port accessible on the host and the ports will be available to any -client that can reach the host. When using -P, Docker will bind any exposed -port to a random port on the host within an *ephemeral port range* defined by -`/proc/sys/net/ipv4/ip_local_port_range`. To find the mapping between the host -ports and the exposed ports, use `docker port`. 
- -**-p**, **--publish**=[] - Publish a container's port, or range of ports, to the host. - - Format: `ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort` -Both hostPort and containerPort can be specified as a range of ports. -When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range. -(e.g., `docker run -p 1234-1236:1222-1224 --name thisWorks -t busybox` -but not `docker run -p 1230-1236:1230-1240 --name RangeContainerPortsBiggerThanRangeHostPorts -t busybox`) -With ip: `docker run -p 127.0.0.1:$HOSTPORT:$CONTAINERPORT --name CONTAINER -t someimage` -Use `docker port` to see the actual mapping: `docker port CONTAINER $CONTAINERPORT` - -**--pid**="" - Set the PID mode for the container - Default is to create a private PID namespace for the container - 'container:': join another container's PID namespace - 'host': use the host's PID namespace for the container. Note: the host mode gives the container full access to local PID and is therefore considered insecure. - -**--userns**="" - Set the usernamespace mode for the container when `userns-remap` option is enabled. - **host**: use the host usernamespace and enable all privileged options (e.g., `pid=host` or `--privileged`). - -**--pids-limit**="" - Tune the container's pids limit. Set `-1` to have unlimited pids for the container. - -**--uts**=*host* - Set the UTS mode for the container - **host**: use the host's UTS namespace inside the container. - Note: the host mode gives the container access to changing the host's hostname and is therefore considered insecure. - -**--privileged**=*true*|*false* - Give extended privileges to this container. The default is *false*. - - By default, Docker containers are -“unprivileged” (=false) and cannot, for example, run a Docker daemon inside the -Docker container. This is because by default a container is not allowed to -access any devices. 
A “privileged” container is given access to all devices. - - When the operator executes **docker run --privileged**, Docker will enable access -to all devices on the host as well as set some configuration in AppArmor to -allow the container nearly all the same access to the host as processes running -outside of a container on the host. - -**--read-only**=*true*|*false* - Mount the container's root filesystem as read only. - - By default a container will have its root filesystem writable allowing processes -to write files anywhere. By specifying the `--read-only` flag the container will have -its root filesystem mounted as read only prohibiting any writes. - -**--restart**="*no*" - Restart policy to apply when a container exits (no, on-failure[:max-retry], always, unless-stopped). - -**--rm**=*true*|*false* - Automatically remove the container when it exits. The default is *false*. - `--rm` flag can work together with `-d`, and auto-removal will be done on daemon side. Note that it's -incompatible with any restart policy other than `none`. 
- -**--security-opt**=[] - Security Options - - "label=user:USER" : Set the label user for the container - "label=role:ROLE" : Set the label role for the container - "label=type:TYPE" : Set the label type for the container - "label=level:LEVEL" : Set the label level for the container - "label=disable" : Turn off label confinement for the container - "no-new-privileges" : Disable container processes from gaining additional privileges - - "seccomp=unconfined" : Turn off seccomp confinement for the container - "seccomp=profile.json : White listed syscalls seccomp Json file to be used as a seccomp filter - - "apparmor=unconfined" : Turn off apparmor confinement for the container - "apparmor=your-profile" : Set the apparmor confinement profile for the container - -**--storage-opt**=[] - Storage driver options per container - - $ docker run -it --storage-opt size=120G fedora /bin/bash - - This (size) will allow to set the container rootfs size to 120G at creation time. - This option is only available for the `devicemapper`, `btrfs`, `overlay2` and `zfs` graph drivers. - For the `devicemapper`, `btrfs` and `zfs` storage drivers, user cannot pass a size less than the Default BaseFS Size. - For the `overlay2` storage driver, the size option is only available if the backing fs is `xfs` and mounted with the `pquota` mount option. - Under these conditions, user can pass any size less then the backing fs size. - -**--stop-signal**=*SIGTERM* - Signal to stop a container. Default is SIGTERM. - -**--stop-timeout**=*10* - Timeout (in seconds) to stop a container. Default is 10. - -**--shm-size**="" - Size of `/dev/shm`. The format is ``. - `number` must be greater than `0`. Unit is optional and can be `b` (bytes), `k` (kilobytes), `m`(megabytes), or `g` (gigabytes). - If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`. 
- -**--sysctl**=SYSCTL - Configure namespaced kernel parameters at runtime - - IPC Namespace - current sysctls allowed: - - kernel.msgmax, kernel.msgmnb, kernel.msgmni, kernel.sem, kernel.shmall, kernel.shmmax, kernel.shmmni, kernel.shm_rmid_forced - Sysctls beginning with fs.mqueue.* - - If you use the `--ipc=host` option these sysctls will not be allowed. - - Network Namespace - current sysctls allowed: - Sysctls beginning with net.* - - If you use the `--network=host` option these sysctls will not be allowed. - -**--sig-proxy**=*true*|*false* - Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is *true*. - -**--memory-swappiness**="" - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. - -**-t**, **--tty**=*true*|*false* - Allocate a pseudo-TTY. The default is *false*. - - When set to true Docker can allocate a pseudo-tty and attach to the standard -input of any container. This can be used, for example, to run a throwaway -interactive shell. The default is false. - -The **-t** option is incompatible with a redirection of the docker client -standard input. - -**--tmpfs**=[] Create a tmpfs mount - - Mount a temporary filesystem (`tmpfs`) mount into a container, for example: - - $ docker run -d --tmpfs /tmp:rw,size=787448k,mode=1777 my_image - - This command mounts a `tmpfs` at `/tmp` within the container. The supported mount -options are the same as the Linux default `mount` flags. If you do not specify -any options, the systems uses the following options: -`rw,noexec,nosuid,nodev,size=65536k`. - - See also `--mount`, which is the successor of `--tmpfs` and `--volume`. - Even though there is no plan to deprecate `--tmpfs`, usage of `--mount` is recommended. - -**-u**, **--user**="" - Sets the username or UID used and optionally the groupname or GID for the specified command. 
- - The followings examples are all valid: - --user [user | user:group | uid | uid:gid | user:gid | uid:group ] - - Without this argument the command will be run as root in the container. - -**--ulimit**=[] - Ulimit options - -**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*] - Create a bind mount. If you specify, ` -v /HOST-DIR:/CONTAINER-DIR`, Docker - bind mounts `/HOST-DIR` in the host to `/CONTAINER-DIR` in the Docker - container. If 'HOST-DIR' is omitted, Docker automatically creates the new - volume on the host. The `OPTIONS` are a comma delimited list and can be: - - * [rw|ro] - * [z|Z] - * [`[r]shared`|`[r]slave`|`[r]private`] - * [`delegated`|`cached`|`consistent`] - * [nocopy] - -The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR` -can be an absolute path or a `name` value. A `name` value must start with an -alphanumeric character, followed by `a-z0-9`, `_` (underscore), `.` (period) or -`-` (hyphen). An absolute path starts with a `/` (forward slash). - -If you supply a `HOST-DIR` that is an absolute path, Docker bind-mounts to the -path you specify. If you supply a `name`, Docker creates a named volume by that -`name`. For example, you can specify either `/foo` or `foo` for a `HOST-DIR` -value. If you supply the `/foo` value, Docker creates a bind-mount. If you -supply the `foo` specification, Docker creates a named volume. - -You can specify multiple **-v** options to mount one or more mounts to a -container. To use these same mounts in other containers, specify the -**--volumes-from** option also. - -You can supply additional options for each bind-mount following an additional -colon. A `:ro` or `:rw` suffix mounts a volume in read-only or read-write -mode, respectively. By default, volumes are mounted in read-write mode. -You can also specify the consistency requirement for the mount, either -`:consistent` (the default), `:cached`, or `:delegated`. Multiple options are -separated by commas, e.g. `:ro,cached`. 
- -Labeling systems like SELinux require that proper labels are placed on volume -content mounted into a container. Without a label, the security system might -prevent the processes running inside the container from using the content. By -default, Docker does not change the labels set by the OS. - -To change a label in the container context, you can add either of two suffixes -`:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file -objects on the shared volumes. The `z` option tells Docker that two containers -share the volume content. As a result, Docker labels the content with a shared -content label. Shared volume labels allow all containers to read/write content. -The `Z` option tells Docker to label the content with a private unshared label. -Only the current container can use a private volume. - -By default bind mounted volumes are `private`. That means any mounts done -inside container will not be visible on host and vice-a-versa. One can change -this behavior by specifying a volume mount propagation property. Making a -volume `shared` mounts done under that volume inside container will be -visible on host and vice-a-versa. Making a volume `slave` enables only one -way mount propagation and that is mounts done on host under that volume -will be visible inside container but not the other way around. - -To control mount propagation property of volume one can use `:[r]shared`, -`:[r]slave` or `:[r]private` propagation flag. Propagation property can -be specified only for bind mounted volumes and not for internal volumes or -named volumes. For mount propagation to work source mount point (mount point -where source dir is mounted on) has to have right propagation properties. For -shared volumes, source mount point has to be shared. And for slave volumes, -source mount has to be either shared or slave. 
- -Use `df ` to figure out the source mount and then use -`findmnt -o TARGET,PROPAGATION ` to figure out propagation -properties of source mount. If `findmnt` utility is not available, then one -can look at mount entry for source mount point in `/proc/self/mountinfo`. Look -at `optional fields` and see if any propagation properties are specified. -`shared:X` means mount is `shared`, `master:X` means mount is `slave` and if -nothing is there that means mount is `private`. - -To change propagation properties of a mount point use `mount` command. For -example, if one wants to bind mount source directory `/foo` one can do -`mount --bind /foo /foo` and `mount --make-private --make-shared /foo`. This -will convert /foo into a `shared` mount point. Alternatively one can directly -change propagation properties of source mount. Say `/` is source mount for -`/foo`, then use `mount --make-shared /` to convert `/` into a `shared` mount. - -> **Note**: -> When using systemd to manage the Docker daemon's start and stop, in the systemd -> unit file there is an option to control mount propagation for the Docker daemon -> itself, called `MountFlags`. The value of this setting may cause Docker to not -> see mount propagation changes made on the mount point. For example, if this value -> is `slave`, you may not be able to use the `shared` or `rshared` propagation on -> a volume. - -To disable automatic copying of data from the container path to the volume, use -the `nocopy` flag. The `nocopy` flag can be set on bind mounts and named volumes. - -See also `--mount`, which is the successor of `--tmpfs` and `--volume`. -Even though there is no plan to deprecate `--volume`, usage of `--mount` is recommended. - -**--volume-driver**="" - Container's volume driver. This driver creates volumes specified either from - a Dockerfile's `VOLUME` instruction or from the `docker run -v` flag. - See **docker-volume-create(1)** for full details. 
- -**--volumes-from**=[] - Mount volumes from the specified container(s) - - Mounts already mounted volumes from a source container onto another - container. You must supply the source's container-id. To share - a volume, use the **--volumes-from** option when running - the target container. You can share volumes even if the source container - is not running. - - By default, Docker mounts the volumes in the same mode (read-write or - read-only) as it is mounted in the source container. Optionally, you - can change this by suffixing the container-id with either the `:ro` or - `:rw ` keyword. - - If the location of the volume from the source container overlaps with - data residing on a target container, then the volume hides - that data on the target. - -**-w**, **--workdir**="" - Working directory inside the container - - The default working directory for -running binaries within a container is the root directory (/). The developer can -set a different default with the Dockerfile WORKDIR instruction. The operator -can override the working directory by using the **-w** option. - -# Exit Status - -The exit code from `docker run` gives information about why the container -failed to run or why it exited. When `docker run` exits with a non-zero code, -the exit codes follow the `chroot` standard, see below: - -**_125_** if the error is with Docker daemon **_itself_** - - $ docker run --foo busybox; echo $? - # flag provided but not defined: --foo - See 'docker run --help'. - 125 - -**_126_** if the **_contained command_** cannot be invoked - - $ docker run busybox /etc; echo $? - # exec: "/etc": permission denied - docker: Error response from daemon: Contained command could not be invoked - 126 - -**_127_** if the **_contained command_** cannot be found - - $ docker run busybox foo; echo $? 
- # exec: "foo": executable file not found in $PATH - docker: Error response from daemon: Contained command not found or does not exist - 127 - -**_Exit code_** of **_contained command_** otherwise - - $ docker run busybox /bin/sh -c 'exit 3' - # 3 - -# EXAMPLES - -## Running container in read-only mode - -During container image development, containers often need to write to the image -content. Installing packages into /usr, for example. In production, -applications seldom need to write to the image. Container applications write -to volumes if they need to write to file systems at all. Applications can be -made more secure by running them in read-only mode using the --read-only switch. -This protects the containers image from modification. Read only containers may -still need to write temporary data. The best way to handle this is to mount -tmpfs directories on /run and /tmp. - - # docker run --read-only --tmpfs /run --tmpfs /tmp -i -t fedora /bin/bash - -## Exposing log messages from the container to the host's log - -If you want messages that are logged in your container to show up in the host's -syslog/journal then you should bind mount the /dev/log directory as follows. - - # docker run -v /dev/log:/dev/log -i -t fedora /bin/bash - -From inside the container you can test this by sending a message to the log. - - (bash)# logger "Hello from my container" - -Then exit and check the journal. - - # exit - - # journalctl -b | grep Hello - -This should list the message sent to logger. - -## Attaching to one or more from STDIN, STDOUT, STDERR - -If you do not specify -a then Docker will attach everything (stdin,stdout,stderr) -. 
You can specify to which of the three standard streams (stdin, stdout, stderr) -you'd like to connect instead, as in: - - # docker run -a stdin -a stdout -i -t fedora /bin/bash - -## Sharing IPC between containers - -Using shm_server.c available here: https://www.cs.cf.ac.uk/Dave/C/node27.html - -Testing `--ipc=host` mode: - -Host shows a shared memory segment with 7 pids attached, happens to be from httpd: - -``` - $ sudo ipcs -m - - ------ Shared Memory Segments -------- - key shmid owner perms bytes nattch status - 0x01128e25 0 root 600 1000 7 -``` - -Now run a regular container, and it correctly does NOT see the shared memory segment from the host: - -``` - $ docker run -it shm ipcs -m - - ------ Shared Memory Segments -------- - key shmid owner perms bytes nattch status -``` - -Run a container with the new `--ipc=host` option, and it now sees the shared memory segment from the host httpd: - - ``` - $ docker run -it --ipc=host shm ipcs -m - - ------ Shared Memory Segments -------- - key shmid owner perms bytes nattch status - 0x01128e25 0 root 600 1000 7 -``` -Testing `--ipc=container:CONTAINERID` mode: - -Start a container with a program to create a shared memory segment: -``` - $ docker run -it shm bash - $ sudo shm/shm_server & - $ sudo ipcs -m - - ------ Shared Memory Segments -------- - key shmid owner perms bytes nattch status - 0x0000162e 0 root 666 27 1 -``` -Create a 2nd container correctly shows no shared memory segment from 1st container: -``` - $ docker run shm ipcs -m - - ------ Shared Memory Segments -------- - key shmid owner perms bytes nattch status -``` - -Create a 3rd container using the new --ipc=container:CONTAINERID option, now it shows the shared memory segment from the first: - -``` - $ docker run -it --ipc=container:ed735b2264ac shm ipcs -m - $ sudo ipcs -m - - ------ Shared Memory Segments -------- - key shmid owner perms bytes nattch status - 0x0000162e 0 root 666 27 1 -``` - -## Linking Containers - -> **Note**: This section 
describes linking between containers on the -> default (bridge) network, also known as "legacy links". Using `--link` -> on user-defined networks uses the DNS-based discovery, which does not add -> entries to `/etc/hosts`, and does not set environment variables for -> discovery. - -The link feature allows multiple containers to communicate with each other. For -example, a container whose Dockerfile has exposed port 80 can be run and named -as follows: - - # docker run --name=link-test -d -i -t fedora/httpd - -A second container, in this case called linker, can communicate with the httpd -container, named link-test, by running with the **--link=:** - - # docker run -t -i --link=link-test:lt --name=linker fedora /bin/bash - -Now the container linker is linked to container link-test with the alias lt. -Running the **env** command in the linker container shows environment variables - with the LT (alias) context (**LT_**) - - # env - HOSTNAME=668231cb0978 - TERM=xterm - LT_PORT_80_TCP=tcp://172.17.0.3:80 - LT_PORT_80_TCP_PORT=80 - LT_PORT_80_TCP_PROTO=tcp - LT_PORT=tcp://172.17.0.3:80 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PWD=/ - LT_NAME=/linker/lt - SHLVL=1 - HOME=/ - LT_PORT_80_TCP_ADDR=172.17.0.3 - _=/usr/bin/env - -When linking two containers Docker will use the exposed ports of the container -to create a secure tunnel for the parent to access. - -If a container is connected to the default bridge network and `linked` -with other containers, then the container's `/etc/hosts` file is updated -with the linked container's name. - -> **Note** Since Docker may live update the container's `/etc/hosts` file, there -may be situations when processes inside the container can end up reading an -empty or incomplete `/etc/hosts` file. In most cases, retrying the read again -should fix the problem. - - -## Mapping Ports for External Usage - -The exposed port of an application can be mapped to a host port using the **-p** -flag. 
For example, an httpd port 80 can be mapped to the host port 8080 using the -following: - - # docker run -p 8080:80 -d -i -t fedora/httpd - -## Creating and Mounting a Data Volume Container - -Many applications require the sharing of persistent data across several -containers. Docker allows you to create a Data Volume Container that other -containers can mount from. For example, create a named container that contains -directories /var/volume1 and /tmp/volume2. The image will need to contain these -directories so a couple of RUN mkdir instructions might be required for you -fedora-data image: - - # docker run --name=data -v /var/volume1 -v /tmp/volume2 -i -t fedora-data true - # docker run --volumes-from=data --name=fedora-container1 -i -t fedora bash - -Multiple --volumes-from parameters will bring together multiple data volumes from -multiple containers. And it's possible to mount the volumes that came from the -DATA container in yet another container via the fedora-container1 intermediary -container, allowing to abstract the actual data source from users of that data: - - # docker run --volumes-from=fedora-container1 --name=fedora-container2 -i -t fedora bash - -## Mounting External Volumes - -To mount a host directory as a container volume, specify the absolute path to -the directory and the absolute path for the container directory separated by a -colon: - - # docker run -v /var/db:/data1 -i -t fedora bash - -When using SELinux, be aware that the host has no knowledge of container SELinux -policy. Therefore, in the above example, if SELinux policy is enforced, the -`/var/db` directory is not writable to the container. A "Permission Denied" -message will occur and an avc: message in the host's syslog. 
- - -To work around this, at time of writing this man page, the following command -needs to be run in order for the proper SELinux policy type label to be attached -to the host directory: - - # chcon -Rt svirt_sandbox_file_t /var/db - - -Now, writing to the /data1 volume in the container will be allowed and the -changes will also be reflected on the host in /var/db. - -## Using alternative security labeling - -You can override the default labeling scheme for each container by specifying -the `--security-opt` flag. For example, you can specify the MCS/MLS level, a -requirement for MLS systems. Specifying the level in the following command -allows you to share the same content between containers. - - # docker run --security-opt label=level:s0:c100,c200 -i -t fedora bash - -An MLS example might be: - - # docker run --security-opt label=level:TopSecret -i -t rhel7 bash - -To disable the security labeling for this container versus running with the -`--permissive` flag, use the following command: - - # docker run --security-opt label=disable -i -t fedora bash - -If you want a tighter security policy on the processes within a container, -you can specify an alternate type for the container. You could run a container -that is only allowed to listen on Apache ports by executing the following -command: - - # docker run --security-opt label=type:svirt_apache_t -i -t centos bash - -Note: - -You would have to write policy defining a `svirt_apache_t` type. - -## Setting device weight - -If you want to set `/dev/sda` device weight to `200`, you can specify the device -weight by `--blkio-weight-device` flag. Use the following command: - - # docker run -it --blkio-weight-device "/dev/sda:200" ubuntu - -## Specify isolation technology for container (--isolation) - -This option is useful in situations where you are running Docker containers on -Microsoft Windows. The `--isolation ` option sets a container's isolation -technology. 
On Linux, the only supported is the `default` option which uses -Linux namespaces. These two commands are equivalent on Linux: - -``` -$ docker run -d busybox top -$ docker run -d --isolation default busybox top -``` - -On Microsoft Windows, can take any of these values: - -* `default`: Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. -* `process`: Namespace isolation only. -* `hyperv`: Hyper-V hypervisor partition-based isolation. - -In practice, when running on Microsoft Windows without a `daemon` option set, these two commands are equivalent: - -``` -$ docker run -d --isolation default busybox top -$ docker run -d --isolation process busybox top -``` - -If you have set the `--exec-opt isolation=hyperv` option on the Docker `daemon`, any of these commands also result in `hyperv` isolation: - -``` -$ docker run -d --isolation default busybox top -$ docker run -d --isolation hyperv busybox top -``` - -## Setting Namespaced Kernel Parameters (Sysctls) - -The `--sysctl` sets namespaced kernel parameters (sysctls) in the -container. For example, to turn on IP forwarding in the containers -network namespace, run this command: - - $ docker run --sysctl net.ipv4.ip_forward=1 someimage - -Note: - -Not all sysctls are namespaced. Docker does not support changing sysctls -inside of a container that also modify the host system. As the kernel -evolves we expect to see more sysctls become namespaced. - -See the definition of the `--sysctl` option above for the current list of -supported sysctls. - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. 
-June 2014, updated by Sven Dowideit -July 2014, updated by Sven Dowideit -November 2015, updated by Sally O'Malley diff --git a/fn/vendor/github.com/docker/docker/man/docker.1.md b/fn/vendor/github.com/docker/docker/man/docker.1.md deleted file mode 100644 index abedb45ca..000000000 --- a/fn/vendor/github.com/docker/docker/man/docker.1.md +++ /dev/null @@ -1,70 +0,0 @@ -% DOCKER(1) Docker User Manuals -% William Henry -% APRIL 2014 -# NAME -docker \- Docker image and container command line interface - -# SYNOPSIS -**docker** [OPTIONS] COMMAND [ARG...] - -**docker** [--help|-v|--version] - -# DESCRIPTION -**docker** is a client for interacting with the daemon (see **dockerd(8)**) through the CLI. - -The Docker CLI has over 30 commands. The commands are listed below and each has -its own man page which explain usage and arguments. - -To see the man page for a command run **man docker **. - -# OPTIONS -**--help** - Print usage statement - -**--config**="" - Specifies the location of the Docker client configuration files. The default is '~/.docker'. - -**-D**, **--debug**=*true*|*false* - Enable debug mode. Default is false. - -**-H**, **--host**=[*unix:///var/run/docker.sock*]: tcp://[host]:[port][path] to bind or -unix://[/path/to/socket] to use. - The socket(s) to bind to in daemon mode specified using one or more - tcp://host:port/path, unix:///path/to/socket, fd://* or fd://socketfd. - If the tcp port is not specified, then it will default to either `2375` when - `--tls` is off, or `2376` when `--tls` is on, or `--tlsverify` is specified. - -**-l**, **--log-level**="*debug*|*info*|*warn*|*error*|*fatal*" - Set the logging level. Default is `info`. - -**--tls**=*true*|*false* - Use TLS; implied by --tlsverify. Default is false. - -**--tlscacert**=*~/.docker/ca.pem* - Trust certs signed only by this CA. - -**--tlscert**=*~/.docker/cert.pem* - Path to TLS certificate file. - -**--tlskey**=*~/.docker/key.pem* - Path to TLS key file. 
- -**--tlsverify**=*true*|*false* - Use TLS and verify the remote (daemon: verify client, client: verify daemon). - Default is false. - -**-v**, **--version**=*true*|*false* - Print version information and quit. Default is false. - -# COMMANDS - -Use "docker help" or "docker --help" to get an overview of available commands. - -# EXAMPLES -For specific client examples please see the man page for the specific Docker -command. For example: - - man docker-run - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. diff --git a/fn/vendor/github.com/docker/docker/man/dockerd.8.md b/fn/vendor/github.com/docker/docker/man/dockerd.8.md deleted file mode 100644 index 8304a45dd..000000000 --- a/fn/vendor/github.com/docker/docker/man/dockerd.8.md +++ /dev/null @@ -1,734 +0,0 @@ -% DOCKER(8) Docker User Manuals -% Shishir Mahajan -% SEPTEMBER 2015 -# NAME -dockerd - Enable daemon mode - -# SYNOPSIS -**dockerd** -[**--add-runtime**[=*[]*]] -[**--api-cors-header**=[=*API-CORS-HEADER*]] -[**--authorization-plugin**[=*[]*]] -[**-b**|**--bridge**[=*BRIDGE*]] -[**--bip**[=*BIP*]] -[**--cgroup-parent**[=*[]*]] -[**--cluster-store**[=*[]*]] -[**--cluster-advertise**[=*[]*]] -[**--cluster-store-opt**[=*map[]*]] -[**--config-file**[=*/etc/docker/daemon.json*]] -[**--containerd**[=*SOCKET-PATH*]] -[**--data-root**[=*/var/lib/docker*]] -[**-D**|**--debug**] -[**--default-gateway**[=*DEFAULT-GATEWAY*]] -[**--default-gateway-v6**[=*DEFAULT-GATEWAY-V6*]] -[**--default-runtime**[=*runc*]] -[**--default-shm-size**[=*64MiB*]] -[**--default-ulimit**[=*[]*]] -[**--disable-legacy-registry**] -[**--dns**[=*[]*]] -[**--dns-opt**[=*[]*]] -[**--dns-search**[=*[]*]] -[**--exec-opt**[=*[]*]] -[**--exec-root**[=*/var/run/docker*]] -[**--experimental**[=*false*]] -[**--fixed-cidr**[=*FIXED-CIDR*]] -[**--fixed-cidr-v6**[=*FIXED-CIDR-V6*]] -[**-G**|**--group**[=*docker*]] -[**-H**|**--host**[=*[]*]] -[**--help**] 
-[**--icc**[=*true*]] -[**--init**[=*false*]] -[**--init-path**[=*""*]] -[**--insecure-registry**[=*[]*]] -[**--ip**[=*0.0.0.0*]] -[**--ip-forward**[=*true*]] -[**--ip-masq**[=*true*]] -[**--iptables**[=*true*]] -[**--ipv6**] -[**--isolation**[=*default*]] -[**-l**|**--log-level**[=*info*]] -[**--label**[=*[]*]] -[**--live-restore**[=*false*]] -[**--log-driver**[=*json-file*]] -[**--log-opt**[=*map[]*]] -[**--mtu**[=*0*]] -[**--max-concurrent-downloads**[=*3*]] -[**--max-concurrent-uploads**[=*5*]] -[**-p**|**--pidfile**[=*/var/run/docker.pid*]] -[**--raw-logs**] -[**--registry-mirror**[=*[]*]] -[**-s**|**--storage-driver**[=*STORAGE-DRIVER*]] -[**--seccomp-profile**[=*SECCOMP-PROFILE-PATH*]] -[**--selinux-enabled**] -[**--shutdown-timeout**[=*15*]] -[**--storage-opt**[=*[]*]] -[**--swarm-default-advertise-addr**[=*IP|INTERFACE*]] -[**--tls**] -[**--tlscacert**[=*~/.docker/ca.pem*]] -[**--tlscert**[=*~/.docker/cert.pem*]] -[**--tlskey**[=*~/.docker/key.pem*]] -[**--tlsverify**] -[**--userland-proxy**[=*true*]] -[**--userland-proxy-path**[=*""*]] -[**--userns-remap**[=*default*]] - -# DESCRIPTION -**dockerd** is used for starting the Docker daemon (i.e., to command the daemon -to manage images, containers etc). So **dockerd** is a server, as a daemon. - -To run the Docker daemon you can specify **dockerd**. -You can check the daemon options using **dockerd --help**. -Daemon options should be specified after the **dockerd** keyword in the -following format. - -**dockerd [OPTIONS]** - -# OPTIONS - -**--add-runtime**=[] - Runtimes can be registered with the daemon either via the -configuration file or using the `--add-runtime` command line argument. 
- - The following is an example adding 2 runtimes via the configuration: - -```json -{ - "default-runtime": "runc", - "runtimes": { - "runc": { - "path": "runc" - }, - "custom": { - "path": "/usr/local/bin/my-runc-replacement", - "runtimeArgs": [ - "--debug" - ] - } - } -} -``` - - This is the same example via the command line: - -```bash -$ sudo dockerd --add-runtime runc=runc --add-runtime custom=/usr/local/bin/my-runc-replacement -``` - - **Note**: defining runtime arguments via the command line is not supported. - -**--api-cors-header**="" - Set CORS headers in the Engine API. Default is cors disabled. Give urls like - "http://foo, http://bar, ...". Give "*" to allow all. - -**--authorization-plugin**="" - Set authorization plugins to load - -**-b**, **--bridge**="" - Attach containers to a pre\-existing network bridge; use 'none' to disable - container networking - -**--bip**="" - Use the provided CIDR notation address for the dynamically created bridge - (docker0); Mutually exclusive of \-b - -**--cgroup-parent**="" - Set parent cgroup for all containers. Default is "/docker" for fs cgroup - driver and "system.slice" for systemd cgroup driver. - -**--cluster-store**="" - URL of the distributed storage backend - -**--cluster-advertise**="" - Specifies the 'host:port' or `interface:port` combination that this - particular daemon instance should use when advertising itself to the cluster. - The daemon is reached through this value. - -**--cluster-store-opt**="" - Specifies options for the Key/Value store. - -**--config-file**="/etc/docker/daemon.json" - Specifies the JSON file path to load the configuration from. - -**--containerd**="" - Path to containerd socket. - -**--data-root**="" - Path to the directory used to store persisted Docker data such as - configuration for resources, swarm cluster state, and filesystem data for - images, containers, and local volumes. Default is `/var/lib/docker`. - -**-D**, **--debug**=*true*|*false* - Enable debug mode. 
Default is false. - -**--default-gateway**="" - IPv4 address of the container default gateway; this address must be part of - the bridge subnet (which is defined by \-b or \--bip) - -**--default-gateway-v6**="" - IPv6 address of the container default gateway - -**--default-runtime**="runc" - Set default runtime if there're more than one specified by `--add-runtime`. - -**--default-shm-size**=*64MiB* - Set the daemon-wide default shm size for containers. Default is `64MiB`. - -**--default-ulimit**=[] - Default ulimits for containers. - -**--disable-legacy-registry**=*true*|*false* - Disable contacting legacy registries - -**--dns**="" - Force Docker to use specific DNS servers - -**--dns-opt**="" - DNS options to use. - -**--dns-search**=[] - DNS search domains to use. - -**--exec-opt**=[] - Set runtime execution options. See RUNTIME EXECUTION OPTIONS. - -**--exec-root**="" - Path to use as the root of the Docker execution state files. Default is - `/var/run/docker`. - -**--experimental**="" - Enable the daemon experimental features. - -**--fixed-cidr**="" - IPv4 subnet for fixed IPs (e.g., 10.20.0.0/16); this subnet must be nested in - the bridge subnet (which is defined by \-b or \-\-bip). - -**--fixed-cidr-v6**="" - IPv6 subnet for global IPv6 addresses (e.g., 2a00:1450::/64) - -**-G**, **--group**="" - Group to assign the unix socket specified by -H when running in daemon mode. - use '' (the empty string) to disable setting of a group. Default is `docker`. - -**-H**, **--host**=[*unix:///var/run/docker.sock*]: tcp://[host:port] to bind or -unix://[/path/to/socket] to use. - The socket(s) to bind to in daemon mode specified using one or more - tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. - -**--help** - Print usage statement - -**--icc**=*true*|*false* - Allow unrestricted inter\-container and Docker daemon host communication. If - disabled, containers can still be linked together using the **--link** option - (see **docker-run(1)**). 
Default is true. - -**--init** - Run an init process inside containers for signal forwarding and process - reaping. - -**--init-path** - Path to the docker-init binary. - -**--insecure-registry**=[] - Enable insecure registry communication, i.e., enable un-encrypted and/or - untrusted communication. - - List of insecure registries can contain an element with CIDR notation to - specify a whole subnet. Insecure registries accept HTTP and/or accept HTTPS - with certificates from unknown CAs. - - Enabling `--insecure-registry` is useful when running a local registry. - However, because its use creates security vulnerabilities it should ONLY be - enabled for testing purposes. For increased security, users should add their - CA to their system's list of trusted CAs instead of using - `--insecure-registry`. - -**--ip**="" - Default IP address to use when binding container ports. Default is `0.0.0.0`. - -**--ip-forward**=*true*|*false* - Enables IP forwarding on the Docker host. The default is `true`. This flag - interacts with the IP forwarding setting on your host system's kernel. If - your system has IP forwarding disabled, this setting enables it. If your - system has IP forwarding enabled, setting this flag to `--ip-forward=false` - has no effect. - - This setting will also enable IPv6 forwarding if you have both - `--ip-forward=true` and `--fixed-cidr-v6` set. Note that this may reject - Router Advertisements and interfere with the host's existing IPv6 - configuration. For more information, please consult the documentation about - "Advanced Networking - IPv6". - -**--ip-masq**=*true*|*false* - Enable IP masquerading for bridge's IP range. Default is true. - -**--iptables**=*true*|*false* - Enable Docker's addition of iptables rules. Default is true. - -**--ipv6**=*true*|*false* - Enable IPv6 support. Default is false. Docker will create an IPv6-enabled - bridge with address fe80::1 which will allow you to create IPv6-enabled - containers. 
Use together with `--fixed-cidr-v6` to provide globally routable - IPv6 addresses. IPv6 forwarding will be enabled if not used with - `--ip-forward=false`. This may collide with your host's current IPv6 - settings. For more information please consult the documentation about - "Advanced Networking - IPv6". - -**--isolation**="*default*" - Isolation specifies the type of isolation technology used by containers. - Note that the default on Windows server is `process`, and the default on - Windows client is `hyperv`. Linux only supports `default`. - -**-l**, **--log-level**="*debug*|*info*|*warn*|*error*|*fatal*" - Set the logging level. Default is `info`. - -**--label**="[]" - Set key=value labels to the daemon (displayed in `docker info`) - -**--live-restore**=*false* - Enable live restore of running containers when the daemon starts so that they - are not restarted. This option is applicable only for docker daemon running - on Linux host. - -**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*" - Default driver for container logs. Default is `json-file`. - **Warning**: `docker logs` command works only for `json-file` logging driver. - -**--log-opt**=[] - Logging driver specific options. - -**--mtu**=*0* - Set the containers network mtu. Default is `0`. - -**--max-concurrent-downloads**=*3* - Set the max concurrent downloads for each pull. Default is `3`. - -**--max-concurrent-uploads**=*5* - Set the max concurrent uploads for each push. Default is `5`. - -**-p**, **--pidfile**="" - Path to use for daemon PID file. Default is `/var/run/docker.pid` - -**--raw-logs** - Output daemon logs in full timestamp format without ANSI coloring. If this - flag is not set, the daemon outputs condensed, colorized logs if a terminal - is detected, or full ("raw") output otherwise. - -**--registry-mirror**=*://* - Prepend a registry mirror to be used for image pulls. May be specified - multiple times. 
- -**-s**, **--storage-driver**="" - Force the Docker runtime to use a specific storage driver. - -**--seccomp-profile**="" - Path to seccomp profile. - -**--selinux-enabled**=*true*|*false* - Enable selinux support. Default is false. - -**--shutdown-timeout**=*15* - Set the shutdown timeout value in seconds. Default is `15`. - -**--storage-opt**=[] - Set storage driver options. See STORAGE DRIVER OPTIONS. - -**--swarm-default-advertise-addr**=*IP|INTERFACE* - Set default address or interface for swarm to advertise as its - externally-reachable address to other cluster members. This can be a - hostname, an IP address, or an interface such as `eth0`. A port cannot be - specified with this option. - -**--tls**=*true*|*false* - Use TLS; implied by --tlsverify. Default is false. - -**--tlscacert**=*~/.docker/ca.pem* - Trust certs signed only by this CA. - -**--tlscert**=*~/.docker/cert.pem* - Path to TLS certificate file. - -**--tlskey**=*~/.docker/key.pem* - Path to TLS key file. - -**--tlsverify**=*true*|*false* - Use TLS and verify the remote (daemon: verify client, client: verify daemon). - Default is false. - -**--userland-proxy**=*true*|*false* - Rely on a userland proxy implementation for inter-container and - outside-to-container loopback communications. Default is true. - -**--userland-proxy-path**="" - Path to the userland proxy binary. - -**--userns-remap**=*default*|*uid:gid*|*user:group*|*user*|*uid* - Enable user namespaces for containers on the daemon. Specifying "default" - will cause a new user and group to be created to handle UID and GID range - remapping for the user namespace mappings used for contained processes. - Specifying a user (or uid) and optionally a group (or gid) will cause the - daemon to lookup the user and group's subordinate ID ranges for use as the - user namespace mappings for contained processes. 
- -# STORAGE DRIVER OPTIONS - -Docker uses storage backends (known as "graphdrivers" in the Docker -internals) to create writable containers from images. Many of these -backends use operating system level technologies and can be -configured. - -Specify options to the storage backend with **--storage-opt** flags. The -backends that currently take options are *devicemapper*, *zfs* and *btrfs*. -Options for *devicemapper* are prefixed with *dm*, options for *zfs* -start with *zfs* and options for *btrfs* start with *btrfs*. - -Specifically for devicemapper, the default is a "loopback" model which -requires no pre-configuration, but is extremely inefficient. Do not -use it in production. - -To make the best use of Docker with the devicemapper backend, you must -have a recent version of LVM. Use `lvm` to create a thin pool; for -more information see `man lvmthin`. Then, use `--storage-opt -dm.thinpooldev` to tell the Docker engine to use that pool for -allocating images and container snapshots. - -## Devicemapper options - -#### dm.thinpooldev - -Specifies a custom block storage device to use for the thin pool. - -If using a block device for device mapper storage, it is best to use `lvm` -to create and manage the thin-pool volume. This volume is then handed to Docker -to exclusively create snapshot volumes needed for images and containers. - -Managing the thin-pool outside of Engine makes for the most feature-rich -method of having Docker utilize device mapper thin provisioning as the -backing storage for Docker containers. The highlights of the lvm-based -thin-pool management feature include: automatic or interactive thin-pool -resize support, dynamically changing thin-pool features, automatic thinp -metadata checking when lvm activates the thin-pool, etc. - -As a fallback if no thin pool is provided, loopback files are -created. Loopback is very slow, but can be used without any -pre-configuration of storage. 
It is strongly recommended that you do -not use loopback in production. Ensure your Engine daemon has a -`--storage-opt dm.thinpooldev` argument provided. - -Example use: - - $ dockerd \ - --storage-opt dm.thinpooldev=/dev/mapper/thin-pool - -#### dm.basesize - -Specifies the size to use when creating the base device, which limits -the size of images and containers. The default value is 10G. Note, -thin devices are inherently "sparse", so a 10G device which is mostly -empty doesn't use 10 GB of space on the pool. However, the filesystem -will use more space for base images the larger the device -is. - -The base device size can be increased at daemon restart which will allow -all future images and containers (based on those new images) to be of the -new base device size. - -Example use: `dockerd --storage-opt dm.basesize=50G` - -This will increase the base device size to 50G. The Docker daemon will throw an -error if existing base device size is larger than 50G. A user can use -this option to expand the base device size however shrinking is not permitted. - -This value affects the system-wide "base" empty filesystem that may already -be initialized and inherited by pulled images. Typically, a change to this -value requires additional steps to take effect: - - $ sudo service docker stop - $ sudo rm -rf /var/lib/docker - $ sudo service docker start - -Example use: `dockerd --storage-opt dm.basesize=20G` - -#### dm.fs - -Specifies the filesystem type to use for the base device. The -supported options are `ext4` and `xfs`. The default is `ext4`. - -Example use: `dockerd --storage-opt dm.fs=xfs` - -#### dm.mkfsarg - -Specifies extra mkfs arguments to be used when creating the base device. - -Example use: `dockerd --storage-opt "dm.mkfsarg=-O ^has_journal"` - -#### dm.mountopt - -Specifies extra mount options used when mounting the thin devices. 
- -Example use: `dockerd --storage-opt dm.mountopt=nodiscard` - -#### dm.use_deferred_removal - -Enables use of deferred device removal if `libdm` and the kernel driver -support the mechanism. - -Deferred device removal means that if device is busy when devices are -being removed/deactivated, then a deferred removal is scheduled on -device. And devices automatically go away when last user of the device -exits. - -For example, when a container exits, its associated thin device is removed. If -that device has leaked into some other mount namespace and can't be removed, -the container exit still succeeds and this option causes the system to schedule -the device for deferred removal. It does not wait in a loop trying to remove a -busy device. - -Example use: `dockerd --storage-opt dm.use_deferred_removal=true` - -#### dm.use_deferred_deletion - -Enables use of deferred device deletion for thin pool devices. By default, -thin pool device deletion is synchronous. Before a container is deleted, the -Docker daemon removes any associated devices. If the storage driver can not -remove a device, the container deletion fails and daemon returns. - -`Error deleting container: Error response from daemon: Cannot destroy container` - -To avoid this failure, enable both deferred device deletion and deferred -device removal on the daemon. - -`dockerd --storage-opt dm.use_deferred_deletion=true --storage-opt dm.use_deferred_removal=true` - -With these two options enabled, if a device is busy when the driver is -deleting a container, the driver marks the device as deleted. Later, when the -device isn't in use, the driver deletes it. - -In general it should be safe to enable this option by default. It will help -when unintentional leaking of mount point happens across multiple mount -namespaces. - -#### dm.loopdatasize - -**Note**: This option configures devicemapper loopback, which should not be -used in production. 
- -Specifies the size to use when creating the loopback file for the "data" device -which is used for the thin pool. The default size is 100G. The file is sparse, -so it will not initially take up this much space. - -Example use: `dockerd --storage-opt dm.loopdatasize=200G` - -#### dm.loopmetadatasize - -**Note**: This option configures devicemapper loopback, which should not be -used in production. - -Specifies the size to use when creating the loopback file for the "metadata" -device which is used for the thin pool. The default size is 2G. The file is -sparse, so it will not initially take up this much space. - -Example use: `dockerd --storage-opt dm.loopmetadatasize=4G` - -#### dm.datadev - -(Deprecated, use `dm.thinpooldev`) - -Specifies a custom blockdevice to use for data for a Docker-managed thin pool. -It is better to use `dm.thinpooldev` - see the documentation for it above for -discussion of the advantages. - -#### dm.metadatadev - -(Deprecated, use `dm.thinpooldev`) - -Specifies a custom blockdevice to use for metadata for a Docker-managed thin -pool. See `dm.datadev` for why this is deprecated. - -#### dm.blocksize - -Specifies a custom blocksize to use for the thin pool. The default -blocksize is 64K. - -Example use: `dockerd --storage-opt dm.blocksize=512K` - -#### dm.blkdiscard - -Enables or disables the use of `blkdiscard` when removing devicemapper devices. -This is disabled by default due to the additional latency, but as a special -case with loopback devices it will be enabled, in order to re-sparsify the -loopback file on image/container removal. - -Disabling this on loopback can lead to *much* faster container removal times, -but it also prevents the space used in `/var/lib/docker` directory from being -returned to the system for other use when containers are removed. 
- -Example use: `dockerd --storage-opt dm.blkdiscard=false` - -#### dm.override_udev_sync_check - -By default, the devicemapper backend attempts to synchronize with the `udev` -device manager for the Linux kernel. This option allows disabling that -synchronization, to continue even though the configuration may be buggy. - -To view the `udev` sync support of a Docker daemon that is using the -`devicemapper` driver, run: - - $ docker info - [...] - Udev Sync Supported: true - [...] - -When `udev` sync support is `true`, then `devicemapper` and `udev` can -coordinate the activation and deactivation of devices for containers. - -When `udev` sync support is `false`, a race condition occurs between the -`devicemapper` and `udev` during create and cleanup. The race condition results -in errors and failures. (For information on these failures, see -[docker#4036](https://github.com/docker/docker/issues/4036)) - -To allow the `docker` daemon to start, regardless of whether `udev` sync is -`false`, set `dm.override_udev_sync_check` to true: - - $ dockerd --storage-opt dm.override_udev_sync_check=true - -When this value is `true`, the driver continues and simply warns you the errors -are happening. - -**Note**: The ideal is to pursue a `docker` daemon and environment that does -support synchronizing with `udev`. For further discussion on this topic, see -[docker#4036](https://github.com/docker/docker/issues/4036). -Otherwise, set this flag for migrating existing Docker daemons to a daemon with -a supported environment. - -#### dm.min_free_space - -Specifies the min free space percent in a thin pool require for new device -creation to succeed. This check applies to both free data space as well -as free metadata space. Valid values are from 0% - 99%. Value 0% disables -free space checking logic. If user does not specify a value for this option, -the Engine uses a default value of 10%. 
- -Whenever a new a thin pool device is created (during `docker pull` or during -container creation), the Engine checks if the minimum free space is available. -If the space is unavailable, then device creation fails and any relevant -`docker` operation fails. - -To recover from this error, you must create more free space in the thin pool to -recover from the error. You can create free space by deleting some images and -containers from tge thin pool. You can also add more storage to the thin pool. - -To add more space to an LVM (logical volume management) thin pool, just add -more storage to the group container thin pool; this should automatically -resolve any errors. If your configuration uses loop devices, then stop the -Engine daemon, grow the size of loop files and restart the daemon to resolve -the issue. - -Example use:: `dockerd --storage-opt dm.min_free_space=10%` - -#### dm.xfs_nospace_max_retries - -Specifies the maximum number of retries XFS should attempt to complete IO when -ENOSPC (no space) error is returned by underlying storage device. - -By default XFS retries infinitely for IO to finish and this can result in -unkillable process. To change this behavior one can set xfs_nospace_max_retries -to say 0 and XFS will not retry IO after getting ENOSPC and will shutdown -filesystem. - -Example use: - - $ sudo dockerd --storage-opt dm.xfs_nospace_max_retries=0 - - -## ZFS options - -#### zfs.fsname - -Set zfs filesystem under which docker will create its own datasets. By default -docker will pick up the zfs filesystem where docker graph (`/var/lib/docker`) -is located. - -Example use: `dockerd -s zfs --storage-opt zfs.fsname=zroot/docker` - -## Btrfs options - -#### btrfs.min_space - -Specifies the minimum size to use when creating the subvolume which is used for -containers. 
If user uses disk quota for btrfs when creating or running a -container with **--storage-opt size** option, docker should ensure the **size** -cannot be smaller than **btrfs.min_space**. - -Example use: `docker daemon -s btrfs --storage-opt btrfs.min_space=10G` - -# CLUSTER STORE OPTIONS - -The daemon uses libkv to advertise the node within the cluster. Some Key/Value -backends support mutual TLS, and the client TLS settings used by the daemon can -be configured using the **--cluster-store-opt** flag, specifying the paths to -PEM encoded files. - -#### kv.cacertfile - -Specifies the path to a local file with PEM encoded CA certificates to trust - -#### kv.certfile - -Specifies the path to a local file with a PEM encoded certificate. This -certificate is used as the client cert for communication with the Key/Value -store. - -#### kv.keyfile - -Specifies the path to a local file with a PEM encoded private key. This -private key is used as the client key for communication with the Key/Value -store. - -# Access authorization - -Docker's access authorization can be extended by authorization plugins that -your organization can purchase or build themselves. You can install one or more -authorization plugins when you start the Docker `daemon` using the -`--authorization-plugin=PLUGIN_ID` option. - -```bash -dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,... -``` - -The `PLUGIN_ID` value is either the plugin's name or a path to its -specification file. The plugin's implementation determines whether you can -specify a name or path. Consult with your Docker administrator to get -information about the plugins available to you. - -Once a plugin is installed, requests made to the `daemon` through the command -line or Docker's Engine API are allowed or denied by the plugin. If you have -multiple plugins installed, at least one must allow the request for it to -complete. 
- -For information about how to create an authorization plugin, see [authorization -plugin](https://docs.docker.com/engine/extend/authorization/) section in the -Docker extend section of this documentation. - -# RUNTIME EXECUTION OPTIONS - -You can configure the runtime using options specified with the `--exec-opt` flag. -All the flag's options have the `native` prefix. A single `native.cgroupdriver` -option is available. - -The `native.cgroupdriver` option specifies the management of the container's -cgroups. You can only specify `cgroupfs` or `systemd`. If you specify -`systemd` and it is not available, the system errors out. If you omit the -`native.cgroupdriver` option,` cgroupfs` is used. - -This example sets the `cgroupdriver` to `systemd`: - -```bash -$ sudo dockerd --exec-opt native.cgroupdriver=systemd -``` - -Setting this option applies to all containers the daemon launches. - -# HISTORY -Sept 2015, Originally compiled by Shishir Mahajan -based on docker.com source material and internal work. 
diff --git a/fn/vendor/github.com/docker/docker/man/generate.go b/fn/vendor/github.com/docker/docker/man/generate.go deleted file mode 100644 index 964d69277..000000000 --- a/fn/vendor/github.com/docker/docker/man/generate.go +++ /dev/null @@ -1,106 +0,0 @@ -package main - -import ( - "fmt" - "io/ioutil" - "log" - "os" - "path/filepath" - - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/commands" - "github.com/docker/docker/pkg/term" - "github.com/spf13/cobra" - "github.com/spf13/cobra/doc" - "github.com/spf13/pflag" -) - -const descriptionSourcePath = "man/src/" - -func generateManPages(opts *options) error { - header := &doc.GenManHeader{ - Title: "DOCKER", - Section: "1", - Source: "Docker Community", - } - - stdin, stdout, stderr := term.StdStreams() - dockerCli := command.NewDockerCli(stdin, stdout, stderr) - cmd := &cobra.Command{Use: "docker"} - commands.AddCommands(cmd, dockerCli) - source := filepath.Join(opts.source, descriptionSourcePath) - if err := loadLongDescription(cmd, source); err != nil { - return err - } - - cmd.DisableAutoGenTag = true - return doc.GenManTreeFromOpts(cmd, doc.GenManTreeOptions{ - Header: header, - Path: opts.target, - CommandSeparator: "-", - }) -} - -func loadLongDescription(cmd *cobra.Command, path string) error { - for _, cmd := range cmd.Commands() { - if cmd.Name() == "" { - continue - } - fullpath := filepath.Join(path, cmd.Name()+".md") - - if cmd.HasSubCommands() { - loadLongDescription(cmd, filepath.Join(path, cmd.Name())) - } - - if _, err := os.Stat(fullpath); err != nil { - log.Printf("WARN: %s does not exist, skipping\n", fullpath) - continue - } - - content, err := ioutil.ReadFile(fullpath) - if err != nil { - return err - } - cmd.Long = string(content) - - fullpath = filepath.Join(path, cmd.Name()+"-example.md") - if _, err := os.Stat(fullpath); err != nil { - continue - } - - content, err = ioutil.ReadFile(fullpath) - if err != nil { - return err - } - cmd.Example = 
string(content) - - } - return nil -} - -type options struct { - source string - target string -} - -func parseArgs() (*options, error) { - opts := &options{} - cwd, _ := os.Getwd() - flags := pflag.NewFlagSet(os.Args[0], pflag.ContinueOnError) - flags.StringVar(&opts.source, "root", cwd, "Path to project root") - flags.StringVar(&opts.target, "target", "/tmp", "Target path for generated man pages") - err := flags.Parse(os.Args[1:]) - return opts, err -} - -func main() { - opts, err := parseArgs() - if err != nil { - fmt.Fprintln(os.Stderr, err.Error()) - } - fmt.Printf("Project root: %s\n", opts.source) - fmt.Printf("Generating man pages into %s\n", opts.target) - if err := generateManPages(opts); err != nil { - fmt.Fprintf(os.Stderr, "Failed to generate man pages: %s\n", err.Error()) - } -} diff --git a/fn/vendor/github.com/docker/docker/man/generate.sh b/fn/vendor/github.com/docker/docker/man/generate.sh deleted file mode 100755 index 905b2d7ba..000000000 --- a/fn/vendor/github.com/docker/docker/man/generate.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash -# -# Generate man pages for docker/docker -# - -set -eu - -mkdir -p ./man/man1 - -# Generate man pages from cobra commands -go build -o /tmp/gen-manpages ./man -/tmp/gen-manpages --root . 
--target ./man/man1 - -# Generate legacy pages from markdown -./man/md2man-all.sh -q diff --git a/fn/vendor/github.com/docker/docker/man/glide.lock b/fn/vendor/github.com/docker/docker/man/glide.lock deleted file mode 100644 index 5ec765a4c..000000000 --- a/fn/vendor/github.com/docker/docker/man/glide.lock +++ /dev/null @@ -1,52 +0,0 @@ -hash: ead3ea293a6143fe41069ebec814bf197d8c43a92cc7666b1f7e21a419b46feb -updated: 2016-06-20T21:53:35.420817456Z -imports: -- name: github.com/BurntSushi/toml - version: f0aeabca5a127c4078abb8c8d64298b147264b55 -- name: github.com/cpuguy83/go-md2man - version: a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa - subpackages: - - md2man -- name: github.com/fsnotify/fsnotify - version: 30411dbcefb7a1da7e84f75530ad3abe4011b4f8 -- name: github.com/hashicorp/hcl - version: da486364306ed66c218be9b7953e19173447c18b - subpackages: - - hcl/ast - - hcl/parser - - hcl/token - - json/parser - - hcl/scanner - - hcl/strconv - - json/scanner - - json/token -- name: github.com/inconshreveable/mousetrap - version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -- name: github.com/magiconair/properties - version: c265cfa48dda6474e208715ca93e987829f572f8 -- name: github.com/mitchellh/mapstructure - version: d2dd0262208475919e1a362f675cfc0e7c10e905 -- name: github.com/russross/blackfriday - version: 1d6b8e9301e720b08a8938b8c25c018285885438 -- name: github.com/shurcooL/sanitized_anchor_name - version: 10ef21a441db47d8b13ebcc5fd2310f636973c77 -- name: github.com/spf13/cast - version: 27b586b42e29bec072fe7379259cc719e1289da6 -- name: github.com/spf13/jwalterweatherman - version: 33c24e77fb80341fe7130ee7c594256ff08ccc46 -- name: github.com/spf13/pflag - version: dabebe21bf790f782ea4c7bbd2efc430de182afd -- name: github.com/spf13/viper - version: c1ccc378a054ea8d4e38d8c67f6938d4760b53dd -- name: golang.org/x/sys - version: 62bee037599929a6e9146f29d10dd5208c43507d - subpackages: - - unix -- name: gopkg.in/yaml.v2 - version: a83829b6f1293c91addabc89d0571c246397bbf4 -- name: 
github.com/spf13/cobra - repo: https://github.com/dnephin/cobra - subpackages: - - doc - version: v1.3 -devImports: [] diff --git a/fn/vendor/github.com/docker/docker/man/glide.yaml b/fn/vendor/github.com/docker/docker/man/glide.yaml deleted file mode 100644 index e99b2670d..000000000 --- a/fn/vendor/github.com/docker/docker/man/glide.yaml +++ /dev/null @@ -1,12 +0,0 @@ -package: github.com/docker/docker/man -import: -- package: github.com/cpuguy83/go-md2man - subpackages: - - md2man -- package: github.com/inconshreveable/mousetrap -- package: github.com/spf13/pflag -- package: github.com/spf13/viper -- package: github.com/spf13/cobra - repo: https://github.com/dnephin/cobra - subpackages: - - doc diff --git a/fn/vendor/github.com/docker/docker/man/md2man-all.sh b/fn/vendor/github.com/docker/docker/man/md2man-all.sh deleted file mode 100755 index 46c7b8f08..000000000 --- a/fn/vendor/github.com/docker/docker/man/md2man-all.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash -set -e - -# get into this script's directory -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -[ "$1" = '-q' ] || { - set -x - pwd -} - -for FILE in *.md; do - base="$(basename "$FILE")" - name="${base%.md}" - num="${name##*.}" - if [ -z "$num" -o "$name" = "$num" ]; then - # skip files that aren't of the format xxxx.N.md (like README.md) - continue - fi - mkdir -p "./man${num}" - go-md2man -in "$FILE" -out "./man${num}/${name}" -done diff --git a/fn/vendor/github.com/docker/docker/man/src/attach.md b/fn/vendor/github.com/docker/docker/man/src/attach.md deleted file mode 100644 index ff1102e10..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/attach.md +++ /dev/null @@ -1,2 +0,0 @@ - -Alias for `docker container attach`. 
diff --git a/fn/vendor/github.com/docker/docker/man/src/commit.md b/fn/vendor/github.com/docker/docker/man/src/commit.md deleted file mode 100644 index 3deb25bb5..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/commit.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker container commit`. diff --git a/fn/vendor/github.com/docker/docker/man/src/container/attach.md b/fn/vendor/github.com/docker/docker/man/src/container/attach.md deleted file mode 100644 index e2fa4f8af..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/attach.md +++ /dev/null @@ -1,66 +0,0 @@ -The **docker attach** command allows you to attach to a running container using -the container's ID or name, either to view its ongoing output or to control it -interactively. You can attach to the same contained process multiple times -simultaneously, screen sharing style, or quickly view the progress of your -detached process. - -To stop a container, use `CTRL-c`. This key sequence sends `SIGKILL` to the -container. You can detach from the container (and leave it running) using a -configurable key sequence. The default sequence is `CTRL-p CTRL-q`. You -configure the key sequence using the **--detach-keys** option or a configuration -file. See **config-json(5)** for documentation on using a configuration file. - -It is forbidden to redirect the standard input of a `docker attach` command while -attaching to a tty-enabled container (i.e.: launched with `-t`). - -# Override the detach sequence - -If you want, you can configure an override the Docker key sequence for detach. -This is useful if the Docker default sequence conflicts with key sequence you -use for other applications. There are two ways to define your own detach key -sequence, as a per-container override or as a configuration property on your -entire configuration. - -To override the sequence for an individual container, use the -`--detach-keys=""` flag with the `docker attach` command. 
The format of -the `` is either a letter [a-Z], or the `ctrl-` combined with any of -the following: - -* `a-z` (a single lowercase alpha character ) -* `@` (at sign) -* `[` (left bracket) -* `\\` (two backward slashes) -* `_` (underscore) -* `^` (caret) - -These `a`, `ctrl-a`, `X`, or `ctrl-\\` values are all examples of valid key -sequences. To configure a different configuration default key sequence for all -containers, see **docker(1)**. - -# EXAMPLES - -## Attaching to a container - -In this example the top command is run inside a container, from an image called -fedora, in detached mode. The ID from the container is passed into the **docker -attach** command: - - $ ID=$(sudo docker run -d fedora /usr/bin/top -b) - $ sudo docker attach $ID - top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 - Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie - Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st - Mem: 373572k total, 355560k used, 18012k free, 27872k buffers - Swap: 786428k total, 0k used, 786428k free, 221740k cached - - PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND - 1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top - - top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 - Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie - Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st - Mem: 373572k total, 355244k used, 18328k free, 27872k buffers - Swap: 786428k total, 0k used, 786428k free, 221776k cached - - PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND - 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top diff --git a/fn/vendor/github.com/docker/docker/man/src/container/commit.md b/fn/vendor/github.com/docker/docker/man/src/container/commit.md deleted file mode 100644 index 43d2e9c25..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/commit.md +++ /dev/null @@ -1,30 +0,0 @@ -Create a new image from an existing container specified by name or -container ID. 
The new image will contain the contents of the -container filesystem, *excluding* any data volumes. Refer to **docker-tag(1)** -for more information about valid image and tag names. - -While the `docker commit` command is a convenient way of extending an -existing image, you should prefer the use of a Dockerfile and `docker -build` for generating images that you intend to share with other -people. - -# EXAMPLES - -## Creating a new image from an existing container -An existing Fedora based container has had Apache installed while running -in interactive mode with the bash shell. Apache is also running. To -create a new image run `docker ps` to find the container's ID and then run: - - $ docker commit -m="Added Apache to Fedora base image" \ - -a="A D Ministrator" 98bd7fc99854 fedora/fedora_httpd:20 - -Note that only a-z0-9-_. are allowed when naming images from an -existing container. - -## Apply specified Dockerfile instructions while committing the image -If an existing container was created without the DEBUG environment -variable set to "true", you can create a new image based on that -container by first getting the container's ID with `docker ps` and -then running: - - $ docker container commit -c="ENV DEBUG true" 98bd7fc99854 debug-image diff --git a/fn/vendor/github.com/docker/docker/man/src/container/cp.md b/fn/vendor/github.com/docker/docker/man/src/container/cp.md deleted file mode 100644 index 557e76de9..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/cp.md +++ /dev/null @@ -1,145 +0,0 @@ -The `docker container cp` utility copies the contents of `SRC_PATH` to the `DEST_PATH`. -You can copy from the container's file system to the local machine or the -reverse, from the local filesystem to the container. If `-` is specified for -either the `SRC_PATH` or `DEST_PATH`, you can also stream a tar archive from -`STDIN` or to `STDOUT`. The `CONTAINER` can be a running or stopped container. 
-The `SRC_PATH` or `DEST_PATH` can be a file or directory. - -The `docker container cp` command assumes container paths are relative to the container's -`/` (root) directory. This means supplying the initial forward slash is optional; -The command sees `compassionate_darwin:/tmp/foo/myfile.txt` and -`compassionate_darwin:tmp/foo/myfile.txt` as identical. Local machine paths can -be an absolute or relative value. The command interprets a local machine's -relative paths as relative to the current working directory where `docker container cp` is -run. - -The `cp` command behaves like the Unix `cp -a` command in that directories are -copied recursively with permissions preserved if possible. Ownership is set to -the user and primary group at the destination. For example, files copied to a -container are created with `UID:GID` of the root user. Files copied to the local -machine are created with the `UID:GID` of the user which invoked the `docker container cp` -command. If you specify the `-L` option, `docker container cp` follows any symbolic link -in the `SRC_PATH`. `docker container cp` does *not* create parent directories for -`DEST_PATH` if they do not exist. - -Assuming a path separator of `/`, a first argument of `SRC_PATH` and second -argument of `DEST_PATH`, the behavior is as follows: - -- `SRC_PATH` specifies a file - - `DEST_PATH` does not exist - - the file is saved to a file created at `DEST_PATH` - - `DEST_PATH` does not exist and ends with `/` - - Error condition: the destination directory must exist. 
- - `DEST_PATH` exists and is a file - - the destination is overwritten with the source file's contents - - `DEST_PATH` exists and is a directory - - the file is copied into this directory using the basename from - `SRC_PATH` -- `SRC_PATH` specifies a directory - - `DEST_PATH` does not exist - - `DEST_PATH` is created as a directory and the *contents* of the source - directory are copied into this directory - - `DEST_PATH` exists and is a file - - Error condition: cannot copy a directory to a file - - `DEST_PATH` exists and is a directory - - `SRC_PATH` does not end with `/.` (that is: _slash_ followed by _dot_) - - the source directory is copied into this directory - - `SRC_PATH` does end with `/.` (that is: _slash_ followed by _dot_) - - the *content* of the source directory is copied into this - directory - -The command requires `SRC_PATH` and `DEST_PATH` to exist according to the above -rules. If `SRC_PATH` is local and is a symbolic link, the symbolic link, not -the target, is copied by default. To copy the link target and not the link, -specify the `-L` option. - -A colon (`:`) is used as a delimiter between `CONTAINER` and its path. You can -also use `:` when specifying paths to a `SRC_PATH` or `DEST_PATH` on a local -machine, for example `file:name.txt`. If you use a `:` in a local machine path, -you must be explicit with a relative or absolute path, for example: - - `/path/to/file:name.txt` or `./file:name.txt` - -It is not possible to copy certain system files such as resources under -`/proc`, `/sys`, `/dev`, tmpfs, and mounts created by the user in the container. -However, you can still copy such files by manually running `tar` in `docker exec`. 
-For example (consider `SRC_PATH` and `DEST_PATH` are directories): - - $ docker exec foo tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | tar Cxf DEST_PATH - - -or - - $ tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | docker exec -i foo tar Cxf DEST_PATH - - - -Using `-` as the `SRC_PATH` streams the contents of `STDIN` as a tar archive. -The command extracts the content of the tar to the `DEST_PATH` in container's -filesystem. In this case, `DEST_PATH` must specify a directory. Using `-` as -the `DEST_PATH` streams the contents of the resource as a tar archive to `STDOUT`. - -# EXAMPLES - -Suppose a container has finished producing some output as a file it saves -to somewhere in its filesystem. This could be the output of a build job or -some other computation. You can copy these outputs from the container to a -location on your local host. - -If you want to copy the `/tmp/foo` directory from a container to the -existing `/tmp` directory on your host. If you run `docker container cp` in your `~` -(home) directory on the local host: - - $ docker container cp compassionate_darwin:tmp/foo /tmp - -Docker creates a `/tmp/foo` directory on your host. Alternatively, you can omit -the leading slash in the command. If you execute this command from your home -directory: - - $ docker container cp compassionate_darwin:tmp/foo tmp - -If `~/tmp` does not exist, Docker will create it and copy the contents of -`/tmp/foo` from the container into this new directory. If `~/tmp` already -exists as a directory, then Docker will copy the contents of `/tmp/foo` from -the container into a directory at `~/tmp/foo`. - -When copying a single file to an existing `LOCALPATH`, the `docker container cp` command -will either overwrite the contents of `LOCALPATH` if it is a file or place it -into `LOCALPATH` if it is a directory, overwriting an existing file of the same -name if one exists. 
For example, this command: - - $ docker container cp sharp_ptolemy:/tmp/foo/myfile.txt /test - -If `/test` does not exist on the local machine, it will be created as a file -with the contents of `/tmp/foo/myfile.txt` from the container. If `/test` -exists as a file, it will be overwritten. Lastly, if `/test` exists as a -directory, the file will be copied to `/test/myfile.txt`. - -Next, suppose you want to copy a file or folder into a container. For example, -this could be a configuration file or some other input to a long running -computation that you would like to place into a created container before it -starts. This is useful because it does not require the configuration file or -other input to exist in the container image. - -If you have a file, `config.yml`, in the current directory on your local host -and wish to copy it to an existing directory at `/etc/my-app.d` in a container, -this command can be used: - - $ docker container cp config.yml myappcontainer:/etc/my-app.d - -If you have several files in a local directory `/config` which you need to copy -to a directory `/etc/my-app.d` in a container: - - $ docker container cp /config/. myappcontainer:/etc/my-app.d - -The above command will copy the contents of the local `/config` directory into -the directory `/etc/my-app.d` in the container. - -Finally, if you want to copy a symbolic link into a container, you typically -want to copy the linked target and not the link itself. To copy the target, use -the `-L` option, for example: - - $ ln -s /tmp/somefile /tmp/somefile.ln - $ docker container cp -L /tmp/somefile.ln myappcontainer:/tmp/ - -This command copies content of the local `/tmp/somefile` into the file -`/tmp/somefile.ln` in the container. Without `-L` option, the `/tmp/somefile.ln` -preserves its symbolic link but not its content. 
diff --git a/fn/vendor/github.com/docker/docker/man/src/container/create-example.md b/fn/vendor/github.com/docker/docker/man/src/container/create-example.md deleted file mode 100644 index bd8329366..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/create-example.md +++ /dev/null @@ -1,35 +0,0 @@ -### Specify isolation technology for container (--isolation) - -This option is useful in situations where you are running Docker containers on -Windows. The `--isolation=` option sets a container's isolation -technology. On Linux, the only supported is the `default` option which uses -Linux namespaces. On Microsoft Windows, you can specify these values: - -* `default`: Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. -* `process`: Namespace isolation only. -* `hyperv`: Hyper-V hypervisor partition-based isolation. - -Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. - -### Dealing with dynamically created devices (--device-cgroup-rule) - -Devices available to a container are assigned at creation time. The -assigned devices will both be added to the cgroup.allow file and -created into the container once it is run. This poses a problem when -a new device needs to be added to running container. - -One of the solution is to add a more permissive rule to a container -allowing it access to a wider range of devices. For example, supposing -our container needs access to a character device with major `42` and -any number of minor number (added as new devices appear), the -following rule would be added: - -``` -docker create --device-cgroup-rule='c 42:* rmw' -name my-container my-image -``` - -Then, a user could ask `udev` to execute a script that would `docker exec my-container mknod newDevX c 42 ` -the required device when it is added. 
- -NOTE: initially present devices still need to be explicitly added to -the create/run command diff --git a/fn/vendor/github.com/docker/docker/man/src/container/create.md b/fn/vendor/github.com/docker/docker/man/src/container/create.md deleted file mode 100644 index e47bb38db..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/create.md +++ /dev/null @@ -1,87 +0,0 @@ -Creates a writeable container layer over the specified image and prepares it for -running the specified command. The container ID is then printed to STDOUT. This -is similar to **docker run -d** except the container is never started. You can -then use the **docker start ** command to start the container at -any point. - -The initial status of the container created with **docker create** is 'created'. - -### OPTIONS - -The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR` -can be an absolute path or a `name` value. A `name` value must start with an -alphanumeric character, followed by `a-z0-9`, `_` (underscore), `.` (period) or -`-` (hyphen). An absolute path starts with a `/` (forward slash). - -If you supply a `HOST-DIR` that is an absolute path, Docker bind-mounts to the -path you specify. If you supply a `name`, Docker creates a named volume by that -`name`. For example, you can specify either `/foo` or `foo` for a `HOST-DIR` -value. If you supply the `/foo` value, Docker creates a bind-mount. If you -supply the `foo` specification, Docker creates a named volume. - -You can specify multiple **-v** options to mount one or more mounts to a -container. To use these same mounts in other containers, specify the -**--volumes-from** option also. - -You can supply additional options for each bind-mount following an additional -colon. A `:ro` or `:rw` suffix mounts a volume in read-only or read-write -mode, respectively. By default, volumes are mounted in read-write mode. 
-You can also specify the consistency requirement for the mount, either -`:consistent` (the default), `:cached`, or `:delegated`. Multiple options are -separated by commas, e.g. `:ro,cached`. - -Labeling systems like SELinux require that proper labels are placed on volume -content mounted into a container. Without a label, the security system might -prevent the processes running inside the container from using the content. By -default, Docker does not change the labels set by the OS. - -To change a label in the container context, you can add either of two suffixes -`:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file -objects on the shared volumes. The `z` option tells Docker that two containers -share the volume content. As a result, Docker labels the content with a shared -content label. Shared volume labels allow all containers to read/write content. -The `Z` option tells Docker to label the content with a private unshared label. -Only the current container can use a private volume. - -By default bind mounted volumes are `private`. That means any mounts done -inside container will not be visible on host and vice-a-versa. One can change -this behavior by specifying a volume mount propagation property. Making a -volume `shared` mounts done under that volume inside container will be -visible on host and vice-a-versa. Making a volume `slave` enables only one -way mount propagation and that is mounts done on host under that volume -will be visible inside container but not the other way around. - -To control mount propagation property of volume one can use `:[r]shared`, -`:[r]slave` or `:[r]private` propagation flag. Propagation property can -be specified only for bind mounted volumes and not for internal volumes or -named volumes. For mount propagation to work source mount point (mount point -where source dir is mounted on) has to have right propagation properties. For -shared volumes, source mount point has to be shared. 
And for slave volumes, -source mount has to be either shared or slave. - -Use `df ` to figure out the source mount and then use -`findmnt -o TARGET,PROPAGATION ` to figure out propagation -properties of source mount. If `findmnt` utility is not available, then one -can look at mount entry for source mount point in `/proc/self/mountinfo`. Look -at `optional fields` and see if any propagation properties are specified. -`shared:X` means mount is `shared`, `master:X` means mount is `slave` and if -nothing is there that means mount is `private`. - -To change propagation properties of a mount point use `mount` command. For -example, if one wants to bind mount source directory `/foo` one can do -`mount --bind /foo /foo` and `mount --make-private --make-shared /foo`. This -will convert /foo into a `shared` mount point. Alternatively one can directly -change propagation properties of source mount. Say `/` is source mount for -`/foo`, then use `mount --make-shared /` to convert `/` into a `shared` mount. - -> **Note**: -> When using systemd to manage the Docker daemon's start and stop, in the systemd -> unit file there is an option to control mount propagation for the Docker daemon -> itself, called `MountFlags`. The value of this setting may cause Docker to not -> see mount propagation changes made on the mount point. For example, if this value -> is `slave`, you may not be able to use the `shared` or `rshared` propagation on -> a volume. - - -To disable automatic copying of data from the container path to the volume, use -the `nocopy` flag. The `nocopy` flag can be set on bind mounts and named volumes. 
diff --git a/fn/vendor/github.com/docker/docker/man/src/container/diff.md b/fn/vendor/github.com/docker/docker/man/src/container/diff.md deleted file mode 100644 index eb485e364..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/diff.md +++ /dev/null @@ -1,39 +0,0 @@ -List the changed files and directories in a container᾿s filesystem since the -container was created. Three different types of change are tracked: - -| Symbol | Description | -|--------|---------------------------------| -| `A` | A file or directory was added | -| `D` | A file or directory was deleted | -| `C` | A file or directory was changed | - -You can use the full or shortened container ID or the container name set using -**docker run --name** option. - -# EXAMPLES - -Inspect the changes to an `nginx` container: - -```bash -$ docker diff 1fdfd1f54c1b - -C /dev -C /dev/console -C /dev/core -C /dev/stdout -C /dev/fd -C /dev/ptmx -C /dev/stderr -C /dev/stdin -C /run -A /run/nginx.pid -C /var/lib/nginx/tmp -A /var/lib/nginx/tmp/client_body -A /var/lib/nginx/tmp/fastcgi -A /var/lib/nginx/tmp/proxy -A /var/lib/nginx/tmp/scgi -A /var/lib/nginx/tmp/uwsgi -C /var/log/nginx -A /var/log/nginx/access.log -A /var/log/nginx/error.log -``` diff --git a/fn/vendor/github.com/docker/docker/man/src/container/exec.md b/fn/vendor/github.com/docker/docker/man/src/container/exec.md deleted file mode 100644 index 033db426b..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/exec.md +++ /dev/null @@ -1,25 +0,0 @@ -Run a process in a running container. - -The command started using `docker exec` will only run while the container's primary -process (`PID 1`) is running, and will not be restarted if the container is restarted. 
- -If the container is paused, then the `docker exec` command will wait until the -container is unpaused, and then run - -# CAPABILITIES - -`privileged` gives the process extended -[Linux capabilities](http://man7.org/linux/man-pages/man7/capabilities.7.html) -when running in a container. - -Without this flag, the process run by `docker exec` in a running container has -the same capabilities as the container, which may be limited. Set -`--privileged` to give all capabilities to the process. - -# USER -`user` sets the username or UID used and optionally the groupname or GID for the specified command. - - The followings examples are all valid: - --user [user | user:group | uid | uid:gid | user:gid | uid:group ] - - Without this argument the command will be run as root in the container. diff --git a/fn/vendor/github.com/docker/docker/man/src/container/export.md b/fn/vendor/github.com/docker/docker/man/src/container/export.md deleted file mode 100644 index ac07d9630..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/export.md +++ /dev/null @@ -1,20 +0,0 @@ -Export the contents of a container's filesystem using the full or shortened -container ID or container name. The output is exported to STDOUT and can be -redirected to a tar file. - -Stream to a file instead of STDOUT by using **-o**. - -# EXAMPLES -Export the contents of the container called angry_bell to a tar file -called angry_bell.tar: - - $ docker export angry_bell > angry_bell.tar - $ docker export --output=angry_bell-latest.tar angry_bell - $ ls -sh angry_bell.tar - 321M angry_bell.tar - $ ls -sh angry_bell-latest.tar - 321M angry_bell-latest.tar - -# See also -**docker-import(1)** to create an empty filesystem image -and import the contents of the tarball into it, then optionally tag it. 
diff --git a/fn/vendor/github.com/docker/docker/man/src/container/kill.md b/fn/vendor/github.com/docker/docker/man/src/container/kill.md deleted file mode 100644 index b8b94e528..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/kill.md +++ /dev/null @@ -1,2 +0,0 @@ -The main process inside each container specified will be sent SIGKILL, - or any signal specified with option --signal. diff --git a/fn/vendor/github.com/docker/docker/man/src/container/logs.md b/fn/vendor/github.com/docker/docker/man/src/container/logs.md deleted file mode 100644 index c053f8575..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/logs.md +++ /dev/null @@ -1,28 +0,0 @@ -The **docker container logs** command batch-retrieves whatever logs are present for -a container at the time of execution. This does not guarantee execution -order when combined with a docker run (i.e., your run may not have generated -any logs at the time you execute docker container logs). - -The **docker container logs --follow** command combines commands **docker container logs** and -**docker attach**. It will first return all logs from the beginning and -then continue streaming new output from the container's stdout and stderr. - -**Warning**: This command works only for the **json-file** or **journald** -logging drivers. - -The `--since` option can be Unix timestamps, date formatted timestamps, or Go -duration strings (e.g. `10m`, `1h30m`) computed relative to the client machine's -time. Supported formats for date formatted time stamps include RFC3339Nano, -RFC3339, `2006-01-02T15:04:05`, `2006-01-02T15:04:05.999999999`, -`2006-01-02Z07:00`, and `2006-01-02`. The local timezone on the client will be -used if you do not provide either a `Z` or a `+-00:00` timezone offset at the -end of the timestamp. 
When providing Unix timestamps enter -seconds[.nanoseconds], where seconds is the number of seconds that have elapsed -since January 1, 1970 (midnight UTC/GMT), not counting leap seconds (aka Unix -epoch or Unix time), and the optional .nanoseconds field is a fraction of a -second no more than nine digits long. You can combine the `--since` option with -either or both of the `--follow` or `--tail` options. - -The `docker container logs --details` command will add on extra attributes, such as -environment variables and labels, provided to `--log-opt` when creating the -container. diff --git a/fn/vendor/github.com/docker/docker/man/src/container/ls.md b/fn/vendor/github.com/docker/docker/man/src/container/ls.md deleted file mode 100644 index 5cb63079e..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/ls.md +++ /dev/null @@ -1,110 +0,0 @@ -List the containers in the local repository. By default this shows only -the running containers. - -## Filters - -Filter output based on these conditions: - - exited= an exit code of - - label= or label== - - status=(created|restarting|running|paused|exited|dead) - - name= a container's name - - id= a container's ID - - is-task=(true|false) - containers that are a task (part of a service managed by swarm) - - before=(|) - - since=(|) - - ancestor=([:tag]||) - containers created from an image or a descendant. - - volume=(|) - - network=(|) - containers connected to the provided network - - health=(starting|healthy|unhealthy|none) - filters containers based on healthcheck status - - publish=([/]|/[]) - filters containers based on published ports - - expose=([/]|/[]) - filters containers based on exposed ports - -## Format - - Pretty-print containers using a Go template. - Valid placeholders: - .ID - Container ID - .Image - Image ID - .Command - Quoted command - .CreatedAt - Time when the container was created. - .RunningFor - Elapsed time since the container was started. - .Ports - Exposed ports. 
- .Status - Container status. - .Size - Container disk size. - .Names - Container names. - .Labels - All labels assigned to the container. - .Label - Value of a specific label for this container. For example `{{.Label "com.docker.swarm.cpu"}}` - .Mounts - Names of the volumes mounted in this container. - -# EXAMPLES -## Display all containers, including non-running - - $ docker container ls -a - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - a87ecb4f327c fedora:20 /bin/sh -c #(nop) MA 20 minutes ago Exit 0 desperate_brattain - 01946d9d34d8 vpavlin/rhel7:latest /bin/sh -c #(nop) MA 33 minutes ago Exit 0 thirsty_bell - c1d3b0166030 acffc0358b9e /bin/sh -c yum -y up 2 weeks ago Exit 1 determined_torvalds - 41d50ecd2f57 fedora:20 /bin/sh -c #(nop) MA 2 weeks ago Exit 0 drunk_pike - -## Display only IDs of all containers, including non-running - - $ docker container ls -a -q - a87ecb4f327c - 01946d9d34d8 - c1d3b0166030 - 41d50ecd2f57 - -## Display only IDs of all containers that have the name `determined_torvalds` - - $ docker container ls -a -q --filter=name=determined_torvalds - c1d3b0166030 - -## Display containers with their commands - - $ docker container ls --format "{{.ID}}: {{.Command}}" - a87ecb4f327c: /bin/sh -c #(nop) MA - 01946d9d34d8: /bin/sh -c #(nop) MA - c1d3b0166030: /bin/sh -c yum -y up - 41d50ecd2f57: /bin/sh -c #(nop) MA - -## Display containers with their labels in a table - - $ docker container ls --format "table {{.ID}}\t{{.Labels}}" - CONTAINER ID LABELS - a87ecb4f327c com.docker.swarm.node=ubuntu,com.docker.swarm.storage=ssd - 01946d9d34d8 - c1d3b0166030 com.docker.swarm.node=debian,com.docker.swarm.cpu=6 - 41d50ecd2f57 com.docker.swarm.node=fedora,com.docker.swarm.cpu=3,com.docker.swarm.storage=ssd - -## Display containers with their node label in a table - - $ docker container ls --format 'table {{.ID}}\t{{(.Label "com.docker.swarm.node")}}' - CONTAINER ID NODE - a87ecb4f327c ubuntu - 01946d9d34d8 - c1d3b0166030 debian - 41d50ecd2f57 
fedora - -## Display containers with `remote-volume` mounted - - $ docker container ls --filter volume=remote-volume --format "table {{.ID}}\t{{.Mounts}}" - CONTAINER ID MOUNTS - 9c3527ed70ce remote-volume - -## Display containers with a volume mounted in `/data` - - $ docker container ls --filter volume=/data --format "table {{.ID}}\t{{.Mounts}}" - CONTAINER ID MOUNTS - 9c3527ed70ce remote-volume - -## Display containers that have published port of 80: - - $ docker ps --filter publish=80 - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - fc7e477723b7 busybox "top" About a minute ago Up About a minute 0.0.0.0:32768->80/tcp admiring_roentgen - -## Display containers that have exposed TCP port in the range of `8000-8080`: - - $ docker ps --filter expose=8000-8080/tcp - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 9833437217a5 busybox "top" 21 seconds ago Up 19 seconds 8080/tcp dreamy_mccarthy diff --git a/fn/vendor/github.com/docker/docker/man/src/container/pause.md b/fn/vendor/github.com/docker/docker/man/src/container/pause.md deleted file mode 100644 index 09ea5b93d..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/pause.md +++ /dev/null @@ -1,12 +0,0 @@ -The `docker container pause` command suspends all processes in the specified containers. -On Linux, this uses the cgroups freezer. Traditionally, when suspending a process -the `SIGSTOP` signal is used, which is observable by the process being suspended. -With the cgroups freezer the process is unaware, and unable to capture, -that it is being suspended, and subsequently resumed. On Windows, only Hyper-V -containers can be paused. - -See the [cgroups freezer documentation] -(https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) for -further details. - -**docker-container-unpause(1)** to unpause all processes within a container. 
diff --git a/fn/vendor/github.com/docker/docker/man/src/container/port.md b/fn/vendor/github.com/docker/docker/man/src/container/port.md deleted file mode 100644 index a1c8cc6ee..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/port.md +++ /dev/null @@ -1,26 +0,0 @@ -List port mappings for the CONTAINER, or lookup the public-facing port that is NAT-ed to the PRIVATE_PORT - -# EXAMPLES - - $ docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - b650456536c7 busybox:latest top 54 minutes ago Up 54 minutes 0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp test - -## Find out all the ports mapped - - $ docker container port test - 7890/tcp -> 0.0.0.0:4321 - 9876/tcp -> 0.0.0.0:1234 - -## Find out a specific mapping - - $ docker container port test 7890/tcp - 0.0.0.0:4321 - - $ docker container port test 7890 - 0.0.0.0:4321 - -## An example showing error for non-existent mapping - - $ docker container port test 7890/udp - 2014/06/24 11:53:36 Error: No public port '7890/udp' published for test diff --git a/fn/vendor/github.com/docker/docker/man/src/container/rename.md b/fn/vendor/github.com/docker/docker/man/src/container/rename.md deleted file mode 100644 index e6f49a0eb..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/rename.md +++ /dev/null @@ -1 +0,0 @@ -Rename a container. Container may be running, paused or stopped. diff --git a/fn/vendor/github.com/docker/docker/man/src/container/restart.md b/fn/vendor/github.com/docker/docker/man/src/container/restart.md deleted file mode 100644 index 66ef6688e..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/restart.md +++ /dev/null @@ -1 +0,0 @@ -Restart each container listed. 
diff --git a/fn/vendor/github.com/docker/docker/man/src/container/rm.md b/fn/vendor/github.com/docker/docker/man/src/container/rm.md deleted file mode 100644 index 561f0e913..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/rm.md +++ /dev/null @@ -1,37 +0,0 @@ -**docker container rm** will remove one or more containers from the host node. The -container name or ID can be used. This does not remove images. You cannot -remove a running container unless you use the **-f** option. To see all -containers on a host use the **docker container ls -a** command. - -# EXAMPLES - -## Removing a container using its ID - -To remove a container using its ID, find either from a **docker ps -a** -command, or use the ID returned from the **docker run** command, or retrieve -it from a file used to store it using the **docker run --cidfile**: - - docker container rm abebf7571666 - -## Removing a container using the container name - -The name of the container can be found using the **docker ps -a** -command. The use that name as follows: - - docker container rm hopeful_morse - -## Removing a container and all associated volumes - - $ docker container rm -v redis - redis - -This command will remove the container and any volumes associated with it. -Note that if a volume was specified with a name, it will not be removed. - - $ docker create -v awesome:/foo -v /bar --name hello redis - hello - $ docker container rm -v hello - -In this example, the volume for `/foo` will remain in tact, but the volume for -`/bar` will be removed. The same behavior holds for volumes inherited with -`--volumes-from`. diff --git a/fn/vendor/github.com/docker/docker/man/src/container/run.md b/fn/vendor/github.com/docker/docker/man/src/container/run.md deleted file mode 100644 index 5e20273e6..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/run.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker run`. 
diff --git a/fn/vendor/github.com/docker/docker/man/src/container/start.md b/fn/vendor/github.com/docker/docker/man/src/container/start.md deleted file mode 100644 index 48d8592c6..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/start.md +++ /dev/null @@ -1 +0,0 @@ -Start one or more containers. diff --git a/fn/vendor/github.com/docker/docker/man/src/container/stats.md b/fn/vendor/github.com/docker/docker/man/src/container/stats.md deleted file mode 100644 index 2de672ee2..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/stats.md +++ /dev/null @@ -1,43 +0,0 @@ -Display a live stream of one or more containers' resource usage statistics - -# Format - - Pretty-print containers statistics using a Go template. - Valid placeholders: - .Container - Container name or ID. - .Name - Container name. - .ID - Container ID. - .CPUPerc - CPU percentage. - .MemUsage - Memory usage. - .NetIO - Network IO. - .BlockIO - Block IO. - .MemPerc - Memory percentage (Not available on Windows). - .PIDs - Number of PIDs (Not available on Windows). - -# EXAMPLES - -Running `docker container stats` on all running containers. - - $ docker container stats - CONTAINER CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O - 1285939c1fd3 0.07% 796 KiB / 64 MiB 1.21% 788 B / 648 B 3.568 MB / 512 KB - 9c76f7834ae2 0.07% 2.746 MiB / 64 MiB 4.29% 1.266 KB / 648 B 12.4 MB / 0 B - d1ea048f04e4 0.03% 4.583 MiB / 64 MiB 6.30% 2.854 KB / 648 B 27.7 MB / 0 B - -Running `docker container stats` on multiple containers by name and id. - - $ docker container stats fervent_panini 5acfcb1b4fd1 - CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O - 5acfcb1b4fd1 0.00% 115.2 MiB/1.045 GiB 11.03% 1.422 kB/648 B - fervent_panini 0.02% 11.08 MiB/1.045 GiB 1.06% 648 B/648 B - -Running `docker container stats` with customized format on all (Running and Stopped) containers. 
- - $ docker container stats --all --format "table {{.ID}}\t{{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}" - CONTAINER ID NAME CPU % MEM USAGE / LIMIT - c9dfa83f0317f87637d5b7e67aa4223337d947215c5a9947e697e4f7d3e0f834 ecstatic_noether 0.00% 56KiB / 15.57GiB - 8f92d01cf3b29b4f5fca4cd33d907e05def7af5a3684711b20a2369d211ec67f stoic_goodall 0.07% 32.86MiB / 15.57GiB - 38dd23dba00f307d53d040c1d18a91361bbdcccbf592315927d56cf13d8b7343 drunk_visvesvaraya 0.00% 0B / 0B - 5a8b07ec4cc52823f3cbfdb964018623c1ba307bce2c057ccdbde5f4f6990833 big_heisenberg 0.00% 0B / 0B - -`drunk_visvesvaraya` and `big_heisenberg` are stopped containers in the above example. diff --git a/fn/vendor/github.com/docker/docker/man/src/container/stop.md b/fn/vendor/github.com/docker/docker/man/src/container/stop.md deleted file mode 100644 index e3142481b..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/stop.md +++ /dev/null @@ -1 +0,0 @@ -Stop a container (Send SIGTERM, and then SIGKILL after grace period) diff --git a/fn/vendor/github.com/docker/docker/man/src/container/top.md b/fn/vendor/github.com/docker/docker/man/src/container/top.md deleted file mode 100644 index 5e243569a..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/top.md +++ /dev/null @@ -1,11 +0,0 @@ -Display the running process of the container. ps-OPTION can be any of the options you would pass to a Linux ps command. - -All displayed information is from host's point of view. - -# EXAMPLES - -Run **docker container top** with the ps option of -x: - - $ docker container top 8601afda2b -x - PID TTY STAT TIME COMMAND - 16623 ? 
Ss 0:00 sleep 99999 diff --git a/fn/vendor/github.com/docker/docker/man/src/container/unpause.md b/fn/vendor/github.com/docker/docker/man/src/container/unpause.md deleted file mode 100644 index 0e77ceed6..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/unpause.md +++ /dev/null @@ -1,6 +0,0 @@ -The `docker container unpause` command un-suspends all processes in a container. -On Linux, it does this using the cgroups freezer. - -See the [cgroups freezer documentation] -(https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) for -further details. diff --git a/fn/vendor/github.com/docker/docker/man/src/container/update.md b/fn/vendor/github.com/docker/docker/man/src/container/update.md deleted file mode 100644 index 26ee4e321..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/update.md +++ /dev/null @@ -1,102 +0,0 @@ -The **docker container update** command dynamically updates container configuration. -You can use this command to prevent containers from consuming too many -resources from their Docker host. With a single command, you can place -limits on a single container or on many. To specify more than one container, -provide space-separated list of container names or IDs. - -With the exception of the **--kernel-memory** option, you can specify these -options on a running or a stopped container. On kernel version older than -4.6, You can only update **--kernel-memory** on a stopped container or on -a running container with kernel memory initialized. - -# OPTIONS - -## kernel-memory - -Kernel memory limit (format: `[]`, where unit = b, k, m or g) - -Note that on kernel version older than 4.6, you can not update kernel memory on -a running container if the container is started without kernel memory initialized, -in this case, it can only be updated after it's stopped. The new setting takes -effect when the container is started. 
- -## memory - -Memory limit (format: , where unit = b, k, m or g) - -Note that the memory should be smaller than the already set swap memory limit. -If you want update a memory limit bigger than the already set swap memory limit, -you should update swap memory limit at the same time. If you don't set swap memory -limit on docker create/run but only memory limit, the swap memory is double -the memory limit. - -# EXAMPLES - -The following sections illustrate ways to use this command. - -### Update a container's cpu-shares - -To limit a container's cpu-shares to 512, first identify the container -name or ID. You can use **docker ps** to find these values. You can also -use the ID returned from the **docker run** command. Then, do the following: - -```bash -$ docker container update --cpu-shares 512 abebf7571666 -``` - -### Update a container with cpu-shares and memory - -To update multiple resource configurations for multiple containers: - -```bash -$ docker container update --cpu-shares 512 -m 300M abebf7571666 hopeful_morse -``` - -### Update a container's kernel memory constraints - -You can update a container's kernel memory limit using the **--kernel-memory** -option. On kernel version older than 4.6, this option can be updated on a -running container only if the container was started with **--kernel-memory**. -If the container was started *without* **--kernel-memory** you need to stop -the container before updating kernel memory. - -For example, if you started a container with this command: - -```bash -$ docker run -dit --name test --kernel-memory 50M ubuntu bash -``` - -You can update kernel memory while the container is running: - -```bash -$ docker container update --kernel-memory 80M test -``` - -If you started a container *without* kernel memory initialized: - -```bash -$ docker run -dit --name test2 --memory 300M ubuntu bash -``` - -Update kernel memory of running container `test2` will fail. 
You need to stop -the container before updating the **--kernel-memory** setting. The next time you -start it, the container uses the new value. - -Kernel version newer than (include) 4.6 does not have this limitation, you -can use `--kernel-memory` the same way as other options. - -### Update a container's restart policy - -You can change a container's restart policy on a running container. The new -restart policy takes effect instantly after you run `docker container update` on a -container. - -To update restart policy for one or more containers: - -```bash -$ docker container update --restart=on-failure:3 abebf7571666 hopeful_morse -``` - -Note that if the container is started with "--rm" flag, you cannot update the restart -policy for it. The `AutoRemove` and `RestartPolicy` are mutually exclusive for the -container. diff --git a/fn/vendor/github.com/docker/docker/man/src/container/wait.md b/fn/vendor/github.com/docker/docker/man/src/container/wait.md deleted file mode 100644 index 63dcc5a48..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/container/wait.md +++ /dev/null @@ -1,8 +0,0 @@ -Block until a container stops, then print its exit code. - -# EXAMPLES - - $ docker run -d fedora sleep 99 - 079b83f558a2bc52ecad6b2a5de13622d584e6bb1aea058c11b36511e85e7622 - $ docker container wait 079b83f558a2bc - 0 diff --git a/fn/vendor/github.com/docker/docker/man/src/cp.md b/fn/vendor/github.com/docker/docker/man/src/cp.md deleted file mode 100644 index b1898ffc7..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/cp.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker container cp`. diff --git a/fn/vendor/github.com/docker/docker/man/src/create.md b/fn/vendor/github.com/docker/docker/man/src/create.md deleted file mode 100644 index f600d7d53..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/create.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker container create`. 
diff --git a/fn/vendor/github.com/docker/docker/man/src/diff.md b/fn/vendor/github.com/docker/docker/man/src/diff.md deleted file mode 100644 index 29a639efa..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/diff.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker container diff`. diff --git a/fn/vendor/github.com/docker/docker/man/src/events.md b/fn/vendor/github.com/docker/docker/man/src/events.md deleted file mode 100644 index 05fa614b7..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/events.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker system events`. diff --git a/fn/vendor/github.com/docker/docker/man/src/exec.md b/fn/vendor/github.com/docker/docker/man/src/exec.md deleted file mode 100644 index 21d2ce31e..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/exec.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker container exec`. diff --git a/fn/vendor/github.com/docker/docker/man/src/export.md b/fn/vendor/github.com/docker/docker/man/src/export.md deleted file mode 100644 index 1cc979979..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/export.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker container export`. diff --git a/fn/vendor/github.com/docker/docker/man/src/history.md b/fn/vendor/github.com/docker/docker/man/src/history.md deleted file mode 100644 index 8b5f3e871..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/history.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker image history`. diff --git a/fn/vendor/github.com/docker/docker/man/src/image/build.md b/fn/vendor/github.com/docker/docker/man/src/image/build.md deleted file mode 100644 index 9dc897584..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/image/build.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker build`. 
diff --git a/fn/vendor/github.com/docker/docker/man/src/image/history.md b/fn/vendor/github.com/docker/docker/man/src/image/history.md deleted file mode 100644 index da7e5d64a..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/image/history.md +++ /dev/null @@ -1,18 +0,0 @@ -Show the history of when and how an image was created. - -# EXAMPLES - $ docker history fedora - IMAGE CREATED CREATED BY SIZE COMMENT - 105182bb5e8b 5 days ago /bin/sh -c #(nop) ADD file:71356d2ad59aa3119d 372.7 MB - 73bd853d2ea5 13 days ago /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar 0 B - 511136ea3c5a 10 months ago 0 B Imported from - - -## Display comments in the image history -The `docker commit` command has a **-m** flag for adding comments to the image. These comments will be displayed in the image history. - - $ sudo docker history docker:scm - IMAGE CREATED CREATED BY SIZE COMMENT - 2ac9d1098bf1 3 months ago /bin/bash 241.4 MB Added Apache to Fedora base image - 88b42ffd1f7c 5 months ago /bin/sh -c #(nop) ADD file:1fd8d7f9f6557cafc7 373.7 MB - c69cab00d6ef 5 months ago /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar 0 B - 511136ea3c5a 19 months ago 0 B Imported from - diff --git a/fn/vendor/github.com/docker/docker/man/src/image/import.md b/fn/vendor/github.com/docker/docker/man/src/image/import.md deleted file mode 100644 index 2814a71e4..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/image/import.md +++ /dev/null @@ -1,42 +0,0 @@ -Create a new filesystem image from the contents of a tarball (`.tar`, -`.tar.gz`, `.tgz`, `.bzip`, `.tar.xz`, `.txz`) into it, then optionally tag it. - - -# EXAMPLES - -## Import from a remote location - - # docker image import http://example.com/exampleimage.tgz example/imagerepo - -## Import from a local file - -Import to docker via pipe and stdin: - - # cat exampleimage.tgz | docker image import - example/imagelocal - -Import with a commit message. 
- - # cat exampleimage.tgz | docker image import --message "New image imported from tarball" - exampleimagelocal:new - -Import to a Docker image from a local file. - - # docker image import /path/to/exampleimage.tgz - - -## Import from a local file and tag - -Import to docker via pipe and stdin: - - # cat exampleimageV2.tgz | docker image import - example/imagelocal:V-2.0 - -## Import from a local directory - - # tar -c . | docker image import - exampleimagedir - -## Apply specified Dockerfile instructions while importing the image -This example sets the docker image ENV variable DEBUG to true by default. - - # tar -c . | docker image import -c="ENV DEBUG true" - exampleimagedir - -# See also -**docker-export(1)** to export the contents of a filesystem as a tar archive to STDOUT. diff --git a/fn/vendor/github.com/docker/docker/man/src/image/load.md b/fn/vendor/github.com/docker/docker/man/src/image/load.md deleted file mode 100644 index 81f126fdf..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/image/load.md +++ /dev/null @@ -1,25 +0,0 @@ -Loads a tarred repository from a file or the standard input stream. -Restores both images and tags. Write image names or IDs imported it -standard output stream. - -# EXAMPLES - - $ docker images - REPOSITORY TAG IMAGE ID CREATED SIZE - busybox latest 769b9341d937 7 weeks ago 2.489 MB - $ docker load --input fedora.tar - # […] - Loaded image: fedora:rawhide - # […] - Loaded image: fedora:20 - # […] - $ docker images - REPOSITORY TAG IMAGE ID CREATED SIZE - busybox latest 769b9341d937 7 weeks ago 2.489 MB - fedora rawhide 0d20aec6529d 7 weeks ago 387 MB - fedora 20 58394af37342 7 weeks ago 385.5 MB - fedora heisenbug 58394af37342 7 weeks ago 385.5 MB - fedora latest 58394af37342 7 weeks ago 385.5 MB - -# See also -**docker-image-save(1)** to save one or more images to a tar archive (streamed to STDOUT by default). 
diff --git a/fn/vendor/github.com/docker/docker/man/src/image/ls.md b/fn/vendor/github.com/docker/docker/man/src/image/ls.md deleted file mode 100644 index 7e3274991..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/image/ls.md +++ /dev/null @@ -1,118 +0,0 @@ -This command lists the images stored in the local Docker repository. - -By default, intermediate images, used during builds, are not listed. Some of the -output, e.g., image ID, is truncated, for space reasons. However the truncated -image ID, and often the first few characters, are enough to be used in other -Docker commands that use the image ID. The output includes repository, tag, image -ID, date created and the virtual size. - -The title REPOSITORY for the first title may seem confusing. It is essentially -the image name. However, because you can tag a specific image, and multiple tags -(image instances) can be associated with a single name, the name is really a -repository for all tagged images of the same name. For example consider an image -called fedora. It may be tagged with 18, 19, or 20, etc. to manage different -versions. - -## Filters - -Filters the output based on these conditions: - - - dangling=(true|false) - find unused images - - label= or label== - - before=([:tag]||) - - since=([:tag]||) - - reference=(pattern of an image reference) - -## Format - - Pretty-print images using a Go template. - Valid placeholders: - .ID - Image ID - .Repository - Image repository - .Tag - Image tag - .Digest - Image digest - .CreatedSince - Elapsed time since the image was created - .CreatedAt - Time when the image was created - .Size - Image disk size - -# EXAMPLES - -## Listing the images - -To list the images in a local repository (not the registry) run: - - docker image ls - -The list will contain the image repository name, a tag for the image, and an -image ID, when it was created and its virtual size. Columns: REPOSITORY, TAG, -IMAGE ID, CREATED, and SIZE. 
- -The `docker image ls` command takes an optional `[REPOSITORY[:TAG]]` argument -that restricts the list to images that match the argument. If you specify -`REPOSITORY` but no `TAG`, the `docker image ls` command lists all images in the -given repository. - - docker image ls java - -The `[REPOSITORY[:TAG]]` value must be an "exact match". This means that, for example, -`docker image ls jav` does not match the image `java`. - -If both `REPOSITORY` and `TAG` are provided, only images matching that -repository and tag are listed. To find all local images in the "java" -repository with tag "8" you can use: - - docker image ls java:8 - -To get a verbose list of images which contains all the intermediate images -used in builds use **-a**: - - docker image ls -a - -Previously, the docker image ls command supported the --tree and --dot arguments, -which displayed different visualizations of the image data. Docker core removed -this functionality in the 1.7 version. If you liked this functionality, you can -still find it in the third-party dockviz tool: https://github.com/justone/dockviz. - -## Listing images in a desired format - -When using the --format option, the image command will either output the data -exactly as the template declares or, when using the `table` directive, will -include column headers as well. You can use special characters like `\t` for -inserting tab spacing between columns. 
- -The following example uses a template without headers and outputs the ID and -Repository entries separated by a colon for all images: - - docker images --format "{{.ID}}: {{.Repository}}" - 77af4d6b9913: - b6fa739cedf5: committ - 78a85c484bad: ipbabble - 30557a29d5ab: docker - 5ed6274db6ce: - 746b819f315e: postgres - 746b819f315e: postgres - 746b819f315e: postgres - 746b819f315e: postgres - -To list all images with their repository and tag in a table format you can use: - - docker images --format "table {{.ID}}\t{{.Repository}}\t{{.Tag}}" - IMAGE ID REPOSITORY TAG - 77af4d6b9913 - b6fa739cedf5 committ latest - 78a85c484bad ipbabble - 30557a29d5ab docker latest - 5ed6274db6ce - 746b819f315e postgres 9 - 746b819f315e postgres 9.3 - 746b819f315e postgres 9.3.5 - 746b819f315e postgres latest - -Valid template placeholders are listed above. - -## Listing only the shortened image IDs - -Listing just the shortened image IDs. This can be useful for some automated -tools. - - docker image ls -q diff --git a/fn/vendor/github.com/docker/docker/man/src/image/pull.md b/fn/vendor/github.com/docker/docker/man/src/image/pull.md deleted file mode 100644 index 0286ef150..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/image/pull.md +++ /dev/null @@ -1,189 +0,0 @@ -This command pulls down an image or a repository from a registry. If -there is more than one image for a repository (e.g., fedora) then all -images for that repository name can be pulled down including any tags -(see the option **-a** or **--all-tags**). - -If you do not specify a `REGISTRY_HOST`, the command uses Docker's public -registry located at `registry-1.docker.io` by default. - -# EXAMPLES - -### Pull an image from Docker Hub - -To download a particular image, or set of images (i.e., a repository), use -`docker image pull`. If no tag is provided, Docker Engine uses the `:latest` tag as a -default. 
This command pulls the `debian:latest` image: - - $ docker image pull debian - - Using default tag: latest - latest: Pulling from library/debian - fdd5d7827f33: Pull complete - a3ed95caeb02: Pull complete - Digest: sha256:e7d38b3517548a1c71e41bffe9c8ae6d6d29546ce46bf62159837aad072c90aa - Status: Downloaded newer image for debian:latest - -Docker images can consist of multiple layers. In the example above, the image -consists of two layers; `fdd5d7827f33` and `a3ed95caeb02`. - -Layers can be reused by images. For example, the `debian:jessie` image shares -both layers with `debian:latest`. Pulling the `debian:jessie` image therefore -only pulls its metadata, but not its layers, because all layers are already -present locally: - - $ docker image pull debian:jessie - - jessie: Pulling from library/debian - fdd5d7827f33: Already exists - a3ed95caeb02: Already exists - Digest: sha256:a9c958be96d7d40df920e7041608f2f017af81800ca5ad23e327bc402626b58e - Status: Downloaded newer image for debian:jessie - -To see which images are present locally, use the **docker-images(1)** -command: - - $ docker images - - REPOSITORY TAG IMAGE ID CREATED SIZE - debian jessie f50f9524513f 5 days ago 125.1 MB - debian latest f50f9524513f 5 days ago 125.1 MB - -Docker uses a content-addressable image store, and the image ID is a SHA256 -digest covering the image's configuration and layers. In the example above, -`debian:jessie` and `debian:latest` have the same image ID because they are -actually the *same* image tagged with different names. Because they are the -same image, their layers are stored only once and do not consume extra disk -space. - -For more information about images, layers, and the content-addressable store, -refer to [understand images, containers, and storage drivers](https://docs.docker.com/engine/userguide/storagedriver/imagesandcontainers/) -in the online documentation. 
- - -## Pull an image by digest (immutable identifier) - -So far, you've pulled images by their name (and "tag"). Using names and tags is -a convenient way to work with images. When using tags, you can `docker image pull` an -image again to make sure you have the most up-to-date version of that image. -For example, `docker image pull ubuntu:14.04` pulls the latest version of the Ubuntu -14.04 image. - -In some cases you don't want images to be updated to newer versions, but prefer -to use a fixed version of an image. Docker enables you to pull an image by its -*digest*. When pulling an image by digest, you specify *exactly* which version -of an image to pull. Doing so, allows you to "pin" an image to that version, -and guarantee that the image you're using is always the same. - -To know the digest of an image, pull the image first. Let's pull the latest -`ubuntu:14.04` image from Docker Hub: - - $ docker image pull ubuntu:14.04 - - 14.04: Pulling from library/ubuntu - 5a132a7e7af1: Pull complete - fd2731e4c50c: Pull complete - 28a2f68d1120: Pull complete - a3ed95caeb02: Pull complete - Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 - Status: Downloaded newer image for ubuntu:14.04 - -Docker prints the digest of the image after the pull has finished. In the example -above, the digest of the image is: - - sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 - -Docker also prints the digest of an image when *pushing* to a registry. This -may be useful if you want to pin to a version of the image you just pushed. 
- -A digest takes the place of the tag when pulling an image, for example, to -pull the above image by digest, run the following command: - - $ docker image pull ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 - - sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2: Pulling from library/ubuntu - 5a132a7e7af1: Already exists - fd2731e4c50c: Already exists - 28a2f68d1120: Already exists - a3ed95caeb02: Already exists - Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 - Status: Downloaded newer image for ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 - -Digest can also be used in the `FROM` of a Dockerfile, for example: - - FROM ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 - MAINTAINER some maintainer - -> **Note**: Using this feature "pins" an image to a specific version in time. -> Docker will therefore not pull updated versions of an image, which may include -> security updates. If you want to pull an updated image, you need to change the -> digest accordingly. - -## Pulling from a different registry - -By default, `docker image pull` pulls images from Docker Hub. It is also possible to -manually specify the path of a registry to pull from. For example, if you have -set up a local registry, you can specify its path to pull from it. A registry -path is similar to a URL, but does not contain a protocol specifier (`https://`). - -The following command pulls the `testing/test-image` image from a local registry -listening on port 5000 (`myregistry.local:5000`): - - $ docker image pull myregistry.local:5000/testing/test-image - -Registry credentials are managed by **docker-login(1)**. - -Docker uses the `https://` protocol to communicate with a registry, unless the -registry is allowed to be accessed over an insecure connection. 
Refer to the -[insecure registries](https://docs.docker.com/engine/reference/commandline/daemon/#insecure-registries) -section in the online documentation for more information. - - -## Pull a repository with multiple images - -By default, `docker image pull` pulls a *single* image from the registry. A repository -can contain multiple images. To pull all images from a repository, provide the -`-a` (or `--all-tags`) option when using `docker image pull`. - -This command pulls all images from the `fedora` repository: - - $ docker image pull --all-tags fedora - - Pulling repository fedora - ad57ef8d78d7: Download complete - 105182bb5e8b: Download complete - 511136ea3c5a: Download complete - 73bd853d2ea5: Download complete - .... - - Status: Downloaded newer image for fedora - -After the pull has completed use the `docker images` command to see the -images that were pulled. The example below shows all the `fedora` images -that are present locally: - - $ docker images fedora - - REPOSITORY TAG IMAGE ID CREATED SIZE - fedora rawhide ad57ef8d78d7 5 days ago 359.3 MB - fedora 20 105182bb5e8b 5 days ago 372.7 MB - fedora heisenbug 105182bb5e8b 5 days ago 372.7 MB - fedora latest 105182bb5e8b 5 days ago 372.7 MB - - -## Canceling a pull - -Killing the `docker image pull` process, for example by pressing `CTRL-c` while it is -running in a terminal, will terminate the pull operation. - - $ docker image pull fedora - - Using default tag: latest - latest: Pulling from library/fedora - a3ed95caeb02: Pulling fs layer - 236608c7b546: Pulling fs layer - ^C - -> **Note**: Technically, the Engine terminates a pull operation when the -> connection between the Docker Engine daemon and the Docker Engine client -> initiating the pull is lost. If the connection with the Engine daemon is -> lost for other reasons than a manual interaction, the pull is also aborted. 
diff --git a/fn/vendor/github.com/docker/docker/man/src/image/push.md b/fn/vendor/github.com/docker/docker/man/src/image/push.md deleted file mode 100644 index 8b4334d27..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/image/push.md +++ /dev/null @@ -1,34 +0,0 @@ -Use `docker image push` to share your images to the [Docker Hub](https://hub.docker.com) -registry or to a self-hosted one. - -Refer to **docker-image-tag(1)** for more information about valid image and tag names. - -Killing the **docker image push** process, for example by pressing **CTRL-c** while it -is running in a terminal, terminates the push operation. - -Registry credentials are managed by **docker-login(1)**. - -# EXAMPLES - -## Pushing a new image to a registry - -First save the new image by finding the container ID (using **docker container ls**) -and then committing it to a new image name. Note that only a-z0-9-_. are -allowed when naming images: - - # docker container commit c16378f943fe rhel-httpd - -Now, push the image to the registry using the image ID. In this example the -registry is on host named `registry-host` and listening on port `5000`. To do -this, tag the image with the host name or IP address, and the port of the -registry: - - # docker image tag rhel-httpd registry-host:5000/myadmin/rhel-httpd - # docker image push registry-host:5000/myadmin/rhel-httpd - -Check that this worked by running: - - # docker image ls - -You should see both `rhel-httpd` and `registry-host:5000/myadmin/rhel-httpd` -listed. diff --git a/fn/vendor/github.com/docker/docker/man/src/image/rm.md b/fn/vendor/github.com/docker/docker/man/src/image/rm.md deleted file mode 100644 index 348d45402..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/image/rm.md +++ /dev/null @@ -1,11 +0,0 @@ -Removes one or more images from the host node. This does not remove images from -a registry. You cannot remove an image of a running container unless you use the -**-f** option. 
To see all images on a host use the **docker image ls** command. - -# EXAMPLES - -## Removing an image - -Here is an example of removing an image: - - docker image rm fedora/httpd diff --git a/fn/vendor/github.com/docker/docker/man/src/image/save.md b/fn/vendor/github.com/docker/docker/man/src/image/save.md deleted file mode 100644 index 19d885ec6..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/image/save.md +++ /dev/null @@ -1,19 +0,0 @@ -Produces a tarred repository to the standard output stream. Contains all -parent layers, and all tags + versions, or specified repo:tag. - -Stream to a file instead of STDOUT by using **-o**. - -# EXAMPLES - -Save all fedora repository images to a fedora-all.tar and save the latest -fedora image to a fedora-latest.tar: - - $ docker image save fedora > fedora-all.tar - $ docker image save --output=fedora-latest.tar fedora:latest - $ ls -sh fedora-all.tar - 721M fedora-all.tar - $ ls -sh fedora-latest.tar - 367M fedora-latest.tar - -# See also -**docker-image-load(1)** to load an image from a tar archive on STDIN. diff --git a/fn/vendor/github.com/docker/docker/man/src/image/tag.md b/fn/vendor/github.com/docker/docker/man/src/image/tag.md deleted file mode 100644 index 16abd7529..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/image/tag.md +++ /dev/null @@ -1,54 +0,0 @@ -Assigns a new alias to an image in a registry. An alias refers to the -entire image name including the optional `TAG` after the ':'. - -# OPTIONS -**NAME** - The image name which is made up of slash-separated name components, - optionally prefixed by a registry hostname. The hostname must comply with - standard DNS rules, but may not contain underscores. If a hostname is - present, it may optionally be followed by a port number in the format - `:8080`. If not present, the command uses Docker's public registry located at - `registry-1.docker.io` by default. Name components may contain lowercase - letters, digits and separators. 
A separator is defined as a period, one or - two underscores, or one or more dashes. A name component may not start or end - with a separator. - -**TAG** - The tag assigned to the image to version and distinguish images with the same - name. The tag name must be valid ASCII and may contain lowercase and - uppercase letters, digits, underscores, periods and hyphens. A tag name - may not start with a period or a hyphen and may contain a maximum of 128 - characters. - -# EXAMPLES - -## Tagging an image referenced by ID - -To tag a local image with ID "0e5574283393" into the "fedora" repository with -"version1.0": - - docker image tag 0e5574283393 fedora/httpd:version1.0 - -## Tagging an image referenced by Name - -To tag a local image with name "httpd" into the "fedora" repository with -"version1.0": - - docker image tag httpd fedora/httpd:version1.0 - -Note that since the tag name is not specified, the alias is created for an -existing local version `httpd:latest`. - -## Tagging an image referenced by Name and Tag - -To tag a local image with name "httpd" and tag "test" into the "fedora" -repository with "version1.0.test": - - docker image tag httpd:test fedora/httpd:version1.0.test - -## Tagging an image for a private repository - -To push an image to a private registry and not the central Docker -registry you must tag it with the registry hostname and port (if needed). - - docker image tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0 diff --git a/fn/vendor/github.com/docker/docker/man/src/images.md b/fn/vendor/github.com/docker/docker/man/src/images.md deleted file mode 100644 index ae6a3875e..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/images.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker image ls`. 
diff --git a/fn/vendor/github.com/docker/docker/man/src/import.md b/fn/vendor/github.com/docker/docker/man/src/import.md deleted file mode 100644 index 826c71b1b..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/import.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker image import`. diff --git a/fn/vendor/github.com/docker/docker/man/src/info.md b/fn/vendor/github.com/docker/docker/man/src/info.md deleted file mode 100644 index 35e62f86e..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/info.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker system info`. diff --git a/fn/vendor/github.com/docker/docker/man/src/inspect.md b/fn/vendor/github.com/docker/docker/man/src/inspect.md deleted file mode 100644 index d0a2a6837..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/inspect.md +++ /dev/null @@ -1,286 +0,0 @@ -This displays the low-level information on Docker object(s) (e.g. container, -image, volume,network, node, service, or task) identified by name or ID. By default, -this will render all results in a JSON array. If the container and image have -the same name, this will return container JSON for unspecified type. If a format -is specified, the given template will be executed for each result. - -# EXAMPLES - -Get information about an image when image name conflicts with the container name, -e.g. both image and container are named rhel7: - - $ docker inspect --type=image rhel7 - [ - { - "Id": "fe01a428b9d9de35d29531e9994157978e8c48fa693e1bf1d221dffbbb67b170", - "Parent": "10acc31def5d6f249b548e01e8ffbaccfd61af0240c17315a7ad393d022c5ca2", - .... 
- } - ] - -## Getting information on a container - -To get information on a container use its ID or instance name: - - $ docker inspect d2cc496561d6 - [{ - "Id": "d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47", - "Created": "2015-06-08T16:18:02.505155285Z", - "Path": "bash", - "Args": [], - "State": { - "Running": false, - "Paused": false, - "Restarting": false, - "OOMKilled": false, - "Dead": false, - "Pid": 0, - "ExitCode": 0, - "Error": "", - "StartedAt": "2015-06-08T16:18:03.643865954Z", - "FinishedAt": "2015-06-08T16:57:06.448552862Z" - }, - "Image": "ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", - "NetworkSettings": { - "Bridge": "", - "SandboxID": "6b4851d1903e16dd6a567bd526553a86664361f31036eaaa2f8454d6f4611f6f", - "HairpinMode": false, - "LinkLocalIPv6Address": "", - "LinkLocalIPv6PrefixLen": 0, - "Ports": {}, - "SandboxKey": "/var/run/docker/netns/6b4851d1903e", - "SecondaryIPAddresses": null, - "SecondaryIPv6Addresses": null, - "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", - "Gateway": "172.17.0.1", - "GlobalIPv6Address": "", - "GlobalIPv6PrefixLen": 0, - "IPAddress": "172.17.0.2", - "IPPrefixLen": 16, - "IPv6Gateway": "", - "MacAddress": "02:42:ac:12:00:02", - "Networks": { - "bridge": { - "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", - "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", - "Gateway": "172.17.0.1", - "IPAddress": "172.17.0.2", - "IPPrefixLen": 16, - "IPv6Gateway": "", - "GlobalIPv6Address": "", - "GlobalIPv6PrefixLen": 0, - "MacAddress": "02:42:ac:12:00:02" - } - } - - }, - "ResolvConfPath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/resolv.conf", - "HostnamePath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/hostname", - "HostsPath": 
"/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/hosts", - "LogPath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47-json.log", - "Name": "/adoring_wozniak", - "RestartCount": 0, - "Driver": "devicemapper", - "MountLabel": "", - "ProcessLabel": "", - "Mounts": [ - { - "Source": "/data", - "Destination": "/data", - "Mode": "ro,Z", - "RW": false - "Propagation": "" - } - ], - "AppArmorProfile": "", - "ExecIDs": null, - "HostConfig": { - "Binds": null, - "ContainerIDFile": "", - "Memory": 0, - "MemorySwap": 0, - "CpuShares": 0, - "CpuPeriod": 0, - "CpusetCpus": "", - "CpusetMems": "", - "CpuQuota": 0, - "BlkioWeight": 0, - "OomKillDisable": false, - "Privileged": false, - "PortBindings": {}, - "Links": null, - "PublishAllPorts": false, - "Dns": null, - "DnsSearch": null, - "DnsOptions": null, - "ExtraHosts": null, - "VolumesFrom": null, - "Devices": [], - "NetworkMode": "bridge", - "IpcMode": "", - "PidMode": "", - "UTSMode": "", - "CapAdd": null, - "CapDrop": null, - "RestartPolicy": { - "Name": "no", - "MaximumRetryCount": 0 - }, - "SecurityOpt": null, - "ReadonlyRootfs": false, - "Ulimits": null, - "LogConfig": { - "Type": "json-file", - "Config": {} - }, - "CgroupParent": "" - }, - "GraphDriver": { - "Name": "devicemapper", - "Data": { - "DeviceId": "5", - "DeviceName": "docker-253:1-2763198-d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47", - "DeviceSize": "171798691840" - } - }, - "Config": { - "Hostname": "d2cc496561d6", - "Domainname": "", - "User": "", - "AttachStdin": true, - "AttachStdout": true, - "AttachStderr": true, - "ExposedPorts": null, - "Tty": true, - "OpenStdin": true, - "StdinOnce": true, - "Env": null, - "Cmd": [ - "bash" - ], - "Image": "fedora", - "Volumes": null, - "VolumeDriver": "", - "WorkingDir": "", - "Entrypoint": null, - "NetworkDisabled": false, - "MacAddress": "", - 
"OnBuild": null, - "Labels": {}, - "Memory": 0, - "MemorySwap": 0, - "CpuShares": 0, - "Cpuset": "", - "StopSignal": "SIGTERM" - } - } - ] -## Getting the IP address of a container instance - -To get the IP address of a container use: - - $ docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' d2cc496561d6 - 172.17.0.2 - -## Listing all port bindings - -One can loop over arrays and maps in the results to produce simple text -output: - - $ docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} \ - {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' d2cc496561d6 - 80/tcp -> 80 - -You can get more information about how to write a Go template from: -https://golang.org/pkg/text/template/. - -## Getting size information on a container - - $ docker inspect -s d2cc496561d6 - [ - { - .... - "SizeRw": 0, - "SizeRootFs": 972, - .... - } - ] - -## Getting information on an image - -Use an image's ID or name (e.g., repository/name[:tag]) to get information -about the image: - - $ docker inspect ded7cd95e059 - [{ - "Id": "ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", - "Parent": "48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", - "Comment": "", - "Created": "2015-05-27T16:58:22.937503085Z", - "Container": "76cf7f67d83a7a047454b33007d03e32a8f474ad332c3a03c94537edd22b312b", - "ContainerConfig": { - "Hostname": "76cf7f67d83a", - "Domainname": "", - "User": "", - "AttachStdin": false, - "AttachStdout": false, - "AttachStderr": false, - "ExposedPorts": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "/bin/sh", - "-c", - "#(nop) ADD file:4be46382bcf2b095fcb9fe8334206b584eff60bb3fad8178cbd97697fcb2ea83 in /" - ], - "Image": "48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", - "Volumes": null, - "VolumeDriver": "", - "WorkingDir": "", - "Entrypoint": null, - "NetworkDisabled": false, - "MacAddress": "", - "OnBuild": null, - "Labels": {} - }, - 
"DockerVersion": "1.6.0", - "Author": "Lokesh Mandvekar \u003clsm5@fedoraproject.org\u003e", - "Config": { - "Hostname": "76cf7f67d83a", - "Domainname": "", - "User": "", - "AttachStdin": false, - "AttachStdout": false, - "AttachStderr": false, - "ExposedPorts": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": null, - "Image": "48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", - "Volumes": null, - "VolumeDriver": "", - "WorkingDir": "", - "Entrypoint": null, - "NetworkDisabled": false, - "MacAddress": "", - "OnBuild": null, - "Labels": {} - }, - "Architecture": "amd64", - "Os": "linux", - "Size": 186507296, - "VirtualSize": 186507296, - "GraphDriver": { - "Name": "devicemapper", - "Data": { - "DeviceId": "3", - "DeviceName": "docker-253:1-2763198-ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", - "DeviceSize": "171798691840" - } - } - } - ] diff --git a/fn/vendor/github.com/docker/docker/man/src/kill.md b/fn/vendor/github.com/docker/docker/man/src/kill.md deleted file mode 100644 index 50efbcf03..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/kill.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker container kill`. diff --git a/fn/vendor/github.com/docker/docker/man/src/load.md b/fn/vendor/github.com/docker/docker/man/src/load.md deleted file mode 100644 index 60e77d7a0..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/load.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker image load`. diff --git a/fn/vendor/github.com/docker/docker/man/src/login.md b/fn/vendor/github.com/docker/docker/man/src/login.md deleted file mode 100644 index 4d60882f5..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/login.md +++ /dev/null @@ -1,22 +0,0 @@ -Log in to a Docker Registry located on the specified -`SERVER`. You can specify a URL or a `hostname` for the `SERVER` value. 
If you -do not specify a `SERVER`, the command uses Docker's public registry located at -`https://registry-1.docker.io/` by default. To get a username/password for Docker's public registry, create an account on Docker Hub. - -`docker login` requires user to use `sudo` or be `root`, except when: - -1. connecting to a remote daemon, such as a `docker-machine` provisioned `docker engine`. -2. user is added to the `docker` group. This will impact the security of your system; the `docker` group is `root` equivalent. See [Docker Daemon Attack Surface](https://docs.docker.com/engine/security/security/#/docker-daemon-attack-surface) for details. - -You can log into any public or private repository for which you have -credentials. When you log in, the command stores encoded credentials in -`$HOME/.docker/config.json` on Linux or `%USERPROFILE%/.docker/config.json` on Windows. - -# EXAMPLES - -## Login to a registry on your localhost - - # docker login localhost:8080 - -# See also -**docker-logout(1)** to log out from a Docker registry. diff --git a/fn/vendor/github.com/docker/docker/man/src/logout.md b/fn/vendor/github.com/docker/docker/man/src/logout.md deleted file mode 100644 index 826201803..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/logout.md +++ /dev/null @@ -1,13 +0,0 @@ -Log out of a Docker Registry located on the specified `SERVER`. You can -specify a URL or a `hostname` for the `SERVER` value. If you do not specify a -`SERVER`, the command attempts to log you out of Docker's public registry -located at `https://registry-1.docker.io/` by default. - -# EXAMPLES - -## Log out from a registry on your localhost - - # docker logout localhost:8080 - -# See also -**docker-login(1)** to log in to a Docker registry server. 
diff --git a/fn/vendor/github.com/docker/docker/man/src/logs.md b/fn/vendor/github.com/docker/docker/man/src/logs.md deleted file mode 100644 index e528150e2..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/logs.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker container logs`. diff --git a/fn/vendor/github.com/docker/docker/man/src/network/connect.md b/fn/vendor/github.com/docker/docker/man/src/network/connect.md deleted file mode 100644 index 59bcc0302..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/network/connect.md +++ /dev/null @@ -1,39 +0,0 @@ -Connects a container to a network. You can connect a container by name -or by ID. Once connected, the container can communicate with other containers in -the same network. - -```bash -$ docker network connect multi-host-network container1 -``` - -You can also use the `docker run --network=` option to start a container and immediately connect it to a network. - -```bash -$ docker run -itd --network=multi-host-network --ip 172.20.88.22 --ip6 2001:db8::8822 busybox -``` -You can pause, restart, and stop containers that are connected to a network. -A container connects to its configured networks when it runs. - -If specified, the container's IP address(es) is reapplied when a stopped -container is restarted. If the IP address is no longer available, the container -fails to start. One way to guarantee that the IP address is available is -to specify an `--ip-range` when creating the network, and choose the static IP -address(es) from outside that range. This ensures that the IP address is not -given to another container while this container is not on the network. - -```bash -$ docker network create --subnet 172.20.0.0/16 --ip-range 172.20.240.0/20 multi-host-network -``` - -```bash -$ docker network connect --ip 172.20.128.2 multi-host-network container2 -``` - -To verify the container is connected, use the `docker network inspect` command. 
Use `docker network disconnect` to remove a container from the network. - -Once connected in network, containers can communicate using only another -container's IP address or name. For `overlay` networks or custom plugins that -support multi-host connectivity, containers connected to the same multi-host -network but launched from different Engines can also communicate in this way. - -You can connect a container to one or more networks. The networks need not be the same type. For example, you can connect a single container bridge and overlay networks. diff --git a/fn/vendor/github.com/docker/docker/man/src/network/create.md b/fn/vendor/github.com/docker/docker/man/src/network/create.md deleted file mode 100644 index efbf0d5d4..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/network/create.md +++ /dev/null @@ -1,136 +0,0 @@ -Creates a new network. The `DRIVER` accepts `bridge` or `overlay` which are the -built-in network drivers. If you have installed a third party or your own custom -network driver you can specify that `DRIVER` here also. If you don't specify the -`--driver` option, the command automatically creates a `bridge` network for you. -When you install Docker Engine it creates a `bridge` network automatically. This -network corresponds to the `docker0` bridge that Engine has traditionally relied -on. When launch a new container with `docker run` it automatically connects to -this bridge network. You cannot remove this default bridge network but you can -create new ones using the `network create` command. - -```bash -$ docker network create -d bridge my-bridge-network -``` - -Bridge networks are isolated networks on a single Engine installation. If you -want to create a network that spans multiple Docker hosts each running an -Engine, you must create an `overlay` network. Unlike `bridge` networks overlay -networks require some pre-existing conditions before you can create one. These -conditions are: - -* Access to a key-value store. 
Engine supports Consul, Etcd, and Zookeeper (Distributed store) key-value stores. -* A cluster of hosts with connectivity to the key-value store. -* A properly configured Engine `daemon` on each host in the cluster. - -The `dockerd` options that support the `overlay` network are: - -* `--cluster-store` -* `--cluster-store-opt` -* `--cluster-advertise` - -To read more about these options and how to configure them, see ["*Get started -with multi-host -network*"](https://docs.docker.com/engine/userguide/networking/get-started-overlay/). - -It is also a good idea, though not required, that you install Docker Swarm on to -manage the cluster that makes up your network. Swarm provides sophisticated -discovery and server management that can assist your implementation. - -Once you have prepared the `overlay` network prerequisites you simply choose a -Docker host in the cluster and issue the following to create the network: - -```bash -$ docker network create -d overlay my-multihost-network -``` - -Network names must be unique. The Docker daemon attempts to identify naming -conflicts but this is not guaranteed. It is the user's responsibility to avoid -name conflicts. - -## Connect containers - -When you start a container use the `--network` flag to connect it to a network. -This adds the `busybox` container to the `mynet` network. - -```bash -$ docker run -itd --network=mynet busybox -``` - -If you want to add a container to a network after the container is already -running use the `docker network connect` subcommand. - -You can connect multiple containers to the same network. Once connected, the -containers can communicate using only another container's IP address or name. -For `overlay` networks or custom plugins that support multi-host connectivity, -containers connected to the same multi-host network but launched from different -Engines can also communicate in this way. - -You can disconnect a container from a network using the `docker network -disconnect` command. 
- -## Specifying advanced options - -When you create a network, Engine creates a non-overlapping subnetwork for the -network by default. This subnetwork is not a subdivision of an existing network. -It is purely for ip-addressing purposes. You can override this default and -specify subnetwork values directly using the `--subnet` option. On a -`bridge` network you can only create a single subnet: - -```bash -$ docker network create -d bridge --subnet=192.168.0.0/16 br0 -``` - -Additionally, you also specify the `--gateway` `--ip-range` and `--aux-address` -options. - -```bash -$ docker network create \ - --driver=bridge \ - --subnet=172.28.0.0/16 \ - --ip-range=172.28.5.0/24 \ - --gateway=172.28.5.254 \ - br0 -``` - -If you omit the `--gateway` flag the Engine selects one for you from inside a -preferred pool. For `overlay` networks and for network driver plugins that -support it you can create multiple subnetworks. - -```bash -$ docker network create -d overlay \ - --subnet=192.168.0.0/16 \ - --subnet=192.170.0.0/16 \ - --gateway=192.168.0.100 \ - --gateway=192.170.0.100 \ - --ip-range=192.168.1.0/24 \ - --aux-address="my-router=192.168.1.5" --aux-address="my-switch=192.168.1.6" \ - --aux-address="my-printer=192.170.1.5" --aux-address="my-nas=192.170.1.6" \ - my-multihost-network -``` - -Be sure that your subnetworks do not overlap. If they do, the network create -fails and Engine returns an error. - -### Network internal mode - -By default, when you connect a container to an `overlay` network, Docker also -connects a bridge network to it to provide external connectivity. If you want -to create an externally isolated `overlay` network, you can specify the -`--internal` option. - -### Network ingress mode - -You can create the network which will be used to provide the routing-mesh in the -swarm cluster. You do so by specifying `--ingress` when creating the network. Only -one ingress network can be created at the time. 
The network can be removed only -if no services depend on it. Any option available when creating a overlay network -is also available when creating the ingress network, besides the `--attachable` option. - -```bash -$ docker network create -d overlay \ - --subnet=10.11.0.0/16 \ - --ingress \ - --opt com.docker.network.mtu=9216 \ - --opt encrypted=true \ - my-ingress-network -``` diff --git a/fn/vendor/github.com/docker/docker/man/src/network/disconnect.md b/fn/vendor/github.com/docker/docker/man/src/network/disconnect.md deleted file mode 100644 index 13943f3f8..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/network/disconnect.md +++ /dev/null @@ -1,5 +0,0 @@ -Disconnects a container from a network. - -```bash -$ docker network disconnect multi-host-network container1 -``` diff --git a/fn/vendor/github.com/docker/docker/man/src/network/inspect.md b/fn/vendor/github.com/docker/docker/man/src/network/inspect.md deleted file mode 100644 index 91cb2dae3..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/network/inspect.md +++ /dev/null @@ -1,183 +0,0 @@ -Returns information about one or more networks. By default, this command renders all results in a JSON object. For example, if you connect two containers to the default `bridge` network: - -```bash -$ sudo docker run -itd --name=container1 busybox -f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27 - -$ sudo docker run -itd --name=container2 busybox -bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727 -``` - -The `network inspect` command shows the containers, by id, in its -results. You can specify an alternate format to execute a given -template for each result. Go's -[text/template](http://golang.org/pkg/text/template/) package -describes all the details of the format. 
- -```bash -$ sudo docker network inspect bridge -[ - { - "Name": "bridge", - "Id": "b2b1a2cba717161d984383fd68218cf70bbbd17d328496885f7c921333228b0f", - "Scope": "local", - "Driver": "bridge", - "IPAM": { - "Driver": "default", - "Config": [ - { - "Subnet": "172.17.42.1/16", - "Gateway": "172.17.42.1" - } - ] - }, - "Internal": false, - "Ingress": false, - "Containers": { - "bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727": { - "Name": "container2", - "EndpointID": "0aebb8fcd2b282abe1365979536f21ee4ceaf3ed56177c628eae9f706e00e019", - "MacAddress": "02:42:ac:11:00:02", - "IPv4Address": "172.17.0.2/16", - "IPv6Address": "" - }, - "f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27": { - "Name": "container1", - "EndpointID": "a00676d9c91a96bbe5bcfb34f705387a33d7cc365bac1a29e4e9728df92d10ad", - "MacAddress": "02:42:ac:11:00:01", - "IPv4Address": "172.17.0.1/16", - "IPv6Address": "" - } - }, - "Options": { - "com.docker.network.bridge.default_bridge": "true", - "com.docker.network.bridge.enable_icc": "true", - "com.docker.network.bridge.enable_ip_masquerade": "true", - "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", - "com.docker.network.bridge.name": "docker0", - "com.docker.network.driver.mtu": "1500" - } - } -] -``` - -Returns the information about the user-defined network: - -```bash -$ docker network create simple-network -69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a -$ docker network inspect simple-network -[ - { - "Name": "simple-network", - "Id": "69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a", - "Scope": "local", - "Driver": "bridge", - "IPAM": { - "Driver": "default", - "Config": [ - { - "Subnet": "172.22.0.0/16", - "Gateway": "172.22.0.1" - } - ] - }, - "Containers": {}, - "Options": {} - } -] -``` - -`docker network inspect --verbose` for swarm mode overlay networks shows service-specific -details such as the service's VIP and port mappings. 
It also shows IPs of service tasks, -and the IPs of the nodes where the tasks are running. - -Following is an example output for a overlay network `ov1` that has one service `s1` -attached to. service `s1` in this case has three replicas. - -```bash -$ docker network inspect --verbose ov1 -[ - { - "Name": "ov1", - "Id": "ybmyjvao9vtzy3oorxbssj13b", - "Created": "2017-03-13T17:04:39.776106792Z", - "Scope": "swarm", - "Driver": "overlay", - "EnableIPv6": false, - "IPAM": { - "Driver": "default", - "Options": null, - "Config": [ - { - "Subnet": "10.0.0.0/24", - "Gateway": "10.0.0.1" - } - ] - }, - "Internal": false, - "Attachable": false, - "Ingress": false, - "Containers": { - "020403bd88a15f60747fd25d1ad5fa1272eb740e8a97fc547d8ad07b2f721c5e": { - "Name": "s1.1.pjn2ik0sfgkfzed3h0s00gs9o", - "EndpointID": "ad16946f416562d658f3bb30b9830d73ad91ccf6feae44411269cd0ff674714e", - "MacAddress": "02:42:0a:00:00:04", - "IPv4Address": "10.0.0.4/24", - "IPv6Address": "" - } - }, - "Options": { - "com.docker.network.driver.overlay.vxlanid_list": "4097" - }, - "Labels": {}, - "Peers": [ - { - "Name": "net-3-5d3cfd30a58c", - "IP": "192.168.33.13" - }, - { - "Name": "net-1-6ecbc0040a73", - "IP": "192.168.33.11" - }, - { - "Name": "net-2-fb80208efd75", - "IP": "192.168.33.12" - } - ], - "Services": { - "s1": { - "VIP": "10.0.0.2", - "Ports": [], - "LocalLBIndex": 257, - "Tasks": [ - { - "Name": "s1.2.q4hcq2aiiml25ubtrtg4q1txt", - "EndpointID": "040879b027e55fb658e8b60ae3b87c6cdac7d291e86a190a3b5ac6567b26511a", - "EndpointIP": "10.0.0.5", - "Info": { - "Host IP": "192.168.33.11" - } - }, - { - "Name": "s1.3.yawl4cgkp7imkfx469kn9j6lm", - "EndpointID": "106edff9f120efe44068b834e1cddb5b39dd4a3af70211378b2f7a9e562bbad8", - "EndpointIP": "10.0.0.3", - "Info": { - "Host IP": "192.168.33.12" - } - }, - { - "Name": "s1.1.pjn2ik0sfgkfzed3h0s00gs9o", - "EndpointID": "ad16946f416562d658f3bb30b9830d73ad91ccf6feae44411269cd0ff674714e", - "EndpointIP": "10.0.0.4", - "Info": { - "Host IP": 
"192.168.33.13" - } - } - ] - } - } - } -] -``` diff --git a/fn/vendor/github.com/docker/docker/man/src/network/ls.md b/fn/vendor/github.com/docker/docker/man/src/network/ls.md deleted file mode 100644 index 417344951..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/network/ls.md +++ /dev/null @@ -1,182 +0,0 @@ -Lists all the networks the Engine `daemon` knows about. This includes the -networks that span across multiple hosts in a cluster, for example: - -```bash - $ docker network ls - NETWORK ID NAME DRIVER SCOPE - 7fca4eb8c647 bridge bridge local - 9f904ee27bf5 none null local - cf03ee007fb4 host host local - 78b03ee04fc4 multi-host overlay swarm -``` - -Use the `--no-trunc` option to display the full network id: - -```bash -$ docker network ls --no-trunc -NETWORK ID NAME DRIVER -18a2866682b85619a026c81b98a5e375bd33e1b0936a26cc497c283d27bae9b3 none null -c288470c46f6c8949c5f7e5099b5b7947b07eabe8d9a27d79a9cbf111adcbf47 host host -7b369448dccbf865d397c8d2be0cda7cf7edc6b0945f77d2529912ae917a0185 bridge bridge -95e74588f40db048e86320c6526440c504650a1ff3e9f7d60a497c4d2163e5bd foo bridge -63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 dev bridge -``` - -## Filtering - -The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there -is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). -Multiple filter flags are combined as an `OR` filter. For example, -`-f type=custom -f type=builtin` returns both `custom` and `builtin` networks. - -The currently supported filters are: - -* driver -* id (network's id) -* label (`label=` or `label==`) -* name (network's name) -* scope (`swarm|global|local`) -* type (custom|builtin) - -#### Driver - -The `driver` filter matches networks based on their driver. 
- -The following example matches networks with the `bridge` driver: - -```bash -$ docker network ls --filter driver=bridge -NETWORK ID NAME DRIVER -db9db329f835 test1 bridge -f6e212da9dfd test2 bridge -``` - -#### ID - -The `id` filter matches on all or part of a network's ID. - -The following filter matches all networks with an ID containing the -`63d1ff1f77b0...` string. - -```bash -$ docker network ls --filter id=63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 -NETWORK ID NAME DRIVER -63d1ff1f77b0 dev bridge -``` - -You can also filter for a substring in an ID as this shows: - -```bash -$ docker network ls --filter id=95e74588f40d -NETWORK ID NAME DRIVER -95e74588f40d foo bridge - -$ docker network ls --filter id=95e -NETWORK ID NAME DRIVER -95e74588f40d foo bridge -``` - -#### Label - -The `label` filter matches networks based on the presence of a `label` alone or a `label` and a -value. - -The following filter matches networks with the `usage` label regardless of its value. - -```bash -$ docker network ls -f "label=usage" -NETWORK ID NAME DRIVER -db9db329f835 test1 bridge -f6e212da9dfd test2 bridge -``` - -The following filter matches networks with the `usage` label with the `prod` value. - -```bash -$ docker network ls -f "label=usage=prod" -NETWORK ID NAME DRIVER -f6e212da9dfd test2 bridge -``` - -#### Name - -The `name` filter matches on all or part of a network's name. - -The following filter matches all networks with a name containing the `foobar` string. - -```bash -$ docker network ls --filter name=foobar -NETWORK ID NAME DRIVER -06e7eef0a170 foobar bridge -``` - -You can also filter for a substring in a name as this shows: - -```bash -$ docker network ls --filter name=foo -NETWORK ID NAME DRIVER -95e74588f40d foo bridge -06e7eef0a170 foobar bridge -``` - -#### Scope - -The `scope` filter matches networks based on their scope. 
- -The following example matches networks with the `swarm` scope: - -```bash -$ docker network ls --filter scope=swarm -NETWORK ID NAME DRIVER SCOPE -xbtm0v4f1lfh ingress overlay swarm -ic6r88twuu92 swarmnet overlay swarm -``` - -The following example matches networks with the `local` scope: - -```bash -$ docker network ls --filter scope=local -NETWORK ID NAME DRIVER SCOPE -e85227439ac7 bridge bridge local -0ca0e19443ed host host local -ca13cc149a36 localnet bridge local -f9e115d2de35 none null local -``` - -#### Type - -The `type` filter supports two values; `builtin` displays predefined networks -(`bridge`, `none`, `host`), whereas `custom` displays user defined networks. - -The following filter matches all user defined networks: - -```bash -$ docker network ls --filter type=custom -NETWORK ID NAME DRIVER -95e74588f40d foo bridge -63d1ff1f77b0 dev bridge -``` - -By having this flag it allows for batch cleanup. For example, use this filter -to delete all user defined networks: - -```bash -$ docker network rm `docker network ls --filter type=custom -q` -``` - -A warning will be issued when trying to remove a network that has containers -attached. - -## Format - -Format uses a Go template to print the output. The following variables are -supported: - -* .ID - Network ID -* .Name - Network name -* .Driver - Network driver -* .Scope - Network scope (local, global) -* .IPv6 - Whether IPv6 is enabled on the network or not -* .Internal - Whether the network is internal or not -* .Labels - All labels assigned to the network -* .Label - Value of a specific label for this network. For example `{{.Label "project.version"}}` diff --git a/fn/vendor/github.com/docker/docker/man/src/network/rm.md b/fn/vendor/github.com/docker/docker/man/src/network/rm.md deleted file mode 100644 index 815b6a487..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/network/rm.md +++ /dev/null @@ -1,20 +0,0 @@ -Removes one or more networks by name or identifier. 
To remove a network, -you must first disconnect any containers connected to it. -To remove the network named 'my-network': - -```bash - $ docker network rm my-network -``` - -To delete multiple networks in a single `docker network rm` command, provide -multiple network names or ids. The following example deletes a network with id -`3695c422697f` and a network named `my-network`: - -```bash - $ docker network rm 3695c422697f my-network -``` - -When you specify multiple networks, the command attempts to delete each in turn. -If the deletion of one network fails, the command continues to the next on the -list and tries to delete that. The command reports success or failure for each -deletion. diff --git a/fn/vendor/github.com/docker/docker/man/src/pause.md b/fn/vendor/github.com/docker/docker/man/src/pause.md deleted file mode 100644 index 8779d0601..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/pause.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker container pause`. diff --git a/fn/vendor/github.com/docker/docker/man/src/plugin/ls.md b/fn/vendor/github.com/docker/docker/man/src/plugin/ls.md deleted file mode 100644 index f9f394524..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/plugin/ls.md +++ /dev/null @@ -1,43 +0,0 @@ -Lists all the plugins that are currently installed. You can install plugins -using the `docker plugin install` command. -You can also filter using the `-f` or `--filter` flag. - -## Filters - -Filter output based on these conditions: - - enabled=(true|false) - plugins that are enabled or not - - capability= - filters plugins based on capabilities (currently `volumedriver`, `networkdriver`, `ipamdriver`, or `authz`) - -## Format - - Pretty-print plugins using a Go template. - Valid placeholders: - .ID - Plugin ID. - .Name - Plugin Name. - .Description - Plugin description. - .Enabled - Whether plugin is enabled or not. 
- -# EXAMPLES -## Display all plugins - - $ docker plugin ls - ID NAME DESCRIPTION ENABLED - 869080b57404 tiborvass/sample-volume-plugin:latest A sample volume plugin for Docker true - 141bf6c02ddd vieux/sshfs:latest sshFS plugin for Docker false - -## Display plugins with their ID and names - - $ docker plugin ls --format "{{.ID}}: {{.Name}}" - 869080b57404: tiborvass/sample-volume-plugin:latest - -## Display enabled plugins - - $ docker plugin ls --filter enabled=true - ID NAME DESCRIPTION ENABLED - 869080b57404 tiborvass/sample-volume-plugin:latest A sample volume plugin for Docker true - -## Display plugins with `volumedriver` capability - - $ docker plugin ls --filter capability=volumedriver --format "table {{.ID}}\t{{.Name}}" - ID Name - 869080b57404 tiborvass/sample-volume-plugin:latest diff --git a/fn/vendor/github.com/docker/docker/man/src/port.md b/fn/vendor/github.com/docker/docker/man/src/port.md deleted file mode 100644 index b540ce177..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/port.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker container port`. diff --git a/fn/vendor/github.com/docker/docker/man/src/ps.md b/fn/vendor/github.com/docker/docker/man/src/ps.md deleted file mode 100644 index 83f289e88..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/ps.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker container ls`. diff --git a/fn/vendor/github.com/docker/docker/man/src/pull.md b/fn/vendor/github.com/docker/docker/man/src/pull.md deleted file mode 100644 index 78b0ab87c..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/pull.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker image pull`. diff --git a/fn/vendor/github.com/docker/docker/man/src/push.md b/fn/vendor/github.com/docker/docker/man/src/push.md deleted file mode 100644 index 84f721e0b..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/push.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker image push`. 
diff --git a/fn/vendor/github.com/docker/docker/man/src/rename.md b/fn/vendor/github.com/docker/docker/man/src/rename.md deleted file mode 100644 index 7a237f42f..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/rename.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker container rename`. diff --git a/fn/vendor/github.com/docker/docker/man/src/restart.md b/fn/vendor/github.com/docker/docker/man/src/restart.md deleted file mode 100644 index eddad06b6..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/restart.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker container restart`. diff --git a/fn/vendor/github.com/docker/docker/man/src/rm.md b/fn/vendor/github.com/docker/docker/man/src/rm.md deleted file mode 100644 index 8b0cbd658..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/rm.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker container rm`. diff --git a/fn/vendor/github.com/docker/docker/man/src/rmi.md b/fn/vendor/github.com/docker/docker/man/src/rmi.md deleted file mode 100644 index b77750493..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/rmi.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker image rm`. diff --git a/fn/vendor/github.com/docker/docker/man/src/save.md b/fn/vendor/github.com/docker/docker/man/src/save.md deleted file mode 100644 index 95127f5a4..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/save.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker image save`. diff --git a/fn/vendor/github.com/docker/docker/man/src/search.md b/fn/vendor/github.com/docker/docker/man/src/search.md deleted file mode 100644 index cf2ac4f1a..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/search.md +++ /dev/null @@ -1,36 +0,0 @@ -Search Docker Hub for images that match the specified `TERM`. The table -of images returned displays the name, description (truncated by default), number -of stars awarded, whether the image is official, and whether it is automated. 
- -*Note* - Search queries will only return up to 25 results - -## Filter - - Filter output based on these conditions: - - stars= - - is-automated=(true|false) - - is-official=(true|false) - -# EXAMPLES - -## Search Docker Hub for ranked images - -Search a registry for the term 'fedora' and only display those images -ranked 3 or higher: - - $ docker search --filter=stars=3 fedora - NAME DESCRIPTION STARS OFFICIAL AUTOMATED - mattdm/fedora A basic Fedora image corresponding roughly... 50 - fedora (Semi) Official Fedora base image. 38 - mattdm/fedora-small A small Fedora image on which to build. Co... 8 - goldmann/wildfly A WildFly application server running on a ... 3 [OK] - -## Search Docker Hub for automated images - -Search Docker Hub for the term 'fedora' and only display automated images -ranked 1 or higher: - - $ docker search --filter=is-automated=true --filter=stars=1 fedora - NAME DESCRIPTION STARS OFFICIAL AUTOMATED - goldmann/wildfly A WildFly application server running on a ... 3 [OK] - tutum/fedora-20 Fedora 20 image with SSH access. For the r... 1 [OK] diff --git a/fn/vendor/github.com/docker/docker/man/src/start.md b/fn/vendor/github.com/docker/docker/man/src/start.md deleted file mode 100644 index 9bab86770..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/start.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker container start`. diff --git a/fn/vendor/github.com/docker/docker/man/src/stats.md b/fn/vendor/github.com/docker/docker/man/src/stats.md deleted file mode 100644 index f709ce4f1..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/stats.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker container stats`. diff --git a/fn/vendor/github.com/docker/docker/man/src/stop.md b/fn/vendor/github.com/docker/docker/man/src/stop.md deleted file mode 100644 index 35fd07b62..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/stop.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker container stop`. 
diff --git a/fn/vendor/github.com/docker/docker/man/src/system/events.md b/fn/vendor/github.com/docker/docker/man/src/system/events.md deleted file mode 100644 index 44adc6c39..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/system/events.md +++ /dev/null @@ -1,134 +0,0 @@ -Get event information from the Docker daemon. Information can include historical -information and real-time information. - -Docker containers will report the following events: - - attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update - -Docker images report the following events: - - delete, import, load, pull, push, save, tag, untag - -Docker volumes report the following events: - - create, mount, unmount, destroy - -Docker networks report the following events: - - create, connect, disconnect, destroy - -# OPTIONS - -The `--since` and `--until` parameters can be Unix timestamps, date formatted -timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed -relative to the client machine's time. If you do not provide the `--since` option, -the command returns only new and/or live events. Supported formats for date -formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, -`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local -timezone on the client will be used if you do not provide either a `Z` or a -`+-00:00` timezone offset at the end of the timestamp. When providing Unix -timestamps enter seconds[.nanoseconds], where seconds is the number of seconds -that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap -seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a -fraction of a second no more than nine digits long. 
- -# EXAMPLES - -## Listening for Docker events - -After running docker events a container 786d698004576 is started and stopped -(The container name has been shortened in the output below): - - # docker events - 2015-01-28T20:21:31.000000000-08:00 59211849bc10: (from whenry/testimage:latest) start - 2015-01-28T20:21:31.000000000-08:00 59211849bc10: (from whenry/testimage:latest) die - 2015-01-28T20:21:32.000000000-08:00 59211849bc10: (from whenry/testimage:latest) stop - -## Listening for events since a given date -Again the output container IDs have been shortened for the purposes of this document: - - # docker events --since '2015-01-28' - 2015-01-28T20:25:38.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) create - 2015-01-28T20:25:38.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start - 2015-01-28T20:25:39.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) create - 2015-01-28T20:25:39.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start - 2015-01-28T20:25:40.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) die - 2015-01-28T20:25:42.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) stop - 2015-01-28T20:25:45.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start - 2015-01-28T20:25:45.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) die - 2015-01-28T20:25:46.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) stop - -The following example outputs all events that were generated in the last 3 minutes, -relative to the current time on the client machine: - - # docker events --since '3m' - 2015-05-12T11:51:30.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die - 2015-05-12T15:52:12.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop - 2015-05-12T15:53:45.999999999Z07:00 7805c1d35632: (from redis:2.8) die - 2015-05-12T15:54:03.999999999Z07:00 7805c1d35632: (from redis:2.8) stop - -If you do not provide the --since option, the command returns only new 
and/or -live events. - -## Format - -If a format (`--format`) is specified, the given template will be executed -instead of the default format. Go's **text/template** package describes all the -details of the format. - - # docker events --filter 'type=container' --format 'Type={{.Type}} Status={{.Status}} ID={{.ID}}' - Type=container Status=create ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - Type=container Status=attach ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - Type=container Status=start ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - Type=container Status=resize ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - Type=container Status=die ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - Type=container Status=destroy ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - -If a format is set to `{{json .}}`, the events are streamed as valid JSON -Lines. For information about JSON Lines, please refer to http://jsonlines.org/ . - - # docker events --format '{{json .}}' - {"status":"create","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. - {"status":"attach","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. - {"Type":"network","Action":"connect","Actor":{"ID":"1b50a5bf755f6021dfa78e.. - {"status":"start","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f42.. - {"status":"resize","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. 
- -## Filters - - $ docker events --filter 'event=stop' - 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) - 2014-09-03T17:42:14.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) - - $ docker events --filter 'image=ubuntu-1:14.04' - 2014-05-10T17:42:14.999999999Z07:00 container start 4386fb97867d (image=ubuntu-1:14.04) - 2014-05-10T17:42:14.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) - 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) - - $ docker events --filter 'container=7805c1d35632' - 2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) - 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image= redis:2.8) - - $ docker events --filter 'container=7805c1d35632' --filter 'container=4386fb97867d' - 2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) - 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) - 2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) - 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) - - $ docker events --filter 'container=7805c1d35632' --filter 'event=stop' - 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) - - $ docker events --filter 'type=volume' - 2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local) - 2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, destination=/foo, driver=local, propagation=rprivate) - 2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, driver=local) - 2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local) - - $ docker events --filter 
'type=network' - 2015-12-23T21:38:24.705709133Z network create 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, type=bridge) - 2015-12-23T21:38:25.119625123Z network connect 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, container=b4be644031a3d90b400f88ab3d4bdf4dc23adb250e696b6328b85441abe2c54e, type=bridge) - - $ docker events --filter 'type=plugin' (experimental) - 2016-07-25T17:30:14.825557616Z plugin pull ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) - 2016-07-25T17:30:14.888127370Z plugin enable ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) - diff --git a/fn/vendor/github.com/docker/docker/man/src/system/info.md b/fn/vendor/github.com/docker/docker/man/src/system/info.md deleted file mode 100644 index 9a87e985e..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/system/info.md +++ /dev/null @@ -1,163 +0,0 @@ -This command displays system wide information regarding the Docker installation. -Information displayed includes the kernel version, number of containers and images. -The number of images shown is the number of unique images. The same image tagged -under different names is counted only once. - -If a format is specified, the given template will be executed instead of the -default format. Go's **text/template** package -describes all the details of the format. - -Depending on the storage driver in use, additional information can be shown, such -as pool name, data file, metadata file, data space used, total data space, metadata -space used, and total metadata space. - -The data file is where the images are stored and the metadata file is where the -meta data regarding those images are stored. 
When run for the first time Docker -allocates a certain amount of data space and meta data space from the space -available on the volume where `/var/lib/docker` is mounted. - -# EXAMPLES - -## Display Docker system information - -Here is a sample output for a daemon running on Ubuntu, using the overlay2 -storage driver: - - $ docker -D info - Containers: 14 - Running: 3 - Paused: 1 - Stopped: 10 - Images: 52 - Server Version: 1.13.0 - Storage Driver: overlay2 - Backing Filesystem: extfs - Supports d_type: true - Native Overlay Diff: false - Logging Driver: json-file - Cgroup Driver: cgroupfs - Plugins: - Volume: local - Network: bridge host macvlan null overlay - Swarm: active - NodeID: rdjq45w1op418waxlairloqbm - Is Manager: true - ClusterID: te8kdyw33n36fqiz74bfjeixd - Managers: 1 - Nodes: 2 - Orchestration: - Task History Retention Limit: 5 - Raft: - Snapshot Interval: 10000 - Number of Old Snapshots to Retain: 0 - Heartbeat Tick: 1 - Election Tick: 3 - Dispatcher: - Heartbeat Period: 5 seconds - CA Configuration: - Expiry Duration: 3 months - Node Address: 172.16.66.128 172.16.66.129 - Manager Addresses: - 172.16.66.128:2477 - Runtimes: runc - Default Runtime: runc - Init Binary: docker-init - containerd version: 8517738ba4b82aff5662c97ca4627e7e4d03b531 - runc version: ac031b5bf1cc92239461125f4c1ffb760522bbf2 - init version: N/A (expected: v0.13.0) - Security Options: - apparmor - seccomp - Profile: default - Kernel Version: 4.4.0-31-generic - Operating System: Ubuntu 16.04.1 LTS - OSType: linux - Architecture: x86_64 - CPUs: 2 - Total Memory: 1.937 GiB - Name: ubuntu - ID: H52R:7ZR6:EIIA:76JG:ORIY:BVKF:GSFU:HNPG:B5MK:APSC:SZ3Q:N326 - Docker Root Dir: /var/lib/docker - Debug Mode (client): true - Debug Mode (server): true - File Descriptors: 30 - Goroutines: 123 - System Time: 2016-11-12T17:24:37.955404361-08:00 - EventsListeners: 0 - Http Proxy: http://test:test@proxy.example.com:8080 - Https Proxy: https://test:test@proxy.example.com:8080 - No Proxy: 
localhost,127.0.0.1,docker-registry.somecorporation.com - Registry: https://index.docker.io/v1/ - WARNING: No swap limit support - Labels: - storage=ssd - staging=true - Experimental: false - Insecure Registries: - 127.0.0.0/8 - Registry Mirrors: - http://192.168.1.2/ - http://registry-mirror.example.com:5000/ - Live Restore Enabled: false - - - -The global `-D` option tells all `docker` commands to output debug information. - -The example below shows the output for a daemon running on Red Hat Enterprise Linux, -using the devicemapper storage driver. As can be seen in the output, additional -information about the devicemapper storage driver is shown: - - $ docker info - Containers: 14 - Running: 3 - Paused: 1 - Stopped: 10 - Untagged Images: 52 - Server Version: 1.10.3 - Storage Driver: devicemapper - Pool Name: docker-202:2-25583803-pool - Pool Blocksize: 65.54 kB - Base Device Size: 10.74 GB - Backing Filesystem: xfs - Data file: /dev/loop0 - Metadata file: /dev/loop1 - Data Space Used: 1.68 GB - Data Space Total: 107.4 GB - Data Space Available: 7.548 GB - Metadata Space Used: 2.322 MB - Metadata Space Total: 2.147 GB - Metadata Space Available: 2.145 GB - Udev Sync Supported: true - Deferred Removal Enabled: false - Deferred Deletion Enabled: false - Deferred Deleted Device Count: 0 - Data loop file: /var/lib/docker/devicemapper/devicemapper/data - Metadata loop file: /var/lib/docker/devicemapper/devicemapper/metadata - Library Version: 1.02.107-RHEL7 (2015-12-01) - Execution Driver: native-0.2 - Logging Driver: json-file - Plugins: - Volume: local - Network: null host bridge - Kernel Version: 3.10.0-327.el7.x86_64 - Operating System: Red Hat Enterprise Linux Server 7.2 (Maipo) - OSType: linux - Architecture: x86_64 - CPUs: 1 - Total Memory: 991.7 MiB - Name: ip-172-30-0-91.ec2.internal - ID: I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S - Docker Root Dir: /var/lib/docker - Debug mode (client): false - Debug mode (server): false - Username: 
gordontheturtle - Registry: https://index.docker.io/v1/ - Insecure registries: - myinsecurehost:5000 - 127.0.0.0/8 - -You can also specify the output format: - - $ docker info --format '{{json .}}' - {"ID":"I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S","Containers":14, ...} diff --git a/fn/vendor/github.com/docker/docker/man/src/tag.md b/fn/vendor/github.com/docker/docker/man/src/tag.md deleted file mode 100644 index 55c4ef1a7..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/tag.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker image tag`. diff --git a/fn/vendor/github.com/docker/docker/man/src/top.md b/fn/vendor/github.com/docker/docker/man/src/top.md deleted file mode 100644 index ac0f0845f..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/top.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker container top`. diff --git a/fn/vendor/github.com/docker/docker/man/src/unpause.md b/fn/vendor/github.com/docker/docker/man/src/unpause.md deleted file mode 100644 index df538e135..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/unpause.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker container unpause`. diff --git a/fn/vendor/github.com/docker/docker/man/src/update.md b/fn/vendor/github.com/docker/docker/man/src/update.md deleted file mode 100644 index 162022ab2..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/update.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker container update`. diff --git a/fn/vendor/github.com/docker/docker/man/src/version.md b/fn/vendor/github.com/docker/docker/man/src/version.md deleted file mode 100644 index 5dea4a297..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/version.md +++ /dev/null @@ -1,37 +0,0 @@ -This command displays version information for both the Docker client and -daemon. 
- -# EXAMPLES - -## Display Docker version information - -The default output: - - $ docker version - Client: - Version: 1.8.0 - API version: 1.20 - Go version: go1.4.2 - Git commit: f5bae0a - Built: Tue Jun 23 17:56:00 UTC 2015 - OS/Arch: linux/amd64 - - Server: - Version: 1.8.0 - API version: 1.20 - Go version: go1.4.2 - Git commit: f5bae0a - Built: Tue Jun 23 17:56:00 UTC 2015 - OS/Arch: linux/amd64 - -Get server version: - - $ docker version --format '{{.Server.Version}}' - 1.8.0 - -Dump raw data: - -To view all available fields, you can use the format `{{json .}}`. - - $ docker version --format '{{json .}}' - {"Client":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"},"ServerOK":true,"Server":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","KernelVersion":"3.13.2-gentoo","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"}} diff --git a/fn/vendor/github.com/docker/docker/man/src/volume.md b/fn/vendor/github.com/docker/docker/man/src/volume.md deleted file mode 100644 index 0a09a41da..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/volume.md +++ /dev/null @@ -1,14 +0,0 @@ -The `docker volume` command has subcommands for managing data volumes. A data -volume is a specially-designated directory that by-passes storage driver -management. - -Data volumes persist data independent of a container's life cycle. When you -delete a container, the Docker daemon does not delete any data volumes. You can -share volumes across multiple containers. Moreover, you can share data volumes -with other computing resources in your system. - -To see help for a subcommand, use: - - docker volume COMMAND --help - -For full details on using docker volume visit Docker's online documentation. 
diff --git a/fn/vendor/github.com/docker/docker/man/src/volume/create.md b/fn/vendor/github.com/docker/docker/man/src/volume/create.md deleted file mode 100644 index 408079d62..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/volume/create.md +++ /dev/null @@ -1,35 +0,0 @@ -Creates a new volume that containers can consume and store data in. If a name -is not specified, Docker generates a random name. You create a volume and then -configure the container to use it, for example: - - $ docker volume create hello - hello - $ docker run -d -v hello:/world busybox ls /world - -The mount is created inside the container's `/src` directory. Docker doesn't -not support relative paths for mount points inside the container. - -Multiple containers can use the same volume in the same time period. This is -useful if two containers need access to shared data. For example, if one -container writes and the other reads the data. - -## Driver specific options - -Some volume drivers may take options to customize the volume creation. Use the -`-o` or `--opt` flags to pass driver options: - - $ docker volume create --driver fake --opt tardis=blue --opt timey=wimey - -These options are passed directly to the volume driver. Options for different -volume drivers may do different things (or nothing at all). - -The built-in `local` driver on Windows does not support any options. 
- -The built-in `local` driver on Linux accepts options similar to the linux -`mount` command: - - $ docker volume create --driver local --opt type=tmpfs --opt device=tmpfs --opt o=size=100m,uid=1000 - -Another example: - - $ docker volume create --driver local --opt type=btrfs --opt device=/dev/sda2 diff --git a/fn/vendor/github.com/docker/docker/man/src/volume/inspect.md b/fn/vendor/github.com/docker/docker/man/src/volume/inspect.md deleted file mode 100644 index 0885caab6..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/volume/inspect.md +++ /dev/null @@ -1,4 +0,0 @@ -Returns information about one or more volumes. By default, this command renders -all results in a JSON array. You can specify an alternate format to execute a -given template is executed for each result. Go's https://golang.org/pkg/text/template/ -package describes all the details of the format. diff --git a/fn/vendor/github.com/docker/docker/man/src/volume/ls.md b/fn/vendor/github.com/docker/docker/man/src/volume/ls.md deleted file mode 100644 index 597884278..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/volume/ls.md +++ /dev/null @@ -1,11 +0,0 @@ -Lists all the volumes Docker manages. You can filter using the `-f` or -`--filter` flag. The filtering format is a `key=value` pair. To specify -more than one filter, pass multiple flags (for example, -`--filter "foo=bar" --filter "bif=baz"`) - -The currently supported filters are: - -* `dangling` (boolean - `true` or `false`, `1` or `0`) -* `driver` (a volume driver's name) -* `label` (`label=` or `label==`) -* `name` (a volume's name) diff --git a/fn/vendor/github.com/docker/docker/man/src/wait.md b/fn/vendor/github.com/docker/docker/man/src/wait.md deleted file mode 100644 index 8700848ec..000000000 --- a/fn/vendor/github.com/docker/docker/man/src/wait.md +++ /dev/null @@ -1 +0,0 @@ -Alias for `docker container wait`. 
diff --git a/fn/vendor/github.com/docker/docker/migrate/v1/migratev1_test.go b/fn/vendor/github.com/docker/docker/migrate/v1/migratev1_test.go index 55898f12b..51b674122 100644 --- a/fn/vendor/github.com/docker/docker/migrate/v1/migratev1_test.go +++ b/fn/vendor/github.com/docker/docker/migrate/v1/migratev1_test.go @@ -94,7 +94,7 @@ func TestMigrateContainers(t *testing.T) { t.Fatal(err) } - is, err := image.NewImageStore(ifs, ls) + is, err := image.NewImageStore(ifs, runtime.GOOS, ls) if err != nil { t.Fatal(err) } @@ -172,12 +172,12 @@ func TestMigrateImages(t *testing.T) { t.Fatal(err) } - is, err := image.NewImageStore(ifs, ls) + is, err := image.NewImageStore(ifs, runtime.GOOS, ls) if err != nil { t.Fatal(err) } - ms, err := metadata.NewFSMetadataStore(filepath.Join(tmpdir, "distribution")) + ms, err := metadata.NewFSMetadataStore(filepath.Join(tmpdir, "distribution"), runtime.GOOS) if err != nil { t.Fatal(err) } @@ -433,6 +433,10 @@ func (l *mockLayer) DiffSize() (int64, error) { return 0, nil } +func (l *mockLayer) Platform() layer.Platform { + return "" +} + func (l *mockLayer) Metadata() (map[string]string, error) { return nil, nil } diff --git a/fn/vendor/github.com/docker/docker/oci/defaults_linux.go b/fn/vendor/github.com/docker/docker/oci/defaults.go similarity index 60% rename from fn/vendor/github.com/docker/docker/oci/defaults_linux.go rename to fn/vendor/github.com/docker/docker/oci/defaults.go index 8b3ce7281..4376faf5a 100644 --- a/fn/vendor/github.com/docker/docker/oci/defaults_linux.go +++ b/fn/vendor/github.com/docker/docker/oci/defaults.go @@ -7,19 +7,78 @@ import ( "github.com/opencontainers/runtime-spec/specs-go" ) -func sPtr(s string) *string { return &s } func iPtr(i int64) *int64 { return &i } func u32Ptr(i int64) *uint32 { u := uint32(i); return &u } func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm } -// DefaultSpec returns default oci spec used by docker. 
+func defaultCapabilities() []string { + return []string{ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE", + } +} + +// DefaultSpec returns the default spec used by docker for the current Platform func DefaultSpec() specs.Spec { - s := specs.Spec{ + return DefaultOSSpec(runtime.GOOS) +} + +// DefaultOSSpec returns the spec for a given OS +func DefaultOSSpec(osName string) specs.Spec { + if osName == "windows" { + return DefaultWindowsSpec() + } else if osName == "solaris" { + return DefaultSolarisSpec() + } else { + return DefaultLinuxSpec() + } +} + +// DefaultWindowsSpec create a default spec for running Windows containers +func DefaultWindowsSpec() specs.Spec { + return specs.Spec{ Version: specs.Version, Platform: specs.Platform{ OS: runtime.GOOS, Arch: runtime.GOARCH, }, + Windows: &specs.Windows{}, + } +} + +// DefaultSolarisSpec create a default spec for running Solaris containers +func DefaultSolarisSpec() specs.Spec { + s := specs.Spec{ + Version: "0.6.0", + Platform: specs.Platform{ + OS: "SunOS", + Arch: runtime.GOARCH, + }, + } + s.Solaris = &specs.Solaris{} + return s +} + +// DefaultLinuxSpec create a default spec for running Linux containers +func DefaultLinuxSpec() specs.Spec { + s := specs.Spec{ + Version: specs.Version, + Platform: specs.Platform{ + OS: "linux", + Arch: runtime.GOARCH, + }, } s.Mounts = []specs.Mount{ { @@ -59,21 +118,11 @@ func DefaultSpec() specs.Spec { Options: []string{"nosuid", "noexec", "nodev"}, }, } - s.Process.Capabilities = []string{ - "CAP_CHOWN", - "CAP_DAC_OVERRIDE", - "CAP_FSETID", - "CAP_FOWNER", - "CAP_MKNOD", - "CAP_NET_RAW", - "CAP_SETGID", - "CAP_SETUID", - "CAP_SETFCAP", - "CAP_SETPCAP", - "CAP_NET_BIND_SERVICE", - "CAP_SYS_CHROOT", - "CAP_KILL", - "CAP_AUDIT_WRITE", + s.Process.Capabilities = 
&specs.LinuxCapabilities{ + Bounding: defaultCapabilities(), + Permitted: defaultCapabilities(), + Inheritable: defaultCapabilities(), + Effective: defaultCapabilities(), } s.Linux = &specs.Linux{ @@ -83,7 +132,6 @@ func DefaultSpec() specs.Spec { "/proc/timer_list", "/proc/timer_stats", "/proc/sched_debug", - "/sys/firmware", }, ReadonlyPaths: []string{ "/proc/asound", @@ -93,7 +141,7 @@ func DefaultSpec() specs.Spec { "/proc/sys", "/proc/sysrq-trigger", }, - Namespaces: []specs.Namespace{ + Namespaces: []specs.LinuxNamespace{ {Type: "mount"}, {Type: "network"}, {Type: "uts"}, @@ -104,65 +152,70 @@ func DefaultSpec() specs.Spec { // null, zero, full, random, urandom, tty, console, and ptmx. // ptmx is a bind-mount or symlink of the container's ptmx. // See also: https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#default-devices - Devices: []specs.Device{}, - Resources: &specs.Resources{ - Devices: []specs.DeviceCgroup{ + Devices: []specs.LinuxDevice{}, + Resources: &specs.LinuxResources{ + Devices: []specs.LinuxDeviceCgroup{ { Allow: false, - Access: sPtr("rwm"), + Access: "rwm", }, { Allow: true, - Type: sPtr("c"), + Type: "c", Major: iPtr(1), Minor: iPtr(5), - Access: sPtr("rwm"), + Access: "rwm", }, { Allow: true, - Type: sPtr("c"), + Type: "c", Major: iPtr(1), Minor: iPtr(3), - Access: sPtr("rwm"), + Access: "rwm", }, { Allow: true, - Type: sPtr("c"), + Type: "c", Major: iPtr(1), Minor: iPtr(9), - Access: sPtr("rwm"), + Access: "rwm", }, { Allow: true, - Type: sPtr("c"), + Type: "c", Major: iPtr(1), Minor: iPtr(8), - Access: sPtr("rwm"), + Access: "rwm", }, { Allow: true, - Type: sPtr("c"), + Type: "c", Major: iPtr(5), Minor: iPtr(0), - Access: sPtr("rwm"), + Access: "rwm", }, { Allow: true, - Type: sPtr("c"), + Type: "c", Major: iPtr(5), Minor: iPtr(1), - Access: sPtr("rwm"), + Access: "rwm", }, { Allow: false, - Type: sPtr("c"), + Type: "c", Major: iPtr(10), Minor: iPtr(229), - Access: sPtr("rwm"), + Access: "rwm", }, }, }, } + // For 
LCOW support, don't mask /sys/firmware + if runtime.GOOS != "windows" { + s.Linux.MaskedPaths = append(s.Linux.MaskedPaths, "/sys/firmware") + } + return s } diff --git a/fn/vendor/github.com/docker/docker/oci/defaults_solaris.go b/fn/vendor/github.com/docker/docker/oci/defaults_solaris.go deleted file mode 100644 index 85c8b68e1..000000000 --- a/fn/vendor/github.com/docker/docker/oci/defaults_solaris.go +++ /dev/null @@ -1,20 +0,0 @@ -package oci - -import ( - "runtime" - - "github.com/opencontainers/runtime-spec/specs-go" -) - -// DefaultSpec returns default oci spec used by docker. -func DefaultSpec() specs.Spec { - s := specs.Spec{ - Version: "0.6.0", - Platform: specs.Platform{ - OS: "SunOS", - Arch: runtime.GOARCH, - }, - } - s.Solaris = &specs.Solaris{} - return s -} diff --git a/fn/vendor/github.com/docker/docker/oci/defaults_windows.go b/fn/vendor/github.com/docker/docker/oci/defaults_windows.go deleted file mode 100644 index ab51904ec..000000000 --- a/fn/vendor/github.com/docker/docker/oci/defaults_windows.go +++ /dev/null @@ -1,19 +0,0 @@ -package oci - -import ( - "runtime" - - "github.com/opencontainers/runtime-spec/specs-go" -) - -// DefaultSpec returns default spec used by docker. -func DefaultSpec() specs.Spec { - return specs.Spec{ - Version: specs.Version, - Platform: specs.Platform{ - OS: runtime.GOOS, - Arch: runtime.GOARCH, - }, - Windows: &specs.Windows{}, - } -} diff --git a/fn/vendor/github.com/docker/docker/oci/devices_linux.go b/fn/vendor/github.com/docker/docker/oci/devices_linux.go index 2840d2586..fa9c72698 100644 --- a/fn/vendor/github.com/docker/docker/oci/devices_linux.go +++ b/fn/vendor/github.com/docker/docker/oci/devices_linux.go @@ -11,9 +11,9 @@ import ( specs "github.com/opencontainers/runtime-spec/specs-go" ) -// Device transforms a libcontainer configs.Device to a specs.Device object. 
-func Device(d *configs.Device) specs.Device { - return specs.Device{ +// Device transforms a libcontainer configs.Device to a specs.LinuxDevice object. +func Device(d *configs.Device) specs.LinuxDevice { + return specs.LinuxDevice{ Type: string(d.Type), Path: d.Path, Major: d.Major, @@ -24,19 +24,19 @@ func Device(d *configs.Device) specs.Device { } } -func deviceCgroup(d *configs.Device) specs.DeviceCgroup { +func deviceCgroup(d *configs.Device) specs.LinuxDeviceCgroup { t := string(d.Type) - return specs.DeviceCgroup{ + return specs.LinuxDeviceCgroup{ Allow: true, - Type: &t, + Type: t, Major: &d.Major, Minor: &d.Minor, - Access: &d.Permissions, + Access: d.Permissions, } } // DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. -func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.Device, devPermissions []specs.DeviceCgroup, err error) { +func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.LinuxDevice, devPermissions []specs.LinuxDeviceCgroup, err error) { resolvedPathOnHost := pathOnHost // check if it is a symbolic link diff --git a/fn/vendor/github.com/docker/docker/oci/devices_unsupported.go b/fn/vendor/github.com/docker/docker/oci/devices_unsupported.go index 6252cab53..b5d3fab59 100644 --- a/fn/vendor/github.com/docker/docker/oci/devices_unsupported.go +++ b/fn/vendor/github.com/docker/docker/oci/devices_unsupported.go @@ -11,10 +11,10 @@ import ( // Device transforms a libcontainer configs.Device to a specs.Device object. // Not implemented -func Device(d *configs.Device) specs.Device { return specs.Device{} } +func Device(d *configs.Device) specs.LinuxDevice { return specs.LinuxDevice{} } // DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. 
// Not implemented -func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.Device, devPermissions []specs.DeviceCgroup, err error) { +func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.LinuxDevice, devPermissions []specs.LinuxDeviceCgroup, err error) { return nil, nil, errors.New("oci/devices: unsupported platform") } diff --git a/fn/vendor/github.com/docker/docker/oci/namespaces.go b/fn/vendor/github.com/docker/docker/oci/namespaces.go index ebea76bfb..cb222dcee 100644 --- a/fn/vendor/github.com/docker/docker/oci/namespaces.go +++ b/fn/vendor/github.com/docker/docker/oci/namespaces.go @@ -3,7 +3,7 @@ package oci import specs "github.com/opencontainers/runtime-spec/specs-go" // RemoveNamespace removes the `nsType` namespace from OCI spec `s` -func RemoveNamespace(s *specs.Spec, nsType specs.NamespaceType) { +func RemoveNamespace(s *specs.Spec, nsType specs.LinuxNamespaceType) { for i, n := range s.Linux.Namespaces { if n.Type == nsType { s.Linux.Namespaces = append(s.Linux.Namespaces[:i], s.Linux.Namespaces[i+1:]...) diff --git a/fn/vendor/github.com/docker/docker/opts/ip.go b/fn/vendor/github.com/docker/docker/opts/ip.go index fb03b5011..109506397 100644 --- a/fn/vendor/github.com/docker/docker/opts/ip.go +++ b/fn/vendor/github.com/docker/docker/opts/ip.go @@ -22,7 +22,7 @@ func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { } // Set sets an IPv4 or IPv6 address from a given string. If the given -// string is not parseable as an IP address it returns an error. +// string is not parsable as an IP address it returns an error. 
func (o *IPOpt) Set(val string) error { ip := net.ParseIP(val) if ip == nil { diff --git a/fn/vendor/github.com/docker/docker/opts/mount.go b/fn/vendor/github.com/docker/docker/opts/mount.go deleted file mode 100644 index d4ccf838d..000000000 --- a/fn/vendor/github.com/docker/docker/opts/mount.go +++ /dev/null @@ -1,173 +0,0 @@ -package opts - -import ( - "encoding/csv" - "fmt" - "os" - "strconv" - "strings" - - mounttypes "github.com/docker/docker/api/types/mount" - "github.com/docker/go-units" -) - -// MountOpt is a Value type for parsing mounts -type MountOpt struct { - values []mounttypes.Mount -} - -// Set a new mount value -func (m *MountOpt) Set(value string) error { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return err - } - - mount := mounttypes.Mount{} - - volumeOptions := func() *mounttypes.VolumeOptions { - if mount.VolumeOptions == nil { - mount.VolumeOptions = &mounttypes.VolumeOptions{ - Labels: make(map[string]string), - } - } - if mount.VolumeOptions.DriverConfig == nil { - mount.VolumeOptions.DriverConfig = &mounttypes.Driver{} - } - return mount.VolumeOptions - } - - bindOptions := func() *mounttypes.BindOptions { - if mount.BindOptions == nil { - mount.BindOptions = new(mounttypes.BindOptions) - } - return mount.BindOptions - } - - tmpfsOptions := func() *mounttypes.TmpfsOptions { - if mount.TmpfsOptions == nil { - mount.TmpfsOptions = new(mounttypes.TmpfsOptions) - } - return mount.TmpfsOptions - } - - setValueOnMap := func(target map[string]string, value string) { - parts := strings.SplitN(value, "=", 2) - if len(parts) == 1 { - target[value] = "" - } else { - target[parts[0]] = parts[1] - } - } - - mount.Type = mounttypes.TypeVolume // default to volume mounts - // Set writable as the default - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - key := strings.ToLower(parts[0]) - - if len(parts) == 1 { - switch key { - case "readonly", "ro": - 
mount.ReadOnly = true - continue - case "volume-nocopy": - volumeOptions().NoCopy = true - continue - } - } - - if len(parts) != 2 { - return fmt.Errorf("invalid field '%s' must be a key=value pair", field) - } - - value := parts[1] - switch key { - case "type": - mount.Type = mounttypes.Type(strings.ToLower(value)) - case "source", "src": - mount.Source = value - case "target", "dst", "destination": - mount.Target = value - case "readonly", "ro": - mount.ReadOnly, err = strconv.ParseBool(value) - if err != nil { - return fmt.Errorf("invalid value for %s: %s", key, value) - } - case "consistency": - mount.Consistency = mounttypes.Consistency(strings.ToLower(value)) - case "bind-propagation": - bindOptions().Propagation = mounttypes.Propagation(strings.ToLower(value)) - case "volume-nocopy": - volumeOptions().NoCopy, err = strconv.ParseBool(value) - if err != nil { - return fmt.Errorf("invalid value for volume-nocopy: %s", value) - } - case "volume-label": - setValueOnMap(volumeOptions().Labels, value) - case "volume-driver": - volumeOptions().DriverConfig.Name = value - case "volume-opt": - if volumeOptions().DriverConfig.Options == nil { - volumeOptions().DriverConfig.Options = make(map[string]string) - } - setValueOnMap(volumeOptions().DriverConfig.Options, value) - case "tmpfs-size": - sizeBytes, err := units.RAMInBytes(value) - if err != nil { - return fmt.Errorf("invalid value for %s: %s", key, value) - } - tmpfsOptions().SizeBytes = sizeBytes - case "tmpfs-mode": - ui64, err := strconv.ParseUint(value, 8, 32) - if err != nil { - return fmt.Errorf("invalid value for %s: %s", key, value) - } - tmpfsOptions().Mode = os.FileMode(ui64) - default: - return fmt.Errorf("unexpected key '%s' in '%s'", key, field) - } - } - - if mount.Type == "" { - return fmt.Errorf("type is required") - } - - if mount.Target == "" { - return fmt.Errorf("target is required") - } - - if mount.VolumeOptions != nil && mount.Type != mounttypes.TypeVolume { - return fmt.Errorf("cannot mix 
'volume-*' options with mount type '%s'", mount.Type) - } - if mount.BindOptions != nil && mount.Type != mounttypes.TypeBind { - return fmt.Errorf("cannot mix 'bind-*' options with mount type '%s'", mount.Type) - } - if mount.TmpfsOptions != nil && mount.Type != mounttypes.TypeTmpfs { - return fmt.Errorf("cannot mix 'tmpfs-*' options with mount type '%s'", mount.Type) - } - - m.values = append(m.values, mount) - return nil -} - -// Type returns the type of this option -func (m *MountOpt) Type() string { - return "mount" -} - -// String returns a string repr of this option -func (m *MountOpt) String() string { - mounts := []string{} - for _, mount := range m.values { - repr := fmt.Sprintf("%s %s %s", mount.Type, mount.Source, mount.Target) - mounts = append(mounts, repr) - } - return strings.Join(mounts, ", ") -} - -// Value returns the mounts -func (m *MountOpt) Value() []mounttypes.Mount { - return m.values -} diff --git a/fn/vendor/github.com/docker/docker/opts/mount_test.go b/fn/vendor/github.com/docker/docker/opts/mount_test.go deleted file mode 100644 index 59606c38e..000000000 --- a/fn/vendor/github.com/docker/docker/opts/mount_test.go +++ /dev/null @@ -1,184 +0,0 @@ -package opts - -import ( - "os" - "testing" - - mounttypes "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestMountOptString(t *testing.T) { - mount := MountOpt{ - values: []mounttypes.Mount{ - { - Type: mounttypes.TypeBind, - Source: "/home/path", - Target: "/target", - }, - { - Type: mounttypes.TypeVolume, - Source: "foo", - Target: "/target/foo", - }, - }, - } - expected := "bind /home/path /target, volume foo /target/foo" - assert.Equal(t, mount.String(), expected) -} - -func TestMountOptSetBindNoErrorBind(t *testing.T) { - for _, testcase := range []string{ - // tests several aliases that should have same result. 
- "type=bind,target=/target,source=/source", - "type=bind,src=/source,dst=/target", - "type=bind,source=/source,dst=/target", - "type=bind,src=/source,target=/target", - } { - var mount MountOpt - - assert.NilError(t, mount.Set(testcase)) - - mounts := mount.Value() - assert.Equal(t, len(mounts), 1) - assert.Equal(t, mounts[0], mounttypes.Mount{ - Type: mounttypes.TypeBind, - Source: "/source", - Target: "/target", - }) - } -} - -func TestMountOptSetVolumeNoError(t *testing.T) { - for _, testcase := range []string{ - // tests several aliases that should have same result. - "type=volume,target=/target,source=/source", - "type=volume,src=/source,dst=/target", - "type=volume,source=/source,dst=/target", - "type=volume,src=/source,target=/target", - } { - var mount MountOpt - - assert.NilError(t, mount.Set(testcase)) - - mounts := mount.Value() - assert.Equal(t, len(mounts), 1) - assert.Equal(t, mounts[0], mounttypes.Mount{ - Type: mounttypes.TypeVolume, - Source: "/source", - Target: "/target", - }) - } -} - -// TestMountOptDefaultType ensures that a mount without the type defaults to a -// volume mount. 
-func TestMountOptDefaultType(t *testing.T) { - var mount MountOpt - assert.NilError(t, mount.Set("target=/target,source=/foo")) - assert.Equal(t, mount.values[0].Type, mounttypes.TypeVolume) -} - -func TestMountOptSetErrorNoTarget(t *testing.T) { - var mount MountOpt - assert.Error(t, mount.Set("type=volume,source=/foo"), "target is required") -} - -func TestMountOptSetErrorInvalidKey(t *testing.T) { - var mount MountOpt - assert.Error(t, mount.Set("type=volume,bogus=foo"), "unexpected key 'bogus'") -} - -func TestMountOptSetErrorInvalidField(t *testing.T) { - var mount MountOpt - assert.Error(t, mount.Set("type=volume,bogus"), "invalid field 'bogus'") -} - -func TestMountOptSetErrorInvalidReadOnly(t *testing.T) { - var mount MountOpt - assert.Error(t, mount.Set("type=volume,readonly=no"), "invalid value for readonly: no") - assert.Error(t, mount.Set("type=volume,readonly=invalid"), "invalid value for readonly: invalid") -} - -func TestMountOptDefaultEnableReadOnly(t *testing.T) { - var m MountOpt - assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo")) - assert.Equal(t, m.values[0].ReadOnly, false) - - m = MountOpt{} - assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly")) - assert.Equal(t, m.values[0].ReadOnly, true) - - m = MountOpt{} - assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly=1")) - assert.Equal(t, m.values[0].ReadOnly, true) - - m = MountOpt{} - assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly=true")) - assert.Equal(t, m.values[0].ReadOnly, true) - - m = MountOpt{} - assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly=0")) - assert.Equal(t, m.values[0].ReadOnly, false) -} - -func TestMountOptVolumeNoCopy(t *testing.T) { - var m MountOpt - assert.NilError(t, m.Set("type=volume,target=/foo,volume-nocopy")) - assert.Equal(t, m.values[0].Source, "") - - m = MountOpt{} - assert.NilError(t, m.Set("type=volume,target=/foo,source=foo")) - assert.Equal(t, 
m.values[0].VolumeOptions == nil, true) - - m = MountOpt{} - assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy=true")) - assert.Equal(t, m.values[0].VolumeOptions != nil, true) - assert.Equal(t, m.values[0].VolumeOptions.NoCopy, true) - - m = MountOpt{} - assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy")) - assert.Equal(t, m.values[0].VolumeOptions != nil, true) - assert.Equal(t, m.values[0].VolumeOptions.NoCopy, true) - - m = MountOpt{} - assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy=1")) - assert.Equal(t, m.values[0].VolumeOptions != nil, true) - assert.Equal(t, m.values[0].VolumeOptions.NoCopy, true) -} - -func TestMountOptTypeConflict(t *testing.T) { - var m MountOpt - assert.Error(t, m.Set("type=bind,target=/foo,source=/foo,volume-nocopy=true"), "cannot mix") - assert.Error(t, m.Set("type=volume,target=/foo,source=/foo,bind-propagation=rprivate"), "cannot mix") -} - -func TestMountOptSetTmpfsNoError(t *testing.T) { - for _, testcase := range []string{ - // tests several aliases that should have same result. 
- "type=tmpfs,target=/target,tmpfs-size=1m,tmpfs-mode=0700", - "type=tmpfs,target=/target,tmpfs-size=1MB,tmpfs-mode=700", - } { - var mount MountOpt - - assert.NilError(t, mount.Set(testcase)) - - mounts := mount.Value() - assert.Equal(t, len(mounts), 1) - assert.DeepEqual(t, mounts[0], mounttypes.Mount{ - Type: mounttypes.TypeTmpfs, - Target: "/target", - TmpfsOptions: &mounttypes.TmpfsOptions{ - SizeBytes: 1024 * 1024, // not 1000 * 1000 - Mode: os.FileMode(0700), - }, - }) - } -} - -func TestMountOptSetTmpfsError(t *testing.T) { - var m MountOpt - assert.Error(t, m.Set("type=tmpfs,target=/foo,tmpfs-size=foo"), "invalid value for tmpfs-size") - assert.Error(t, m.Set("type=tmpfs,target=/foo,tmpfs-mode=foo"), "invalid value for tmpfs-mode") - assert.Error(t, m.Set("type=tmpfs"), "target is required") -} diff --git a/fn/vendor/github.com/docker/docker/opts/opts.go b/fn/vendor/github.com/docker/docker/opts/opts.go index f76f30805..8d82f7679 100644 --- a/fn/vendor/github.com/docker/docker/opts/opts.go +++ b/fn/vendor/github.com/docker/docker/opts/opts.go @@ -2,13 +2,11 @@ package opts import ( "fmt" - "math/big" "net" "path" "regexp" "strings" - "github.com/docker/docker/api/types/filters" units "github.com/docker/go-units" ) @@ -236,15 +234,6 @@ func ValidateIPAddress(val string) (string, error) { return "", fmt.Errorf("%s is not an ip address", val) } -// ValidateMACAddress validates a MAC address. -func ValidateMACAddress(val string) (string, error) { - _, err := net.ParseMAC(strings.TrimSpace(val)) - if err != nil { - return "", err - } - return val, nil -} - // ValidateDNSSearch validates domain for resolvconf search configuration. // A zero length domain is represented by a dot (.). func ValidateDNSSearch(val string) (string, error) { @@ -274,114 +263,6 @@ func ValidateLabel(val string) (string, error) { return val, nil } -// ValidateSysctl validates a sysctl and returns it. 
-func ValidateSysctl(val string) (string, error) { - validSysctlMap := map[string]bool{ - "kernel.msgmax": true, - "kernel.msgmnb": true, - "kernel.msgmni": true, - "kernel.sem": true, - "kernel.shmall": true, - "kernel.shmmax": true, - "kernel.shmmni": true, - "kernel.shm_rmid_forced": true, - } - validSysctlPrefixes := []string{ - "net.", - "fs.mqueue.", - } - arr := strings.Split(val, "=") - if len(arr) < 2 { - return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) - } - if validSysctlMap[arr[0]] { - return val, nil - } - - for _, vp := range validSysctlPrefixes { - if strings.HasPrefix(arr[0], vp) { - return val, nil - } - } - return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) -} - -// FilterOpt is a flag type for validating filters -type FilterOpt struct { - filter filters.Args -} - -// NewFilterOpt returns a new FilterOpt -func NewFilterOpt() FilterOpt { - return FilterOpt{filter: filters.NewArgs()} -} - -func (o *FilterOpt) String() string { - repr, err := filters.ToParam(o.filter) - if err != nil { - return "invalid filters" - } - return repr -} - -// Set sets the value of the opt by parsing the command line value -func (o *FilterOpt) Set(value string) error { - var err error - o.filter, err = filters.ParseFlag(value, o.filter) - return err -} - -// Type returns the option type -func (o *FilterOpt) Type() string { - return "filter" -} - -// Value returns the value of this option -func (o *FilterOpt) Value() filters.Args { - return o.filter -} - -// NanoCPUs is a type for fixed point fractional number. 
-type NanoCPUs int64 - -// String returns the string format of the number -func (c *NanoCPUs) String() string { - if *c == 0 { - return "" - } - return big.NewRat(c.Value(), 1e9).FloatString(3) -} - -// Set sets the value of the NanoCPU by passing a string -func (c *NanoCPUs) Set(value string) error { - cpus, err := ParseCPUs(value) - *c = NanoCPUs(cpus) - return err -} - -// Type returns the type -func (c *NanoCPUs) Type() string { - return "decimal" -} - -// Value returns the value in int64 -func (c *NanoCPUs) Value() int64 { - return int64(*c) -} - -// ParseCPUs takes a string ratio and returns an integer value of nano cpus -func ParseCPUs(value string) (int64, error) { - cpu, ok := new(big.Rat).SetString(value) - if !ok { - return 0, fmt.Errorf("failed to parse %v as a rational number", value) - } - nano := cpu.Mul(cpu, big.NewRat(1e9, 1)) - if !nano.IsInt() { - return 0, fmt.Errorf("value is too precise") - } - return nano.Num().Int64(), nil -} - // ParseLink parses and validates the specified string as a link format (name:alias) func ParseLink(val string) (string, string, error) { if val == "" { @@ -404,12 +285,6 @@ func ParseLink(val string) (string, string, error) { return arr[0], arr[1], nil } -// ValidateLink validates that the specified string has a valid link format (containerName:alias). -func ValidateLink(val string) (string, error) { - _, _, err := ParseLink(val) - return val, err -} - // MemBytes is a type for human readable memory bytes (like 128M, 2g, etc) type MemBytes int64 @@ -450,39 +325,3 @@ func (m *MemBytes) UnmarshalJSON(s []byte) error { *m = MemBytes(val) return err } - -// MemSwapBytes is a type for human readable memory bytes (like 128M, 2g, etc). -// It differs from MemBytes in that -1 is valid and the default. 
-type MemSwapBytes int64 - -// Set sets the value of the MemSwapBytes by passing a string -func (m *MemSwapBytes) Set(value string) error { - if value == "-1" { - *m = MemSwapBytes(-1) - return nil - } - val, err := units.RAMInBytes(value) - *m = MemSwapBytes(val) - return err -} - -// Type returns the type -func (m *MemSwapBytes) Type() string { - return "bytes" -} - -// Value returns the value in int64 -func (m *MemSwapBytes) Value() int64 { - return int64(*m) -} - -func (m *MemSwapBytes) String() string { - b := MemBytes(*m) - return b.String() -} - -// UnmarshalJSON is the customized unmarshaler for MemSwapBytes -func (m *MemSwapBytes) UnmarshalJSON(s []byte) error { - b := MemBytes(*m) - return b.UnmarshalJSON(s) -} diff --git a/fn/vendor/github.com/docker/docker/opts/opts_test.go b/fn/vendor/github.com/docker/docker/opts/opts_test.go index c1e7735b5..269f88639 100644 --- a/fn/vendor/github.com/docker/docker/opts/opts_test.go +++ b/fn/vendor/github.com/docker/docker/opts/opts_test.go @@ -157,7 +157,7 @@ func TestValidateDNSSearch(t *testing.T) { `foo.bar-.baz`, `foo.-bar`, `foo.-bar.baz`, - `foo.bar.baz.this.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbe`, + `foo.bar.baz.this.should.fail.on.long.name.because.it.is.longer.thanitshouldbethis.should.fail.on.long.name.because.it.is.longer.thanitshouldbethis.should.fail.on.long.name.because.it.is.longer.thanitshouldbethis.should.fail.on.long.name.because.it.is.longer.thanitshouldbe`, } for _, domain := range valid { @@ -231,49 +231,6 @@ func TestNamedMapOpts(t *testing.T) { } } -func TestValidateMACAddress(t *testing.T) { - if _, err := ValidateMACAddress(`92:d0:c6:0a:29:33`); err != nil { - t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:29:33`) got %s", err) - } - - if _, err := ValidateMACAddress(`92:d0:c6:0a:33`); 
err == nil { - t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:33`) succeeded; expected failure on invalid MAC") - } - - if _, err := ValidateMACAddress(`random invalid string`); err == nil { - t.Fatalf("ValidateMACAddress(`random invalid string`) succeeded; expected failure on invalid MAC") - } -} - -func TestValidateLink(t *testing.T) { - valid := []string{ - "name", - "dcdfbe62ecd0:alias", - "7a67485460b7642516a4ad82ecefe7f57d0c4916f530561b71a50a3f9c4e33da", - "angry_torvalds:linus", - } - invalid := map[string]string{ - "": "empty string specified for links", - "too:much:of:it": "bad format for links: too:much:of:it", - } - - for _, link := range valid { - if _, err := ValidateLink(link); err != nil { - t.Fatalf("ValidateLink(`%q`) should succeed: error %q", link, err) - } - } - - for link, expectedError := range invalid { - if _, err := ValidateLink(link); err == nil { - t.Fatalf("ValidateLink(`%q`) should have failed validation", link) - } else { - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("ValidateLink(`%q`) error should contain %q", link, expectedError) - } - } - } -} - func TestParseLink(t *testing.T) { name, alias, err := ParseLink("name:alias") if err != nil { diff --git a/fn/vendor/github.com/docker/docker/opts/port.go b/fn/vendor/github.com/docker/docker/opts/port.go deleted file mode 100644 index 152683c98..000000000 --- a/fn/vendor/github.com/docker/docker/opts/port.go +++ /dev/null @@ -1,162 +0,0 @@ -package opts - -import ( - "encoding/csv" - "fmt" - "regexp" - "strconv" - "strings" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/go-connections/nat" -) - -const ( - portOptTargetPort = "target" - portOptPublishedPort = "published" - portOptProtocol = "protocol" - portOptMode = "mode" -) - -// PortOpt represents a port config in swarm mode. 
-type PortOpt struct { - ports []swarm.PortConfig -} - -// Set a new port value -func (p *PortOpt) Set(value string) error { - longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value) - if err != nil { - return err - } - if longSyntax { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return err - } - - pConfig := swarm.PortConfig{} - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - if len(parts) != 2 { - return fmt.Errorf("invalid field %s", field) - } - - key := strings.ToLower(parts[0]) - value := strings.ToLower(parts[1]) - - switch key { - case portOptProtocol: - if value != string(swarm.PortConfigProtocolTCP) && value != string(swarm.PortConfigProtocolUDP) { - return fmt.Errorf("invalid protocol value %s", value) - } - - pConfig.Protocol = swarm.PortConfigProtocol(value) - case portOptMode: - if value != string(swarm.PortConfigPublishModeIngress) && value != string(swarm.PortConfigPublishModeHost) { - return fmt.Errorf("invalid publish mode value %s", value) - } - - pConfig.PublishMode = swarm.PortConfigPublishMode(value) - case portOptTargetPort: - tPort, err := strconv.ParseUint(value, 10, 16) - if err != nil { - return err - } - - pConfig.TargetPort = uint32(tPort) - case portOptPublishedPort: - pPort, err := strconv.ParseUint(value, 10, 16) - if err != nil { - return err - } - - pConfig.PublishedPort = uint32(pPort) - default: - return fmt.Errorf("invalid field key %s", key) - } - } - - if pConfig.TargetPort == 0 { - return fmt.Errorf("missing mandatory field %q", portOptTargetPort) - } - - if pConfig.PublishMode == "" { - pConfig.PublishMode = swarm.PortConfigPublishModeIngress - } - - if pConfig.Protocol == "" { - pConfig.Protocol = swarm.PortConfigProtocolTCP - } - - p.ports = append(p.ports, pConfig) - } else { - // short syntax - portConfigs := []swarm.PortConfig{} - ports, portBindingMap, err := nat.ParsePortSpecs([]string{value}) - if err != nil { - 
return err - } - for _, portBindings := range portBindingMap { - for _, portBinding := range portBindings { - if portBinding.HostIP != "" { - return fmt.Errorf("HostIP is not supported.") - } - } - } - - for port := range ports { - portConfig, err := ConvertPortToPortConfig(port, portBindingMap) - if err != nil { - return err - } - portConfigs = append(portConfigs, portConfig...) - } - p.ports = append(p.ports, portConfigs...) - } - return nil -} - -// Type returns the type of this option -func (p *PortOpt) Type() string { - return "port" -} - -// String returns a string repr of this option -func (p *PortOpt) String() string { - ports := []string{} - for _, port := range p.ports { - repr := fmt.Sprintf("%v:%v/%s/%s", port.PublishedPort, port.TargetPort, port.Protocol, port.PublishMode) - ports = append(ports, repr) - } - return strings.Join(ports, ", ") -} - -// Value returns the ports -func (p *PortOpt) Value() []swarm.PortConfig { - return p.ports -} - -// ConvertPortToPortConfig converts ports to the swarm type -func ConvertPortToPortConfig( - port nat.Port, - portBindings map[nat.Port][]nat.PortBinding, -) ([]swarm.PortConfig, error) { - ports := []swarm.PortConfig{} - - for _, binding := range portBindings[port] { - hostPort, err := strconv.ParseUint(binding.HostPort, 10, 16) - if err != nil && binding.HostPort != "" { - return nil, fmt.Errorf("invalid hostport binding (%s) for port (%s)", binding.HostPort, port.Port()) - } - ports = append(ports, swarm.PortConfig{ - //TODO Name: ? 
- Protocol: swarm.PortConfigProtocol(strings.ToLower(port.Proto())), - TargetPort: uint32(port.Int()), - PublishedPort: uint32(hostPort), - PublishMode: swarm.PortConfigPublishModeIngress, - }) - } - return ports, nil -} diff --git a/fn/vendor/github.com/docker/docker/opts/port_test.go b/fn/vendor/github.com/docker/docker/opts/port_test.go deleted file mode 100644 index a483d269a..000000000 --- a/fn/vendor/github.com/docker/docker/opts/port_test.go +++ /dev/null @@ -1,295 +0,0 @@ -package opts - -import ( - "testing" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestPortOptValidSimpleSyntax(t *testing.T) { - testCases := []struct { - value string - expected []swarm.PortConfig - }{ - { - value: "80", - expected: []swarm.PortConfig{ - { - Protocol: "tcp", - TargetPort: 80, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - }, - }, - { - value: "80:8080", - expected: []swarm.PortConfig{ - { - Protocol: "tcp", - TargetPort: 8080, - PublishedPort: 80, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - }, - }, - { - value: "8080:80/tcp", - expected: []swarm.PortConfig{ - { - Protocol: "tcp", - TargetPort: 80, - PublishedPort: 8080, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - }, - }, - { - value: "80:8080/udp", - expected: []swarm.PortConfig{ - { - Protocol: "udp", - TargetPort: 8080, - PublishedPort: 80, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - }, - }, - { - value: "80-81:8080-8081/tcp", - expected: []swarm.PortConfig{ - { - Protocol: "tcp", - TargetPort: 8080, - PublishedPort: 80, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - { - Protocol: "tcp", - TargetPort: 8081, - PublishedPort: 81, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - }, - }, - { - value: "80-82:8080-8082/udp", - expected: []swarm.PortConfig{ - { - Protocol: "udp", - TargetPort: 8080, - PublishedPort: 80, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - { - Protocol: 
"udp", - TargetPort: 8081, - PublishedPort: 81, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - { - Protocol: "udp", - TargetPort: 8082, - PublishedPort: 82, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - }, - }, - } - for _, tc := range testCases { - var port PortOpt - assert.NilError(t, port.Set(tc.value)) - assert.Equal(t, len(port.Value()), len(tc.expected)) - for _, expectedPortConfig := range tc.expected { - assertContains(t, port.Value(), expectedPortConfig) - } - } -} - -func TestPortOptValidComplexSyntax(t *testing.T) { - testCases := []struct { - value string - expected []swarm.PortConfig - }{ - { - value: "target=80", - expected: []swarm.PortConfig{ - { - TargetPort: 80, - Protocol: "tcp", - PublishMode: swarm.PortConfigPublishModeIngress, - }, - }, - }, - { - value: "target=80,protocol=tcp", - expected: []swarm.PortConfig{ - { - Protocol: "tcp", - TargetPort: 80, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - }, - }, - { - value: "target=80,published=8080,protocol=tcp", - expected: []swarm.PortConfig{ - { - Protocol: "tcp", - TargetPort: 80, - PublishedPort: 8080, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - }, - }, - { - value: "published=80,target=8080,protocol=tcp", - expected: []swarm.PortConfig{ - { - Protocol: "tcp", - TargetPort: 8080, - PublishedPort: 80, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - }, - }, - { - value: "target=80,published=8080,protocol=tcp,mode=host", - expected: []swarm.PortConfig{ - { - Protocol: "tcp", - TargetPort: 80, - PublishedPort: 8080, - PublishMode: "host", - }, - }, - }, - { - value: "target=80,published=8080,mode=host", - expected: []swarm.PortConfig{ - { - TargetPort: 80, - PublishedPort: 8080, - PublishMode: "host", - Protocol: "tcp", - }, - }, - }, - { - value: "target=80,published=8080,mode=ingress", - expected: []swarm.PortConfig{ - { - TargetPort: 80, - PublishedPort: 8080, - PublishMode: "ingress", - Protocol: "tcp", - }, - }, - }, - } - for _, tc := 
range testCases { - var port PortOpt - assert.NilError(t, port.Set(tc.value)) - assert.Equal(t, len(port.Value()), len(tc.expected)) - for _, expectedPortConfig := range tc.expected { - assertContains(t, port.Value(), expectedPortConfig) - } - } -} - -func TestPortOptInvalidComplexSyntax(t *testing.T) { - testCases := []struct { - value string - expectedError string - }{ - { - value: "invalid,target=80", - expectedError: "invalid field", - }, - { - value: "invalid=field", - expectedError: "invalid field", - }, - { - value: "protocol=invalid", - expectedError: "invalid protocol value", - }, - { - value: "target=invalid", - expectedError: "invalid syntax", - }, - { - value: "published=invalid", - expectedError: "invalid syntax", - }, - { - value: "mode=invalid", - expectedError: "invalid publish mode value", - }, - { - value: "published=8080,protocol=tcp,mode=ingress", - expectedError: "missing mandatory field", - }, - { - value: `target=80,protocol="tcp,mode=ingress"`, - expectedError: "non-quoted-field", - }, - { - value: `target=80,"protocol=tcp,mode=ingress"`, - expectedError: "invalid protocol value", - }, - } - for _, tc := range testCases { - var port PortOpt - assert.Error(t, port.Set(tc.value), tc.expectedError) - } -} - -func TestPortOptInvalidSimpleSyntax(t *testing.T) { - testCases := []struct { - value string - expectedError string - }{ - { - value: "9999999", - expectedError: "Invalid containerPort: 9999999", - }, - { - value: "80/xyz", - expectedError: "Invalid proto: xyz", - }, - { - value: "tcp", - expectedError: "Invalid containerPort: tcp", - }, - { - value: "udp", - expectedError: "Invalid containerPort: udp", - }, - { - value: "", - expectedError: "No port specified", - }, - { - value: "1.1.1.1:80:80", - expectedError: "HostIP is not supported", - }, - } - for _, tc := range testCases { - var port PortOpt - assert.Error(t, port.Set(tc.value), tc.expectedError) - } -} - -func assertContains(t *testing.T, portConfigs []swarm.PortConfig, expected 
swarm.PortConfig) { - var contains = false - for _, portConfig := range portConfigs { - if portConfig == expected { - contains = true - break - } - } - if !contains { - t.Errorf("expected %v to contain %v, did not", portConfigs, expected) - } -} diff --git a/fn/vendor/github.com/docker/docker/opts/quotedstring_test.go b/fn/vendor/github.com/docker/docker/opts/quotedstring_test.go index 0ebf04bbe..54dcbc19b 100644 --- a/fn/vendor/github.com/docker/docker/opts/quotedstring_test.go +++ b/fn/vendor/github.com/docker/docker/opts/quotedstring_test.go @@ -1,28 +1,29 @@ package opts import ( - "github.com/docker/docker/pkg/testutil/assert" "testing" + + "github.com/stretchr/testify/assert" ) func TestQuotedStringSetWithQuotes(t *testing.T) { value := "" qs := NewQuotedString(&value) - assert.NilError(t, qs.Set("\"something\"")) - assert.Equal(t, qs.String(), "something") - assert.Equal(t, value, "something") + assert.NoError(t, qs.Set(`"something"`)) + assert.Equal(t, "something", qs.String()) + assert.Equal(t, "something", value) } func TestQuotedStringSetWithMismatchedQuotes(t *testing.T) { value := "" qs := NewQuotedString(&value) - assert.NilError(t, qs.Set("\"something'")) - assert.Equal(t, qs.String(), "\"something'") + assert.NoError(t, qs.Set(`"something'`)) + assert.Equal(t, `"something'`, qs.String()) } func TestQuotedStringSetWithNoQuotes(t *testing.T) { value := "" qs := NewQuotedString(&value) - assert.NilError(t, qs.Set("something")) - assert.Equal(t, qs.String(), "something") + assert.NoError(t, qs.Set("something")) + assert.Equal(t, "something", qs.String()) } diff --git a/fn/vendor/github.com/docker/docker/opts/secret.go b/fn/vendor/github.com/docker/docker/opts/secret.go deleted file mode 100644 index 56ed29eb5..000000000 --- a/fn/vendor/github.com/docker/docker/opts/secret.go +++ /dev/null @@ -1,103 +0,0 @@ -package opts - -import ( - "encoding/csv" - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - - swarmtypes 
"github.com/docker/docker/api/types/swarm" -) - -// SecretOpt is a Value type for parsing secrets -type SecretOpt struct { - values []*swarmtypes.SecretReference -} - -// Set a new secret value -func (o *SecretOpt) Set(value string) error { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return err - } - - options := &swarmtypes.SecretReference{ - File: &swarmtypes.SecretReferenceFileTarget{ - UID: "0", - GID: "0", - Mode: 0444, - }, - } - - // support a simple syntax of --secret foo - if len(fields) == 1 { - options.File.Name = fields[0] - options.SecretName = fields[0] - o.values = append(o.values, options) - return nil - } - - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - key := strings.ToLower(parts[0]) - - if len(parts) != 2 { - return fmt.Errorf("invalid field '%s' must be a key=value pair", field) - } - - value := parts[1] - switch key { - case "source", "src": - options.SecretName = value - case "target": - tDir, _ := filepath.Split(value) - if tDir != "" { - return fmt.Errorf("target must not be a path") - } - options.File.Name = value - case "uid": - options.File.UID = value - case "gid": - options.File.GID = value - case "mode": - m, err := strconv.ParseUint(value, 0, 32) - if err != nil { - return fmt.Errorf("invalid mode specified: %v", err) - } - - options.File.Mode = os.FileMode(m) - default: - return fmt.Errorf("invalid field in secret request: %s", key) - } - } - - if options.SecretName == "" { - return fmt.Errorf("source is required") - } - - o.values = append(o.values, options) - return nil -} - -// Type returns the type of this option -func (o *SecretOpt) Type() string { - return "secret" -} - -// String returns a string repr of this option -func (o *SecretOpt) String() string { - secrets := []string{} - for _, secret := range o.values { - repr := fmt.Sprintf("%s -> %s", secret.SecretName, secret.File.Name) - secrets = append(secrets, repr) - } - return 
strings.Join(secrets, ", ") -} - -// Value returns the secret requests -func (o *SecretOpt) Value() []*swarmtypes.SecretReference { - return o.values -} diff --git a/fn/vendor/github.com/docker/docker/opts/secret_test.go b/fn/vendor/github.com/docker/docker/opts/secret_test.go deleted file mode 100644 index 5654c79f1..000000000 --- a/fn/vendor/github.com/docker/docker/opts/secret_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package opts - -import ( - "os" - "testing" - - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestSecretOptionsSimple(t *testing.T) { - var opt SecretOpt - - testCase := "app-secret" - assert.NilError(t, opt.Set(testCase)) - - reqs := opt.Value() - assert.Equal(t, len(reqs), 1) - req := reqs[0] - assert.Equal(t, req.SecretName, "app-secret") - assert.Equal(t, req.File.Name, "app-secret") - assert.Equal(t, req.File.UID, "0") - assert.Equal(t, req.File.GID, "0") -} - -func TestSecretOptionsSourceTarget(t *testing.T) { - var opt SecretOpt - - testCase := "source=foo,target=testing" - assert.NilError(t, opt.Set(testCase)) - - reqs := opt.Value() - assert.Equal(t, len(reqs), 1) - req := reqs[0] - assert.Equal(t, req.SecretName, "foo") - assert.Equal(t, req.File.Name, "testing") -} - -func TestSecretOptionsShorthand(t *testing.T) { - var opt SecretOpt - - testCase := "src=foo,target=testing" - assert.NilError(t, opt.Set(testCase)) - - reqs := opt.Value() - assert.Equal(t, len(reqs), 1) - req := reqs[0] - assert.Equal(t, req.SecretName, "foo") -} - -func TestSecretOptionsCustomUidGid(t *testing.T) { - var opt SecretOpt - - testCase := "source=foo,target=testing,uid=1000,gid=1001" - assert.NilError(t, opt.Set(testCase)) - - reqs := opt.Value() - assert.Equal(t, len(reqs), 1) - req := reqs[0] - assert.Equal(t, req.SecretName, "foo") - assert.Equal(t, req.File.Name, "testing") - assert.Equal(t, req.File.UID, "1000") - assert.Equal(t, req.File.GID, "1001") -} - -func TestSecretOptionsCustomMode(t *testing.T) { - var opt SecretOpt - - testCase := 
"source=foo,target=testing,uid=1000,gid=1001,mode=0444" - assert.NilError(t, opt.Set(testCase)) - - reqs := opt.Value() - assert.Equal(t, len(reqs), 1) - req := reqs[0] - assert.Equal(t, req.SecretName, "foo") - assert.Equal(t, req.File.Name, "testing") - assert.Equal(t, req.File.UID, "1000") - assert.Equal(t, req.File.GID, "1001") - assert.Equal(t, req.File.Mode, os.FileMode(0444)) -} diff --git a/fn/vendor/github.com/docker/docker/opts/throttledevice.go b/fn/vendor/github.com/docker/docker/opts/throttledevice.go deleted file mode 100644 index 65dd3ebf6..000000000 --- a/fn/vendor/github.com/docker/docker/opts/throttledevice.go +++ /dev/null @@ -1,111 +0,0 @@ -package opts - -import ( - "fmt" - "strconv" - "strings" - - "github.com/docker/docker/api/types/blkiodev" - "github.com/docker/go-units" -) - -// ValidatorThrottleFctType defines a validator function that returns a validated struct and/or an error. -type ValidatorThrottleFctType func(val string) (*blkiodev.ThrottleDevice, error) - -// ValidateThrottleBpsDevice validates that the specified string has a valid device-rate format. -func ValidateThrottleBpsDevice(val string) (*blkiodev.ThrottleDevice, error) { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) - } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) - } - rate, err := units.RAMInBytes(split[1]) - if err != nil { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :[]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) - } - if rate < 0 { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :[]. Number must be a positive integer. 
Unit is optional and can be kb, mb, or gb", val) - } - - return &blkiodev.ThrottleDevice{ - Path: split[0], - Rate: uint64(rate), - }, nil -} - -// ValidateThrottleIOpsDevice validates that the specified string has a valid device-rate format. -func ValidateThrottleIOpsDevice(val string) (*blkiodev.ThrottleDevice, error) { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) - } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) - } - rate, err := strconv.ParseUint(split[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :. Number must be a positive integer", val) - } - if rate < 0 { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :. Number must be a positive integer", val) - } - - return &blkiodev.ThrottleDevice{ - Path: split[0], - Rate: uint64(rate), - }, nil -} - -// ThrottledeviceOpt defines a map of ThrottleDevices -type ThrottledeviceOpt struct { - values []*blkiodev.ThrottleDevice - validator ValidatorThrottleFctType -} - -// NewThrottledeviceOpt creates a new ThrottledeviceOpt -func NewThrottledeviceOpt(validator ValidatorThrottleFctType) ThrottledeviceOpt { - values := []*blkiodev.ThrottleDevice{} - return ThrottledeviceOpt{ - values: values, - validator: validator, - } -} - -// Set validates a ThrottleDevice and sets its name as a key in ThrottledeviceOpt -func (opt *ThrottledeviceOpt) Set(val string) error { - var value *blkiodev.ThrottleDevice - if opt.validator != nil { - v, err := opt.validator(val) - if err != nil { - return err - } - value = v - } - (opt.values) = append((opt.values), value) - return nil -} - -// String returns ThrottledeviceOpt values as a string. 
-func (opt *ThrottledeviceOpt) String() string { - var out []string - for _, v := range opt.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to ThrottleDevices. -func (opt *ThrottledeviceOpt) GetList() []*blkiodev.ThrottleDevice { - var throttledevice []*blkiodev.ThrottleDevice - throttledevice = append(throttledevice, opt.values...) - - return throttledevice -} - -// Type returns the option type -func (opt *ThrottledeviceOpt) Type() string { - return "list" -} diff --git a/fn/vendor/github.com/docker/docker/opts/ulimit.go b/fn/vendor/github.com/docker/docker/opts/ulimit.go index 5adfe3085..a2a65fcd2 100644 --- a/fn/vendor/github.com/docker/docker/opts/ulimit.go +++ b/fn/vendor/github.com/docker/docker/opts/ulimit.go @@ -55,3 +55,27 @@ func (o *UlimitOpt) GetList() []*units.Ulimit { func (o *UlimitOpt) Type() string { return "ulimit" } + +// NamedUlimitOpt defines a named map of Ulimits +type NamedUlimitOpt struct { + name string + UlimitOpt +} + +var _ NamedOption = &NamedUlimitOpt{} + +// NewNamedUlimitOpt creates a new NamedUlimitOpt +func NewNamedUlimitOpt(name string, ref *map[string]*units.Ulimit) *NamedUlimitOpt { + if ref == nil { + ref = &map[string]*units.Ulimit{} + } + return &NamedUlimitOpt{ + name: name, + UlimitOpt: *NewUlimitOpt(ref), + } +} + +// Name returns the option name +func (o *NamedUlimitOpt) Name() string { + return o.name +} diff --git a/fn/vendor/github.com/docker/docker/opts/weightdevice.go b/fn/vendor/github.com/docker/docker/opts/weightdevice.go deleted file mode 100644 index 7e3d064f2..000000000 --- a/fn/vendor/github.com/docker/docker/opts/weightdevice.go +++ /dev/null @@ -1,89 +0,0 @@ -package opts - -import ( - "fmt" - "strconv" - "strings" - - "github.com/docker/docker/api/types/blkiodev" -) - -// ValidatorWeightFctType defines a validator function that returns a validated struct and/or an error. 
-type ValidatorWeightFctType func(val string) (*blkiodev.WeightDevice, error) - -// ValidateWeightDevice validates that the specified string has a valid device-weight format. -func ValidateWeightDevice(val string) (*blkiodev.WeightDevice, error) { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) - } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) - } - weight, err := strconv.ParseUint(split[1], 10, 0) - if err != nil { - return nil, fmt.Errorf("invalid weight for device: %s", val) - } - if weight > 0 && (weight < 10 || weight > 1000) { - return nil, fmt.Errorf("invalid weight for device: %s", val) - } - - return &blkiodev.WeightDevice{ - Path: split[0], - Weight: uint16(weight), - }, nil -} - -// WeightdeviceOpt defines a map of WeightDevices -type WeightdeviceOpt struct { - values []*blkiodev.WeightDevice - validator ValidatorWeightFctType -} - -// NewWeightdeviceOpt creates a new WeightdeviceOpt -func NewWeightdeviceOpt(validator ValidatorWeightFctType) WeightdeviceOpt { - values := []*blkiodev.WeightDevice{} - return WeightdeviceOpt{ - values: values, - validator: validator, - } -} - -// Set validates a WeightDevice and sets its name as a key in WeightdeviceOpt -func (opt *WeightdeviceOpt) Set(val string) error { - var value *blkiodev.WeightDevice - if opt.validator != nil { - v, err := opt.validator(val) - if err != nil { - return err - } - value = v - } - (opt.values) = append((opt.values), value) - return nil -} - -// String returns WeightdeviceOpt values as a string. -func (opt *WeightdeviceOpt) String() string { - var out []string - for _, v := range opt.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to WeightDevices. 
-func (opt *WeightdeviceOpt) GetList() []*blkiodev.WeightDevice { - var weightdevice []*blkiodev.WeightDevice - for _, v := range opt.values { - weightdevice = append(weightdevice, v) - } - - return weightdevice -} - -// Type returns the option type -func (opt *WeightdeviceOpt) Type() string { - return "list" -} diff --git a/fn/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go b/fn/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go index e794c4c72..5de4a4d79 100644 --- a/fn/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go +++ b/fn/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go @@ -22,10 +22,11 @@ func GetVersion() (int, error) { return parseVersion(output) } -// LoadProfile runs `apparmor_parser -r` on a specified apparmor profile to -// replace the profile. +// LoadProfile runs `apparmor_parser -Kr` on a specified apparmor profile to +// replace the profile. The `-K` is necessary to make sure that apparmor_parser +// doesn't try to write to a read-only filesystem. func LoadProfile(profilePath string) error { - _, err := cmd("", "-r", profilePath) + _, err := cmd("", "-Kr", profilePath) return err } diff --git a/fn/vendor/github.com/docker/docker/pkg/archive/archive.go b/fn/vendor/github.com/docker/docker/pkg/archive/archive.go index 30b3c5b36..6cbc2e2bd 100644 --- a/fn/vendor/github.com/docker/docker/pkg/archive/archive.go +++ b/fn/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -6,7 +6,6 @@ import ( "bytes" "compress/bzip2" "compress/gzip" - "errors" "fmt" "io" "io/ioutil" @@ -31,10 +30,6 @@ type ( Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int - // TarChownOptions wraps the chown options UID and GID. - TarChownOptions struct { - UID, GID int - } // TarOptions wraps the tar options. 
TarOptions struct { @@ -44,7 +39,7 @@ type ( NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap - ChownOpts *TarChownOptions + ChownOpts *idtools.IDPair IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack @@ -58,33 +53,26 @@ type ( RebaseNames map[string]string InUserNS bool } - - // Archiver allows the reuse of most utility functions of this package - // with a pluggable Untar function. Also, to facilitate the passing of - // specific id mappings for untar, an archiver can be created with maps - // which will then be passed to Untar operations - Archiver struct { - Untar func(io.Reader, string, *TarOptions) error - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - } - - // breakoutError is used to differentiate errors related to breaking out - // When testing archive breakout in the unit tests, this error is expected - // in order for the test to pass. - breakoutError error ) -var ( - // ErrNotImplemented is the error message of function not implemented. - ErrNotImplemented = errors.New("Function not implemented") - defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil} -) +// Archiver allows the reuse of most utility functions of this package +// with a pluggable Untar function. 
Also, to facilitate the passing of +// specific id mappings for untar, an archiver can be created with maps +// which will then be passed to Untar operations +type Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + IDMappings *idtools.IDMappings +} -const ( - // HeaderSize is the size in bytes of a tar header - HeaderSize = 512 -) +// NewDefaultArchiver returns a new Archiver without any IDMappings +func NewDefaultArchiver() *Archiver { + return &Archiver{Untar: Untar, IDMappings: &idtools.IDMappings{}} +} + +// breakoutError is used to differentiate errors related to breaking out +// When testing archive breakout in the unit tests, this error is expected +// in order for the test to pass. +type breakoutError error const ( // Uncompressed represents the uncompressed. @@ -105,17 +93,15 @@ const ( OverlayWhiteoutFormat ) -// IsArchive checks for the magic bytes of a tar or any supported compression -// algorithm. -func IsArchive(header []byte) bool { - compression := DetectCompression(header) - if compression != Uncompressed { - return true - } - r := tar.NewReader(bytes.NewBuffer(header)) - _, err := r.Next() - return err == nil -} +const ( + modeISDIR = 040000 // Directory + modeISFIFO = 010000 // FIFO + modeISREG = 0100000 // Regular file + modeISLNK = 0120000 // Symbolic link + modeISBLK = 060000 // Block special file + modeISCHR = 020000 // Character special file + modeISSOCK = 0140000 // Socket +) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. @@ -204,7 +190,7 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) { } } -// CompressStream compresseses the dest with specified compression algorithm. +// CompressStream compresses the dest with specified compression algorithm. 
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) @@ -329,20 +315,14 @@ func (compression *Compression) Extension() string { // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. -func FileInfoHeader(path, name string, fi os.FileInfo) (*tar.Header, error) { - var link string - if fi.Mode()&os.ModeSymlink != 0 { - var err error - link, err = os.Readlink(path) - if err != nil { - return nil, err - } - } +// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), +// which have been deleted since Go 1.9 archive/tar. +func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return nil, err } - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) name, err = canonicalTarName(name, fi.IsDir()) if err != nil { return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err) @@ -351,12 +331,43 @@ func FileInfoHeader(path, name string, fi os.FileInfo) (*tar.Header, error) { if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { return nil, err } + return hdr, nil +} + +// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar +// https://github.com/golang/go/commit/66b5a2f +func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { + fm := fi.Mode() + switch { + case fm.IsRegular(): + mode |= modeISREG + case fi.IsDir(): + mode |= modeISDIR + case fm&os.ModeSymlink != 0: + mode |= modeISLNK + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + mode |= modeISCHR + } else { + mode |= modeISBLK + } + case fm&os.ModeNamedPipe != 0: + mode |= modeISFIFO + case fm&os.ModeSocket != 0: + mode |= modeISSOCK + } + return mode +} + +// ReadSecurityXattrToTarHeader reads 
security.capability xattr from filesystem +// to a tar header +func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability) } - return hdr, nil + return nil } type tarWhiteoutConverter interface { @@ -369,9 +380,8 @@ type tarAppender struct { Buffer *bufio.Writer // for hardlink mapping - SeenFiles map[uint64]string - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap + SeenFiles map[uint64]string + IDMappings *idtools.IDMappings // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined @@ -380,6 +390,15 @@ type tarAppender struct { WhiteoutConverter tarWhiteoutConverter } +func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer) *tarAppender { + return &tarAppender{ + SeenFiles: make(map[uint64]string), + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + IDMappings: idMapping, + } +} + // canonicalTarName provides a platform-independent and consistent posix-style //path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) (string, error) { @@ -402,10 +421,22 @@ func (ta *tarAppender) addTarFile(path, name string) error { return err } - hdr, err := FileInfoHeader(path, name, fi) + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return err + } + } + + hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } + if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { + return err + } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly @@ -428,21 +459,15 @@ func (ta *tarAppender) addTarFile(path, name string) error { //handle re-mapping container ID mappings back to host ID mappings before //writing tar headers/files. We skip whiteout files because they were written //by the kernel and already have proper ownership relative to the host - if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) { - uid, gid, err := getFileUIDGID(fi.Sys()) + if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() { + fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } - xUID, err := idtools.ToContainer(uid, ta.UIDMaps) + hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair) if err != nil { return err } - xGID, err := idtools.ToContainer(gid, ta.GIDMaps) - if err != nil { - return err - } - hdr.Uid = xUID - hdr.Gid = xGID } if ta.WhiteoutConverter != nil { @@ -496,7 +521,7 @@ func (ta *tarAppender) addTarFile(path, name string) error { return nil } -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions, inUserns bool) error { +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for 
os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. setuid bits) @@ -576,7 +601,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { - chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid} + chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err @@ -664,14 +689,11 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) } go func() { - ta := &tarAppender{ - TarWriter: tar.NewWriter(compressWriter), - Buffer: pools.BufioWriter32KPool.Get(nil), - SeenFiles: make(map[uint64]string), - UIDMaps: options.UIDMaps, - GIDMaps: options.GIDMaps, - WhiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat), - } + ta := newTarAppender( + idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + compressWriter, + ) + ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat) defer func() { // Make sure to check the error on Close. @@ -827,10 +849,8 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header - remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return err - } + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMappings.RootPair() whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) // Iterate through the files in the archive. 
@@ -864,7 +884,7 @@ loop: parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = idtools.MkdirAllNewAs(parentPath, 0777, remappedRootUID, remappedRootGID) + err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs) if err != nil { return err } @@ -909,26 +929,8 @@ loop: } trBuf.Reset(tr) - // if the options contain a uid & gid maps, convert header uid/gid - // entries using the maps such that lchown sets the proper mapped - // uid/gid after writing the file. We only perform this mapping if - // the file isn't already owned by the remapped root UID or GID, as - // that specific uid/gid has no mapping from container -> host, and - // those files already have the proper ownership for inside the - // container. - if hdr.Uid != remappedRootUID { - xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps) - if err != nil { - return err - } - hdr.Uid = xUID - } - if hdr.Gid != remappedRootGID { - xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps) - if err != nil { - return err - } - hdr.Gid = xGID + if err := remapIDs(idMappings, hdr); err != nil { + return err } if whiteoutConverter != nil { @@ -1013,23 +1015,13 @@ func (archiver *Archiver) TarUntar(src, dst string) error { return err } defer archive.Close() - - var options *TarOptions - if archiver.UIDMaps != nil || archiver.GIDMaps != nil { - options = &TarOptions{ - UIDMaps: archiver.UIDMaps, - GIDMaps: archiver.GIDMaps, - } + options := &TarOptions{ + UIDMaps: archiver.IDMappings.UIDs(), + GIDMaps: archiver.IDMappings.GIDs(), } return archiver.Untar(archive, dst, options) } -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. 
-func TarUntar(src, dst string) error { - return defaultArchiver.TarUntar(src, dst) -} - // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) @@ -1037,22 +1029,13 @@ func (archiver *Archiver) UntarPath(src, dst string) error { return err } defer archive.Close() - var options *TarOptions - if archiver.UIDMaps != nil || archiver.GIDMaps != nil { - options = &TarOptions{ - UIDMaps: archiver.UIDMaps, - GIDMaps: archiver.GIDMaps, - } + options := &TarOptions{ + UIDMaps: archiver.IDMappings.UIDs(), + GIDMaps: archiver.IDMappings.GIDs(), } return archiver.Untar(archive, dst, options) } -// UntarPath is a convenience function which looks for an archive -// at filesystem path `src`, and unpacks it at `dst`. -func UntarPath(src, dst string) error { - return defaultArchiver.UntarPath(src, dst) -} - // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no @@ -1069,27 +1052,16 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error { // if this archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner - rootUID, rootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) - if err != nil { - return err - } + rootIDs := archiver.IDMappings.RootPair() // Create dst, copy src's content into it logrus.Debugf("Creating dest directory: %s", dst) - if err := idtools.MkdirAllNewAs(dst, 0755, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) return archiver.TarUntar(src, dst) } -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. 
-// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -func CopyWithTar(src, dst string) error { - return defaultArchiver.CopyWithTar(src, dst) -} - // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. @@ -1110,7 +1082,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary - if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { + if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil { return err } @@ -1131,28 +1103,10 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) - if err != nil { + if err := remapIDs(archiver.IDMappings, hdr); err != nil { return err } - // only perform mapping if the file being copied isn't already owned by the - // uid or gid of the remapped root in the container - if remappedRootUID != hdr.Uid { - xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps) - if err != nil { - return err - } - hdr.Uid = xUID - } - if remappedRootGID != hdr.Gid { - xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps) - if err != nil { - return err - } - hdr.Gid = xGID - } - tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { @@ -1176,16 +1130,10 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { return err } -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -// -// Destination handling is in an operating specific manner depending -// where the daemon is running. 
If `dst` ends with a trailing slash -// the final destination path will be `dst/base(src)` (Linux) or -// `dst\base(src)` (Windows). -func CopyFileWithTar(src, dst string) (err error) { - return defaultArchiver.CopyFileWithTar(src, dst) +func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error { + ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}) + hdr.Uid, hdr.Gid = ids.UID, ids.GID + return err } // cmdStream executes a command, and returns its stdout as a stream. diff --git a/fn/vendor/github.com/docker/docker/pkg/archive/archive_test.go b/fn/vendor/github.com/docker/docker/pkg/archive/archive_test.go index b9f8c65f5..e85878fd0 100644 --- a/fn/vendor/github.com/docker/docker/pkg/archive/archive_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/archive/archive_test.go @@ -4,7 +4,6 @@ import ( "archive/tar" "bytes" "fmt" - "github.com/docker/docker/pkg/testutil/assert" "io" "io/ioutil" "os" @@ -14,6 +13,9 @@ import ( "strings" "testing" "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var tmp string @@ -25,35 +27,22 @@ func init() { } } -func TestIsArchiveNilHeader(t *testing.T) { - out := IsArchive(nil) - if out { - t.Fatalf("isArchive should return false as nil is not a valid archive header") - } +var defaultArchiver = NewDefaultArchiver() + +func defaultTarUntar(src, dst string) error { + return defaultArchiver.TarUntar(src, dst) } -func TestIsArchiveInvalidHeader(t *testing.T) { - header := []byte{0x00, 0x01, 0x02} - out := IsArchive(header) - if out { - t.Fatalf("isArchive should return false as %s is not a valid archive header", header) - } +func defaultUntarPath(src, dst string) error { + return defaultArchiver.UntarPath(src, dst) } -func TestIsArchiveBzip2(t *testing.T) { - header := []byte{0x42, 0x5A, 0x68} - out := IsArchive(header) - if !out { - t.Fatalf("isArchive should return true as %s is a bz2 header", header) - } +func defaultCopyFileWithTar(src, dst string) (err 
error) { + return defaultArchiver.CopyFileWithTar(src, dst) } -func TestIsArchive7zip(t *testing.T) { - header := []byte{0x50, 0x4b, 0x03, 0x04} - out := IsArchive(header) - if out { - t.Fatalf("isArchive should return false as %s is a 7z header and it is not supported", header) - } +func defaultCopyWithTar(src, dst string) error { + return defaultArchiver.CopyWithTar(src, dst) } func TestIsArchivePathDir(t *testing.T) { @@ -299,7 +288,7 @@ func TestUntarPathWithInvalidDest(t *testing.T) { t.Fatal(err) } - err = UntarPath(tarFile, invalidDestFolder) + err = defaultUntarPath(tarFile, invalidDestFolder) if err == nil { t.Fatalf("UntarPath with invalid destination path should throw an error.") } @@ -311,7 +300,7 @@ func TestUntarPathWithInvalidSrc(t *testing.T) { t.Fatalf("Fail to create the destination file") } defer os.RemoveAll(dest) - err = UntarPath("/invalid/path", dest) + err = defaultUntarPath("/invalid/path", dest) if err == nil { t.Fatalf("UntarPath with invalid src path should throw an error.") } @@ -346,7 +335,7 @@ func TestUntarPath(t *testing.T) { t.Fatal(err) } - err = UntarPath(tarFile, destFolder) + err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath shouldn't throw an error, %s.", err) } @@ -385,7 +374,7 @@ func TestUntarPathWithDestinationFile(t *testing.T) { if err != nil { t.Fatalf("Fail to create the destination file") } - err = UntarPath(tarFile, destFile) + err = defaultUntarPath(tarFile, destFile) if err == nil { t.Fatalf("UntarPath should throw an error if the destination if a file") } @@ -428,7 +417,7 @@ func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { if err != nil { t.Fatal(err) } - err = UntarPath(tarFile, destFolder) + err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") } @@ -445,7 +434,7 @@ func TestCopyWithTarInvalidSrc(t *testing.T) { if err != nil { t.Fatal(err) } - err = 
CopyWithTar(invalidSrc, destFolder) + err = defaultCopyWithTar(invalidSrc, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } @@ -462,7 +451,7 @@ func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { if err != nil { t.Fatal(err) } - err = CopyWithTar(srcFolder, inexistentDestFolder) + err = defaultCopyWithTar(srcFolder, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } @@ -491,7 +480,7 @@ func TestCopyWithTarSrcFile(t *testing.T) { t.Fatal(err) } ioutil.WriteFile(src, []byte("content"), 0777) - err = CopyWithTar(src, dest) + err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } @@ -520,7 +509,7 @@ func TestCopyWithTarSrcFolder(t *testing.T) { t.Fatal(err) } ioutil.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) - err = CopyWithTar(src, dest) + err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } @@ -543,7 +532,7 @@ func TestCopyFileWithTarInvalidSrc(t *testing.T) { t.Fatal(err) } invalidFile := filepath.Join(tempFolder, "doesnotexists") - err = CopyFileWithTar(invalidFile, destFolder) + err = defaultCopyFileWithTar(invalidFile, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } @@ -561,7 +550,7 @@ func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { if err != nil { t.Fatal(err) } - err = CopyFileWithTar(srcFile, inexistentDestFolder) + err = defaultCopyFileWithTar(srcFile, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } @@ -588,7 +577,7 @@ func TestCopyFileWithTarSrcFolder(t *testing.T) { if err != nil { t.Fatal(err) } - err = CopyFileWithTar(src, dest) + err = defaultCopyFileWithTar(src, dest) if err == nil { t.Fatalf("CopyFileWithTar should throw an error with a 
folder.") } @@ -612,7 +601,7 @@ func TestCopyFileWithTarSrcFile(t *testing.T) { t.Fatal(err) } ioutil.WriteFile(src, []byte("content"), 0777) - err = CopyWithTar(src, dest+"/") + err = defaultCopyWithTar(src, dest+"/") if err != nil { t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) } @@ -655,7 +644,7 @@ func checkNoChanges(fileNum int, hardlinks bool) error { return err } - err = TarUntar(srcDir, destDir) + err = defaultTarUntar(srcDir, destDir) if err != nil { return err } @@ -869,7 +858,7 @@ func BenchmarkTarUntar(b *testing.B) { b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { - err := TarUntar(origin, target) + err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } @@ -897,7 +886,7 @@ func BenchmarkTarUntarWithLinks(b *testing.B) { b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { - err := TarUntar(origin, target) + err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } @@ -1210,19 +1199,19 @@ func TestReplaceFileTarWrapper(t *testing.T) { map[string]TarModifierFunc{testcase.filename: testcase.modifier}) actual := readFileFromArchive(t, resultArchive, testcase.filename, testcase.fileCount, testcase.doc) - assert.Equal(t, actual, testcase.expected, testcase.doc) + assert.Equal(t, testcase.expected, actual, testcase.doc) } } func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { srcDir, err := ioutil.TempDir("", "docker-test-srcDir") - assert.NilError(t, err) + require.NoError(t, err) _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) - assert.NilError(t, err) + require.NoError(t, err) sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) - assert.NilError(t, err) + require.NoError(t, err) return sourceArchive, func() { os.RemoveAll(srcDir) sourceArchive.Close() @@ -1256,16 +1245,16 @@ func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.He func readFileFromArchive(t *testing.T, archive io.ReadCloser, name 
string, expectedCount int, doc string) string { destDir, err := ioutil.TempDir("", "docker-test-destDir") - assert.NilError(t, err) + require.NoError(t, err) defer os.RemoveAll(destDir) err = Untar(archive, destDir, nil) - assert.NilError(t, err) + require.NoError(t, err) files, _ := ioutil.ReadDir(destDir) - assert.Equal(t, len(files), expectedCount, doc) + assert.Len(t, files, expectedCount, doc) content, err := ioutil.ReadFile(filepath.Join(destDir, name)) - assert.NilError(t, err) + assert.NoError(t, err) return string(content) } diff --git a/fn/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/fn/vendor/github.com/docker/docker/pkg/archive/archive_unix.go index 68d3c97d2..a33f0fe77 100644 --- a/fn/vendor/github.com/docker/docker/pkg/archive/archive_unix.go +++ b/fn/vendor/github.com/docker/docker/pkg/archive/archive_unix.go @@ -9,6 +9,7 @@ import ( "path/filepath" "syscall" + "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/system" rsystem "github.com/opencontainers/runc/libcontainer/system" ) @@ -44,16 +45,13 @@ func chmodTarEntry(perm os.FileMode) os.FileMode { func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { s, ok := stat.(*syscall.Stat_t) - if !ok { - err = errors.New("cannot convert stat value to syscall.Stat_t") - return - } - - // Currently go does not fill in the major/minors - if s.Mode&syscall.S_IFBLK != 0 || - s.Mode&syscall.S_IFCHR != 0 { - hdr.Devmajor = int64(major(uint64(s.Rdev))) - hdr.Devminor = int64(minor(uint64(s.Rdev))) + if ok { + // Currently go does not fill in the major/minors + if s.Mode&syscall.S_IFBLK != 0 || + s.Mode&syscall.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) + hdr.Devminor = int64(minor(uint64(s.Rdev))) + } } return @@ -62,23 +60,20 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) ( func getInodeFromStat(stat interface{}) (inode uint64, err error) { s, ok := stat.(*syscall.Stat_t) - if !ok { - err 
= errors.New("cannot convert stat value to syscall.Stat_t") - return + if ok { + inode = uint64(s.Ino) } - inode = uint64(s.Ino) - return } -func getFileUIDGID(stat interface{}) (int, int, error) { +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { s, ok := stat.(*syscall.Stat_t) if !ok { - return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t") + return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t") } - return int(s.Uid), int(s.Gid), nil + return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil } func major(device uint64) uint64 { diff --git a/fn/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/fn/vendor/github.com/docker/docker/pkg/archive/archive_windows.go index 3d0f6277c..a22410c03 100644 --- a/fn/vendor/github.com/docker/docker/pkg/archive/archive_windows.go +++ b/fn/vendor/github.com/docker/docker/pkg/archive/archive_windows.go @@ -9,6 +9,7 @@ import ( "path/filepath" "strings" + "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/longpath" ) @@ -42,11 +43,14 @@ func CanonicalTarNameForPath(p string) (string, error) { // chmodTarEntry is used to adjust the file permissions used in tar header based // on the platform the archival is done. func chmodTarEntry(perm os.FileMode) os.FileMode { - perm &= 0755 + //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) 
+ permPart := perm & os.ModePerm + noPermPart := perm &^ os.ModePerm // Add the x bit: make everything +x from windows - perm |= 0111 + permPart |= 0111 + permPart &= 0755 - return perm + return noPermPart | permPart } func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { @@ -69,7 +73,7 @@ func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { return nil } -func getFileUIDGID(stat interface{}) (int, int, error) { +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { // no notion of file ownership mapping yet on Windows - return 0, 0, nil + return idtools.IDPair{0, 0}, nil } diff --git a/fn/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go b/fn/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go index 0c6733d6b..685e114ba 100644 --- a/fn/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go @@ -27,7 +27,7 @@ func TestCopyFileWithInvalidDest(t *testing.T) { t.Fatal(err) } ioutil.WriteFile(src, []byte("content"), 0777) - err = CopyWithTar(src, dest) + err = defaultCopyWithTar(src, dest) if err == nil { t.Fatalf("archiver.CopyWithTar should throw an error on invalid dest.") } @@ -82,6 +82,8 @@ func TestChmodTarEntry(t *testing.T) { {0644, 0755}, {0755, 0755}, {0444, 0555}, + {0755 | os.ModeDir, 0755 | os.ModeDir}, + {0755 | os.ModeSymlink, 0755 | os.ModeSymlink}, } for _, v := range cases { if out := chmodTarEntry(v.in); out != v.expected { diff --git a/fn/vendor/github.com/docker/docker/pkg/archive/changes.go b/fn/vendor/github.com/docker/docker/pkg/archive/changes.go index ca2c0ca1b..5ca39b721 100644 --- a/fn/vendor/github.com/docker/docker/pkg/archive/changes.go +++ b/fn/vendor/github.com/docker/docker/pkg/archive/changes.go @@ -394,13 +394,8 @@ func ChangesSize(newDir string, changes []Change) int64 { func ExportChanges(dir string, changes []Change, uidMaps, gidMaps 
[]idtools.IDMap) (io.ReadCloser, error) { reader, writer := io.Pipe() go func() { - ta := &tarAppender{ - TarWriter: tar.NewWriter(writer), - Buffer: pools.BufioWriter32KPool.Get(nil), - SeenFiles: make(map[uint64]string), - UIDMaps: uidMaps, - GIDMaps: gidMaps, - } + ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer) + // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) diff --git a/fn/vendor/github.com/docker/docker/pkg/archive/changes_test.go b/fn/vendor/github.com/docker/docker/pkg/archive/changes_test.go index eae1d022c..c5d1629e7 100644 --- a/fn/vendor/github.com/docker/docker/pkg/archive/changes_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/archive/changes_test.go @@ -102,10 +102,10 @@ func createSampleDir(t *testing.T, root string) { } func TestChangeString(t *testing.T) { - modifiyChange := Change{"change", ChangeModify} - toString := modifiyChange.String() + modifyChange := Change{"change", ChangeModify} + toString := modifyChange.String() if toString != "C change" { - t.Fatalf("String() of a change with ChangeModifiy Kind should have been %s but was %s", "C change", toString) + t.Fatalf("String() of a change with ChangeModify Kind should have been %s but was %s", "C change", toString) } addChange := Change{"change", ChangeAdd} toString = addChange.String() diff --git a/fn/vendor/github.com/docker/docker/pkg/archive/diff.go b/fn/vendor/github.com/docker/docker/pkg/archive/diff.go index 229219394..a2766b592 100644 --- a/fn/vendor/github.com/docker/docker/pkg/archive/diff.go +++ b/fn/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -33,10 +33,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } - remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return 0, err - } + idMappings := 
idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) aufsTempdir := "" aufsHardlinks := make(map[string]*tar.Header) @@ -87,7 +84,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = system.MkdirAll(parentPath, 0600) + err = system.MkdirAll(parentPath, 0600, "") if err != nil { return 0, err } @@ -195,27 +192,10 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, srcData = tmpFile } - // if the options contain a uid & gid maps, convert header uid/gid - // entries using the maps such that lchown sets the proper mapped - // uid/gid after writing the file. We only perform this mapping if - // the file isn't already owned by the remapped root UID or GID, as - // that specific uid/gid has no mapping from container -> host, and - // those files already have the proper ownership for inside the - // container. 
- if srcHdr.Uid != remappedRootUID { - xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps) - if err != nil { - return 0, err - } - srcHdr.Uid = xUID - } - if srcHdr.Gid != remappedRootGID { - xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps) - if err != nil { - return 0, err - } - srcHdr.Gid = xGID + if err := remapIDs(idMappings, srcHdr); err != nil { + return 0, err } + if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil { return 0, err } diff --git a/fn/vendor/github.com/docker/docker/pkg/authorization/api_test.go b/fn/vendor/github.com/docker/docker/pkg/authorization/api_test.go new file mode 100644 index 000000000..103194906 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/authorization/api_test.go @@ -0,0 +1,75 @@ +package authorization + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "math/big" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestPeerCertificateMarshalJSON(t *testing.T) { + template := &x509.Certificate{ + IsCA: true, + BasicConstraintsValid: true, + SubjectKeyId: []byte{1, 2, 3}, + SerialNumber: big.NewInt(1234), + Subject: pkix.Name{ + Country: []string{"Earth"}, + Organization: []string{"Mother Nature"}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().AddDate(5, 5, 5), + + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + } + // generate private key + privatekey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err) + publickey := &privatekey.PublicKey + + // create a self-signed certificate. 
template = parent + var parent = template + raw, err := x509.CreateCertificate(rand.Reader, template, parent, publickey, privatekey) + require.NoError(t, err) + + cert, err := x509.ParseCertificate(raw) + require.NoError(t, err) + + var certs = []*x509.Certificate{cert} + addr := "www.authz.com/auth" + req, err := http.NewRequest("GET", addr, nil) + require.NoError(t, err) + + req.RequestURI = addr + req.TLS = &tls.ConnectionState{} + req.TLS.PeerCertificates = certs + req.Header.Add("header", "value") + + for _, c := range req.TLS.PeerCertificates { + pcObj := PeerCertificate(*c) + + t.Run("Marshalling :", func(t *testing.T) { + raw, err = pcObj.MarshalJSON() + require.NotNil(t, raw) + require.Nil(t, err) + }) + + t.Run("UnMarshalling :", func(t *testing.T) { + err := pcObj.UnmarshalJSON(raw) + require.Nil(t, err) + require.Equal(t, "Earth", pcObj.Subject.Country[0]) + require.Equal(t, true, pcObj.IsCA) + + }) + + } + +} diff --git a/fn/vendor/github.com/docker/docker/pkg/authorization/authz_unix_test.go b/fn/vendor/github.com/docker/docker/pkg/authorization/authz_unix_test.go index a787f3cd8..a692802d5 100644 --- a/fn/vendor/github.com/docker/docker/pkg/authorization/authz_unix_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/authorization/authz_unix_test.go @@ -99,7 +99,7 @@ func TestAuthZResponsePlugin(t *testing.T) { request := Request{ User: "user", - RequestURI: "someting.com/auth", + RequestURI: "something.com/auth", RequestBody: []byte("sample body"), } server.replayResponse = Response{ diff --git a/fn/vendor/github.com/docker/docker/pkg/authorization/middleware.go b/fn/vendor/github.com/docker/docker/pkg/authorization/middleware.go index 05130121e..7789a758d 100644 --- a/fn/vendor/github.com/docker/docker/pkg/authorization/middleware.go +++ b/fn/vendor/github.com/docker/docker/pkg/authorization/middleware.go @@ -25,20 +25,12 @@ func NewMiddleware(names []string, pg plugingetter.PluginGetter) *Middleware { } } -// GetAuthzPlugins gets authorization 
plugins -func (m *Middleware) GetAuthzPlugins() []Plugin { +func (m *Middleware) getAuthzPlugins() []Plugin { m.mu.Lock() defer m.mu.Unlock() return m.plugins } -// SetAuthzPlugins sets authorization plugins -func (m *Middleware) SetAuthzPlugins(plugins []Plugin) { - m.mu.Lock() - m.plugins = plugins - m.mu.Unlock() -} - // SetPlugins sets the plugin used for authorization func (m *Middleware) SetPlugins(names []string) { m.mu.Lock() @@ -46,10 +38,23 @@ func (m *Middleware) SetPlugins(names []string) { m.mu.Unlock() } +// RemovePlugin removes a single plugin from this authz middleware chain +func (m *Middleware) RemovePlugin(name string) { + m.mu.Lock() + defer m.mu.Unlock() + plugins := m.plugins[:0] + for _, authPlugin := range m.plugins { + if authPlugin.Name() != name { + plugins = append(plugins, authPlugin) + } + } + m.plugins = plugins +} + // WrapHandler returns a new handler function wrapping the previous one in the request chain. func (m *Middleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - plugins := m.GetAuthzPlugins() + plugins := m.getAuthzPlugins() if len(plugins) == 0 { return handler(ctx, w, r, vars) } @@ -83,7 +88,7 @@ func (m *Middleware) WrapHandler(handler func(ctx context.Context, w http.Respon // There's a chance that the authCtx.plugins was updated. One of the reasons // this can happen is when an authzplugin is disabled. 
- plugins = m.GetAuthzPlugins() + plugins = m.getAuthzPlugins() if len(plugins) == 0 { logrus.Debug("There are no authz plugins in the chain") return nil diff --git a/fn/vendor/github.com/docker/docker/pkg/authorization/middleware_test.go b/fn/vendor/github.com/docker/docker/pkg/authorization/middleware_test.go new file mode 100644 index 000000000..fc7401135 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/authorization/middleware_test.go @@ -0,0 +1,53 @@ +package authorization + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/stretchr/testify/require" +) + +func TestMiddleware(t *testing.T) { + pluginNames := []string{"testPlugin1", "testPlugin2"} + var pluginGetter plugingetter.PluginGetter + m := NewMiddleware(pluginNames, pluginGetter) + authPlugins := m.getAuthzPlugins() + require.Equal(t, 2, len(authPlugins)) + require.EqualValues(t, pluginNames[0], authPlugins[0].Name()) + require.EqualValues(t, pluginNames[1], authPlugins[1].Name()) +} + +func TestNewResponseModifier(t *testing.T) { + recorder := httptest.NewRecorder() + modifier := NewResponseModifier(recorder) + modifier.Header().Set("H1", "V1") + modifier.Write([]byte("body")) + require.False(t, modifier.Hijacked()) + modifier.WriteHeader(http.StatusInternalServerError) + require.NotNil(t, modifier.RawBody()) + + raw, err := modifier.RawHeaders() + require.NotNil(t, raw) + require.Nil(t, err) + + headerData := strings.Split(strings.TrimSpace(string(raw)), ":") + require.EqualValues(t, "H1", strings.TrimSpace(headerData[0])) + require.EqualValues(t, "V1", strings.TrimSpace(headerData[1])) + + modifier.Flush() + modifier.FlushAll() + + if recorder.Header().Get("H1") != "V1" { + t.Fatalf("Header value must exists %s", recorder.Header().Get("H1")) + } + +} + +func setAuthzPlugins(m *Middleware, plugins []Plugin) { + m.mu.Lock() + m.plugins = plugins + m.mu.Unlock() +} diff --git 
a/fn/vendor/github.com/docker/docker/pkg/authorization/middleware_unix_test.go b/fn/vendor/github.com/docker/docker/pkg/authorization/middleware_unix_test.go new file mode 100644 index 000000000..fd684f120 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/authorization/middleware_unix_test.go @@ -0,0 +1,65 @@ +// +build !windows + +package authorization + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/stretchr/testify/require" + "golang.org/x/net/context" +) + +func TestMiddlewareWrapHandler(t *testing.T) { + server := authZPluginTestServer{t: t} + server.start() + defer server.stop() + + authZPlugin := createTestPlugin(t) + pluginNames := []string{authZPlugin.name} + + var pluginGetter plugingetter.PluginGetter + middleWare := NewMiddleware(pluginNames, pluginGetter) + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return nil + } + + authList := []Plugin{authZPlugin} + middleWare.SetPlugins([]string{"My Test Plugin"}) + setAuthzPlugins(middleWare, authList) + mdHandler := middleWare.WrapHandler(handler) + require.NotNil(t, mdHandler) + + addr := "www.example.com/auth" + req, _ := http.NewRequest("GET", addr, nil) + req.RequestURI = addr + req.Header.Add("header", "value") + + resp := httptest.NewRecorder() + ctx := context.Background() + + t.Run("Error Test Case :", func(t *testing.T) { + server.replayResponse = Response{ + Allow: false, + Msg: "Server Auth Not Allowed", + } + if err := mdHandler(ctx, resp, req, map[string]string{}); err == nil { + require.Error(t, err) + } + + }) + + t.Run("Positive Test Case :", func(t *testing.T) { + server.replayResponse = Response{ + Allow: true, + Msg: "Server Auth Allowed", + } + if err := mdHandler(ctx, resp, req, map[string]string{}); err != nil { + require.NoError(t, err) + } + + }) + +} diff --git a/fn/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go 
b/fn/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go index a7814f5b9..760441876 100644 --- a/fn/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go +++ b/fn/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go @@ -11,7 +11,13 @@ import ( "github.com/docker/docker/pkg/idtools" ) -var chrootArchiver = &archive.Archiver{Untar: Untar} +// NewArchiver returns a new Archiver which uses chrootarchive.Untar +func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver { + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + return &archive.Archiver{Untar: Untar, IDMappings: idMappings} +} // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. @@ -30,7 +36,6 @@ func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOp // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { - if tarArchive == nil { return fmt.Errorf("Empty archive") } @@ -41,14 +46,12 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions options.ExcludePatterns = []string{} } - rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return err - } + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMappings.RootPair() dest = filepath.Clean(dest) if _, err := os.Stat(dest); os.IsNotExist(err) { - if err := idtools.MkdirAllNewAs(dest, 0755, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil { return err } } @@ -65,33 +68,3 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions return invokeUnpack(r, dest, options) } - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. 
-// If either Tar or Untar fails, TarUntar aborts and returns the error. -func TarUntar(src, dst string) error { - return chrootArchiver.TarUntar(src, dst) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -func CopyWithTar(src, dst string) error { - return chrootArchiver.CopyWithTar(src, dst) -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -// -// If `dst` ends with a trailing slash '/' ('\' on Windows), the final -// destination path will be `dst/base(src)` or `dst\base(src)` -func CopyFileWithTar(src, dst string) (err error) { - return chrootArchiver.CopyFileWithTar(src, dst) -} - -// UntarPath is a convenience function which looks for an archive -// at filesystem path `src`, and unpacks it at `dst`. 
-func UntarPath(src, dst string) error { - return chrootArchiver.UntarPath(src, dst) -} diff --git a/fn/vendor/github.com/docker/docker/pkg/chrootarchive/archive_test.go b/fn/vendor/github.com/docker/docker/pkg/chrootarchive/archive_test.go index 80e54a0ed..bd2deb2dd 100644 --- a/fn/vendor/github.com/docker/docker/pkg/chrootarchive/archive_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/chrootarchive/archive_test.go @@ -22,6 +22,24 @@ func init() { reexec.Init() } +var chrootArchiver = NewArchiver(nil) + +func TarUntar(src, dst string) error { + return chrootArchiver.TarUntar(src, dst) +} + +func CopyFileWithTar(src, dst string) (err error) { + return chrootArchiver.CopyFileWithTar(src, dst) +} + +func UntarPath(src, dst string) error { + return chrootArchiver.UntarPath(src, dst) +} + +func CopyWithTar(src, dst string) error { + return chrootArchiver.CopyWithTar(src, dst) +} + func TestChrootTarUntar(t *testing.T) { tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntar") if err != nil { @@ -29,7 +47,7 @@ func TestChrootTarUntar(t *testing.T) { } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700); err != nil { + if err := system.MkdirAll(src, 0700, ""); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { @@ -43,7 +61,7 @@ func TestChrootTarUntar(t *testing.T) { t.Fatal(err) } dest := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(dest, 0700); err != nil { + if err := system.MkdirAll(dest, 0700, ""); err != nil { t.Fatal(err) } if err := Untar(stream, dest, &archive.TarOptions{ExcludePatterns: []string{"lolo"}}); err != nil { @@ -60,7 +78,7 @@ func TestChrootUntarWithHugeExcludesList(t *testing.T) { } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700); err != nil { + if err := system.MkdirAll(src, 0700, ""); err != nil { t.Fatal(err) } if err := 
ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { @@ -71,7 +89,7 @@ func TestChrootUntarWithHugeExcludesList(t *testing.T) { t.Fatal(err) } dest := filepath.Join(tmpdir, "dest") - if err := system.MkdirAll(dest, 0700); err != nil { + if err := system.MkdirAll(dest, 0700, ""); err != nil { t.Fatal(err) } options := &archive.TarOptions{} @@ -162,7 +180,7 @@ func TestChrootTarUntarWithSymlink(t *testing.T) { } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700); err != nil { + if err := system.MkdirAll(src, 0700, ""); err != nil { t.Fatal(err) } if _, err := prepareSourceDirectory(10, src, false); err != nil { @@ -188,7 +206,7 @@ func TestChrootCopyWithTar(t *testing.T) { } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700); err != nil { + if err := system.MkdirAll(src, 0700, ""); err != nil { t.Fatal(err) } if _, err := prepareSourceDirectory(10, src, true); err != nil { @@ -234,7 +252,7 @@ func TestChrootCopyFileWithTar(t *testing.T) { } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700); err != nil { + if err := system.MkdirAll(src, 0700, ""); err != nil { t.Fatal(err) } if _, err := prepareSourceDirectory(10, src, true); err != nil { @@ -281,7 +299,7 @@ func TestChrootUntarPath(t *testing.T) { } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700); err != nil { + if err := system.MkdirAll(src, 0700, ""); err != nil { t.Fatal(err) } if _, err := prepareSourceDirectory(10, src, false); err != nil { @@ -342,7 +360,7 @@ func TestChrootUntarEmptyArchiveFromSlowReader(t *testing.T) { } defer os.RemoveAll(tmpdir) dest := filepath.Join(tmpdir, "dest") - if err := system.MkdirAll(dest, 0700); err != nil { + if err := system.MkdirAll(dest, 0700, ""); err != nil { t.Fatal(err) } stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} @@ 
-358,7 +376,7 @@ func TestChrootApplyEmptyArchiveFromSlowReader(t *testing.T) { } defer os.RemoveAll(tmpdir) dest := filepath.Join(tmpdir, "dest") - if err := system.MkdirAll(dest, 0700); err != nil { + if err := system.MkdirAll(dest, 0700, ""); err != nil { t.Fatal(err) } stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} @@ -374,7 +392,7 @@ func TestChrootApplyDotDotFile(t *testing.T) { } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700); err != nil { + if err := system.MkdirAll(src, 0700, ""); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(src, "..gitme"), []byte(""), 0644); err != nil { @@ -385,7 +403,7 @@ func TestChrootApplyDotDotFile(t *testing.T) { t.Fatal(err) } dest := filepath.Join(tmpdir, "dest") - if err := system.MkdirAll(dest, 0700); err != nil { + if err := system.MkdirAll(dest, 0700, ""); err != nil { t.Fatal(err) } if _, err := ApplyLayer(dest, stream); err != nil { diff --git a/fn/vendor/github.com/docker/docker/pkg/devicemapper/devmapper.go b/fn/vendor/github.com/docker/docker/pkg/devicemapper/devmapper.go index 07161d43e..08e0c06aa 100644 --- a/fn/vendor/github.com/docker/docker/pkg/devicemapper/devmapper.go +++ b/fn/vendor/github.com/docker/docker/pkg/devicemapper/devmapper.go @@ -155,6 +155,7 @@ func (t *Task) run() error { if res := DmTaskRun(t.unmanaged); res != 1 { return ErrTaskRun } + runtime.KeepAlive(t) return nil } @@ -257,7 +258,7 @@ func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start // UdevWait waits for any processes that are waiting for udev to complete the specified cookie. 
func UdevWait(cookie *uint) error { if res := DmUdevWait(*cookie); res != 1 { - logrus.Debugf("devicemapper: Failed to wait on udev cookie %d", *cookie) + logrus.Debugf("devicemapper: Failed to wait on udev cookie %d, %d", *cookie, res) return ErrUdevWait } return nil @@ -328,11 +329,11 @@ func RemoveDevice(name string) error { return err } - var cookie uint - if err := task.setCookie(&cookie, 0); err != nil { + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { return fmt.Errorf("devicemapper: Can not set cookie: %s", err) } - defer UdevWait(&cookie) + defer UdevWait(cookie) dmSawBusy = false // reset before the task is run if err = task.run(); err != nil { @@ -361,10 +362,10 @@ func RemoveDeviceDeferred(name string) error { // set a task cookie and disable library fallback, or else libdevmapper will // disable udev dm rules and delete the symlink under /dev/mapper by itself, // even if the removal is deferred by the kernel. - var cookie uint + cookie := new(uint) var flags uint16 flags = DmUdevDisableLibraryFallback - if err := task.setCookie(&cookie, flags); err != nil { + if err := task.setCookie(cookie, flags); err != nil { return fmt.Errorf("devicemapper: Can not set cookie: %s", err) } @@ -372,12 +373,12 @@ func RemoveDeviceDeferred(name string) error { // semaphores created in `task.setCookie` will be cleaned up in `UdevWait`. // So these two function call must come in pairs, otherwise semaphores will // be leaked, and the limit of number of semaphores defined in `/proc/sys/kernel/sem` - // will be reached, which will eventually make all follwing calls to 'task.SetCookie' + // will be reached, which will eventually make all following calls to 'task.SetCookie' // fail. // this call will not wait for the deferred removal's final executing, since no // udev event will be generated, and the semaphore's value will not be incremented // by udev, what UdevWait is just cleaning up the semaphore. 
- defer UdevWait(&cookie) + defer UdevWait(cookie) if err = task.run(); err != nil { return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err) @@ -471,13 +472,13 @@ func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize return fmt.Errorf("devicemapper: Can't add target %s", err) } - var cookie uint + cookie := new(uint) var flags uint16 flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag - if err := task.setCookie(&cookie, flags); err != nil { + if err := task.setCookie(cookie, flags); err != nil { return fmt.Errorf("devicemapper: Can't set cookie %s", err) } - defer UdevWait(&cookie) + defer UdevWait(cookie) if err := task.run(); err != nil { return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err) @@ -659,11 +660,11 @@ func ResumeDevice(name string) error { return err } - var cookie uint - if err := task.setCookie(&cookie, 0); err != nil { + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { return fmt.Errorf("devicemapper: Can't set cookie %s", err) } - defer UdevWait(&cookie) + defer UdevWait(cookie) if err := task.run(); err != nil { return fmt.Errorf("devicemapper: Error running deviceResume %s", err) @@ -757,12 +758,12 @@ func activateDevice(poolName string, name string, deviceID int, size uint64, ext return fmt.Errorf("devicemapper: Can't add node %s", err) } - var cookie uint - if err := task.setCookie(&cookie, 0); err != nil { + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { return fmt.Errorf("devicemapper: Can't set cookie %s", err) } - defer UdevWait(&cookie) + defer UdevWait(cookie) if err := task.run(); err != nil { return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err) diff --git a/fn/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go b/fn/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go index 5d08a997a..2614e05d3 100644 --- 
a/fn/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go +++ b/fn/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go @@ -2,7 +2,7 @@ package filenotify import "github.com/fsnotify/fsnotify" -// fsNotifyWatcher wraps the fsnotify package to satisfy the FileNotifer interface +// fsNotifyWatcher wraps the fsnotify package to satisfy the FileNotifier interface type fsNotifyWatcher struct { *fsnotify.Watcher } diff --git a/fn/vendor/github.com/docker/docker/pkg/filenotify/poller_test.go b/fn/vendor/github.com/docker/docker/pkg/filenotify/poller_test.go index b74bd935a..d85420199 100644 --- a/fn/vendor/github.com/docker/docker/pkg/filenotify/poller_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/filenotify/poller_test.go @@ -61,7 +61,7 @@ func TestPollerEvent(t *testing.T) { default: } - if err := ioutil.WriteFile(f.Name(), []byte("hello"), 644); err != nil { + if err := ioutil.WriteFile(f.Name(), []byte("hello"), 0644); err != nil { t.Fatal(err) } if err := assertEvent(w, fsnotify.Write); err != nil { diff --git a/fn/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go b/fn/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go index 4b178a16b..3d61d55c3 100644 --- a/fn/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go @@ -8,6 +8,10 @@ import ( "runtime" "strings" "testing" + + "fmt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // CopyFile with invalid src @@ -299,17 +303,14 @@ func TestMatchesWithMalformedPatterns(t *testing.T) { } } -// Test lots of variants of patterns & strings +type matchesTestCase struct { + pattern string + text string + pass bool +} + func TestMatches(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - tests := []struct { - pattern string - text string - pass bool - }{ + tests := []matchesTestCase{ {"**", "file", 
true}, {"**", "file/", true}, {"**/", "file", true}, // weird one @@ -361,9 +362,6 @@ func TestMatches(t *testing.T) { {"abc.def", "abcZdef", false}, {"abc?def", "abcZdef", true}, {"abc?def", "abcdef", false}, - {"a\\*b", "a*b", true}, - {"a\\", "a", false}, - {"a\\", "a\\", false}, {"a\\\\", "a\\", true}, {"**/foo/bar", "foo/bar", true}, {"**/foo/bar", "dir/foo/bar", true}, @@ -375,15 +373,20 @@ func TestMatches(t *testing.T) { {"**/.foo", "bar.foo", false}, } + if runtime.GOOS != "windows" { + tests = append(tests, []matchesTestCase{ + {"a\\*b", "a*b", true}, + {"a\\", "a", false}, + {"a\\", "a\\", false}, + }...) + } + for _, test := range tests { + desc := fmt.Sprintf("pattern=%q text=%q", test.pattern, test.text) pm, err := NewPatternMatcher([]string{test.pattern}) - if err != nil { - t.Fatalf("invalid pattern %s", test.pattern) - } + require.NoError(t, err, desc) res, _ := pm.Matches(test.text) - if res != test.pass { - t.Fatalf("Failed: %v - res:%v", test, res) - } + assert.Equal(t, test.pass, res, desc) } } diff --git a/fn/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go b/fn/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go deleted file mode 100644 index ded091f2a..000000000 --- a/fn/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go +++ /dev/null @@ -1,100 +0,0 @@ -package gitutils - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/url" - "os" - "os/exec" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/pkg/urlutil" -) - -// Clone clones a repository into a newly created directory which -// will be under "docker-build-git" -func Clone(remoteURL string) (string, error) { - if !urlutil.IsGitTransport(remoteURL) { - remoteURL = "https://" + remoteURL - } - root, err := ioutil.TempDir("", "docker-build-git") - if err != nil { - return "", err - } - - u, err := url.Parse(remoteURL) - if err != nil { - return "", err - } - - fragment := u.Fragment - clone := cloneArgs(u, root) - - if 
output, err := git(clone...); err != nil { - return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output) - } - - return checkoutGit(fragment, root) -} - -func cloneArgs(remoteURL *url.URL, root string) []string { - args := []string{"clone", "--recursive"} - shallow := len(remoteURL.Fragment) == 0 - - if shallow && strings.HasPrefix(remoteURL.Scheme, "http") { - res, err := http.Head(fmt.Sprintf("%s/info/refs?service=git-upload-pack", remoteURL)) - if err != nil || res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" { - shallow = false - } - } - - if shallow { - args = append(args, "--depth", "1") - } - - if remoteURL.Fragment != "" { - remoteURL.Fragment = "" - } - - return append(args, remoteURL.String(), root) -} - -func checkoutGit(fragment, root string) (string, error) { - refAndDir := strings.SplitN(fragment, ":", 2) - - if len(refAndDir[0]) != 0 { - if output, err := gitWithinDir(root, "checkout", refAndDir[0]); err != nil { - return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output) - } - } - - if len(refAndDir) > 1 && len(refAndDir[1]) != 0 { - newCtx, err := symlink.FollowSymlinkInScope(filepath.Join(root, refAndDir[1]), root) - if err != nil { - return "", fmt.Errorf("Error setting git context, %q not within git root: %s", refAndDir[1], err) - } - - fi, err := os.Stat(newCtx) - if err != nil { - return "", err - } - if !fi.IsDir() { - return "", fmt.Errorf("Error setting git context, not a directory: %s", newCtx) - } - root = newCtx - } - - return root, nil -} - -func gitWithinDir(dir string, args ...string) ([]byte, error) { - a := []string{"--work-tree", dir, "--git-dir", filepath.Join(dir, ".git")} - return git(append(a, args...)...) 
-} - -func git(args ...string) ([]byte, error) { - return exec.Command("git", args...).CombinedOutput() -} diff --git a/fn/vendor/github.com/docker/docker/pkg/gitutils/gitutils_test.go b/fn/vendor/github.com/docker/docker/pkg/gitutils/gitutils_test.go deleted file mode 100644 index d197058d2..000000000 --- a/fn/vendor/github.com/docker/docker/pkg/gitutils/gitutils_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package gitutils - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/url" - "os" - "path/filepath" - "reflect" - "runtime" - "strings" - "testing" -) - -func TestCloneArgsSmartHttp(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - serverURL, _ := url.Parse(server.URL) - - serverURL.Path = "/repo.git" - gitURL := serverURL.String() - - mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { - q := r.URL.Query().Get("service") - w.Header().Set("Content-Type", fmt.Sprintf("application/x-%s-advertisement", q)) - }) - - args := cloneArgs(serverURL, "/tmp") - exp := []string{"clone", "--recursive", "--depth", "1", gitURL, "/tmp"} - if !reflect.DeepEqual(args, exp) { - t.Fatalf("Expected %v, got %v", exp, args) - } -} - -func TestCloneArgsDumbHttp(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - serverURL, _ := url.Parse(server.URL) - - serverURL.Path = "/repo.git" - gitURL := serverURL.String() - - mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain") - }) - - args := cloneArgs(serverURL, "/tmp") - exp := []string{"clone", "--recursive", gitURL, "/tmp"} - if !reflect.DeepEqual(args, exp) { - t.Fatalf("Expected %v, got %v", exp, args) - } -} - -func TestCloneArgsGit(t *testing.T) { - u, _ := url.Parse("git://github.com/docker/docker") - args := cloneArgs(u, "/tmp") - exp := []string{"clone", "--recursive", "--depth", "1", "git://github.com/docker/docker", "/tmp"} - if 
!reflect.DeepEqual(args, exp) { - t.Fatalf("Expected %v, got %v", exp, args) - } -} - -func TestCloneArgsStripFragment(t *testing.T) { - u, _ := url.Parse("git://github.com/docker/docker#test") - args := cloneArgs(u, "/tmp") - exp := []string{"clone", "--recursive", "git://github.com/docker/docker", "/tmp"} - if !reflect.DeepEqual(args, exp) { - t.Fatalf("Expected %v, got %v", exp, args) - } -} - -func gitGetConfig(name string) string { - b, err := git([]string{"config", "--get", name}...) - if err != nil { - // since we are interested in empty or non empty string, - // we can safely ignore the err here. - return "" - } - return strings.TrimSpace(string(b)) -} - -func TestCheckoutGit(t *testing.T) { - root, err := ioutil.TempDir("", "docker-build-git-checkout") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(root) - - autocrlf := gitGetConfig("core.autocrlf") - if !(autocrlf == "true" || autocrlf == "false" || - autocrlf == "input" || autocrlf == "") { - t.Logf("unknown core.autocrlf value: \"%s\"", autocrlf) - } - eol := "\n" - if autocrlf == "true" { - eol = "\r\n" - } - - gitDir := filepath.Join(root, "repo") - _, err = git("init", gitDir) - if err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "config", "user.email", "test@docker.com"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "config", "user.name", "Docker test"); err != nil { - t.Fatal(err) - } - - if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch"), 0644); err != nil { - t.Fatal(err) - } - - subDir := filepath.Join(gitDir, "subdir") - if err = os.Mkdir(subDir, 0755); err != nil { - t.Fatal(err) - } - - if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 5000"), 0644); err != nil { - t.Fatal(err) - } - - if runtime.GOOS != "windows" { - if err = os.Symlink("../subdir", filepath.Join(gitDir, "parentlink")); err != nil { - t.Fatal(err) - } - - if err = os.Symlink("/subdir", 
filepath.Join(gitDir, "absolutelink")); err != nil { - t.Fatal(err) - } - } - - if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "commit", "-am", "First commit"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "checkout", "-b", "test"); err != nil { - t.Fatal(err) - } - - if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 3000"), 0644); err != nil { - t.Fatal(err) - } - - if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM busybox\nEXPOSE 5000"), 0644); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "commit", "-am", "Branch commit"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "checkout", "master"); err != nil { - t.Fatal(err) - } - - type singleCase struct { - frag string - exp string - fail bool - } - - cases := []singleCase{ - {"", "FROM scratch", false}, - {"master", "FROM scratch", false}, - {":subdir", "FROM scratch" + eol + "EXPOSE 5000", false}, - {":nosubdir", "", true}, // missing directory error - {":Dockerfile", "", true}, // not a directory error - {"master:nosubdir", "", true}, - {"master:subdir", "FROM scratch" + eol + "EXPOSE 5000", false}, - {"master:../subdir", "", true}, - {"test", "FROM scratch" + eol + "EXPOSE 3000", false}, - {"test:", "FROM scratch" + eol + "EXPOSE 3000", false}, - {"test:subdir", "FROM busybox" + eol + "EXPOSE 5000", false}, - } - - if runtime.GOOS != "windows" { - // Windows GIT (2.7.1 x64) does not support parentlink/absolutelink. 
Sample output below - // git --work-tree .\repo --git-dir .\repo\.git add -A - // error: readlink("absolutelink"): Function not implemented - // error: unable to index file absolutelink - // fatal: adding files failed - cases = append(cases, singleCase{frag: "master:absolutelink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false}) - cases = append(cases, singleCase{frag: "master:parentlink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false}) - } - - for _, c := range cases { - r, err := checkoutGit(c.frag, gitDir) - - fail := err != nil - if fail != c.fail { - t.Fatalf("Expected %v failure, error was %v\n", c.fail, err) - } - if c.fail { - continue - } - - b, err := ioutil.ReadFile(filepath.Join(r, "Dockerfile")) - if err != nil { - t.Fatal(err) - } - - if string(b) != c.exp { - t.Fatalf("Expected %v, was %v\n", c.exp, string(b)) - } - } -} diff --git a/fn/vendor/github.com/docker/docker/pkg/httputils/httputils.go b/fn/vendor/github.com/docker/docker/pkg/httputils/httputils.go deleted file mode 100644 index af86835bd..000000000 --- a/fn/vendor/github.com/docker/docker/pkg/httputils/httputils.go +++ /dev/null @@ -1,56 +0,0 @@ -package httputils - -import ( - "errors" - "fmt" - "net/http" - "regexp" - "strings" - - "github.com/docker/docker/pkg/jsonmessage" -) - -var ( - headerRegexp = regexp.MustCompile(`^(?:(.+)/(.+?))\((.+)\).*$`) - errInvalidHeader = errors.New("Bad header, should be in format `docker/version (platform)`") -) - -// Download requests a given URL and returns an io.Reader. -func Download(url string) (resp *http.Response, err error) { - if resp, err = http.Get(url); err != nil { - return nil, err - } - if resp.StatusCode >= 400 { - return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status) - } - return resp, nil -} - -// NewHTTPRequestError returns a JSON response error. 
-func NewHTTPRequestError(msg string, res *http.Response) error { - return &jsonmessage.JSONError{ - Message: msg, - Code: res.StatusCode, - } -} - -// ServerHeader contains the server information. -type ServerHeader struct { - App string // docker - Ver string // 1.8.0-dev - OS string // windows or linux -} - -// ParseServerHeader extracts pieces from an HTTP server header -// which is in the format "docker/version (os)" e.g. docker/1.8.0-dev (windows). -func ParseServerHeader(hdr string) (*ServerHeader, error) { - matches := headerRegexp.FindStringSubmatch(hdr) - if len(matches) != 4 { - return nil, errInvalidHeader - } - return &ServerHeader{ - App: strings.TrimSpace(matches[1]), - Ver: strings.TrimSpace(matches[2]), - OS: strings.TrimSpace(matches[3]), - }, nil -} diff --git a/fn/vendor/github.com/docker/docker/pkg/httputils/httputils_test.go b/fn/vendor/github.com/docker/docker/pkg/httputils/httputils_test.go deleted file mode 100644 index 725c68f41..000000000 --- a/fn/vendor/github.com/docker/docker/pkg/httputils/httputils_test.go +++ /dev/null @@ -1,115 +0,0 @@ -package httputils - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "strings" - "testing" -) - -func TestDownload(t *testing.T) { - expected := "Hello, docker !" 
- ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, expected) - })) - defer ts.Close() - response, err := Download(ts.URL) - if err != nil { - t.Fatal(err) - } - - actual, err := ioutil.ReadAll(response.Body) - response.Body.Close() - - if err != nil || string(actual) != expected { - t.Fatalf("Expected the response %q, got err:%q, actual:%q", expected, err, string(actual)) - } -} - -func TestDownload400Errors(t *testing.T) { - expectedError := "Got HTTP status code >= 400: 403 Forbidden" - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // 403 - http.Error(w, "something failed (forbidden)", http.StatusForbidden) - })) - defer ts.Close() - // Expected status code = 403 - if _, err := Download(ts.URL); err == nil || err.Error() != expectedError { - t.Fatalf("Expected the error %q, got %q", expectedError, err) - } -} - -func TestDownloadOtherErrors(t *testing.T) { - if _, err := Download("I'm not an url.."); err == nil || !strings.Contains(err.Error(), "unsupported protocol scheme") { - t.Fatalf("Expected an error with 'unsupported protocol scheme', got %q", err) - } -} - -func TestNewHTTPRequestError(t *testing.T) { - errorMessage := "Some error message" - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // 403 - http.Error(w, errorMessage, http.StatusForbidden) - })) - defer ts.Close() - httpResponse, err := http.Get(ts.URL) - if err != nil { - t.Fatal(err) - } - if err := NewHTTPRequestError(errorMessage, httpResponse); err.Error() != errorMessage { - t.Fatalf("Expected err to be %q, got %q", errorMessage, err) - } -} - -func TestParseServerHeader(t *testing.T) { - inputs := map[string][]string{ - "bad header": {"error"}, - "(bad header)": {"error"}, - "(without/spaces)": {"error"}, - "(header/with spaces)": {"error"}, - "foo/bar (baz)": {"foo", "bar", "baz"}, - "foo/bar": {"error"}, - "foo": {"error"}, - "foo/bar (baz 
space)": {"foo", "bar", "baz space"}, - " f f / b b ( b s ) ": {"f f", "b b", "b s"}, - "foo/bar (baz) ignore": {"foo", "bar", "baz"}, - "foo/bar ()": {"error"}, - "foo/bar()": {"error"}, - "foo/bar(baz)": {"foo", "bar", "baz"}, - "foo/bar/zzz(baz)": {"foo/bar", "zzz", "baz"}, - "foo/bar(baz/abc)": {"foo", "bar", "baz/abc"}, - "foo/bar(baz (abc))": {"foo", "bar", "baz (abc)"}, - } - - for header, values := range inputs { - serverHeader, err := ParseServerHeader(header) - if err != nil { - if err != errInvalidHeader { - t.Fatalf("Failed to parse %q, and got some unexpected error: %q", header, err) - } - if values[0] == "error" { - continue - } - t.Fatalf("Header %q failed to parse when it shouldn't have", header) - } - if values[0] == "error" { - t.Fatalf("Header %q parsed ok when it should have failed(%q).", header, serverHeader) - } - - if serverHeader.App != values[0] { - t.Fatalf("Expected serverHeader.App for %q to equal %q, got %q", header, values[0], serverHeader.App) - } - - if serverHeader.Ver != values[1] { - t.Fatalf("Expected serverHeader.Ver for %q to equal %q, got %q", header, values[1], serverHeader.Ver) - } - - if serverHeader.OS != values[2] { - t.Fatalf("Expected serverHeader.OS for %q to equal %q, got %q", header, values[2], serverHeader.OS) - } - - } - -} diff --git a/fn/vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go b/fn/vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go deleted file mode 100644 index a6cc047db..000000000 --- a/fn/vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package httputils - -import ( - "testing" -) - -func TestDetectContentType(t *testing.T) { - input := []byte("That is just a plain text") - - if contentType, _, err := DetectContentType(input); err != nil || contentType != "text/plain" { - t.Error("TestDetectContentType failed") - } -} diff --git a/fn/vendor/github.com/docker/docker/pkg/idtools/idtools.go 
b/fn/vendor/github.com/docker/docker/pkg/idtools/idtools.go index 6bca46628..68a072db2 100644 --- a/fn/vendor/github.com/docker/docker/pkg/idtools/idtools.go +++ b/fn/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -37,49 +37,56 @@ const ( // MkdirAllAs creates a directory (include any along the path) and then modifies // ownership to the requested uid/gid. If the directory already exists, this // function will still change ownership to the requested uid/gid pair. +// Deprecated: Use MkdirAllAndChown func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { return mkdirAs(path, mode, ownerUID, ownerGID, true, true) } -// MkdirAllNewAs creates a directory (include any along the path) and then modifies -// ownership ONLY of newly created directories to the requested uid/gid. If the -// directories along the path exist, no change of ownership will be performed -func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, true, false) -} - // MkdirAs creates a directory and then modifies ownership to the requested uid/gid. // If the directory already exists, this function still changes ownership +// Deprecated: Use MkdirAndChown with a IDPair func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { return mkdirAs(path, mode, ownerUID, ownerGID, false, true) } +// MkdirAllAndChown creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. +func MkdirAllAndChown(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, true, true) +} + +// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. 
+// If the directory already exists, this function still changes ownership +func MkdirAndChown(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, false, true) +} + +// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies +// ownership ONLY of newly created directories to the requested uid/gid. If the +// directories along the path exist, no change of ownership will be performed +func MkdirAllAndChownNew(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, true, false) +} + // GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. // If the maps are empty, then the root uid/gid will default to "real" 0/0 func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { - var uid, gid int - - if uidMap != nil { - xUID, err := ToHost(0, uidMap) - if err != nil { - return -1, -1, err - } - uid = xUID + uid, err := toHost(0, uidMap) + if err != nil { + return -1, -1, err } - if gidMap != nil { - xGID, err := ToHost(0, gidMap) - if err != nil { - return -1, -1, err - } - gid = xGID + gid, err := toHost(0, gidMap) + if err != nil { + return -1, -1, err } return uid, gid, nil } -// ToContainer takes an id mapping, and uses it to translate a +// toContainer takes an id mapping, and uses it to translate a // host ID to the remapped ID. If no map is provided, then the translation // assumes a 1-to-1 mapping and returns the passed in id -func ToContainer(hostID int, idMap []IDMap) (int, error) { +func toContainer(hostID int, idMap []IDMap) (int, error) { if idMap == nil { return hostID, nil } @@ -92,10 +99,10 @@ func ToContainer(hostID int, idMap []IDMap) (int, error) { return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) } -// ToHost takes an id mapping and a remapped ID, and translates the +// toHost takes an id mapping and a remapped ID, and translates the // ID to the mapped host ID. 
If no map is provided, then the translation // assumes a 1-to-1 mapping and returns the passed in id # -func ToHost(contID int, idMap []IDMap) (int, error) { +func toHost(contID int, idMap []IDMap) (int, error) { if idMap == nil { return contID, nil } @@ -108,26 +115,101 @@ func ToHost(contID int, idMap []IDMap) (int, error) { return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) } -// CreateIDMappings takes a requested user and group name and +// IDPair is a UID and GID pair +type IDPair struct { + UID int + GID int +} + +// IDMappings contains a mappings of UIDs and GIDs +type IDMappings struct { + uids []IDMap + gids []IDMap +} + +// NewIDMappings takes a requested user and group name and // using the data from /etc/sub{uid,gid} ranges, creates the // proper uid and gid remapping ranges for that user/group pair -func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) { +func NewIDMappings(username, groupname string) (*IDMappings, error) { subuidRanges, err := parseSubuid(username) if err != nil { - return nil, nil, err + return nil, err } subgidRanges, err := parseSubgid(groupname) if err != nil { - return nil, nil, err + return nil, err } if len(subuidRanges) == 0 { - return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username) + return nil, fmt.Errorf("No subuid ranges found for user %q", username) } if len(subgidRanges) == 0 { - return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname) + return nil, fmt.Errorf("No subgid ranges found for group %q", groupname) } - return createIDMap(subuidRanges), createIDMap(subgidRanges), nil + return &IDMappings{ + uids: createIDMap(subuidRanges), + gids: createIDMap(subgidRanges), + }, nil +} + +// NewIDMappingsFromMaps creates a new mapping from two slices +// Deprecated: this is a temporary shim while transitioning to IDMapping +func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings { + return &IDMappings{uids: uids, gids: gids} 
+} + +// RootPair returns a uid and gid pair for the root user. The error is ignored +// because a root user always exists, and the defaults are correct when the uid +// and gid maps are empty. +func (i *IDMappings) RootPair() IDPair { + uid, gid, _ := GetRootUIDGID(i.uids, i.gids) + return IDPair{UID: uid, GID: gid} +} + +// ToHost returns the host UID and GID for the container uid, gid. +// Remapping is only performed if the ids aren't already the remapped root ids +func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { + var err error + target := i.RootPair() + + if pair.UID != target.UID { + target.UID, err = toHost(pair.UID, i.uids) + if err != nil { + return target, err + } + } + + if pair.GID != target.GID { + target.GID, err = toHost(pair.GID, i.gids) + } + return target, err +} + +// ToContainer returns the container UID and GID for the host uid and gid +func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) { + uid, err := toContainer(pair.UID, i.uids) + if err != nil { + return -1, -1, err + } + gid, err := toContainer(pair.GID, i.gids) + return uid, gid, err +} + +// Empty returns true if there are no id mappings +func (i *IDMappings) Empty() bool { + return len(i.uids) == 0 && len(i.gids) == 0 +} + +// UIDs return the UID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IDMappings) UIDs() []IDMap { + return i.uids +} + +// GIDs return the UID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IDMappings) GIDs() []IDMap { + return i.gids } func createIDMap(subidRanges ranges) []IDMap { diff --git a/fn/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/fn/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go index 7c7e82aee..8701bb7fa 100644 --- a/fn/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ b/fn/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -49,7 +49,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, 
ownerGID int, mkAll, chown paths = append(paths, dirPath) } } - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) { return err } } else { @@ -69,15 +69,15 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown // CanAccess takes a valid (existing) directory and a uid, gid pair and determines // if that uid, gid pair has access (execute bit) to the directory -func CanAccess(path string, uid, gid int) bool { +func CanAccess(path string, pair IDPair) bool { statInfo, err := system.Stat(path) if err != nil { return false } fileMode := os.FileMode(statInfo.Mode()) permBits := fileMode.Perm() - return accessible(statInfo.UID() == uint32(uid), - statInfo.GID() == uint32(gid), permBits) + return accessible(statInfo.UID() == uint32(pair.UID), + statInfo.GID() == uint32(pair.GID), permBits) } func accessible(isOwner, isGroup bool, perms os.FileMode) bool { diff --git a/fn/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go b/fn/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go index 540d3079e..31522a547 100644 --- a/fn/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go @@ -9,6 +9,8 @@ import ( "path/filepath" "syscall" "testing" + + "github.com/stretchr/testify/require" ) type node struct { @@ -76,12 +78,9 @@ func TestMkdirAllAs(t *testing.T) { } } -func TestMkdirAllNewAs(t *testing.T) { - +func TestMkdirAllAndChownNew(t *testing.T) { dirName, err := ioutil.TempDir("", "mkdirnew") - if err != nil { - t.Fatalf("Couldn't create temp dir: %v", err) - } + require.NoError(t, err) defer os.RemoveAll(dirName) testTree := map[string]node{ @@ -91,49 +90,32 @@ func TestMkdirAllNewAs(t *testing.T) { "lib/x86_64": {45, 45}, "lib/x86_64/share": {1, 1}, } - - if err := buildTree(dirName, testTree); err != nil { - t.Fatal(err) - } + require.NoError(t, 
buildTree(dirName, testTree)) // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid - if err := MkdirAllNewAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil { - t.Fatal(err) - } + err = MkdirAllAndChownNew(filepath.Join(dirName, "usr", "share"), 0755, IDPair{99, 99}) + require.NoError(t, err) + testTree["usr/share"] = node{99, 99} verifyTree, err := readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, compareTrees(testTree, verifyTree)) // test 2-deep new directories--both should be owned by the uid/gid pair - if err := MkdirAllNewAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { - t.Fatal(err) - } + err = MkdirAllAndChownNew(filepath.Join(dirName, "lib", "some", "other"), 0755, IDPair{101, 101}) + require.NoError(t, err) testTree["lib/some"] = node{101, 101} testTree["lib/some/other"] = node{101, 101} verifyTree, err = readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, compareTrees(testTree, verifyTree)) // test a directory that already exists; should NOT be chowned - if err := MkdirAllNewAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { - t.Fatal(err) - } + err = MkdirAllAndChownNew(filepath.Join(dirName, "usr"), 0755, IDPair{102, 102}) + require.NoError(t, err) verifyTree, err = readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, compareTrees(testTree, verifyTree)) } func TestMkdirAs(t *testing.T) { diff --git a/fn/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/fn/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go index 
49f67e78c..45d2878e3 100644 --- a/fn/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go +++ b/fn/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go @@ -11,7 +11,7 @@ import ( // Platforms such as Windows do not support the UID/GID concept. So make this // just a wrapper around system.MkdirAll. func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) { return err } return nil @@ -20,6 +20,6 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown // CanAccess takes a valid (existing) directory and a uid, gid pair and determines // if that uid, gid pair has access (execute bit) to the directory // Windows does not require/support this function, so always return true -func CanAccess(path string, uid, gid int) bool { +func CanAccess(path string, pair IDPair) bool { return true } diff --git a/fn/vendor/github.com/docker/docker/pkg/ioutils/fmt.go b/fn/vendor/github.com/docker/docker/pkg/ioutils/fmt.go deleted file mode 100644 index 0b04b0ba3..000000000 --- a/fn/vendor/github.com/docker/docker/pkg/ioutils/fmt.go +++ /dev/null @@ -1,22 +0,0 @@ -package ioutils - -import ( - "fmt" - "io" -) - -// FprintfIfNotEmpty prints the string value if it's not empty -func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { - if value != "" { - return fmt.Fprintf(w, format, value) - } - return 0, nil -} - -// FprintfIfTrue prints the boolean value if it's true -func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) { - if ok { - return fmt.Fprintf(w, format, ok) - } - return 0, nil -} diff --git a/fn/vendor/github.com/docker/docker/pkg/ioutils/fmt_test.go b/fn/vendor/github.com/docker/docker/pkg/ioutils/fmt_test.go deleted file mode 100644 index 896886329..000000000 --- 
a/fn/vendor/github.com/docker/docker/pkg/ioutils/fmt_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package ioutils - -import "testing" - -func TestFprintfIfNotEmpty(t *testing.T) { - wc := NewWriteCounter(&NopWriter{}) - n, _ := FprintfIfNotEmpty(wc, "foo%s", "") - - if wc.Count != 0 || n != 0 { - t.Errorf("Wrong count: %v vs. %v vs. 0", wc.Count, n) - } - - n, _ = FprintfIfNotEmpty(wc, "foo%s", "bar") - if wc.Count != 6 || n != 6 { - t.Errorf("Wrong count: %v vs. %v vs. 6", wc.Count, n) - } -} diff --git a/fn/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling_test.go b/fn/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling_test.go index 3edb27141..8b0d072cd 100644 --- a/fn/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling_test.go @@ -7,17 +7,17 @@ import ( func TestJSONLogMarshalJSON(t *testing.T) { logs := map[*JSONLog]string{ - &JSONLog{Log: `"A log line with \\"`}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: "A log line"}: `^{\"log\":\"A log line\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: "A log line with \r"}: `^{\"log\":\"A log line with \\r\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: "A log line with & < >"}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: "A log line with utf8 : 🚀 ψ ω β"}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":\".{20,}\"}$`, - &JSONLog{Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":\".{20,}\"}$`, - &JSONLog{}: `^{\"time\":\".{20,}\"}$`, + {Log: `"A log line with \\"`}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":\".{20,}\"}$`, + {Log: "A log line"}: `^{\"log\":\"A log line\",\"time\":\".{20,}\"}$`, + {Log: "A log line with \r"}: `^{\"log\":\"A log line with \\r\",\"time\":\".{20,}\"}$`, + {Log: "A log line with & < >"}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":\".{20,}\"}$`, + {Log: 
"A log line with utf8 : 🚀 ψ ω β"}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":\".{20,}\"}$`, + {Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":\".{20,}\"}$`, + {}: `^{\"time\":\".{20,}\"}$`, // These ones are a little weird - &JSONLog{Log: "\u2028 \u2029"}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: string([]byte{0xaF})}: `^{\"log\":\"\\ufffd\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: string([]byte{0x7F})}: `^{\"log\":\"\x7f\",\"time\":\".{20,}\"}$`, + {Log: "\u2028 \u2029"}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":\".{20,}\"}$`, + {Log: string([]byte{0xaF})}: `^{\"log\":\"\\ufffd\",\"time\":\".{20,}\"}$`, + {Log: string([]byte{0x7F})}: `^{\"log\":\"\x7f\",\"time\":\".{20,}\"}$`, } for jsonLog, expression := range logs { data, err := jsonLog.MarshalJSON() diff --git a/fn/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes_test.go b/fn/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes_test.go index 6d6ad2158..41049aaea 100644 --- a/fn/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes_test.go @@ -8,21 +8,21 @@ import ( func TestJSONLogsMarshalJSONBuf(t *testing.T) { logs := map[*JSONLogs]string{ - &JSONLogs{Log: []byte(`"A log line with \\"`)}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":}$`, - &JSONLogs{Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"time\":}$`, - &JSONLogs{Log: []byte("A log line with \r")}: `^{\"log\":\"A log line with \\r\",\"time\":}$`, - &JSONLogs{Log: []byte("A log line with & < >")}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":}$`, - &JSONLogs{Log: []byte("A log line with utf8 : 🚀 ψ ω β")}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":}$`, - &JSONLogs{Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":}$`, - &JSONLogs{Stream: "stdout", Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"stream\":\"stdout\",\"time\":}$`, - &JSONLogs{Created: "time"}: 
`^{\"time\":time}$`, - &JSONLogs{}: `^{\"time\":}$`, + {Log: []byte(`"A log line with \\"`)}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":}$`, + {Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"time\":}$`, + {Log: []byte("A log line with \r")}: `^{\"log\":\"A log line with \\r\",\"time\":}$`, + {Log: []byte("A log line with & < >")}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":}$`, + {Log: []byte("A log line with utf8 : 🚀 ψ ω β")}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":}$`, + {Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":}$`, + {Stream: "stdout", Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"stream\":\"stdout\",\"time\":}$`, + {Created: "time"}: `^{\"time\":time}$`, + {}: `^{\"time\":}$`, // These ones are a little weird - &JSONLogs{Log: []byte("\u2028 \u2029")}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":}$`, - &JSONLogs{Log: []byte{0xaF}}: `^{\"log\":\"\\ufffd\",\"time\":}$`, - &JSONLogs{Log: []byte{0x7F}}: `^{\"log\":\"\x7f\",\"time\":}$`, + {Log: []byte("\u2028 \u2029")}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":}$`, + {Log: []byte{0xaF}}: `^{\"log\":\"\\ufffd\",\"time\":}$`, + {Log: []byte{0x7F}}: `^{\"log\":\"\x7f\",\"time\":}$`, // with raw attributes - &JSONLogs{Log: []byte("A log line"), RawAttrs: []byte(`{"hello":"world","value":1234}`)}: `^{\"log\":\"A log line\",\"attrs\":{\"hello\":\"world\",\"value\":1234},\"time\":}$`, + {Log: []byte("A log line"), RawAttrs: []byte(`{"hello":"world","value":1234}`)}: `^{\"log\":\"A log line\",\"attrs\":{\"hello\":\"world\",\"value\":1234},\"time\":}$`, } for jsonLog, expression := range logs { var buf bytes.Buffer diff --git a/fn/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/fn/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go index c3b1371cd..dc785d618 100644 --- a/fn/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go +++ b/fn/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go @@ -36,7 
+36,8 @@ type JSONProgress struct { Total int64 `json:"total,omitempty"` Start int64 `json:"start,omitempty"` // If true, don't show xB/yB - HideCounts bool `json:"hidecounts,omitempty"` + HideCounts bool `json:"hidecounts,omitempty"` + Units string `json:"units,omitempty"` } func (p *JSONProgress) String() string { @@ -55,11 +56,16 @@ func (p *JSONProgress) String() string { if p.Current <= 0 && p.Total <= 0 { return "" } - current := units.HumanSize(float64(p.Current)) if p.Total <= 0 { - return fmt.Sprintf("%8v", current) + switch p.Units { + case "": + current := units.HumanSize(float64(p.Current)) + return fmt.Sprintf("%8v", current) + default: + return fmt.Sprintf("%d %s", p.Current, p.Units) + } } - total := units.HumanSize(float64(p.Total)) + percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 if percentage > 50 { percentage = 50 @@ -73,13 +79,25 @@ func (p *JSONProgress) String() string { pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) } - if !p.HideCounts { + switch { + case p.HideCounts: + case p.Units == "": // no units, use bytes + current := units.HumanSize(float64(p.Current)) + total := units.HumanSize(float64(p.Total)) + numbersBox = fmt.Sprintf("%8v/%v", current, total) if p.Current > p.Total { // remove total display if the reported current is wonky. numbersBox = fmt.Sprintf("%8v", current) } + default: + numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units) + + if p.Current > p.Total { + // remove total display if the reported current is wonky. + numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units) + } } if p.Current > 0 && p.Start > 0 && percentage < 50 { @@ -109,7 +127,7 @@ type JSONMessage struct { TimeNano int64 `json:"timeNano,omitempty"` Error *JSONError `json:"errorDetail,omitempty"` ErrorMessage string `json:"error,omitempty"` //deprecated - // Aux contains out-of-band data, such as digests for push signing. 
+ // Aux contains out-of-band data, such as digests for push signing and image id after building. Aux *json.RawMessage `json:"aux,omitempty"` } diff --git a/fn/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go b/fn/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go index ce3b6de8c..c3ed6c046 100644 --- a/fn/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go @@ -65,22 +65,50 @@ func TestProgress(t *testing.T) { if jp5.String() != expected { t.Fatalf("Expected %q, got %q", expected, jp5.String()) } + + expected = "[=========================> ] 50/100 units" + if termsz != nil && termsz.Width <= 110 { + expected = " 50/100 units" + } + jp6 := JSONProgress{Current: 50, Total: 100, Units: "units"} + if jp6.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp6.String()) + } + + // this number can't be negative + expected = "[==================================================>] 50 units" + if termsz != nil && termsz.Width <= 110 { + expected = " 50 units" + } + jp7 := JSONProgress{Current: 50, Total: 40, Units: "units"} + if jp7.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp7.String()) + } + + expected = "[=========================> ] " + if termsz != nil && termsz.Width <= 110 { + expected = "" + } + jp8 := JSONProgress{Current: 50, Total: 100, HideCounts: true} + if jp8.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp8.String()) + } } func TestJSONMessageDisplay(t *testing.T) { now := time.Now() messages := map[JSONMessage][]string{ // Empty - JSONMessage{}: {"\n", "\n"}, + {}: {"\n", "\n"}, // Status - JSONMessage{ + { Status: "status", }: { "status\n", "status\n", }, // General - JSONMessage{ + { Time: now.Unix(), ID: "ID", From: "From", @@ -90,7 +118,7 @@ func TestJSONMessageDisplay(t *testing.T) { fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 
0).Format(jsonlog.RFC3339NanoFixed)), }, // General, with nano precision time - JSONMessage{ + { TimeNano: now.UnixNano(), ID: "ID", From: "From", @@ -100,7 +128,7 @@ func TestJSONMessageDisplay(t *testing.T) { fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), }, // General, with both times Nano is preferred - JSONMessage{ + { Time: now.Unix(), TimeNano: now.UnixNano(), ID: "ID", @@ -111,7 +139,7 @@ func TestJSONMessageDisplay(t *testing.T) { fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), }, // Stream over status - JSONMessage{ + { Status: "status", Stream: "stream", }: { @@ -119,7 +147,7 @@ func TestJSONMessageDisplay(t *testing.T) { "stream", }, // With progress message - JSONMessage{ + { Status: "status", ProgressMessage: "progressMessage", }: { @@ -127,7 +155,7 @@ func TestJSONMessageDisplay(t *testing.T) { "status progressMessage", }, // With progress, stream empty - JSONMessage{ + { Status: "status", Stream: "", Progress: &JSONProgress{Current: 1}, diff --git a/fn/vendor/github.com/docker/docker/pkg/listeners/listeners_solaris.go b/fn/vendor/github.com/docker/docker/pkg/listeners/listeners_solaris.go index 0482f3d73..c9003bcf6 100644 --- a/fn/vendor/github.com/docker/docker/pkg/listeners/listeners_solaris.go +++ b/fn/vendor/github.com/docker/docker/pkg/listeners/listeners_solaris.go @@ -22,10 +22,12 @@ func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) (ls []net.List case "unix": gid, err := lookupGID(socketGroup) if err != nil { - if socketGroup != defaultSocketGroup { - return nil, err + if socketGroup != "" { + if socketGroup != defaultSocketGroup { + return nil, err + } + logrus.Warnf("could not change group %s to %s: %v", addr, defaultSocketGroup, err) } - logrus.Warnf("could not change group %s to %s: %v", addr, defaultSocketGroup, err) gid = os.Getgid() } l, err := sockets.NewUnixSocket(addr, gid) diff --git 
a/fn/vendor/github.com/docker/docker/pkg/listeners/listeners_unix.go b/fn/vendor/github.com/docker/docker/pkg/listeners/listeners_unix.go index 23ead610b..25c98fba1 100644 --- a/fn/vendor/github.com/docker/docker/pkg/listeners/listeners_unix.go +++ b/fn/vendor/github.com/docker/docker/pkg/listeners/listeners_unix.go @@ -35,10 +35,12 @@ func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listene case "unix": gid, err := lookupGID(socketGroup) if err != nil { - if socketGroup != defaultSocketGroup { - return nil, err + if socketGroup != "" { + if socketGroup != defaultSocketGroup { + return nil, err + } + logrus.Warnf("could not change group %s to %s: %v", addr, defaultSocketGroup, err) } - logrus.Warnf("could not change group %s to %s: %v", addr, defaultSocketGroup, err) gid = os.Getgid() } l, err := sockets.NewUnixSocket(addr, gid) diff --git a/fn/vendor/github.com/docker/docker/pkg/mount/mount.go b/fn/vendor/github.com/docker/docker/pkg/mount/mount.go index a57f3979b..c9fdfd694 100644 --- a/fn/vendor/github.com/docker/docker/pkg/mount/mount.go +++ b/fn/vendor/github.com/docker/docker/pkg/mount/mount.go @@ -1,5 +1,10 @@ package mount +import ( + "sort" + "strings" +) + // GetMounts retrieves a list of mounts for the current running process. func GetMounts() ([]*Info, error) { return parseMountTable() @@ -53,3 +58,29 @@ func Unmount(target string) error { } return unmount(target, mntDetach) } + +// RecursiveUnmount unmounts the target and all mounts underneath, starting with +// the deepsest mount first. 
+func RecursiveUnmount(target string) error { + mounts, err := GetMounts() + if err != nil { + return err + } + + // Make the deepest mount be first + sort.Sort(sort.Reverse(byMountpoint(mounts))) + + for i, m := range mounts { + if !strings.HasPrefix(m.Mountpoint, target) { + continue + } + if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 { + if mounted, err := Mounted(m.Mountpoint); err != nil || mounted { + return err + } + // Ignore errors for submounts and continue trying to unmount others + // The final unmount should fail if there ane any submounts remaining + } + } + return nil +} diff --git a/fn/vendor/github.com/docker/docker/pkg/mount/mountinfo.go b/fn/vendor/github.com/docker/docker/pkg/mount/mountinfo.go index e3fc3535e..ff4cc1d86 100644 --- a/fn/vendor/github.com/docker/docker/pkg/mount/mountinfo.go +++ b/fn/vendor/github.com/docker/docker/pkg/mount/mountinfo.go @@ -38,3 +38,17 @@ type Info struct { // VfsOpts represents per super block options. VfsOpts string } + +type byMountpoint []*Info + +func (by byMountpoint) Len() int { + return len(by) +} + +func (by byMountpoint) Less(i, j int) bool { + return by[i].Mountpoint < by[j].Mountpoint +} + +func (by byMountpoint) Swap(i, j int) { + by[i], by[j] = by[j], by[i] +} diff --git a/fn/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go b/fn/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go index d732a4795..2f869ed92 100644 --- a/fn/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go +++ b/fn/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go @@ -2,8 +2,7 @@ package namesgenerator import ( "fmt" - - "github.com/docker/docker/pkg/random" + "math/rand" ) var ( @@ -594,15 +593,14 @@ var ( // formatted as "adjective_surname". For example 'focused_turing'. 
If retry is non-zero, a random // integer between 0 and 10 will be added to the end of the name, e.g `focused_turing3` func GetRandomName(retry int) string { - rnd := random.Rand begin: - name := fmt.Sprintf("%s_%s", left[rnd.Intn(len(left))], right[rnd.Intn(len(right))]) + name := fmt.Sprintf("%s_%s", left[rand.Intn(len(left))], right[rand.Intn(len(right))]) if name == "boring_wozniak" /* Steve Wozniak is not boring */ { goto begin } if retry > 0 { - name = fmt.Sprintf("%s%d", name, rnd.Intn(10)) + name = fmt.Sprintf("%s%d", name, rand.Intn(10)) } return name } diff --git a/fn/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go b/fn/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go index 88a195835..0fc3997a1 100644 --- a/fn/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go +++ b/fn/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go @@ -37,7 +37,7 @@ func New(path string) (*PIDFile, error) { return nil, err } // Note MkdirAll returns nil if a directory already exists - if err := system.MkdirAll(filepath.Dir(path), os.FileMode(0755)); err != nil { + if err := system.MkdirAll(filepath.Dir(path), os.FileMode(0755), ""); err != nil { return nil, err } if err := ioutil.WriteFile(path, []byte(fmt.Sprintf("%d", os.Getpid())), 0644); err != nil { diff --git a/fn/vendor/github.com/docker/docker/pkg/platform/utsname_int8_test.go b/fn/vendor/github.com/docker/docker/pkg/platform/utsname_int8_test.go new file mode 100644 index 000000000..9dada2359 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/platform/utsname_int8_test.go @@ -0,0 +1,16 @@ +// +build linux,386 linux,amd64 linux,arm64 + +package platform + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCharToString(t *testing.T) { + machineInBytes := [65]int8{120, 56, 54, 95, 54, 52} + machineInString := charsToString(machineInBytes) + assert.NotNil(t, machineInString, "Unable to convert char into string.") + assert.Equal(t, string("x86_64"), machineInString, "Parsed 
machine code not equal.") +} diff --git a/fn/vendor/github.com/docker/docker/pkg/platform/utsname_uint8_test.go b/fn/vendor/github.com/docker/docker/pkg/platform/utsname_uint8_test.go new file mode 100644 index 000000000..444b83bdb --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/platform/utsname_uint8_test.go @@ -0,0 +1,16 @@ +// +build linux,arm linux,ppc64 linux,ppc64le s390x + +package platform + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestTestCharToString(t *testing.T) { + machineInBytes := [65]uint8{120, 56, 54, 95, 54, 52} + machineInString := charsToString(machineInBytes) + assert.NotNil(t, machineInString, "Unable to convert char into string.") + assert.Equal(t, string("x86_64"), machineInString, "Parsed machine code not equal.") +} diff --git a/fn/vendor/github.com/docker/docker/pkg/plugins/client.go b/fn/vendor/github.com/docker/docker/pkg/plugins/client.go index e8e730eb5..f221a46fc 100644 --- a/fn/vendor/github.com/docker/docker/pkg/plugins/client.go +++ b/fn/vendor/github.com/docker/docker/pkg/plugins/client.go @@ -57,20 +57,20 @@ func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) { } // NewClientWithTimeout creates a new plugin client (http). -func NewClientWithTimeout(addr string, tlsConfig *tlsconfig.Options, timeoutInSecs int) (*Client, error) { +func NewClientWithTimeout(addr string, tlsConfig *tlsconfig.Options, timeout time.Duration) (*Client, error) { clientTransport, err := newTransport(addr, tlsConfig) if err != nil { return nil, err } - return newClientWithTransport(clientTransport, timeoutInSecs), nil + return newClientWithTransport(clientTransport, timeout), nil } // newClientWithTransport creates a new plugin client with a given transport. 
-func newClientWithTransport(tr transport.Transport, timeoutInSecs int) *Client { +func newClientWithTransport(tr transport.Transport, timeout time.Duration) *Client { return &Client{ http: &http.Client{ Transport: tr, - Timeout: time.Duration(timeoutInSecs) * time.Second, + Timeout: timeout, }, requestFactory: tr, } @@ -129,15 +129,15 @@ func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) } func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) { - req, err := c.requestFactory.NewRequest(serviceMethod, data) - if err != nil { - return nil, err - } - var retries int start := time.Now() for { + req, err := c.requestFactory.NewRequest(serviceMethod, data) + if err != nil { + return nil, err + } + resp, err := c.http.Do(req) if err != nil { if !retry { diff --git a/fn/vendor/github.com/docker/docker/pkg/plugins/client_test.go b/fn/vendor/github.com/docker/docker/pkg/plugins/client_test.go index 9faad86a1..7c519a276 100644 --- a/fn/vendor/github.com/docker/docker/pkg/plugins/client_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/plugins/client_test.go @@ -1,16 +1,19 @@ package plugins import ( + "bytes" + "encoding/json" "io" "net/http" "net/http/httptest" "net/url" - "reflect" + "strings" "testing" "time" "github.com/docker/docker/pkg/plugins/transport" "github.com/docker/go-connections/tlsconfig" + "github.com/stretchr/testify/assert" ) var ( @@ -38,6 +41,26 @@ func TestFailedConnection(t *testing.T) { } } +func TestFailOnce(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + failed := false + mux.HandleFunc("/Test.FailOnce", func(w http.ResponseWriter, r *http.Request) { + if !failed { + failed = true + panic("Plugin not ready") + } + }) + + c, _ := NewClient(addr, &tlsconfig.Options{InsecureSkipVerify: true}) + b := strings.NewReader("body") + _, err := c.callWithRetry("Test.FailOnce", b, true) + if err != nil { + t.Fatal(err) + } +} + func 
TestEchoInputOutput(t *testing.T) { addr := setupRemotePluginServer() defer teardownRemotePluginServer() @@ -62,9 +85,7 @@ func TestEchoInputOutput(t *testing.T) { t.Fatal(err) } - if !reflect.DeepEqual(output, m) { - t.Fatalf("Expected %v, was %v\n", m, output) - } + assert.Equal(t, m, output) err = c.Call("Test.Echo", nil, nil) if err != nil { t.Fatal(err) @@ -132,3 +153,82 @@ func TestClientScheme(t *testing.T) { } } } + +func TestNewClientWithTimeout(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}} + + mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) { + time.Sleep(time.Duration(600) * time.Millisecond) + io.Copy(w, r.Body) + }) + + // setting timeout of 500ms + timeout := time.Duration(500) * time.Millisecond + c, _ := NewClientWithTimeout(addr, &tlsconfig.Options{InsecureSkipVerify: true}, timeout) + var output Manifest + err := c.Call("Test.Echo", m, &output) + if err == nil { + t.Fatal("Expected timeout error") + } +} + +func TestClientStream(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}} + var output Manifest + + mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + t.Fatalf("Expected POST, got %s", r.Method) + } + + header := w.Header() + header.Set("Content-Type", transport.VersionMimetype) + + io.Copy(w, r.Body) + }) + + c, _ := NewClient(addr, &tlsconfig.Options{InsecureSkipVerify: true}) + body, err := c.Stream("Test.Echo", m) + if err != nil { + t.Fatal(err) + } + defer body.Close() + if err := json.NewDecoder(body).Decode(&output); err != nil { + t.Fatalf("Test.Echo: error reading plugin resp: %v", err) + } + assert.Equal(t, m, output) +} + +func TestClientSendFile(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + m := 
Manifest{[]string{"VolumeDriver", "NetworkDriver"}} + var output Manifest + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(m); err != nil { + t.Fatal(err) + } + mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + t.Fatalf("Expected POST, got %s\n", r.Method) + } + + header := w.Header() + header.Set("Content-Type", transport.VersionMimetype) + + io.Copy(w, r.Body) + }) + + c, _ := NewClient(addr, &tlsconfig.Options{InsecureSkipVerify: true}) + if err := c.SendFile("Test.Echo", &buf, &output); err != nil { + t.Fatal(err) + } + assert.Equal(t, m, output) +} diff --git a/fn/vendor/github.com/docker/docker/pkg/plugins/discovery_unix_test.go b/fn/vendor/github.com/docker/docker/pkg/plugins/discovery_unix_test.go index 1b232b7ad..66f50353c 100644 --- a/fn/vendor/github.com/docker/docker/pkg/plugins/discovery_unix_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/plugins/discovery_unix_test.go @@ -4,6 +4,7 @@ package plugins import ( "fmt" + "io/ioutil" "net" "os" "path/filepath" @@ -59,3 +60,41 @@ func TestLocalSocket(t *testing.T) { l.Close() } } + +func TestScan(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + pluginNames, err := Scan() + if err != nil { + t.Fatal(err) + } + if pluginNames != nil { + t.Fatal("Plugin names should be empty.") + } + + path := filepath.Join(tmpdir, "echo.spec") + addr := "unix://var/lib/docker/plugins/echo.sock" + name := "echo" + + err = os.MkdirAll(filepath.Dir(path), 0755) + if err != nil { + t.Fatal(err) + } + + err = ioutil.WriteFile(path, []byte(addr), 0644) + if err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + p, err := r.Plugin(name) + + pluginNamesNotEmpty, err := Scan() + if err != nil { + t.Fatal(err) + } + if p.Name() != pluginNamesNotEmpty[0] { + t.Fatalf("Unable to scan plugin with name %s", p.name) + } +} diff --git a/fn/vendor/github.com/docker/docker/pkg/plugins/plugin_test.go 
b/fn/vendor/github.com/docker/docker/pkg/plugins/plugin_test.go index b19c0d52f..00fcb85f5 100644 --- a/fn/vendor/github.com/docker/docker/pkg/plugins/plugin_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/plugins/plugin_test.go @@ -1,12 +1,26 @@ package plugins import ( + "bytes" + "encoding/json" "errors" + "io" + "io/ioutil" + "net/http" "path/filepath" "runtime" "sync" "testing" "time" + + "github.com/docker/docker/pkg/plugins/transport" + "github.com/docker/go-connections/tlsconfig" + "github.com/stretchr/testify/assert" +) + +const ( + fruitPlugin = "fruit" + fruitImplements = "apple" ) // regression test for deadlock in handlers @@ -42,3 +56,101 @@ func testActive(t *testing.T, p *Plugin) { } } + +func TestGet(t *testing.T) { + p := &Plugin{name: fruitPlugin, activateWait: sync.NewCond(&sync.Mutex{})} + p.Manifest = &Manifest{Implements: []string{fruitImplements}} + storage.plugins[fruitPlugin] = p + + plugin, err := Get(fruitPlugin, fruitImplements) + if err != nil { + t.Fatal(err) + } + if p.Name() != plugin.Name() { + t.Fatalf("No matching plugin with name %s found", plugin.Name()) + } + if plugin.Client() != nil { + t.Fatal("expected nil Client but found one") + } + if !plugin.IsV1() { + t.Fatal("Expected true for V1 plugin") + } + + // check negative case where plugin fruit doesn't implement banana + _, err = Get("fruit", "banana") + assert.Equal(t, err, ErrNotImplements) + + // check negative case where plugin vegetable doesn't exist + _, err = Get("vegetable", "potato") + assert.Equal(t, err, ErrNotFound) + +} + +func TestPluginWithNoManifest(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + m := Manifest{[]string{fruitImplements}} + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(m); err != nil { + t.Fatal(err) + } + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + t.Fatalf("Expected POST, got %s\n", r.Method) + } + + header := 
w.Header() + header.Set("Content-Type", transport.VersionMimetype) + + io.Copy(w, &buf) + }) + + p := &Plugin{ + name: fruitPlugin, + activateWait: sync.NewCond(&sync.Mutex{}), + Addr: addr, + TLSConfig: &tlsconfig.Options{InsecureSkipVerify: true}, + } + storage.plugins[fruitPlugin] = p + + plugin, err := Get(fruitPlugin, fruitImplements) + if err != nil { + t.Fatal(err) + } + if p.Name() != plugin.Name() { + t.Fatalf("No matching plugin with name %s found", plugin.Name()) + } +} + +func TestGetAll(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + p := filepath.Join(tmpdir, "example.json") + spec := `{ + "Name": "example", + "Addr": "https://example.com/docker/plugin" +}` + + if err := ioutil.WriteFile(p, []byte(spec), 0644); err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + plugin, err := r.Plugin("example") + if err != nil { + t.Fatal(err) + } + plugin.Manifest = &Manifest{Implements: []string{"apple"}} + storage.plugins["example"] = plugin + + fetchedPlugins, err := GetAll("apple") + if err != nil { + t.Fatal(err) + } + if fetchedPlugins[0].Name() != plugin.Name() { + t.Fatalf("Expected to get plugin with name %s", plugin.Name()) + } +} diff --git a/fn/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser_test.go b/fn/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser_test.go index a1b1ac956..fe7fa5ade 100644 --- a/fn/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser_test.go @@ -136,7 +136,7 @@ func TestParseWithMultipleFuncs(t *testing.T) { } } -func TestParseWithUnamedReturn(t *testing.T) { +func TestParseWithUnnamedReturn(t *testing.T) { _, err := Parse(testFixture, "Fooer4") if !strings.HasSuffix(err.Error(), errBadReturn.Error()) { t.Fatalf("expected ErrBadReturn, got %v", err) diff --git a/fn/vendor/github.com/docker/docker/pkg/plugins/transport/http_test.go 
b/fn/vendor/github.com/docker/docker/pkg/plugins/transport/http_test.go new file mode 100644 index 000000000..b724fd0df --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/plugins/transport/http_test.go @@ -0,0 +1,20 @@ +package transport + +import ( + "io" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestHTTPTransport(t *testing.T) { + var r io.Reader + roundTripper := &http.Transport{} + newTransport := NewHTTPTransport(roundTripper, "http", "0.0.0.0") + request, err := newTransport.NewRequest("", r) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, "POST", request.Method) +} diff --git a/fn/vendor/github.com/docker/docker/pkg/pools/pools.go b/fn/vendor/github.com/docker/docker/pkg/pools/pools.go index 5c5aead69..6a111a3ba 100644 --- a/fn/vendor/github.com/docker/docker/pkg/pools/pools.go +++ b/fn/vendor/github.com/docker/docker/pkg/pools/pools.go @@ -17,15 +17,16 @@ import ( "github.com/docker/docker/pkg/ioutils" ) +const buffer32K = 32 * 1024 + var ( // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) + buffer32KPool = newBufferPoolWithSize(buffer32K) ) -const buffer32K = 32 * 1024 - // BufioReaderPool is a bufio reader that uses sync.Pool. 
type BufioReaderPool struct { pool sync.Pool @@ -54,11 +55,31 @@ func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { bufPool.pool.Put(b) } +type bufferPool struct { + pool sync.Pool +} + +func newBufferPoolWithSize(size int) *bufferPool { + return &bufferPool{ + pool: sync.Pool{ + New: func() interface{} { return make([]byte, size) }, + }, + } +} + +func (bp *bufferPool) Get() []byte { + return bp.pool.Get().([]byte) +} + +func (bp *bufferPool) Put(b []byte) { + bp.pool.Put(b) +} + // Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. func Copy(dst io.Writer, src io.Reader) (written int64, err error) { - buf := BufioReader32KPool.Get(src) - written, err = io.Copy(dst, buf) - BufioReader32KPool.Put(buf) + buf := buffer32KPool.Get() + written, err = io.CopyBuffer(dst, src, buf) + buffer32KPool.Put(buf) return } diff --git a/fn/vendor/github.com/docker/docker/pkg/pools/pools_test.go b/fn/vendor/github.com/docker/docker/pkg/pools/pools_test.go index 1661b780c..d71cb99ac 100644 --- a/fn/vendor/github.com/docker/docker/pkg/pools/pools_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/pools/pools_test.go @@ -159,3 +159,8 @@ func TestNewWriteCloserWrapperWithAWriteCloser(t *testing.T) { t.Fatalf("The ReaderCloser should have been closed, it is not.") } } + +func TestBufferPoolPutAndGet(t *testing.T) { + buf := buffer32KPool.Get() + buffer32KPool.Put(buf) +} diff --git a/fn/vendor/github.com/docker/docker/pkg/progress/progress.go b/fn/vendor/github.com/docker/docker/pkg/progress/progress.go index e78fc120b..7c3d3a514 100644 --- a/fn/vendor/github.com/docker/docker/pkg/progress/progress.go +++ b/fn/vendor/github.com/docker/docker/pkg/progress/progress.go @@ -18,6 +18,8 @@ type Progress struct { // If true, don't show xB/yB HideCounts bool + // If not empty, use units instead of bytes for counts + Units string // Aux contains extra information not presented to the user, such as // digests for push signing. 
diff --git a/fn/vendor/github.com/docker/docker/pkg/promise/promise_test.go b/fn/vendor/github.com/docker/docker/pkg/promise/promise_test.go new file mode 100644 index 000000000..287213b50 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/promise/promise_test.go @@ -0,0 +1,25 @@ +package promise + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGo(t *testing.T) { + errCh := Go(functionWithError) + er := <-errCh + require.EqualValues(t, "Error Occurred", er.Error()) + + noErrCh := Go(functionWithNoError) + er = <-noErrCh + require.Nil(t, er) +} + +func functionWithError() (err error) { + return errors.New("Error Occurred") +} +func functionWithNoError() (err error) { + return nil +} diff --git a/fn/vendor/github.com/docker/docker/pkg/pubsub/publisher.go b/fn/vendor/github.com/docker/docker/pkg/pubsub/publisher.go index 09364617e..8e30d16ae 100644 --- a/fn/vendor/github.com/docker/docker/pkg/pubsub/publisher.go +++ b/fn/vendor/github.com/docker/docker/pkg/pubsub/publisher.go @@ -53,6 +53,16 @@ func (p *Publisher) SubscribeTopic(topic topicFunc) chan interface{} { return ch } +// SubscribeTopicWithBuffer adds a new subscriber that filters messages sent by a topic. +// The returned channel has a buffer of the specified size. +func (p *Publisher) SubscribeTopicWithBuffer(topic topicFunc, buffer int) chan interface{} { + ch := make(chan interface{}, buffer) + p.m.Lock() + p.subscribers[ch] = topic + p.m.Unlock() + return ch +} + // Evict removes the specified subscriber from receiving any more messages. 
func (p *Publisher) Evict(sub chan interface{}) { p.m.Lock() diff --git a/fn/vendor/github.com/docker/docker/pkg/random/random.go b/fn/vendor/github.com/docker/docker/pkg/random/random.go deleted file mode 100644 index 70de4d130..000000000 --- a/fn/vendor/github.com/docker/docker/pkg/random/random.go +++ /dev/null @@ -1,71 +0,0 @@ -package random - -import ( - cryptorand "crypto/rand" - "io" - "math" - "math/big" - "math/rand" - "sync" - "time" -) - -// Rand is a global *rand.Rand instance, which initialized with NewSource() source. -var Rand = rand.New(NewSource()) - -// Reader is a global, shared instance of a pseudorandom bytes generator. -// It doesn't consume entropy. -var Reader io.Reader = &reader{rnd: Rand} - -// copypaste from standard math/rand -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} - -// NewSource returns math/rand.Source safe for concurrent use and initialized -// with current unix-nano timestamp -func NewSource() rand.Source { - var seed int64 - if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { - // This should not happen, but worst-case fallback to time-based seed. 
- seed = time.Now().UnixNano() - } else { - seed = cryptoseed.Int64() - } - return &lockedSource{ - src: rand.NewSource(seed), - } -} - -type reader struct { - rnd *rand.Rand -} - -func (r *reader) Read(b []byte) (int, error) { - i := 0 - for { - val := r.rnd.Int63() - for val > 0 { - b[i] = byte(val) - i++ - if i == len(b) { - return i, nil - } - val >>= 8 - } - } -} diff --git a/fn/vendor/github.com/docker/docker/pkg/random/random_test.go b/fn/vendor/github.com/docker/docker/pkg/random/random_test.go deleted file mode 100644 index cf405f78c..000000000 --- a/fn/vendor/github.com/docker/docker/pkg/random/random_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package random - -import ( - "math/rand" - "sync" - "testing" -) - -// for go test -v -race -func TestConcurrency(t *testing.T) { - rnd := rand.New(NewSource()) - var wg sync.WaitGroup - - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - rnd.Int63() - wg.Done() - }() - } - wg.Wait() -} diff --git a/fn/vendor/github.com/docker/docker/pkg/reexec/reexec_test.go b/fn/vendor/github.com/docker/docker/pkg/reexec/reexec_test.go new file mode 100644 index 000000000..39e87a4a2 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/reexec/reexec_test.go @@ -0,0 +1,53 @@ +package reexec + +import ( + "os" + "os/exec" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func init() { + Register("reexec", func() { + panic("Return Error") + }) + Init() +} + +func TestRegister(t *testing.T) { + defer func() { + if r := recover(); r != nil { + require.Equal(t, `reexec func already registered under name "reexec"`, r) + } + }() + Register("reexec", func() {}) +} + +func TestCommand(t *testing.T) { + cmd := Command("reexec") + w, err := cmd.StdinPipe() + require.NoError(t, err, "Error on pipe creation: %v", err) + defer w.Close() + + err = cmd.Start() + require.NoError(t, err, "Error on re-exec cmd: %v", err) + err = cmd.Wait() + require.EqualError(t, err, "exit status 2") +} + +func 
TestNaiveSelf(t *testing.T) { + if os.Getenv("TEST_CHECK") == "1" { + os.Exit(2) + } + cmd := exec.Command(naiveSelf(), "-test.run=TestNaiveSelf") + cmd.Env = append(os.Environ(), "TEST_CHECK=1") + err := cmd.Start() + require.NoError(t, err, "Unable to start command") + err = cmd.Wait() + require.EqualError(t, err, "exit status 2") + + os.Args[0] = "mkdir" + assert.NotEqual(t, naiveSelf(), os.Args[0]) +} diff --git a/fn/vendor/github.com/docker/docker/pkg/registrar/registrar_test.go b/fn/vendor/github.com/docker/docker/pkg/registrar/registrar_test.go index 0c1ef312a..70f8084b3 100644 --- a/fn/vendor/github.com/docker/docker/pkg/registrar/registrar_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/registrar/registrar_test.go @@ -60,7 +60,7 @@ func TestGetNames(t *testing.T) { } if !reflect.DeepEqual(names, names2) { - t.Fatalf("Exepected: %v, Got: %v", names, names2) + t.Fatalf("Expected: %v, Got: %v", names, names2) } } diff --git a/fn/vendor/github.com/docker/docker/pkg/signal/signal_linux_test.go b/fn/vendor/github.com/docker/docker/pkg/signal/signal_linux_test.go new file mode 100644 index 000000000..32c056fe4 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/signal/signal_linux_test.go @@ -0,0 +1,58 @@ +// +build darwin linux solaris + +package signal + +import ( + "os" + "syscall" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestCatchAll(t *testing.T) { + sigs := make(chan os.Signal, 1) + CatchAll(sigs) + defer StopCatch(sigs) + + listOfSignals := map[string]string{ + "CONT": syscall.SIGCONT.String(), + "HUP": syscall.SIGHUP.String(), + "CHLD": syscall.SIGCHLD.String(), + "ILL": syscall.SIGILL.String(), + "FPE": syscall.SIGFPE.String(), + "CLD": syscall.SIGCLD.String(), + } + + for sigStr := range listOfSignals { + signal, ok := SignalMap[sigStr] + if ok { + go func() { + time.Sleep(1 * time.Millisecond) + syscall.Kill(syscall.Getpid(), signal) + }() + + s := <-sigs + assert.EqualValues(t, s.String(), signal.String()) + 
} + + } +} + +func TestStopCatch(t *testing.T) { + signal, _ := SignalMap["HUP"] + channel := make(chan os.Signal, 1) + CatchAll(channel) + go func() { + + time.Sleep(1 * time.Millisecond) + syscall.Kill(syscall.Getpid(), signal) + }() + signalString := <-channel + assert.EqualValues(t, signalString.String(), signal.String()) + + StopCatch(channel) + _, ok := <-channel + assert.EqualValues(t, ok, false) +} diff --git a/fn/vendor/github.com/docker/docker/pkg/signal/signal_test.go b/fn/vendor/github.com/docker/docker/pkg/signal/signal_test.go new file mode 100644 index 000000000..df02f5bed --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/signal/signal_test.go @@ -0,0 +1,33 @@ +package signal + +import ( + "syscall" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseSignal(t *testing.T) { + _, checkAtoiError := ParseSignal("0") + assert.EqualError(t, checkAtoiError, "Invalid signal: 0") + + _, error := ParseSignal("SIG") + assert.EqualError(t, error, "Invalid signal: SIG") + + for sigStr := range SignalMap { + responseSignal, error := ParseSignal(sigStr) + assert.NoError(t, error) + signal := SignalMap[sigStr] + assert.EqualValues(t, signal, responseSignal) + } +} + +func TestValidSignalForPlatform(t *testing.T) { + isValidSignal := ValidSignalForPlatform(syscall.Signal(0)) + assert.EqualValues(t, false, isValidSignal) + + for _, sigN := range SignalMap { + isValidSignal = ValidSignalForPlatform(syscall.Signal(sigN)) + assert.EqualValues(t, true, isValidSignal) + } +} diff --git a/fn/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go b/fn/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go index b3e2c4dfd..3f992fda6 100644 --- a/fn/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go @@ -16,7 +16,7 @@ func TestNewStdWriter(t *testing.T) { } } -func TestWriteWithUnitializedStdWriter(t *testing.T) { +func TestWriteWithUninitializedStdWriter(t 
*testing.T) { writer := stdWriter{ Writer: nil, prefix: byte(Stdout), diff --git a/fn/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go b/fn/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go index f2868441e..c4f55755e 100644 --- a/fn/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go +++ b/fn/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go @@ -10,91 +10,76 @@ import ( "github.com/docker/docker/pkg/progress" ) -// StreamFormatter formats a stream, optionally using JSON. -type StreamFormatter struct { - json bool -} - -// NewStreamFormatter returns a simple StreamFormatter -func NewStreamFormatter() *StreamFormatter { - return &StreamFormatter{} -} - -// NewJSONStreamFormatter returns a StreamFormatter configured to stream json -func NewJSONStreamFormatter() *StreamFormatter { - return &StreamFormatter{true} -} - const streamNewline = "\r\n" -var streamNewlineBytes = []byte(streamNewline) +type jsonProgressFormatter struct{} -// FormatStream formats the specified stream. -func (sf *StreamFormatter) FormatStream(str string) []byte { - if sf.json { - b, err := json.Marshal(&jsonmessage.JSONMessage{Stream: str}) - if err != nil { - return sf.FormatError(err) - } - return append(b, streamNewlineBytes...) - } - return []byte(str + "\r") +func appendNewline(source []byte) []byte { + return append(source, []byte(streamNewline)...) } // FormatStatus formats the specified objects according to the specified format (and id). -func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []byte { +func FormatStatus(id, format string, a ...interface{}) []byte { str := fmt.Sprintf(format, a...) - if sf.json { - b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str}) - if err != nil { - return sf.FormatError(err) - } - return append(b, streamNewlineBytes...) 
+ b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str}) + if err != nil { + return FormatError(err) } - return []byte(str + streamNewline) + return appendNewline(b) } -// FormatError formats the specified error. -func (sf *StreamFormatter) FormatError(err error) []byte { - if sf.json { - jsonError, ok := err.(*jsonmessage.JSONError) - if !ok { - jsonError = &jsonmessage.JSONError{Message: err.Error()} - } - if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil { - return append(b, streamNewlineBytes...) - } - return []byte("{\"error\":\"format error\"}" + streamNewline) +// FormatError formats the error as a JSON object +func FormatError(err error) []byte { + jsonError, ok := err.(*jsonmessage.JSONError) + if !ok { + jsonError = &jsonmessage.JSONError{Message: err.Error()} } - return []byte("Error: " + err.Error() + streamNewline) + if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil { + return appendNewline(b) + } + return []byte(`{"error":"format error"}` + streamNewline) } -// FormatProgress formats the progress information for a specified action. -func (sf *StreamFormatter) FormatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte { +func (sf *jsonProgressFormatter) formatStatus(id, format string, a ...interface{}) []byte { + return FormatStatus(id, format, a...) +} + +// formatProgress formats the progress information for a specified action. 
+func (sf *jsonProgressFormatter) formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte { if progress == nil { progress = &jsonmessage.JSONProgress{} } - if sf.json { - var auxJSON *json.RawMessage - if aux != nil { - auxJSONBytes, err := json.Marshal(aux) - if err != nil { - return nil - } - auxJSON = new(json.RawMessage) - *auxJSON = auxJSONBytes - } - b, err := json.Marshal(&jsonmessage.JSONMessage{ - Status: action, - ProgressMessage: progress.String(), - Progress: progress, - ID: id, - Aux: auxJSON, - }) + var auxJSON *json.RawMessage + if aux != nil { + auxJSONBytes, err := json.Marshal(aux) if err != nil { return nil } - return append(b, streamNewlineBytes...) + auxJSON = new(json.RawMessage) + *auxJSON = auxJSONBytes + } + b, err := json.Marshal(&jsonmessage.JSONMessage{ + Status: action, + ProgressMessage: progress.String(), + Progress: progress, + ID: id, + Aux: auxJSON, + }) + if err != nil { + return nil + } + return appendNewline(b) +} + +type rawProgressFormatter struct{} + +func (sf *rawProgressFormatter) formatStatus(id, format string, a ...interface{}) []byte { + return []byte(fmt.Sprintf(format, a...) + streamNewline) +} + +func (sf *rawProgressFormatter) formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte { + if progress == nil { + progress = &jsonmessage.JSONProgress{} } endl := "\r" if progress.String() == "" { @@ -105,16 +90,23 @@ func (sf *StreamFormatter) FormatProgress(id, action string, progress *jsonmessa // NewProgressOutput returns a progress.Output object that can be passed to // progress.NewProgressReader. 
-func (sf *StreamFormatter) NewProgressOutput(out io.Writer, newLines bool) progress.Output { - return &progressOutput{ - sf: sf, - out: out, - newLines: newLines, - } +func NewProgressOutput(out io.Writer) progress.Output { + return &progressOutput{sf: &rawProgressFormatter{}, out: out, newLines: true} +} + +// NewJSONProgressOutput returns a progress.Output that that formats output +// using JSON objects +func NewJSONProgressOutput(out io.Writer, newLines bool) progress.Output { + return &progressOutput{sf: &jsonProgressFormatter{}, out: out, newLines: newLines} +} + +type formatProgress interface { + formatStatus(id, format string, a ...interface{}) []byte + formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte } type progressOutput struct { - sf *StreamFormatter + sf formatProgress out io.Writer newLines bool } @@ -123,10 +115,10 @@ type progressOutput struct { func (out *progressOutput) WriteProgress(prog progress.Progress) error { var formatted []byte if prog.Message != "" { - formatted = out.sf.FormatStatus(prog.ID, prog.Message) + formatted = out.sf.formatStatus(prog.ID, prog.Message) } else { - jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total, HideCounts: prog.HideCounts} - formatted = out.sf.FormatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux) + jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total, HideCounts: prog.HideCounts, Units: prog.Units} + formatted = out.sf.formatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux) } _, err := out.out.Write(formatted) if err != nil { @@ -134,39 +126,34 @@ func (out *progressOutput) WriteProgress(prog progress.Progress) error { } if out.newLines && prog.LastUpdate { - _, err = out.out.Write(out.sf.FormatStatus("", "")) + _, err = out.out.Write(out.sf.formatStatus("", "")) return err } return nil } -// StdoutFormatter is a streamFormatter that writes to the standard output. 
-type StdoutFormatter struct { +// AuxFormatter is a streamFormatter that writes aux progress messages +type AuxFormatter struct { io.Writer - *StreamFormatter } -func (sf *StdoutFormatter) Write(buf []byte) (int, error) { - formattedBuf := sf.StreamFormatter.FormatStream(string(buf)) - n, err := sf.Writer.Write(formattedBuf) - if n != len(formattedBuf) { - return n, io.ErrShortWrite +// Emit emits the given interface as an aux progress message +func (sf *AuxFormatter) Emit(aux interface{}) error { + auxJSONBytes, err := json.Marshal(aux) + if err != nil { + return err } - return len(buf), err -} - -// StderrFormatter is a streamFormatter that writes to the standard error. -type StderrFormatter struct { - io.Writer - *StreamFormatter -} - -func (sf *StderrFormatter) Write(buf []byte) (int, error) { - formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m") - n, err := sf.Writer.Write(formattedBuf) - if n != len(formattedBuf) { - return n, io.ErrShortWrite + auxJSON := new(json.RawMessage) + *auxJSON = auxJSONBytes + msgJSON, err := json.Marshal(&jsonmessage.JSONMessage{Aux: auxJSON}) + if err != nil { + return err } - return len(buf), err + msgJSON = appendNewline(msgJSON) + n, err := sf.Writer.Write(msgJSON) + if n != len(msgJSON) { + return io.ErrShortWrite + } + return err } diff --git a/fn/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter_test.go b/fn/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter_test.go index f087b92d8..c5c70d7e1 100644 --- a/fn/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter_test.go @@ -1,98 +1,77 @@ package streamformatter import ( + "bytes" "encoding/json" "errors" - "reflect" "strings" "testing" "github.com/docker/docker/pkg/jsonmessage" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestFormatStream(t *testing.T) { - sf := 
NewStreamFormatter() - res := sf.FormatStream("stream") - if string(res) != "stream"+"\r" { - t.Fatalf("%q", res) - } +func TestRawProgressFormatterFormatStatus(t *testing.T) { + sf := rawProgressFormatter{} + res := sf.formatStatus("ID", "%s%d", "a", 1) + assert.Equal(t, "a1\r\n", string(res)) } -func TestFormatJSONStatus(t *testing.T) { - sf := NewStreamFormatter() - res := sf.FormatStatus("ID", "%s%d", "a", 1) - if string(res) != "a1\r\n" { - t.Fatalf("%q", res) - } -} - -func TestFormatSimpleError(t *testing.T) { - sf := NewStreamFormatter() - res := sf.FormatError(errors.New("Error for formatter")) - if string(res) != "Error: Error for formatter\r\n" { - t.Fatalf("%q", res) - } -} - -func TestJSONFormatStream(t *testing.T) { - sf := NewJSONStreamFormatter() - res := sf.FormatStream("stream") - if string(res) != `{"stream":"stream"}`+"\r\n" { - t.Fatalf("%q", res) - } -} - -func TestJSONFormatStatus(t *testing.T) { - sf := NewJSONStreamFormatter() - res := sf.FormatStatus("ID", "%s%d", "a", 1) - if string(res) != `{"status":"a1","id":"ID"}`+"\r\n" { - t.Fatalf("%q", res) - } -} - -func TestJSONFormatSimpleError(t *testing.T) { - sf := NewJSONStreamFormatter() - res := sf.FormatError(errors.New("Error for formatter")) - if string(res) != `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}`+"\r\n" { - t.Fatalf("%q", res) - } -} - -func TestJSONFormatJSONError(t *testing.T) { - sf := NewJSONStreamFormatter() - err := &jsonmessage.JSONError{Code: 50, Message: "Json error"} - res := sf.FormatError(err) - if string(res) != `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}`+"\r\n" { - t.Fatalf("%q", res) - } -} - -func TestJSONFormatProgress(t *testing.T) { - sf := NewJSONStreamFormatter() - progress := &jsonmessage.JSONProgress{ +func TestRawProgressFormatterFormatProgress(t *testing.T) { + sf := rawProgressFormatter{} + jsonProgress := &jsonmessage.JSONProgress{ Current: 15, Total: 30, Start: 1, } - res := 
sf.FormatProgress("id", "action", progress, nil) - msg := &jsonmessage.JSONMessage{} - if err := json.Unmarshal(res, msg); err != nil { - t.Fatal(err) - } - if msg.ID != "id" { - t.Fatalf("ID must be 'id', got: %s", msg.ID) - } - if msg.Status != "action" { - t.Fatalf("Status must be 'action', got: %s", msg.Status) - } + res := sf.formatProgress("id", "action", jsonProgress, nil) + out := string(res) + assert.True(t, strings.HasPrefix(out, "action [====")) + assert.Contains(t, out, "15B/30B") + assert.True(t, strings.HasSuffix(out, "\r")) +} - // The progress will always be in the format of: +func TestFormatStatus(t *testing.T) { + res := FormatStatus("ID", "%s%d", "a", 1) + expected := `{"status":"a1","id":"ID"}` + streamNewline + assert.Equal(t, expected, string(res)) +} + +func TestFormatError(t *testing.T) { + res := FormatError(errors.New("Error for formatter")) + expected := `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}` + "\r\n" + assert.Equal(t, expected, string(res)) +} + +func TestFormatJSONError(t *testing.T) { + err := &jsonmessage.JSONError{Code: 50, Message: "Json error"} + res := FormatError(err) + expected := `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}` + streamNewline + assert.Equal(t, expected, string(res)) +} + +func TestJsonProgressFormatterFormatProgress(t *testing.T) { + sf := &jsonProgressFormatter{} + jsonProgress := &jsonmessage.JSONProgress{ + Current: 15, + Total: 30, + Start: 1, + } + res := sf.formatProgress("id", "action", jsonProgress, &AuxFormatter{Writer: &bytes.Buffer{}}) + msg := &jsonmessage.JSONMessage{} + + require.NoError(t, json.Unmarshal(res, msg)) + assert.Equal(t, "id", msg.ID) + assert.Equal(t, "action", msg.Status) + + // jsonProgress will always be in the format of: // [=========================> ] 15B/30B 412910h51m30s // The last entry '404933h7m11s' is the timeLeftBox. 
- // However, the timeLeftBox field may change as progress.String() depends on time.Now(). + // However, the timeLeftBox field may change as jsonProgress.String() depends on time.Now(). // Therefore, we have to strip the timeLeftBox from the strings to do the comparison. - // Compare the progress strings before the timeLeftBox + // Compare the jsonProgress strings before the timeLeftBox expectedProgress := "[=========================> ] 15B/30B" // if terminal column is <= 110, expectedProgressShort is expected. expectedProgressShort := " 15B/30B" @@ -102,7 +81,29 @@ func TestJSONFormatProgress(t *testing.T) { expectedProgress, expectedProgressShort, msg.ProgressMessage) } - if !reflect.DeepEqual(msg.Progress, progress) { - t.Fatal("Original progress not equals progress from FormatProgress") - } + assert.Equal(t, jsonProgress, msg.Progress) +} + +func TestJsonProgressFormatterFormatStatus(t *testing.T) { + sf := jsonProgressFormatter{} + res := sf.formatStatus("ID", "%s%d", "a", 1) + assert.Equal(t, `{"status":"a1","id":"ID"}`+streamNewline, string(res)) +} + +func TestNewJSONProgressOutput(t *testing.T) { + b := bytes.Buffer{} + b.Write(FormatStatus("id", "Downloading")) + _ = NewJSONProgressOutput(&b, false) + assert.Equal(t, `{"status":"Downloading","id":"id"}`+streamNewline, b.String()) +} + +func TestAuxFormatterEmit(t *testing.T) { + b := bytes.Buffer{} + aux := &AuxFormatter{Writer: &b} + sampleAux := &struct { + Data string + }{"Additional data"} + err := aux.Emit(sampleAux) + require.NoError(t, err) + assert.Equal(t, `{"aux":{"Data":"Additional data"}}`+streamNewline, b.String()) } diff --git a/fn/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go b/fn/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go new file mode 100644 index 000000000..141d12e20 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go @@ -0,0 +1,47 @@ +package streamformatter + +import ( + "encoding/json" + "io" + + 
"github.com/docker/docker/pkg/jsonmessage" +) + +type streamWriter struct { + io.Writer + lineFormat func([]byte) string +} + +func (sw *streamWriter) Write(buf []byte) (int, error) { + formattedBuf := sw.format(buf) + n, err := sw.Writer.Write(formattedBuf) + if n != len(formattedBuf) { + return n, io.ErrShortWrite + } + return len(buf), err +} + +func (sw *streamWriter) format(buf []byte) []byte { + msg := &jsonmessage.JSONMessage{Stream: sw.lineFormat(buf)} + b, err := json.Marshal(msg) + if err != nil { + return FormatError(err) + } + return appendNewline(b) +} + +// NewStdoutWriter returns a writer which formats the output as json message +// representing stdout lines +func NewStdoutWriter(out io.Writer) io.Writer { + return &streamWriter{Writer: out, lineFormat: func(buf []byte) string { + return string(buf) + }} +} + +// NewStderrWriter returns a writer which formats the output as json message +// representing stderr lines +func NewStderrWriter(out io.Writer) io.Writer { + return &streamWriter{Writer: out, lineFormat: func(buf []byte) string { + return "\033[91m" + string(buf) + "\033[0m" + }} +} diff --git a/fn/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter_test.go b/fn/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter_test.go new file mode 100644 index 000000000..4935cc595 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter_test.go @@ -0,0 +1,35 @@ +package streamformatter + +import ( + "testing" + + "bytes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestStreamWriterStdout(t *testing.T) { + buffer := &bytes.Buffer{} + content := "content" + sw := NewStdoutWriter(buffer) + size, err := sw.Write([]byte(content)) + + require.NoError(t, err) + assert.Equal(t, len(content), size) + + expected := `{"stream":"content"}` + streamNewline + assert.Equal(t, expected, buffer.String()) +} + +func TestStreamWriterStderr(t *testing.T) { + buffer := 
&bytes.Buffer{} + content := "content" + sw := NewStderrWriter(buffer) + size, err := sw.Write([]byte(content)) + + require.NoError(t, err) + assert.Equal(t, len(content), size) + + expected := `{"stream":"\u001b[91mcontent\u001b[0m"}` + streamNewline + assert.Equal(t, expected, buffer.String()) +} diff --git a/fn/vendor/github.com/docker/docker/pkg/stringid/stringid.go b/fn/vendor/github.com/docker/docker/pkg/stringid/stringid.go index 82f85596c..a0c7c42a0 100644 --- a/fn/vendor/github.com/docker/docker/pkg/stringid/stringid.go +++ b/fn/vendor/github.com/docker/docker/pkg/stringid/stringid.go @@ -2,15 +2,17 @@ package stringid import ( - "crypto/rand" + cryptorand "crypto/rand" "encoding/hex" "fmt" "io" + "math" + "math/big" + "math/rand" "regexp" "strconv" "strings" - - "github.com/docker/docker/pkg/random" + "time" ) const shortLen = 12 @@ -39,12 +41,8 @@ func TruncateID(id string) string { return id } -func generateID(crypto bool) string { +func generateID(r io.Reader) string { b := make([]byte, 32) - r := random.Reader - if crypto { - r = rand.Reader - } for { if _, err := io.ReadFull(r, b); err != nil { panic(err) // This shouldn't happen @@ -62,14 +60,14 @@ func generateID(crypto bool) string { // GenerateRandomID returns a unique id. func GenerateRandomID() string { - return generateID(true) + return generateID(cryptorand.Reader) } // GenerateNonCryptoID generates unique id without using cryptographically // secure sources of random. // It helps you to save entropy. func GenerateNonCryptoID() string { - return generateID(false) + return generateID(readerFunc(rand.Read)) } // ValidateID checks whether an ID string is a valid image ID. @@ -79,3 +77,23 @@ func ValidateID(id string) error { } return nil } + +func init() { + // safely set the seed globally so we generate random ids. Tries to use a + // crypto seed before falling back to time. 
+ var seed int64 + if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { + // This should not happen, but worst-case fallback to time-based seed. + seed = time.Now().UnixNano() + } else { + seed = cryptoseed.Int64() + } + + rand.Seed(seed) +} + +type readerFunc func(p []byte) (int, error) + +func (fn readerFunc) Read(p []byte) (int, error) { + return fn(p) +} diff --git a/fn/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go b/fn/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go index e17951bfc..8c4c39875 100644 --- a/fn/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go +++ b/fn/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go @@ -5,8 +5,6 @@ import ( "bytes" "math/rand" "strings" - - "github.com/docker/docker/pkg/random" ) // GenerateRandomAlphaOnlyString generates an alphabetical random string with length n. @@ -15,7 +13,7 @@ func GenerateRandomAlphaOnlyString(n int) string { letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") b := make([]byte, n) for i := range b { - b[i] = letters[random.Rand.Intn(len(letters))] + b[i] = letters[rand.Intn(len(letters))] } return string(b) } diff --git a/fn/vendor/github.com/docker/docker/pkg/symlink/fs.go b/fn/vendor/github.com/docker/docker/pkg/symlink/fs.go index f6bc2231f..52fb9a691 100644 --- a/fn/vendor/github.com/docker/docker/pkg/symlink/fs.go +++ b/fn/vendor/github.com/docker/docker/pkg/symlink/fs.go @@ -40,7 +40,7 @@ func FollowSymlinkInScope(path, root string) (string, error) { // // Example: // If /foo/bar -> /outside, -// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/oustide" +// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/outside" // // IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks // are created and not to create subsequently, additional symlinks that could potentially make a diff --git 
a/fn/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux_test.go b/fn/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux_test.go index fae0fdffb..77c54f27c 100644 --- a/fn/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux_test.go @@ -5,30 +5,30 @@ import ( "os" "path" "path/filepath" + "syscall" "testing" + + "github.com/stretchr/testify/require" ) func TestReadProcBool(t *testing.T) { tmpDir, err := ioutil.TempDir("", "test-sysinfo-proc") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer os.RemoveAll(tmpDir) procFile := filepath.Join(tmpDir, "read-proc-bool") - if err := ioutil.WriteFile(procFile, []byte("1"), 644); err != nil { - t.Fatal(err) - } + err = ioutil.WriteFile(procFile, []byte("1"), 0644) + require.NoError(t, err) if !readProcBool(procFile) { t.Fatal("expected proc bool to be true, got false") } - if err := ioutil.WriteFile(procFile, []byte("0"), 644); err != nil { + if err := ioutil.WriteFile(procFile, []byte("0"), 0644); err != nil { t.Fatal(err) } if readProcBool(procFile) { - t.Fatal("expected proc bool to be false, got false") + t.Fatal("expected proc bool to be false, got true") } if readProcBool(path.Join(tmpDir, "no-exist")) { @@ -39,20 +39,66 @@ func TestReadProcBool(t *testing.T) { func TestCgroupEnabled(t *testing.T) { cgroupDir, err := ioutil.TempDir("", "cgroup-test") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer os.RemoveAll(cgroupDir) if cgroupEnabled(cgroupDir, "test") { t.Fatal("cgroupEnabled should be false") } - if err := ioutil.WriteFile(path.Join(cgroupDir, "test"), []byte{}, 644); err != nil { - t.Fatal(err) - } + err = ioutil.WriteFile(path.Join(cgroupDir, "test"), []byte{}, 0644) + require.NoError(t, err) if !cgroupEnabled(cgroupDir, "test") { t.Fatal("cgroupEnabled should be true") } } + +func TestNew(t *testing.T) { + sysInfo := New(false) + require.NotNil(t, sysInfo) + checkSysInfo(t, 
sysInfo) + + sysInfo = New(true) + require.NotNil(t, sysInfo) + checkSysInfo(t, sysInfo) +} + +func checkSysInfo(t *testing.T, sysInfo *SysInfo) { + // Check if Seccomp is supported, via CONFIG_SECCOMP.then sysInfo.Seccomp must be TRUE , else FALSE + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_SECCOMP, 0, 0); err != syscall.EINVAL { + // Make sure the kernel has CONFIG_SECCOMP_FILTER. + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_SECCOMP, SeccompModeFilter, 0); err != syscall.EINVAL { + require.True(t, sysInfo.Seccomp) + } + } else { + require.False(t, sysInfo.Seccomp) + } +} + +func TestNewAppArmorEnabled(t *testing.T) { + // Check if AppArmor is supported. then it must be TRUE , else FALSE + if _, err := os.Stat("/sys/kernel/security/apparmor"); err != nil { + t.Skip("App Armor Must be Enabled") + } + + sysInfo := New(true) + require.True(t, sysInfo.AppArmor) +} + +func TestNewAppArmorDisabled(t *testing.T) { + // Check if AppArmor is supported. 
then it must be TRUE , else FALSE + if _, err := os.Stat("/sys/kernel/security/apparmor"); !os.IsNotExist(err) { + t.Skip("App Armor Must be Disabled") + } + + sysInfo := New(true) + require.False(t, sysInfo.AppArmor) +} + +func TestNumCPU(t *testing.T) { + cpuNumbers := NumCPU() + if cpuNumbers <= 0 { + t.Fatal("CPU returned must be greater than zero") + } +} diff --git a/fn/vendor/github.com/docker/docker/pkg/system/chtimes.go b/fn/vendor/github.com/docker/docker/pkg/system/chtimes.go index 7637f12e1..056d19954 100644 --- a/fn/vendor/github.com/docker/docker/pkg/system/chtimes.go +++ b/fn/vendor/github.com/docker/docker/pkg/system/chtimes.go @@ -2,26 +2,9 @@ package system import ( "os" - "syscall" "time" - "unsafe" ) -var ( - maxTime time.Time -) - -func init() { - if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { - // This is a 64 bit timespec - // os.Chtimes limits time to the following - maxTime = time.Unix(0, 1<<63-1) - } else { - // This is a 32 bit timespec - maxTime = time.Unix(1<<31-1, 0) - } -} - // Chtimes changes the access time and modified time of a file at the given path func Chtimes(name string, atime time.Time, mtime time.Time) error { unixMinTime := time.Unix(0, 0) diff --git a/fn/vendor/github.com/docker/docker/pkg/system/filesys.go b/fn/vendor/github.com/docker/docker/pkg/system/filesys.go index 7aa920de1..102565f76 100644 --- a/fn/vendor/github.com/docker/docker/pkg/system/filesys.go +++ b/fn/vendor/github.com/docker/docker/pkg/system/filesys.go @@ -8,15 +8,14 @@ import ( "path/filepath" ) -// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory -// ACL'd for Builtin Administrators and Local System. -func MkdirAllWithACL(path string, perm os.FileMode) error { - return MkdirAll(path, perm) +// MkdirAllWithACL is a wrapper for MkdirAll on unix systems. 
+func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return MkdirAll(path, perm, sddl) } // MkdirAll creates a directory named path along with any necessary parents, // with permission specified by attribute perm for all dir created. -func MkdirAll(path string, perm os.FileMode) error { +func MkdirAll(path string, perm os.FileMode, sddl string) error { return os.MkdirAll(path, perm) } diff --git a/fn/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/fn/vendor/github.com/docker/docker/pkg/system/filesys_windows.go index 626d2ad88..20117db91 100644 --- a/fn/vendor/github.com/docker/docker/pkg/system/filesys_windows.go +++ b/fn/vendor/github.com/docker/docker/pkg/system/filesys_windows.go @@ -16,21 +16,28 @@ import ( winio "github.com/Microsoft/go-winio" ) +const ( + // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System + SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" + // SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System + SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" +) + // MkdirAllWithACL is a wrapper for MkdirAll that creates a directory -// ACL'd for Builtin Administrators and Local System. -func MkdirAllWithACL(path string, perm os.FileMode) error { - return mkdirall(path, true) +// with an appropriate SDDL defined ACL. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return mkdirall(path, true, sddl) } // MkdirAll implementation that is volume path aware for Windows. -func MkdirAll(path string, _ os.FileMode) error { - return mkdirall(path, false) +func MkdirAll(path string, _ os.FileMode, sddl string) error { + return mkdirall(path, false, sddl) } // mkdirall is a custom version of os.MkdirAll modified for use on Windows // so that it is both volume path aware, and can create a directory with // a DACL. 
-func mkdirall(path string, adminAndLocalSystem bool) error { +func mkdirall(path string, applyACL bool, sddl string) error { if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { return nil } @@ -64,15 +71,15 @@ func mkdirall(path string, adminAndLocalSystem bool) error { if j > 1 { // Create parent - err = mkdirall(path[0:j-1], false) + err = mkdirall(path[0:j-1], false, sddl) if err != nil { return err } } // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result. - if adminAndLocalSystem { - err = mkdirWithACL(path) + if applyACL { + err = mkdirWithACL(path, sddl) } else { err = os.Mkdir(path, 0) } @@ -96,9 +103,9 @@ func mkdirall(path string, adminAndLocalSystem bool) error { // in golang to cater for creating a directory am ACL permitting full // access, with inheritance, to any subfolder/file for Built-in Administrators // and Local System. -func mkdirWithACL(name string) error { +func mkdirWithACL(name string, sddl string) error { sa := syscall.SecurityAttributes{Length: 0} - sddl := "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" + sd, err := winio.SddlToSecurityDescriptor(sddl) if err != nil { return &os.PathError{Op: "mkdir", Path: name, Err: err} diff --git a/fn/vendor/github.com/docker/docker/pkg/system/init.go b/fn/vendor/github.com/docker/docker/pkg/system/init.go new file mode 100644 index 000000000..17935088d --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/system/init.go @@ -0,0 +1,22 @@ +package system + +import ( + "syscall" + "time" + "unsafe" +) + +// Used by chtimes +var maxTime time.Time + +func init() { + // chtimes initialization + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} diff --git a/fn/vendor/github.com/docker/docker/pkg/system/init_windows.go 
b/fn/vendor/github.com/docker/docker/pkg/system/init_windows.go new file mode 100644 index 000000000..019c66441 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/system/init_windows.go @@ -0,0 +1,17 @@ +package system + +import "os" + +// LCOWSupported determines if Linux Containers on Windows are supported. +// Note: This feature is in development (06/17) and enabled through an +// environment variable. At a future time, it will be enabled based +// on build number. @jhowardmsft +var lcowSupported = false + +func init() { + // LCOW initialization + if os.Getenv("LCOW_SUPPORTED") != "" { + lcowSupported = true + } + +} diff --git a/fn/vendor/github.com/docker/docker/pkg/system/lcow_unix.go b/fn/vendor/github.com/docker/docker/pkg/system/lcow_unix.go new file mode 100644 index 000000000..cff33bb40 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/system/lcow_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package system + +// LCOWSupported returns true if Linux containers on Windows are supported. +func LCOWSupported() bool { + return false +} diff --git a/fn/vendor/github.com/docker/docker/pkg/system/lcow_windows.go b/fn/vendor/github.com/docker/docker/pkg/system/lcow_windows.go new file mode 100644 index 000000000..e54d01e69 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/system/lcow_windows.go @@ -0,0 +1,6 @@ +package system + +// LCOWSupported returns true if Linux containers on Windows are supported. +func LCOWSupported() bool { + return lcowSupported +} diff --git a/fn/vendor/github.com/docker/docker/pkg/system/path.go b/fn/vendor/github.com/docker/docker/pkg/system/path.go new file mode 100644 index 000000000..f634a6be6 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/system/path.go @@ -0,0 +1,21 @@ +package system + +import "runtime" + +const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// DefaultPathEnv is unix style list of directories to search for +// executables. 
Each directory is separated from the next by a colon +// ':' character . +func DefaultPathEnv(platform string) string { + if runtime.GOOS == "windows" { + if platform != runtime.GOOS && LCOWSupported() { + return defaultUnixPathEnv + } + // Deliberately empty on Windows containers on Windows as the default path will be set by + // the container. Docker has no context of what the default path should be. + return "" + } + return defaultUnixPathEnv + +} diff --git a/fn/vendor/github.com/docker/docker/pkg/system/path_unix.go b/fn/vendor/github.com/docker/docker/pkg/system/path_unix.go index c607c4db0..f3762e69d 100644 --- a/fn/vendor/github.com/docker/docker/pkg/system/path_unix.go +++ b/fn/vendor/github.com/docker/docker/pkg/system/path_unix.go @@ -2,11 +2,6 @@ package system -// DefaultPathEnv is unix style list of directories to search for -// executables. Each directory is separated from the next by a colon -// ':' character . -const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - // CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, // is the system drive. This is a no-op on Linux. func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { diff --git a/fn/vendor/github.com/docker/docker/pkg/system/path_windows.go b/fn/vendor/github.com/docker/docker/pkg/system/path_windows.go index cbfe2c157..aab891522 100644 --- a/fn/vendor/github.com/docker/docker/pkg/system/path_windows.go +++ b/fn/vendor/github.com/docker/docker/pkg/system/path_windows.go @@ -8,15 +8,11 @@ import ( "strings" ) -// DefaultPathEnv is deliberately empty on Windows as the default path will be set by -// the container. Docker has no context of what the default path should be. -const DefaultPathEnv = "" - // CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. // This is used, for example, when validating a user provided path in docker cp. 
// If a drive letter is supplied, it must be the system drive. The drive letter // is always removed. Also, it translates it to OS semantics (IOW / to \). We -// need the path in this syntax so that it can ultimately be contatenated with +// need the path in this syntax so that it can ultimately be concatenated with // a Windows long-path which doesn't support drive-letters. Examples: // C: --> Fail // C:\ --> \ diff --git a/fn/vendor/github.com/docker/docker/pkg/system/rm.go b/fn/vendor/github.com/docker/docker/pkg/system/rm.go new file mode 100644 index 000000000..101b569a5 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/system/rm.go @@ -0,0 +1,80 @@ +package system + +import ( + "os" + "syscall" + "time" + + "github.com/docker/docker/pkg/mount" + "github.com/pkg/errors" +) + +// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can +// often be remedied. +// Only use `EnsureRemoveAll` if you really want to make every effort to remove +// a directory. +// +// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there +// can be a race between reading directory entries and then actually attempting +// to remove everything in the directory. +// These types of errors do not need to be returned since it's ok for the dir to +// be gone we can just retry the remove operation. 
+// +// This should not return a `os.ErrNotExist` kind of error under any circumstances +func EnsureRemoveAll(dir string) error { + notExistErr := make(map[string]bool) + + // track retries + exitOnErr := make(map[string]int) + maxRetry := 5 + + // Attempt to unmount anything beneath this dir first + mount.RecursiveUnmount(dir) + + for { + err := os.RemoveAll(dir) + if err == nil { + return err + } + + pe, ok := err.(*os.PathError) + if !ok { + return err + } + + if os.IsNotExist(err) { + if notExistErr[pe.Path] { + return err + } + notExistErr[pe.Path] = true + + // There is a race where some subdir can be removed but after the parent + // dir entries have been read. + // So the path could be from `os.Remove(subdir)` + // If the reported non-existent path is not the passed in `dir` we + // should just retry, but otherwise return with no error. + if pe.Path == dir { + return nil + } + continue + } + + if pe.Err != syscall.EBUSY { + return err + } + + if mounted, _ := mount.Mounted(pe.Path); mounted { + if e := mount.Unmount(pe.Path); e != nil { + if mounted, _ := mount.Mounted(pe.Path); mounted { + return errors.Wrapf(e, "error while removing %s", dir) + } + } + } + + if exitOnErr[pe.Path] == maxRetry { + return err + } + exitOnErr[pe.Path]++ + time.Sleep(100 * time.Millisecond) + } +} diff --git a/fn/vendor/github.com/docker/docker/pkg/system/rm_test.go b/fn/vendor/github.com/docker/docker/pkg/system/rm_test.go new file mode 100644 index 000000000..fc2821f89 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/system/rm_test.go @@ -0,0 +1,84 @@ +package system + +import ( + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" + "time" + + "github.com/docker/docker/pkg/mount" +) + +func TestEnsureRemoveAllNotExist(t *testing.T) { + // should never return an error for a non-existent path + if err := EnsureRemoveAll("/non/existent/path"); err != nil { + t.Fatal(err) + } +} + +func TestEnsureRemoveAllWithDir(t *testing.T) { + dir, err := 
ioutil.TempDir("", "test-ensure-removeall-with-dir") + if err != nil { + t.Fatal(err) + } + if err := EnsureRemoveAll(dir); err != nil { + t.Fatal(err) + } +} + +func TestEnsureRemoveAllWithFile(t *testing.T) { + tmp, err := ioutil.TempFile("", "test-ensure-removeall-with-dir") + if err != nil { + t.Fatal(err) + } + tmp.Close() + if err := EnsureRemoveAll(tmp.Name()); err != nil { + t.Fatal(err) + } +} + +func TestEnsureRemoveAllWithMount(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("mount not supported on Windows") + } + + dir1, err := ioutil.TempDir("", "test-ensure-removeall-with-dir1") + if err != nil { + t.Fatal(err) + } + dir2, err := ioutil.TempDir("", "test-ensure-removeall-with-dir2") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir2) + + bindDir := filepath.Join(dir1, "bind") + if err := os.MkdirAll(bindDir, 0755); err != nil { + t.Fatal(err) + } + + if err := mount.Mount(dir2, bindDir, "none", "bind"); err != nil { + t.Fatal(err) + } + + done := make(chan struct{}) + go func() { + err = EnsureRemoveAll(dir1) + close(done) + }() + + select { + case <-done: + if err != nil { + t.Fatal(err) + } + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for EnsureRemoveAll to finish") + } + + if _, err := os.Stat(dir1); !os.IsNotExist(err) { + t.Fatalf("expected %q to not exist", dir1) + } +} diff --git a/fn/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/fn/vendor/github.com/docker/docker/pkg/system/syscall_windows.go index 1f311874f..c328a6fb8 100644 --- a/fn/vendor/github.com/docker/docker/pkg/system/syscall_windows.go +++ b/fn/vendor/github.com/docker/docker/pkg/system/syscall_windows.go @@ -8,8 +8,9 @@ import ( ) var ( - ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") - procGetVersionExW = modkernel32.NewProc("GetVersionExW") + ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") + procGetProductInfo = 
modkernel32.NewProc("GetProductInfo") ) // OSVersion is a wrapper for Windows version information @@ -66,6 +67,22 @@ func IsWindowsClient() bool { return osviex.ProductType == verNTWorkstation } +// IsIoTCore returns true if the currently running image is based off of +// Windows 10 IoT Core. +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsIoTCore() bool { + var returnedProductType uint32 + r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType))) + if r1 == 0 { + logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err) + return false + } + const productIoTUAP = 0x0000007B + const productIoTUAPCommercial = 0x00000083 + return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial +} + // Unmount is a platform-specific helper function to call // the unmount syscall. Not supported on Windows func Unmount(dest string) error { diff --git a/fn/vendor/github.com/docker/docker/pkg/templates/templates.go b/fn/vendor/github.com/docker/docker/pkg/templates/templates.go index 2ac44fad4..d2d7e0c3d 100644 --- a/fn/vendor/github.com/docker/docker/pkg/templates/templates.go +++ b/fn/vendor/github.com/docker/docker/pkg/templates/templates.go @@ -1,6 +1,7 @@ package templates import ( + "bytes" "encoding/json" "strings" "text/template" @@ -10,8 +11,12 @@ import ( // functions provided to every template. var basicFunctions = template.FuncMap{ "json": func(v interface{}) string { - a, _ := json.Marshal(v) - return string(a) + buf := &bytes.Buffer{} + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + enc.Encode(v) + // Remove the trailing new line added by the encoder + return strings.TrimSpace(buf.String()) }, "split": strings.Split, "join": strings.Join, @@ -25,7 +30,7 @@ var basicFunctions = template.FuncMap{ // HeaderFunctions are used to created headers of a table. 
// This is a replacement of basicFunctions for header generation // because we want the header to remain intact. -// Some functions like `split` are irrevelant so not added. +// Some functions like `split` are irrelevant so not added. var HeaderFunctions = template.FuncMap{ "json": func(v string) string { return v diff --git a/fn/vendor/github.com/docker/docker/pkg/templates/templates_test.go b/fn/vendor/github.com/docker/docker/pkg/templates/templates_test.go index 7fa1df64e..296bcb710 100644 --- a/fn/vendor/github.com/docker/docker/pkg/templates/templates_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/templates/templates_test.go @@ -4,27 +4,38 @@ import ( "bytes" "testing" - "github.com/docker/docker/pkg/testutil/assert" + "github.com/stretchr/testify/assert" ) +// Github #32120 +func TestParseJSONFunctions(t *testing.T) { + tm, err := Parse(`{{json .Ports}}`) + assert.NoError(t, err) + + var b bytes.Buffer + assert.NoError(t, tm.Execute(&b, map[string]string{"Ports": "0.0.0.0:2->8/udp"})) + want := "\"0.0.0.0:2->8/udp\"" + assert.Equal(t, want, b.String()) +} + func TestParseStringFunctions(t *testing.T) { tm, err := Parse(`{{join (split . ":") "/"}}`) - assert.NilError(t, err) + assert.NoError(t, err) var b bytes.Buffer - assert.NilError(t, tm.Execute(&b, "text:with:colon")) + assert.NoError(t, tm.Execute(&b, "text:with:colon")) want := "text/with/colon" - assert.Equal(t, b.String(), want) + assert.Equal(t, want, b.String()) } func TestNewParse(t *testing.T) { tm, err := NewParse("foo", "this is a {{ . }}") - assert.NilError(t, err) + assert.NoError(t, err) var b bytes.Buffer - assert.NilError(t, tm.Execute(&b, "string")) + assert.NoError(t, tm.Execute(&b, "string")) want := "this is a string" - assert.Equal(t, b.String(), want) + assert.Equal(t, want, b.String()) } func TestParseTruncateFunction(t *testing.T) { @@ -46,14 +57,32 @@ func TestParseTruncateFunction(t *testing.T) { template: `{{truncate . 
30}}`, expected: "tupx5xzf6hvsrhnruz5cr8gwp", }, + { + template: `{{pad . 3 3}}`, + expected: " tupx5xzf6hvsrhnruz5cr8gwp ", + }, } for _, testCase := range testCases { tm, err := Parse(testCase.template) - assert.NilError(t, err) + assert.NoError(t, err) - var b bytes.Buffer - assert.NilError(t, tm.Execute(&b, source)) - assert.Equal(t, b.String(), testCase.expected) + t.Run("Non Empty Source Test with template: "+testCase.template, func(t *testing.T) { + var b bytes.Buffer + assert.NoError(t, tm.Execute(&b, source)) + assert.Equal(t, testCase.expected, b.String()) + }) + + t.Run("Empty Source Test with template: "+testCase.template, func(t *testing.T) { + var c bytes.Buffer + assert.NoError(t, tm.Execute(&c, "")) + assert.Equal(t, "", c.String()) + }) + + t.Run("Nil Source Test with template: "+testCase.template, func(t *testing.T) { + var c bytes.Buffer + assert.Error(t, tm.Execute(&c, nil)) + assert.Equal(t, "", c.String()) + }) } } diff --git a/fn/vendor/github.com/docker/docker/pkg/term/proxy.go b/fn/vendor/github.com/docker/docker/pkg/term/proxy.go new file mode 100644 index 000000000..e648eb812 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/term/proxy.go @@ -0,0 +1,74 @@ +package term + +import ( + "io" +) + +// EscapeError is special error which returned by a TTY proxy reader's Read() +// method in case its detach escape sequence is read. +type EscapeError struct{} + +func (EscapeError) Error() string { + return "read escape sequence" +} + +// escapeProxy is used only for attaches with a TTY. It is used to proxy +// stdin keypresses from the underlying reader and look for the passed in +// escape key sequence to signal a detach. +type escapeProxy struct { + escapeKeys []byte + escapeKeyPos int + r io.Reader +} + +// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader +// and detects when the specified escape keys are read, in which case the Read +// method will return an error of type EscapeError. 
+func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader { + return &escapeProxy{ + escapeKeys: escapeKeys, + r: r, + } +} + +func (r *escapeProxy) Read(buf []byte) (int, error) { + nr, err := r.r.Read(buf) + + preserve := func() { + // this preserves the original key presses in the passed in buffer + nr += r.escapeKeyPos + preserve := make([]byte, 0, r.escapeKeyPos+len(buf)) + preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...) + preserve = append(preserve, buf...) + r.escapeKeyPos = 0 + copy(buf[0:nr], preserve) + } + + if nr != 1 || err != nil { + if r.escapeKeyPos > 0 { + preserve() + } + return nr, err + } + + if buf[0] != r.escapeKeys[r.escapeKeyPos] { + if r.escapeKeyPos > 0 { + preserve() + } + return nr, nil + } + + if r.escapeKeyPos == len(r.escapeKeys)-1 { + return 0, EscapeError{} + } + + // Looks like we've got an escape key, but we need to match again on the next + // read. + // Store the current escape key we found so we can look for the next one on + // the next read. 
+ // Since this is an escape key, make sure we don't let the caller read it + // If later on we find that this is not the escape sequence, we'll add the + // keys back + r.escapeKeyPos++ + return nr - r.escapeKeyPos, nil +} diff --git a/fn/vendor/github.com/docker/docker/pkg/term/proxy_test.go b/fn/vendor/github.com/docker/docker/pkg/term/proxy_test.go new file mode 100644 index 000000000..baba193d1 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/term/proxy_test.go @@ -0,0 +1,92 @@ +package term + +import ( + "bytes" + "fmt" + "reflect" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestEscapeProxyRead(t *testing.T) { + escapeKeys, _ := ToBytes("DEL") + keys, _ := ToBytes("a,b,c,+") + reader := NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf := make([]byte, len(keys)) + nr, err := reader.Read(buf) + require.NoError(t, err) + require.EqualValues(t, nr, len(keys), fmt.Sprintf("nr %d should be equal to the number of %d", nr, len(keys))) + require.Equal(t, keys, buf, "keys & the read buffer should be equal") + + keys, _ = ToBytes("") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, len(keys)) + nr, err = reader.Read(buf) + require.Error(t, err, "Should throw error when no keys are to read") + require.EqualValues(t, nr, 0, "nr should be zero") + require.Condition(t, func() (success bool) { return len(keys) == 0 && len(buf) == 0 }, "keys & the read buffer size should be zero") + + escapeKeys, _ = ToBytes("ctrl-x,ctrl-@") + keys, _ = ToBytes("DEL") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, len(keys)) + nr, err = reader.Read(buf) + require.NoError(t, err) + require.EqualValues(t, nr, 1, fmt.Sprintf("nr %d should be equal to the number of 1", nr)) + require.Equal(t, keys, buf, "keys & the read buffer should be equal") + + escapeKeys, _ = ToBytes("ctrl-c") + keys, _ = ToBytes("ctrl-c") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, 
len(keys)) + nr, err = reader.Read(buf) + require.Condition(t, func() (success bool) { + return reflect.TypeOf(err).Name() == "EscapeError" + }, err) + require.EqualValues(t, nr, 0, "nr should be equal to 0") + require.Equal(t, keys, buf, "keys & the read buffer should be equal") + + escapeKeys, _ = ToBytes("ctrl-c,ctrl-z") + keys, _ = ToBytes("ctrl-c,ctrl-z") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, 1) + nr, err = reader.Read(buf) + require.NoError(t, err) + require.EqualValues(t, nr, 0, "nr should be equal to 0") + require.Equal(t, keys[0:1], buf, "keys & the read buffer should be equal") + nr, err = reader.Read(buf) + require.Condition(t, func() (success bool) { + return reflect.TypeOf(err).Name() == "EscapeError" + }, err) + require.EqualValues(t, nr, 0, "nr should be equal to 0") + require.Equal(t, keys[1:], buf, "keys & the read buffer should be equal") + + escapeKeys, _ = ToBytes("ctrl-c,ctrl-z") + keys, _ = ToBytes("ctrl-c,DEL,+") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, 1) + nr, err = reader.Read(buf) + require.NoError(t, err) + require.EqualValues(t, nr, 0, "nr should be equal to 0") + require.Equal(t, keys[0:1], buf, "keys & the read buffer should be equal") + buf = make([]byte, len(keys)) + nr, err = reader.Read(buf) + require.NoError(t, err) + require.EqualValues(t, nr, len(keys), fmt.Sprintf("nr should be equal to %d", len(keys))) + require.Equal(t, keys, buf, "keys & the read buffer should be equal") + + escapeKeys, _ = ToBytes("ctrl-c,ctrl-z") + keys, _ = ToBytes("ctrl-c,DEL") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, 1) + nr, err = reader.Read(buf) + require.NoError(t, err) + require.EqualValues(t, nr, 0, "nr should be equal to 0") + require.Equal(t, keys[0:1], buf, "keys & the read buffer should be equal") + buf = make([]byte, len(keys)) + nr, err = reader.Read(buf) + require.NoError(t, err) + require.EqualValues(t, nr, 
len(keys), fmt.Sprintf("nr should be equal to %d", len(keys))) + require.Equal(t, keys, buf, "keys & the read buffer should be equal") +} diff --git a/fn/vendor/github.com/docker/docker/pkg/term/tc_other.go b/fn/vendor/github.com/docker/docker/pkg/term/tc.go similarity index 50% rename from fn/vendor/github.com/docker/docker/pkg/term/tc_other.go rename to fn/vendor/github.com/docker/docker/pkg/term/tc.go index 750d7c3f6..6d2dfd3a8 100644 --- a/fn/vendor/github.com/docker/docker/pkg/term/tc_other.go +++ b/fn/vendor/github.com/docker/docker/pkg/term/tc.go @@ -1,5 +1,4 @@ // +build !windows -// +build !linux !cgo // +build !solaris !cgo package term @@ -7,14 +6,16 @@ package term import ( "syscall" "unsafe" + + "golang.org/x/sys/unix" ) func tcget(fd uintptr, p *Termios) syscall.Errno { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) + _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) return err } func tcset(fd uintptr, p *Termios) syscall.Errno { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) + _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) return err } diff --git a/fn/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go b/fn/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go deleted file mode 100644 index 59dac5ba8..000000000 --- a/fn/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go +++ /dev/null @@ -1,50 +0,0 @@ -// +build linux,cgo - -package term - -import ( - "syscall" - "unsafe" -) - -// #include -import "C" - -// Termios is the Unix API for terminal I/O. -// It is passthrough for syscall.Termios in order to make it portable with -// other platforms where it is not available or handled differently. 
-type Termios syscall.Termios - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - newState := oldState.termios - - C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState))) - if err := tcset(fd, &newState); err != 0 { - return nil, err - } - return &oldState, nil -} - -func tcget(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} diff --git a/fn/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go b/fn/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go index c9139d0ca..50234affc 100644 --- a/fn/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go +++ b/fn/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go @@ -5,15 +5,17 @@ package term import ( "syscall" "unsafe" + + "golang.org/x/sys/unix" ) // #include import "C" // Termios is the Unix API for terminal I/O. -// It is passthrough for syscall.Termios in order to make it portable with +// It is passthrough for unix.Termios in order to make it portable with // other platforms where it is not available or handled differently. 
-type Termios syscall.Termios +type Termios unix.Termios // MakeRaw put the terminal connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be @@ -26,11 +28,11 @@ func MakeRaw(fd uintptr) (*State, error) { newState := oldState.termios - newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON | syscall.IXANY) - newState.Oflag &^= syscall.OPOST - newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) - newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) - newState.Cflag |= syscall.CS8 + newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON | unix.IXANY) + newState.Oflag &^= unix.OPOST + newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) + newState.Cflag &^= (unix.CSIZE | unix.PARENB) + newState.Cflag |= unix.CS8 /* VMIN is the minimum number of characters that needs to be read in non-canonical mode for it to be returned diff --git a/fn/vendor/github.com/docker/docker/pkg/term/term.go b/fn/vendor/github.com/docker/docker/pkg/term/term.go index 816f8d752..4f59d8d93 100644 --- a/fn/vendor/github.com/docker/docker/pkg/term/term.go +++ b/fn/vendor/github.com/docker/docker/pkg/term/term.go @@ -10,7 +10,8 @@ import ( "io" "os" "os/signal" - "syscall" + + "golang.org/x/sys/unix" ) var ( @@ -79,7 +80,7 @@ func SaveState(fd uintptr) (*State, error) { // descriptor, with echo disabled. 
func DisableEcho(fd uintptr, state *State) error { newState := state.termios - newState.Lflag &^= syscall.ECHO + newState.Lflag &^= unix.ECHO if err := tcset(fd, &newState); err != 0 { return err diff --git a/fn/vendor/github.com/docker/docker/pkg/term/term_linux_test.go b/fn/vendor/github.com/docker/docker/pkg/term/term_linux_test.go new file mode 100644 index 000000000..a1628c4c6 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/term/term_linux_test.go @@ -0,0 +1,120 @@ +//+build linux + +package term + +import ( + "flag" + "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var rootEnabled bool + +func init() { + flag.BoolVar(&rootEnabled, "test.root", false, "enable tests that require root") +} + +// RequiresRoot skips tests that require root, unless the test.root flag has +// been set +func RequiresRoot(t *testing.T) { + if !rootEnabled { + t.Skip("skipping test that requires root") + return + } + assert.Equal(t, 0, os.Getuid(), "This test must be run as root.") +} + +func newTtyForTest(t *testing.T) (*os.File, error) { + RequiresRoot(t) + return os.OpenFile("/dev/tty", os.O_RDWR, os.ModeDevice) +} + +func newTempFile() (*os.File, error) { + return ioutil.TempFile(os.TempDir(), "temp") +} + +func TestGetWinsize(t *testing.T) { + tty, err := newTtyForTest(t) + defer tty.Close() + require.NoError(t, err) + winSize, err := GetWinsize(tty.Fd()) + require.NoError(t, err) + require.NotNil(t, winSize) + require.NotNil(t, winSize.Height) + require.NotNil(t, winSize.Width) + newSize := Winsize{Width: 200, Height: 200, x: winSize.x, y: winSize.y} + err = SetWinsize(tty.Fd(), &newSize) + require.NoError(t, err) + winSize, err = GetWinsize(tty.Fd()) + require.NoError(t, err) + require.Equal(t, *winSize, newSize) +} + +func TestSetWinsize(t *testing.T) { + tty, err := newTtyForTest(t) + defer tty.Close() + require.NoError(t, err) + winSize, err := GetWinsize(tty.Fd()) + require.NoError(t, err) + 
require.NotNil(t, winSize) + newSize := Winsize{Width: 200, Height: 200, x: winSize.x, y: winSize.y} + err = SetWinsize(tty.Fd(), &newSize) + require.NoError(t, err) + winSize, err = GetWinsize(tty.Fd()) + require.NoError(t, err) + require.Equal(t, *winSize, newSize) +} + +func TestGetFdInfo(t *testing.T) { + tty, err := newTtyForTest(t) + defer tty.Close() + require.NoError(t, err) + inFd, isTerminal := GetFdInfo(tty) + require.Equal(t, inFd, tty.Fd()) + require.Equal(t, isTerminal, true) + tmpFile, err := newTempFile() + defer tmpFile.Close() + inFd, isTerminal = GetFdInfo(tmpFile) + require.Equal(t, inFd, tmpFile.Fd()) + require.Equal(t, isTerminal, false) +} + +func TestIsTerminal(t *testing.T) { + tty, err := newTtyForTest(t) + defer tty.Close() + require.NoError(t, err) + isTerminal := IsTerminal(tty.Fd()) + require.Equal(t, isTerminal, true) + tmpFile, err := newTempFile() + defer tmpFile.Close() + isTerminal = IsTerminal(tmpFile.Fd()) + require.Equal(t, isTerminal, false) +} + +func TestSaveState(t *testing.T) { + tty, err := newTtyForTest(t) + defer tty.Close() + require.NoError(t, err) + state, err := SaveState(tty.Fd()) + require.NoError(t, err) + require.NotNil(t, state) + tty, err = newTtyForTest(t) + defer tty.Close() + err = RestoreTerminal(tty.Fd(), state) + require.NoError(t, err) +} + +func TestDisableEcho(t *testing.T) { + tty, err := newTtyForTest(t) + defer tty.Close() + require.NoError(t, err) + state, err := SetRawTerminal(tty.Fd()) + require.NoError(t, err) + require.NotNil(t, state) + err = DisableEcho(tty.Fd(), state) + require.NoError(t, err) +} diff --git a/fn/vendor/github.com/docker/docker/pkg/term/term_windows.go b/fn/vendor/github.com/docker/docker/pkg/term/term_windows.go index 62e9a9e69..fd023ba31 100644 --- a/fn/vendor/github.com/docker/docker/pkg/term/term_windows.go +++ b/fn/vendor/github.com/docker/docker/pkg/term/term_windows.go @@ -6,10 +6,10 @@ import ( "io" "os" "os/signal" - "syscall" "github.com/Azure/go-ansiterm/winterm" 
"github.com/docker/docker/pkg/term/windows" + "golang.org/x/sys/windows" ) // State holds the console mode for the terminal. @@ -79,19 +79,19 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { } if emulateStdin { - stdIn = windows.NewAnsiReader(syscall.STD_INPUT_HANDLE) + stdIn = windowsconsole.NewAnsiReader(windows.STD_INPUT_HANDLE) } else { stdIn = os.Stdin } if emulateStdout { - stdOut = windows.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE) + stdOut = windowsconsole.NewAnsiWriter(windows.STD_OUTPUT_HANDLE) } else { stdOut = os.Stdout } if emulateStderr { - stdErr = windows.NewAnsiWriter(syscall.STD_ERROR_HANDLE) + stdErr = windowsconsole.NewAnsiWriter(windows.STD_ERROR_HANDLE) } else { stdErr = os.Stderr } @@ -101,7 +101,7 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { // GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. func GetFdInfo(in interface{}) (uintptr, bool) { - return windows.GetHandleInfo(in) + return windowsconsole.GetHandleInfo(in) } // GetWinsize returns the window size based on the specified file descriptor. @@ -121,7 +121,7 @@ func GetWinsize(fd uintptr) (*Winsize, error) { // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal(fd uintptr) bool { - return windows.IsConsole(fd) + return windowsconsole.IsConsole(fd) } // RestoreTerminal restores the terminal connected to the given file descriptor diff --git a/fn/vendor/github.com/docker/docker/pkg/term/termios_bsd.go b/fn/vendor/github.com/docker/docker/pkg/term/termios_bsd.go new file mode 100644 index 000000000..c47341e87 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/term/termios_bsd.go @@ -0,0 +1,42 @@ +// +build darwin freebsd openbsd + +package term + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + getTermios = unix.TIOCGETA + setTermios = unix.TIOCSETA +) + +// Termios is the Unix API for terminal I/O. 
+type Termios unix.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) + newState.Oflag &^= unix.OPOST + newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) + newState.Cflag &^= (unix.CSIZE | unix.PARENB) + newState.Cflag |= unix.CS8 + newState.Cc[unix.VMIN] = 1 + newState.Cc[unix.VTIME] = 0 + + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/fn/vendor/github.com/docker/docker/pkg/term/termios_darwin.go b/fn/vendor/github.com/docker/docker/pkg/term/termios_darwin.go deleted file mode 100644 index 480db900a..000000000 --- a/fn/vendor/github.com/docker/docker/pkg/term/termios_darwin.go +++ /dev/null @@ -1,69 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA -) - -// Termios magic numbers, passthrough to the ones defined in syscall. -const ( - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -// Termios is the Unix API for terminal I/O. 
-type Termios struct { - Iflag uint64 - Oflag uint64 - Cflag uint64 - Lflag uint64 - Cc [20]byte - Ispeed uint64 - Ospeed uint64 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/fn/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go b/fn/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go deleted file mode 100644 index ed843ad69..000000000 --- a/fn/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go +++ /dev/null @@ -1,69 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA -) - -// Termios magic numbers, passthrough to the ones defined in syscall. 
-const ( - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/fn/vendor/github.com/docker/docker/pkg/term/termios_linux.go b/fn/vendor/github.com/docker/docker/pkg/term/termios_linux.go index 22921b6ae..31bfa8419 100644 --- a/fn/vendor/github.com/docker/docker/pkg/term/termios_linux.go +++ b/fn/vendor/github.com/docker/docker/pkg/term/termios_linux.go @@ -1,46 +1,37 @@ -// +build !cgo - package term import ( - "syscall" "unsafe" + + "golang.org/x/sys/unix" ) const ( - getTermios = syscall.TCGETS - setTermios 
= syscall.TCSETS + getTermios = unix.TCGETS + setTermios = unix.TCSETS ) // Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} +type Termios unix.Termios // MakeRaw put the terminal connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be // restored. func MakeRaw(fd uintptr) (*State, error) { var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { return nil, err } newState := oldState.termios - newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON) - newState.Oflag &^= syscall.OPOST - newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) - newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) - newState.Cflag |= syscall.CS8 + newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) + newState.Oflag &^= unix.OPOST + newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) + newState.Cflag &^= (unix.CSIZE | unix.PARENB) + newState.Cflag |= unix.CS8 - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { return nil, err } return &oldState, nil diff --git a/fn/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go b/fn/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go deleted file mode 100644 index ed843ad69..000000000 --- 
a/fn/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go +++ /dev/null @@ -1,69 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA -) - -// Termios magic numbers, passthrough to the ones defined in syscall. -const ( - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
-func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/fn/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go b/fn/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go index cb0b88356..29d396318 100644 --- a/fn/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go +++ b/fn/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go @@ -1,6 +1,6 @@ // +build windows -package windows +package windowsconsole import ( "bytes" diff --git a/fn/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go b/fn/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go index a3ce5697d..256577e1f 100644 --- a/fn/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go +++ b/fn/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go @@ -1,6 +1,6 @@ // +build windows -package windows +package windowsconsole import ( "io" diff --git a/fn/vendor/github.com/docker/docker/pkg/term/windows/console.go b/fn/vendor/github.com/docker/docker/pkg/term/windows/console.go index ca5c3b2e5..4bad32ea7 100644 --- a/fn/vendor/github.com/docker/docker/pkg/term/windows/console.go +++ b/fn/vendor/github.com/docker/docker/pkg/term/windows/console.go @@ -1,6 +1,6 @@ // +build windows -package windows +package windowsconsole import ( "os" diff --git 
a/fn/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/fn/vendor/github.com/docker/docker/pkg/term/windows/windows.go index ce4cb5990..d67021e45 100644 --- a/fn/vendor/github.com/docker/docker/pkg/term/windows/windows.go +++ b/fn/vendor/github.com/docker/docker/pkg/term/windows/windows.go @@ -2,7 +2,7 @@ // When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create // and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. -package windows +package windowsconsole import ( "io/ioutil" diff --git a/fn/vendor/github.com/docker/docker/pkg/term/windows/windows_test.go b/fn/vendor/github.com/docker/docker/pkg/term/windows/windows_test.go index 52aeab54e..3c8084b3d 100644 --- a/fn/vendor/github.com/docker/docker/pkg/term/windows/windows_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/term/windows/windows_test.go @@ -1,3 +1,3 @@ // This file is necessary to pass the Docker tests. -package windows +package windowsconsole diff --git a/fn/vendor/github.com/docker/docker/pkg/term/term_unix.go b/fn/vendor/github.com/docker/docker/pkg/term/winsize.go similarity index 67% rename from fn/vendor/github.com/docker/docker/pkg/term/term_unix.go rename to fn/vendor/github.com/docker/docker/pkg/term/winsize.go index ddf87a0e5..f58367fe6 100644 --- a/fn/vendor/github.com/docker/docker/pkg/term/term_unix.go +++ b/fn/vendor/github.com/docker/docker/pkg/term/winsize.go @@ -3,14 +3,15 @@ package term import ( - "syscall" "unsafe" + + "golang.org/x/sys/unix" ) // GetWinsize returns the window size based on the specified file descriptor. 
func GetWinsize(fd uintptr) (*Winsize, error) { ws := &Winsize{} - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) + _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(unix.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) // Skipp errno = 0 if err == 0 { return ws, nil @@ -20,7 +21,7 @@ func GetWinsize(fd uintptr) (*Winsize, error) { // SetWinsize tries to set the specified window size for the specified file descriptor. func SetWinsize(fd uintptr, ws *Winsize) error { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws))) + _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(unix.TIOCSWINSZ), uintptr(unsafe.Pointer(ws))) // Skipp errno = 0 if err == 0 { return nil diff --git a/fn/vendor/github.com/docker/docker/pkg/term/term_solaris.go b/fn/vendor/github.com/docker/docker/pkg/term/winsize_solaris_cgo.go similarity index 73% rename from fn/vendor/github.com/docker/docker/pkg/term/term_solaris.go rename to fn/vendor/github.com/docker/docker/pkg/term/winsize_solaris_cgo.go index 112debbec..39c1d3207 100644 --- a/fn/vendor/github.com/docker/docker/pkg/term/term_solaris.go +++ b/fn/vendor/github.com/docker/docker/pkg/term/winsize_solaris_cgo.go @@ -1,10 +1,11 @@ -// +build solaris +// +build solaris,cgo package term import ( - "syscall" "unsafe" + + "golang.org/x/sys/unix" ) /* @@ -22,7 +23,7 @@ import "C" // GetWinsize returns the window size based on the specified file descriptor. func GetWinsize(fd uintptr) (*Winsize, error) { ws := &Winsize{} - ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) + ret, err := C.my_ioctl(C.int(fd), C.int(unix.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) // Skip retval = 0 if ret == 0 { return ws, nil @@ -32,7 +33,7 @@ func GetWinsize(fd uintptr) (*Winsize, error) { // SetWinsize tries to set the specified window size for the specified file descriptor. 
func SetWinsize(fd uintptr, ws *Winsize) error { - ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) + ret, err := C.my_ioctl(C.int(fd), C.int(unix.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) // Skip retval = 0 if ret == 0 { return nil diff --git a/fn/vendor/github.com/docker/docker/pkg/testutil/assert/assert.go b/fn/vendor/github.com/docker/docker/pkg/testutil/assert/assert.go deleted file mode 100644 index fdc0fab5d..000000000 --- a/fn/vendor/github.com/docker/docker/pkg/testutil/assert/assert.go +++ /dev/null @@ -1,132 +0,0 @@ -// Package assert contains functions for making assertions in unit tests -package assert - -import ( - "fmt" - "path/filepath" - "reflect" - "runtime" - "strings" - "unicode" - - "github.com/davecgh/go-spew/spew" -) - -// TestingT is an interface which defines the methods of testing.T that are -// required by this package -type TestingT interface { - Fatalf(string, ...interface{}) -} - -// Equal compare the actual value to the expected value and fails the test if -// they are not equal. -func Equal(t TestingT, actual, expected interface{}, extra ...string) { - if expected != actual { - fatalWithExtra(t, extra, "Expected '%v' (%T) got '%v' (%T)", expected, expected, actual, actual) - } -} - -// EqualNormalizedString compare the actual value to the expected value after applying the specified -// transform function. It fails the test if these two transformed string are not equal. -// For example `EqualNormalizedString(t, RemoveSpace, "foo\n", "foo")` wouldn't fail the test as -// spaces (and thus '\n') are removed before comparing the string. 
-func EqualNormalizedString(t TestingT, transformFun func(rune) rune, actual, expected string) { - if strings.Map(transformFun, actual) != strings.Map(transformFun, expected) { - fatal(t, "Expected '%v' got '%v'", expected, expected, actual, actual) - } -} - -// RemoveSpace returns -1 if the specified runes is considered as a space (unicode) -// and the rune itself otherwise. -func RemoveSpace(r rune) rune { - if unicode.IsSpace(r) { - return -1 - } - return r -} - -//EqualStringSlice compares two slices and fails the test if they do not contain -// the same items. -func EqualStringSlice(t TestingT, actual, expected []string) { - if len(actual) != len(expected) { - fatal(t, "Expected (length %d): %q\nActual (length %d): %q", - len(expected), expected, len(actual), actual) - } - for i, item := range actual { - if item != expected[i] { - fatal(t, "Slices differ at element %d, expected %q got %q", - i, expected[i], item) - } - } -} - -// NilError asserts that the error is nil, otherwise it fails the test. -func NilError(t TestingT, err error) { - if err != nil { - fatal(t, "Expected no error, got: %s", err.Error()) - } -} - -// DeepEqual compare the actual value to the expected value and fails the test if -// they are not "deeply equal". -func DeepEqual(t TestingT, actual, expected interface{}) { - if !reflect.DeepEqual(actual, expected) { - fatal(t, "Expected (%T):\n%v\n\ngot (%T):\n%s\n", - expected, spew.Sdump(expected), actual, spew.Sdump(actual)) - } -} - -// Error asserts that error is not nil, and contains the expected text, -// otherwise it fails the test. -func Error(t TestingT, err error, contains string) { - if err == nil { - fatal(t, "Expected an error, but error was nil") - } - - if !strings.Contains(err.Error(), contains) { - fatal(t, "Expected error to contain '%s', got '%s'", contains, err.Error()) - } -} - -// Contains asserts that the string contains a substring, otherwise it fails the -// test. 
-func Contains(t TestingT, actual, contains string) { - if !strings.Contains(actual, contains) { - fatal(t, "Expected '%s' to contain '%s'", actual, contains) - } -} - -// NotNil fails the test if the object is nil -func NotNil(t TestingT, obj interface{}) { - if obj == nil { - fatal(t, "Expected non-nil value.") - } -} - -// Nil fails the test if the object is not nil -func Nil(t TestingT, obj interface{}) { - if obj != nil { - fatal(t, "Expected nil value, got (%T) %s", obj, obj) - } -} - -func fatal(t TestingT, format string, args ...interface{}) { - t.Fatalf(errorSource()+format, args...) -} - -func fatalWithExtra(t TestingT, extra []string, format string, args ...interface{}) { - msg := fmt.Sprintf(errorSource()+format, args...) - if len(extra) > 0 { - msg += ": " + strings.Join(extra, ", ") - } - t.Fatalf(msg) -} - -// See testing.decorate() -func errorSource() string { - _, filename, line, ok := runtime.Caller(3) - if !ok { - return "" - } - return fmt.Sprintf("%s:%d: ", filepath.Base(filename), line) -} diff --git a/fn/vendor/github.com/docker/docker/pkg/testutil/cmd/command.go b/fn/vendor/github.com/docker/docker/pkg/testutil/cmd/command.go index 36aba593b..6f36d6790 100644 --- a/fn/vendor/github.com/docker/docker/pkg/testutil/cmd/command.go +++ b/fn/vendor/github.com/docker/docker/pkg/testutil/cmd/command.go @@ -53,7 +53,7 @@ type Result struct { } // Assert compares the Result against the Expected struct, and fails the test if -// any of the expcetations are not met. +// any of the expectations are not met. 
func (r *Result) Assert(t testingT, exp Expected) *Result { err := r.Compare(exp) if err == nil { diff --git a/fn/vendor/github.com/docker/docker/pkg/testutil/cmd/command_test.go b/fn/vendor/github.com/docker/docker/pkg/testutil/cmd/command_test.go index df2344207..d24b42b72 100644 --- a/fn/vendor/github.com/docker/docker/pkg/testutil/cmd/command_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/testutil/cmd/command_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/docker/docker/pkg/testutil/assert" + "github.com/stretchr/testify/assert" ) func TestRunCommand(t *testing.T) { @@ -74,7 +74,7 @@ func TestRunCommandWithTimeoutKilled(t *testing.T) { result.Assert(t, Expected{Timeout: true}) ones := strings.Split(result.Stdout(), "\n") - assert.Equal(t, len(ones), 4) + assert.Len(t, ones, 4) } func TestRunCommandWithErrors(t *testing.T) { diff --git a/fn/vendor/github.com/docker/docker/pkg/testutil/helpers.go b/fn/vendor/github.com/docker/docker/pkg/testutil/helpers.go new file mode 100644 index 000000000..c29114871 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/pkg/testutil/helpers.go @@ -0,0 +1,33 @@ +package testutil + +import ( + "strings" + "unicode" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// ErrorContains checks that the error is not nil, and contains the expected +// substring. +func ErrorContains(t require.TestingT, err error, expectedError string) { + require.Error(t, err) + assert.Contains(t, err.Error(), expectedError) +} + +// EqualNormalizedString compare the actual value to the expected value after applying the specified +// transform function. It fails the test if these two transformed string are not equal. +// For example `EqualNormalizedString(t, RemoveSpace, "foo\n", "foo")` wouldn't fail the test as +// spaces (and thus '\n') are removed before comparing the string. 
+func EqualNormalizedString(t require.TestingT, transformFun func(rune) rune, actual, expected string) { + require.Equal(t, strings.Map(transformFun, expected), strings.Map(transformFun, actual)) +} + +// RemoveSpace returns -1 if the specified runes is considered as a space (unicode) +// and the rune itself otherwise. +func RemoveSpace(r rune) rune { + if unicode.IsSpace(r) { + return -1 + } + return r +} diff --git a/fn/vendor/github.com/docker/docker/pkg/testutil/tempfile/tempfile.go b/fn/vendor/github.com/docker/docker/pkg/testutil/tempfile/tempfile.go index 48c654d1f..01474babf 100644 --- a/fn/vendor/github.com/docker/docker/pkg/testutil/tempfile/tempfile.go +++ b/fn/vendor/github.com/docker/docker/pkg/testutil/tempfile/tempfile.go @@ -1,9 +1,10 @@ package tempfile import ( - "github.com/docker/docker/pkg/testutil/assert" "io/ioutil" "os" + + "github.com/stretchr/testify/require" ) // TempFile is a temporary file that can be used with unit tests. TempFile @@ -14,12 +15,12 @@ type TempFile struct { } // NewTempFile returns a new temp file with contents -func NewTempFile(t assert.TestingT, prefix string, content string) *TempFile { +func NewTempFile(t require.TestingT, prefix string, content string) *TempFile { file, err := ioutil.TempFile("", prefix+"-") - assert.NilError(t, err) + require.NoError(t, err) _, err = file.Write([]byte(content)) - assert.NilError(t, err) + require.NoError(t, err) file.Close() return &TempFile{File: file} } @@ -33,3 +34,23 @@ func (f *TempFile) Name() string { func (f *TempFile) Remove() { os.Remove(f.Name()) } + +// TempDir is a temporary directory that can be used with unit tests. TempDir +// reduces the boilerplate setup required in each test case by handling +// setup errors. 
+type TempDir struct { + Path string +} + +// NewTempDir returns a new temp file with contents +func NewTempDir(t require.TestingT, prefix string) *TempDir { + path, err := ioutil.TempDir("", prefix+"-") + require.NoError(t, err) + + return &TempDir{Path: path} +} + +// Remove removes the file +func (f *TempDir) Remove() { + os.Remove(f.Path) +} diff --git a/fn/vendor/github.com/docker/docker/pkg/testutil/utils.go b/fn/vendor/github.com/docker/docker/pkg/testutil/utils.go index 194675385..0522dde2b 100644 --- a/fn/vendor/github.com/docker/docker/pkg/testutil/utils.go +++ b/fn/vendor/github.com/docker/docker/pkg/testutil/utils.go @@ -16,7 +16,6 @@ import ( "github.com/docker/docker/pkg/stringutils" "github.com/docker/docker/pkg/system" - icmd "github.com/docker/docker/pkg/testutil/cmd" ) // IsKilled process the specified error and returns whether the process was killed or not. @@ -212,20 +211,6 @@ func (c *ChannelBuffer) ReadTimeout(p []byte, n time.Duration) (int, error) { } } -// RunAtDifferentDate runs the specified function with the given time. -// It changes the date of the system, which can led to weird behaviors. -func RunAtDifferentDate(date time.Time, block func()) { - // Layout for date. 
MMDDhhmmYYYY - const timeLayout = "010203042006" - // Ensure we bring time back to now - now := time.Now().Format(timeLayout) - defer icmd.RunCommand("date", now) - - icmd.RunCommand("date", date.Format(timeLayout)) - block() - return -} - // ReadBody read the specified ReadCloser content and returns it func ReadBody(b io.ReadCloser) ([]byte, error) { defer b.Close() diff --git a/fn/vendor/github.com/docker/docker/pkg/testutil/utils_test.go b/fn/vendor/github.com/docker/docker/pkg/testutil/utils_test.go index d1dddaffa..d37f3f4f8 100644 --- a/fn/vendor/github.com/docker/docker/pkg/testutil/utils_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/testutil/utils_test.go @@ -339,25 +339,3 @@ func TestChannelBuffer(t *testing.T) { t.Fatalf("Expected '%s', got '%s'", expected, string(b)) } } - -// FIXME doesn't work -// func TestRunAtDifferentDate(t *testing.T) { -// var date string - -// // Layout for date. MMDDhhmmYYYY -// const timeLayout = "20060102" -// expectedDate := "20100201" -// theDate, err := time.Parse(timeLayout, expectedDate) -// if err != nil { -// t.Fatal(err) -// } - -// RunAtDifferentDate(theDate, func() { -// cmd := exec.Command("date", "+%Y%M%d") -// out, err := cmd.Output() -// if err != nil { -// t.Fatal(err) -// } -// date = string(out) -// }) -// } diff --git a/fn/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go b/fn/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go deleted file mode 100644 index 0b816650e..000000000 --- a/fn/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build go1.6,!go1.7 - -package tlsconfig - -import "crypto/tls" - -// Clone returns a clone of tls.Config. This function is provided for -// compatibility for go1.6 that doesn't include this method in stdlib. 
-func Clone(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - } -} diff --git a/fn/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go b/fn/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go index 44152873b..cfcd58203 100644 --- a/fn/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go +++ b/fn/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go @@ -29,12 +29,6 @@ func IsGitURL(str string) bool { return checkURL(str, "git") } -// IsGitTransport returns true if the provided str is a git transport by inspecting -// the prefix of the string for known protocols used in git. -func IsGitTransport(str string) bool { - return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") -} - // IsTransportURL returns true if the provided str is a transport (tcp, tcp+tls, udp, unix) URL. 
func IsTransportURL(str string) bool { return checkURL(str, "transport") diff --git a/fn/vendor/github.com/docker/docker/pkg/urlutil/urlutil_test.go b/fn/vendor/github.com/docker/docker/pkg/urlutil/urlutil_test.go index d84145a16..e7579f554 100644 --- a/fn/vendor/github.com/docker/docker/pkg/urlutil/urlutil_test.go +++ b/fn/vendor/github.com/docker/docker/pkg/urlutil/urlutil_test.go @@ -27,20 +27,6 @@ var ( } ) -func TestValidGitTransport(t *testing.T) { - for _, url := range gitUrls { - if !IsGitTransport(url) { - t.Fatalf("%q should be detected as valid Git prefix", url) - } - } - - for _, url := range incompleteGitUrls { - if IsGitTransport(url) { - t.Fatalf("%q should not be detected as valid Git prefix", url) - } - } -} - func TestIsGIT(t *testing.T) { for _, url := range gitUrls { if !IsGitURL(url) { diff --git a/fn/vendor/github.com/docker/docker/plugin/backend_linux.go b/fn/vendor/github.com/docker/docker/plugin/backend_linux.go index 1f01dab6a..055b8e310 100644 --- a/fn/vendor/github.com/docker/docker/plugin/backend_linux.go +++ b/fn/vendor/github.com/docker/docker/plugin/backend_linux.go @@ -13,7 +13,6 @@ import ( "os" "path" "path/filepath" - "sort" "strings" "github.com/Sirupsen/logrus" @@ -32,6 +31,7 @@ import ( "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin/v2" refstore "github.com/docker/docker/reference" "github.com/opencontainers/go-digest" @@ -60,20 +60,14 @@ func (pm *Manager) Disable(refOrID string, config *types.PluginDisableConfig) er for _, typ := range p.GetTypes() { if typ.Capability == authorization.AuthZApiImplements { - authzList := pm.config.AuthzMiddleware.GetAuthzPlugins() - for i, authPlugin := range authzList { - if authPlugin.Name() == p.Name() { - // Remove plugin from authzmiddleware chain - authzList = append(authzList[:i], authzList[i+1:]...) 
- pm.config.AuthzMiddleware.SetAuthzPlugins(authzList) - } - } + pm.config.AuthzMiddleware.RemovePlugin(p.Name()) } } if err := pm.disable(p, c); err != nil { return err } + pm.publisher.Publish(EventDisable{Plugin: p.PluginObj}) pm.config.LogPluginEvent(p.GetID(), refOrID, "disable") return nil } @@ -89,6 +83,7 @@ func (pm *Manager) Enable(refOrID string, config *types.PluginEnableConfig) erro if err := pm.enable(p, c, false); err != nil { return err } + pm.publisher.Publish(EventEnable{Plugin: p.PluginObj}) pm.config.LogPluginEvent(p.GetID(), refOrID, "enable") return nil } @@ -152,7 +147,7 @@ func (s *tempConfigStore) Get(d digest.Digest) ([]byte, error) { return s.config, nil } -func (s *tempConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { +func (s *tempConfigStore) RootFSAndPlatformFromConfig(c []byte) (*image.RootFS, layer.Platform, error) { return configToRootFS(c) } @@ -262,11 +257,9 @@ func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string defer pm.muGC.RUnlock() // revalidate because Pull is public - nameref, err := reference.ParseNormalizedNamed(name) - if err != nil { + if _, err := reference.ParseNormalizedNamed(name); err != nil { return errors.Wrapf(err, "failed to parse %q", name) } - name = reference.FamiliarString(reference.TagNameOnly(nameref)) tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs") if err != nil { @@ -305,7 +298,7 @@ func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string } // Pull pulls a plugin, check if the correct privileges are provided and install the plugin. 
-func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) (err error) { +func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer, opts ...CreateOpt) (err error) { pm.muGC.RLock() defer pm.muGC.RUnlock() @@ -349,12 +342,19 @@ func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, m return err } - p, err := pm.createPlugin(name, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges) + refOpt := func(p *v2.Plugin) { + p.PluginObj.PluginReference = ref.String() + } + optsList := make([]CreateOpt, 0, len(opts)+1) + optsList = append(optsList, opts...) + optsList = append(optsList, refOpt) + + p, err := pm.createPlugin(name, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges, optsList...) if err != nil { return err } - p.PluginObj.PluginReference = ref.String() + pm.publisher.Publish(EventCreate{Plugin: p.PluginObj}) return nil } @@ -534,7 +534,7 @@ func (s *pluginConfigStore) Get(d digest.Digest) ([]byte, error) { return ioutil.ReadAll(rwc) } -func (s *pluginConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { +func (s *pluginConfigStore) RootFSAndPlatformFromConfig(c []byte) (*image.RootFS, layer.Platform, error) { return configToRootFS(c) } @@ -633,15 +633,23 @@ func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error { }() id := p.GetID() - pm.config.Store.Remove(p) pluginDir := filepath.Join(pm.config.Root, id) - if err := recursiveUnmount(pm.config.Root); err != nil { - logrus.WithField("dir", pm.config.Root).WithField("id", id).Warn(err) + + if err := mount.RecursiveUnmount(pluginDir); err != nil { + return errors.Wrap(err, "error unmounting plugin data") } - if err := os.RemoveAll(pluginDir); err != nil { - logrus.Warnf("unable to remove %q from 
plugin remove: %v", pluginDir, err) + + removeDir := pluginDir + "-removing" + if err := os.Rename(pluginDir, removeDir); err != nil { + return errors.Wrap(err, "error performing atomic remove of plugin dir") } + + if err := system.EnsureRemoveAll(removeDir); err != nil { + return errors.Wrap(err, "error removing plugin dir") + } + pm.config.Store.Remove(p) pm.config.LogPluginEvent(id, name, "remove") + pm.publisher.Publish(EventRemove{Plugin: p.PluginObj}) return nil } @@ -661,27 +669,6 @@ func getMounts(root string) ([]string, error) { return mounts, nil } -func recursiveUnmount(root string) error { - mounts, err := getMounts(root) - if err != nil { - return err - } - - // sort in reverse-lexicographic order so the root mount will always be last - sort.Sort(sort.Reverse(sort.StringSlice(mounts))) - - for i, m := range mounts { - if err := mount.Unmount(m); err != nil { - if i == len(mounts)-1 { - return errors.Wrapf(err, "error performing recursive unmount on %s", root) - } - logrus.WithError(err).WithField("mountpoint", m).Warn("could not unmount") - } - } - - return nil -} - // Set sets plugin args func (pm *Manager) Set(name string, args []string) error { p, err := pm.config.Store.GetV2Plugin(name) @@ -794,6 +781,7 @@ func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, } p.PluginObj.PluginReference = name + pm.publisher.Publish(EventCreate{Plugin: p.PluginObj}) pm.config.LogPluginEvent(p.PluginObj.ID, name, "create") return nil diff --git a/fn/vendor/github.com/docker/docker/plugin/backend_unsupported.go b/fn/vendor/github.com/docker/docker/plugin/backend_unsupported.go index 2d4850eeb..e69bb883d 100644 --- a/fn/vendor/github.com/docker/docker/plugin/backend_unsupported.go +++ b/fn/vendor/github.com/docker/docker/plugin/backend_unsupported.go @@ -36,7 +36,7 @@ func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHead } // Pull pulls a plugin, check if the correct privileges are provided and install the plugin. 
-func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, out io.Writer) error { +func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, out io.Writer, opts ...CreateOpt) error { return errNotSupported } diff --git a/fn/vendor/github.com/docker/docker/plugin/blobstore.go b/fn/vendor/github.com/docker/docker/plugin/blobstore.go index b407884cc..2b79a4427 100644 --- a/fn/vendor/github.com/docker/docker/plugin/blobstore.go +++ b/fn/vendor/github.com/docker/docker/plugin/blobstore.go @@ -12,6 +12,7 @@ import ( "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/progress" "github.com/opencontainers/go-digest" "github.com/pkg/errors" @@ -125,7 +126,8 @@ type downloadManager struct { configDigest digest.Digest } -func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { +func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, platform layer.Platform, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { + // TODO @jhowardmsft LCOW: May need revisiting. 
for _, l := range layers { b, err := dm.blobStore.New() if err != nil { @@ -143,7 +145,7 @@ func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.Roo return initialRootFS, nil, err } digester := digest.Canonical.Digester() - if _, err := archive.ApplyLayer(dm.tmpDir, io.TeeReader(inflatedLayerData, digester.Hash())); err != nil { + if _, err := chrootarchive.ApplyLayer(dm.tmpDir, io.TeeReader(inflatedLayerData, digester.Hash())); err != nil { return initialRootFS, nil, err } initialRootFS.Append(layer.DiffID(digester.Digest())) @@ -177,6 +179,6 @@ func (dm *downloadManager) Put(dt []byte) (digest.Digest, error) { func (dm *downloadManager) Get(d digest.Digest) ([]byte, error) { return nil, fmt.Errorf("digest not found") } -func (dm *downloadManager) RootFSFromConfig(c []byte) (*image.RootFS, error) { +func (dm *downloadManager) RootFSAndPlatformFromConfig(c []byte) (*image.RootFS, layer.Platform, error) { return configToRootFS(c) } diff --git a/fn/vendor/github.com/docker/docker/plugin/defs.go b/fn/vendor/github.com/docker/docker/plugin/defs.go index cf44c97ec..3e930de04 100644 --- a/fn/vendor/github.com/docker/docker/plugin/defs.go +++ b/fn/vendor/github.com/docker/docker/plugin/defs.go @@ -24,3 +24,14 @@ func NewStore() *Store { handlers: make(map[string][]func(string, *plugins.Client)), } } + +// CreateOpt is used to configure specific plugin details when created +type CreateOpt func(p *v2.Plugin) + +// WithSwarmService is a CreateOpt that flags the passed in a plugin as a plugin +// managed by swarm +func WithSwarmService(id string) CreateOpt { + return func(p *v2.Plugin) { + p.SwarmServiceID = id + } +} diff --git a/fn/vendor/github.com/docker/docker/plugin/events.go b/fn/vendor/github.com/docker/docker/plugin/events.go new file mode 100644 index 000000000..92e603850 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/plugin/events.go @@ -0,0 +1,111 @@ +package plugin + +import ( + "fmt" + "reflect" + + 
"github.com/docker/docker/api/types" +) + +// Event is emitted for actions performed on the plugin manager +type Event interface { + matches(Event) bool +} + +// EventCreate is an event which is emitted when a plugin is created +// This is either by pull or create from context. +// +// Use the `Interfaces` field to match only plugins that implement a specific +// interface. +// These are matched against using "or" logic. +// If no interfaces are listed, all are matched. +type EventCreate struct { + Interfaces map[string]bool + Plugin types.Plugin +} + +func (e EventCreate) matches(observed Event) bool { + oe, ok := observed.(EventCreate) + if !ok { + return false + } + if len(e.Interfaces) == 0 { + return true + } + + var ifaceMatch bool + for _, in := range oe.Plugin.Config.Interface.Types { + if e.Interfaces[in.Capability] { + ifaceMatch = true + break + } + } + return ifaceMatch +} + +// EventRemove is an event which is emitted when a plugin is removed +// It maches on the passed in plugin's ID only. +type EventRemove struct { + Plugin types.Plugin +} + +func (e EventRemove) matches(observed Event) bool { + oe, ok := observed.(EventRemove) + if !ok { + return false + } + return e.Plugin.ID == oe.Plugin.ID +} + +// EventDisable is an event that is emitted when a plugin is disabled +// It maches on the passed in plugin's ID only. +type EventDisable struct { + Plugin types.Plugin +} + +func (e EventDisable) matches(observed Event) bool { + oe, ok := observed.(EventDisable) + if !ok { + return false + } + return e.Plugin.ID == oe.Plugin.ID +} + +// EventEnable is an event that is emitted when a plugin is disabled +// It maches on the passed in plugin's ID only. 
+type EventEnable struct { + Plugin types.Plugin +} + +func (e EventEnable) matches(observed Event) bool { + oe, ok := observed.(EventEnable) + if !ok { + return false + } + return e.Plugin.ID == oe.Plugin.ID +} + +// SubscribeEvents provides an event channel to listen for structured events from +// the plugin manager actions, CRUD operations. +// The caller must call the returned `cancel()` function once done with the channel +// or this will leak resources. +func (pm *Manager) SubscribeEvents(buffer int, watchEvents ...Event) (eventCh <-chan interface{}, cancel func()) { + topic := func(i interface{}) bool { + observed, ok := i.(Event) + if !ok { + panic(fmt.Sprintf("unexpected type passed to event channel: %v", reflect.TypeOf(i))) + } + for _, e := range watchEvents { + if e.matches(observed) { + return true + } + } + // If no specific events are specified always assume a matched event + // If some events were specified and none matched above, then the event + // doesn't match + return watchEvents == nil + } + ch := pm.publisher.SubscribeTopicWithBuffer(topic, buffer) + cancelFunc := func() { pm.publisher.Evict(ch) } + return ch, cancelFunc +} diff --git a/fn/vendor/github.com/docker/docker/plugin/manager.go b/fn/vendor/github.com/docker/docker/plugin/manager.go index f1c5788a9..fada0d667 100644 --- a/fn/vendor/github.com/docker/docker/plugin/manager.go +++ b/fn/vendor/github.com/docker/docker/plugin/manager.go @@ -8,6 +8,7 @@ import ( "path/filepath" "reflect" "regexp" + "runtime" "sort" "strings" "sync" @@ -21,6 +22,8 @@ import ( "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/pubsub" + "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin/v2" "github.com/docker/docker/registry" "github.com/opencontainers/go-digest" @@ -61,6 +64,7 @@ type Manager struct { cMap map[*v2.Plugin]*controller containerdClient libcontainerd.Client blobStore 
*basicBlobStore + publisher *pubsub.Publisher } // controller represents the manager's control on a plugin. @@ -115,6 +119,8 @@ func NewManager(config ManagerConfig) (*Manager, error) { if err := manager.reload(); err != nil { return nil, errors.Wrap(err, "failed to restore plugins") } + + manager.publisher = pubsub.NewPublisher(0, 0) return manager, nil } @@ -161,6 +167,19 @@ func (pm *Manager) StateChanged(id string, e libcontainerd.StateInfo) error { return nil } +func handleLoadError(err error, id string) { + if err == nil { + return + } + logger := logrus.WithError(err).WithField("id", id) + if os.IsNotExist(errors.Cause(err)) { + // Likely some error while removing on an older version of docker + logger.Warn("missing plugin config, skipping: this may be caused due to a failed remove and requires manual cleanup.") + return + } + logger.Error("error loading plugin, skipping") +} + func (pm *Manager) reload() error { // todo: restore dir, err := ioutil.ReadDir(pm.config.Root) if err != nil { @@ -171,9 +190,17 @@ func (pm *Manager) reload() error { // todo: restore if validFullID.MatchString(v.Name()) { p, err := pm.loadPlugin(v.Name()) if err != nil { - return err + handleLoadError(err, v.Name()) + continue } plugins[p.GetID()] = p + } else { + if validFullID.MatchString(strings.TrimSuffix(v.Name(), "-removing")) { + // There was likely some error while removing this plugin, let's try to remove again here + if err := system.EnsureRemoveAll(v.Name()); err != nil { + logrus.WithError(err).WithField("id", v.Name()).Warn("error while attempting to clean up previously removed plugin") + } + } } } @@ -245,6 +272,11 @@ func (pm *Manager) reload() error { // todo: restore return nil } +// Get looks up the requested plugin in the store. 
+func (pm *Manager) Get(idOrName string) (*v2.Plugin, error) { + return pm.config.Store.GetV2Plugin(idOrName) +} + func (pm *Manager) loadPlugin(id string) (*v2.Plugin, error) { p := filepath.Join(pm.config.Root, id, configFileName) dt, err := ioutil.ReadFile(p) @@ -269,7 +301,7 @@ func (pm *Manager) save(p *v2.Plugin) error { return nil } -// GC cleans up unrefrenced blobs. This is recommended to run in a goroutine +// GC cleans up unreferenced blobs. This is recommended to run in a goroutine func (pm *Manager) GC() { pm.muGC.Lock() defer pm.muGC.Unlock() @@ -348,17 +380,22 @@ func isEqualPrivilege(a, b types.PluginPrivilege) bool { return reflect.DeepEqual(a.Value, b.Value) } -func configToRootFS(c []byte) (*image.RootFS, error) { +func configToRootFS(c []byte) (*image.RootFS, layer.Platform, error) { + // TODO @jhowardmsft LCOW - Will need to revisit this. For now, calculate the platform. + platform := layer.Platform(runtime.GOOS) + if system.LCOWSupported() { + platform = "linux" + } var pluginConfig types.PluginConfig if err := json.Unmarshal(c, &pluginConfig); err != nil { - return nil, err + return nil, "", err } // validation for empty rootfs is in distribution code if pluginConfig.Rootfs == nil { - return nil, nil + return nil, platform, nil } - return rootFSFromPlugin(pluginConfig.Rootfs), nil + return rootFSFromPlugin(pluginConfig.Rootfs), platform, nil } func rootFSFromPlugin(pluginfs *types.PluginConfigRootfs) *image.RootFS { diff --git a/fn/vendor/github.com/docker/docker/plugin/manager_linux.go b/fn/vendor/github.com/docker/docker/plugin/manager_linux.go index 80fc04162..7f79e6900 100644 --- a/fn/vendor/github.com/docker/docker/plugin/manager_linux.go +++ b/fn/vendor/github.com/docker/docker/plugin/manager_linux.go @@ -15,6 +15,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/daemon/initlayer" "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" 
"github.com/docker/docker/pkg/plugins" "github.com/docker/docker/pkg/stringid" @@ -58,7 +59,7 @@ func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { } } - if err := initlayer.Setup(filepath.Join(pm.config.Root, p.PluginObj.ID, rootFSFileName), 0, 0); err != nil { + if err := initlayer.Setup(filepath.Join(pm.config.Root, p.PluginObj.ID, rootFSFileName), idtools.IDPair{0, 0}); err != nil { return errors.WithStack(err) } @@ -79,7 +80,7 @@ func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { func (pm *Manager) pluginPostStart(p *v2.Plugin, c *controller) error { sockAddr := filepath.Join(pm.config.ExecRoot, p.GetID(), p.GetSocket()) - client, err := plugins.NewClientWithTimeout("unix://"+sockAddr, nil, c.timeoutInSecs) + client, err := plugins.NewClientWithTimeout("unix://"+sockAddr, nil, time.Duration(c.timeoutInSecs)*time.Second) if err != nil { c.restart = false shutdownPlugin(p, c, pm.containerdClient) @@ -203,7 +204,7 @@ func (pm *Manager) upgradePlugin(p *v2.Plugin, configDigest digest.Digest, blobs // Make sure nothing is mounted // This could happen if the plugin was disabled with `-f` with active mounts. // If there is anything in `orig` is still mounted, this should error out. - if err := recursiveUnmount(orig); err != nil { + if err := mount.RecursiveUnmount(orig); err != nil { return err } @@ -273,7 +274,7 @@ func (pm *Manager) setupNewPlugin(configDigest digest.Digest, blobsums []digest. } // createPlugin creates a new plugin. take lock before calling. 
-func (pm *Manager) createPlugin(name string, configDigest digest.Digest, blobsums []digest.Digest, rootFSDir string, privileges *types.PluginPrivileges) (p *v2.Plugin, err error) { +func (pm *Manager) createPlugin(name string, configDigest digest.Digest, blobsums []digest.Digest, rootFSDir string, privileges *types.PluginPrivileges, opts ...CreateOpt) (p *v2.Plugin, err error) { if err := pm.config.Store.validateName(name); err != nil { // todo: this check is wrong. remove store return nil, err } @@ -293,6 +294,9 @@ func (pm *Manager) createPlugin(name string, configDigest digest.Digest, blobsum Blobsums: blobsums, } p.InitEmptySettings() + for _, o := range opts { + o(p) + } pdir := filepath.Join(pm.config.Root, p.PluginObj.ID) if err := os.MkdirAll(pdir, 0700); err != nil { diff --git a/fn/vendor/github.com/docker/docker/plugin/store.go b/fn/vendor/github.com/docker/docker/plugin/store.go index 244522e10..7f6e954bf 100644 --- a/fn/vendor/github.com/docker/docker/plugin/store.go +++ b/fn/vendor/github.com/docker/docker/plugin/store.go @@ -29,13 +29,20 @@ type ErrNotFound string func (name ErrNotFound) Error() string { return fmt.Sprintf("plugin %q not found", string(name)) } -// ErrAmbiguous indicates that a plugin was not found locally. +// ErrAmbiguous indicates that more than one plugin was found type ErrAmbiguous string func (name ErrAmbiguous) Error() string { return fmt.Sprintf("multiple plugins found for %q", string(name)) } +// ErrDisabled indicates that a plugin was found but it is disabled +type ErrDisabled string + +func (name ErrDisabled) Error() string { + return fmt.Sprintf("plugin %s found but disabled", string(name)) +} + // GetV2Plugin retrieves a plugin by name, id or partial ID. 
func (ps *Store) GetV2Plugin(refOrID string) (*v2.Plugin, error) { ps.RLock() @@ -138,7 +145,7 @@ func (ps *Store) Get(name, capability string, mode int) (plugingetter.CompatPlug } // Plugin was found but it is disabled, so we should not fall back to legacy plugins // but we should error out right away - return nil, ErrNotFound(name) + return nil, ErrDisabled(name) } if _, ok := errors.Cause(err).(ErrNotFound); !ok { return nil, err diff --git a/fn/vendor/github.com/docker/docker/plugin/v2/plugin.go b/fn/vendor/github.com/docker/docker/plugin/v2/plugin.go index 74ff64080..b77536c98 100644 --- a/fn/vendor/github.com/docker/docker/plugin/v2/plugin.go +++ b/fn/vendor/github.com/docker/docker/plugin/v2/plugin.go @@ -22,6 +22,8 @@ type Plugin struct { Config digest.Digest Blobsums []digest.Digest + + SwarmServiceID string } const defaultPluginRuntimeDestination = "/run/docker/plugins" diff --git a/fn/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go b/fn/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go index 6da63b3b6..9cae180e3 100644 --- a/fn/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go +++ b/fn/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go @@ -5,6 +5,7 @@ package v2 import ( "os" "path/filepath" + "runtime" "strings" "github.com/docker/docker/api/types" @@ -42,7 +43,7 @@ func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { if p.PluginObj.Config.Network.Type != "" { // TODO: if net == bridge, use libnetwork controller to create a new plugin-specific bridge, bind mount /etc/hosts and /etc/resolv.conf look at the docker code (allocateNetwork, initialize) if p.PluginObj.Config.Network.Type == "host" { - oci.RemoveNamespace(&s, specs.NamespaceType("network")) + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("network")) } etcHosts := "/etc/hosts" resolvConf := "/etc/resolv.conf" @@ -61,11 +62,11 @@ func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { }) } if p.PluginObj.Config.PidHost { - 
oci.RemoveNamespace(&s, specs.NamespaceType("pid")) + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("pid")) } if p.PluginObj.Config.IpcHost { - oci.RemoveNamespace(&s, specs.NamespaceType("ipc")) + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("ipc")) } for _, mnt := range mounts { @@ -95,8 +96,7 @@ func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { } if p.PluginObj.Config.Linux.AllowAllDevices { - rwm := "rwm" - s.Linux.Resources.Devices = []specs.DeviceCgroup{{Allow: true, Access: &rwm}} + s.Linux.Resources.Devices = []specs.LinuxDeviceCgroup{{Allow: true, Access: "rwm"}} } for _, dev := range p.PluginObj.Settings.Devices { path := *dev.Path @@ -109,7 +109,7 @@ func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { } envs := make([]string, 1, len(p.PluginObj.Settings.Env)+1) - envs[0] = "PATH=" + system.DefaultPathEnv + envs[0] = "PATH=" + system.DefaultPathEnv(runtime.GOOS) envs = append(envs, p.PluginObj.Settings.Env...) args := append(p.PluginObj.Config.Entrypoint, p.PluginObj.Settings.Args...) @@ -122,7 +122,11 @@ func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { s.Process.Cwd = cwd s.Process.Env = envs - s.Process.Capabilities = append(s.Process.Capabilities, p.PluginObj.Config.Linux.Capabilities...) + caps := s.Process.Capabilities + caps.Bounding = append(caps.Bounding, p.PluginObj.Config.Linux.Capabilities...) + caps.Permitted = append(caps.Permitted, p.PluginObj.Config.Linux.Capabilities...) + caps.Inheritable = append(caps.Inheritable, p.PluginObj.Config.Linux.Capabilities...) + caps.Effective = append(caps.Effective, p.PluginObj.Config.Linux.Capabilities...) 
return &s, nil } diff --git a/fn/vendor/github.com/docker/docker/plugin/v2/settable_test.go b/fn/vendor/github.com/docker/docker/plugin/v2/settable_test.go index 7183f3a67..1094c472b 100644 --- a/fn/vendor/github.com/docker/docker/plugin/v2/settable_test.go +++ b/fn/vendor/github.com/docker/docker/plugin/v2/settable_test.go @@ -68,7 +68,7 @@ func TestIsSettable(t *testing.T) { } } -func TestUpdateSettinsEnv(t *testing.T) { +func TestUpdateSettingsEnv(t *testing.T) { contexts := []struct { env []string set settable diff --git a/fn/vendor/github.com/docker/docker/poule.yml b/fn/vendor/github.com/docker/docker/poule.yml index 07222931b..2abf0df7f 100644 --- a/fn/vendor/github.com/docker/docker/poule.yml +++ b/fn/vendor/github.com/docker/docker/poule.yml @@ -35,12 +35,6 @@ } - type: version-label -# When a pull request is closed, attach it to the currently active milestone. -- triggers: - pull_request: [ closed ] - operations: - - type: version-milestone - # Labeling a PR with `rebuild/` triggers a rebuild job for the associated # configuration. The label is automatically removed after the rebuild is initiated. 
There's no such # thing as "templating" in this configuration, so we need one operation for each type of @@ -114,7 +108,6 @@ "aaronlehmann", "akihirosuda", "aluzzardi", - "anusha-ragunathan", "coolljt0725", "cpuguy83", "crosbymichael", diff --git a/fn/vendor/github.com/docker/docker/profiles/seccomp/default.json b/fn/vendor/github.com/docker/docker/profiles/seccomp/default.json index 01ff1a4c5..b71a8718a 100755 --- a/fn/vendor/github.com/docker/docker/profiles/seccomp/default.json +++ b/fn/vendor/github.com/docker/docker/profiles/seccomp/default.json @@ -55,6 +55,7 @@ "accept", "accept4", "access", + "adjtimex", "alarm", "alarm", "bind", @@ -314,6 +315,8 @@ "signalfd", "signalfd4", "sigreturn", + "socket", + "socketcall", "socketpair", "splice", "stat", @@ -398,6 +401,40 @@ "includes": {}, "excludes": {} }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 131072, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 131080, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, { "names": [ "personality" @@ -415,223 +452,6 @@ "includes": {}, "excludes": {} }, - { - "names": [ - "socket" - ], - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 1, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - } - ], - "comment": "", - "includes": {}, - "excludes": {} - }, - { - "names": [ - "socket" - ], - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 2, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - } - ], - "comment": "", - "includes": {}, - "excludes": {} - }, - { - "names": [ - "socket" - ], - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 10, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - } - ], - "comment": "", - "includes": {}, - "excludes": {} - }, - { - "names": [ 
- "socket" - ], - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 16, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - } - ], - "comment": "", - "includes": {}, - "excludes": {} - }, - { - "names": [ - "socket" - ], - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 17, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - } - ], - "comment": "", - "includes": {}, - "excludes": {} - }, - { - "names": [ - "socketcall" - ], - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 1, - "valueTwo": 0, - "op": "SCMP_CMP_GT" - } - ], - "comment": "", - "includes": {}, - "excludes": {} - }, - { - "names": [ - "socketcall" - ], - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 1, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - }, - { - "index": 1, - "value": 1, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - } - ], - "comment": "", - "includes": {}, - "excludes": {} - }, - { - "names": [ - "socketcall" - ], - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 1, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - }, - { - "index": 1, - "value": 2, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - } - ], - "comment": "", - "includes": {}, - "excludes": {} - }, - { - "names": [ - "socketcall" - ], - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 1, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - }, - { - "index": 1, - "value": 10, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - } - ], - "comment": "", - "includes": {}, - "excludes": {} - }, - { - "names": [ - "socketcall" - ], - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 1, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - }, - { - "index": 1, - "value": 16, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - } - ], - "comment": "", - "includes": {}, - "excludes": {} - }, - { - "names": [ - "socketcall" - ], - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 1, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - }, - { - "index": 1, - "value": 17, - "valueTwo": 0, 
- "op": "SCMP_CMP_EQ" - } - ], - "comment": "", - "includes": {}, - "excludes": {} - }, { "names": [ "sync_file_range2" @@ -900,7 +720,6 @@ "names": [ "settimeofday", "stime", - "adjtimex", "clock_settime" ], "action": "SCMP_ACT_ALLOW", diff --git a/fn/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go b/fn/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go index da5ddc3e2..90a385948 100644 --- a/fn/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go +++ b/fn/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go @@ -16,12 +16,12 @@ import ( //go:generate go run -tags 'seccomp' generate.go // GetDefaultProfile returns the default seccomp profile. -func GetDefaultProfile(rs *specs.Spec) (*specs.Seccomp, error) { +func GetDefaultProfile(rs *specs.Spec) (*specs.LinuxSeccomp, error) { return setupSeccomp(DefaultProfile(), rs) } // LoadProfile takes a json string and decodes the seccomp profile. -func LoadProfile(body string, rs *specs.Spec) (*specs.Seccomp, error) { +func LoadProfile(body string, rs *specs.Spec) (*specs.LinuxSeccomp, error) { var config types.Seccomp if err := json.Unmarshal([]byte(body), &config); err != nil { return nil, fmt.Errorf("Decoding seccomp profile failed: %v", err) @@ -39,7 +39,7 @@ var nativeToSeccomp = map[string]types.Arch{ "s390x": types.ArchS390X, } -func setupSeccomp(config *types.Seccomp, rs *specs.Spec) (*specs.Seccomp, error) { +func setupSeccomp(config *types.Seccomp, rs *specs.Spec) (*specs.LinuxSeccomp, error) { if config == nil { return nil, nil } @@ -49,7 +49,7 @@ func setupSeccomp(config *types.Seccomp, rs *specs.Spec) (*specs.Seccomp, error) return nil, nil } - newConfig := &specs.Seccomp{} + newConfig := &specs.LinuxSeccomp{} var arch string var native, err = libseccomp.GetNativeArch() @@ -83,7 +83,7 @@ func setupSeccomp(config *types.Seccomp, rs *specs.Spec) (*specs.Seccomp, error) } } - newConfig.DefaultAction = specs.Action(config.DefaultAction) + newConfig.DefaultAction = 
specs.LinuxSeccompAction(config.DefaultAction) Loop: // Loop through all syscall blocks and convert them to libcontainer format after filtering them @@ -95,7 +95,7 @@ Loop: } if len(call.Excludes.Caps) > 0 { for _, c := range call.Excludes.Caps { - if stringutils.InSlice(rs.Process.Capabilities, c) { + if stringutils.InSlice(rs.Process.Capabilities.Effective, c) { continue Loop } } @@ -107,7 +107,7 @@ Loop: } if len(call.Includes.Caps) > 0 { for _, c := range call.Includes.Caps { - if !stringutils.InSlice(rs.Process.Capabilities, c) { + if !stringutils.InSlice(rs.Process.Capabilities.Effective, c) { continue Loop } } @@ -129,19 +129,19 @@ Loop: return newConfig, nil } -func createSpecsSyscall(name string, action types.Action, args []*types.Arg) specs.Syscall { - newCall := specs.Syscall{ - Name: name, - Action: specs.Action(action), +func createSpecsSyscall(name string, action types.Action, args []*types.Arg) specs.LinuxSyscall { + newCall := specs.LinuxSyscall{ + Names: []string{name}, + Action: specs.LinuxSeccompAction(action), } // Loop through all the arguments of the syscall and convert them for _, arg := range args { - newArg := specs.Arg{ + newArg := specs.LinuxSeccompArg{ Index: arg.Index, Value: arg.Value, ValueTwo: arg.ValueTwo, - Op: specs.Operator(arg.Op), + Op: specs.LinuxSeccompOperator(arg.Op), } newCall.Args = append(newCall.Args, newArg) diff --git a/fn/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go b/fn/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go index 42bc3baa4..833dcd5a8 100644 --- a/fn/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go +++ b/fn/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go @@ -49,6 +49,7 @@ func DefaultProfile() *types.Seccomp { "accept", "accept4", "access", + "adjtimex", "alarm", "alarm", "bind", @@ -308,6 +309,8 @@ func DefaultProfile() *types.Seccomp { "signalfd", "signalfd4", "sigreturn", + "socket", + "socketcall", "socketpair", "splice", 
"stat", @@ -377,6 +380,28 @@ func DefaultProfile() *types.Seccomp { }, }, }, + { + Names: []string{"personality"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x20000, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"personality"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x20008, + Op: types.OpEqualTo, + }, + }, + }, { Names: []string{"personality"}, Action: types.ActAllow, @@ -388,153 +413,6 @@ func DefaultProfile() *types.Seccomp { }, }, }, - { - Names: []string{"socket"}, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: syscall.AF_UNIX, - Op: types.OpEqualTo, - }, - }, - }, - { - Names: []string{"socket"}, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: syscall.AF_INET, - Op: types.OpEqualTo, - }, - }, - }, - { - Names: []string{"socket"}, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: syscall.AF_INET6, - Op: types.OpEqualTo, - }, - }, - }, - { - Names: []string{"socket"}, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: syscall.AF_NETLINK, - Op: types.OpEqualTo, - }, - }, - }, - { - Names: []string{"socket"}, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: syscall.AF_PACKET, - Op: types.OpEqualTo, - }, - }, - }, - // socketcall(1, ...) is equivalent to socket(...) 
on some architectures eg i386 - { - Names: []string{"socketcall"}, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: 1, - Op: types.OpGreaterThan, - }, - }, - }, - { - Names: []string{"socketcall"}, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: 1, - Op: types.OpEqualTo, - }, - { - Index: 1, - Value: syscall.AF_UNIX, - Op: types.OpEqualTo, - }, - }, - }, - { - Names: []string{"socketcall"}, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: 1, - Op: types.OpEqualTo, - }, - { - Index: 1, - Value: syscall.AF_INET, - Op: types.OpEqualTo, - }, - }, - }, - { - Names: []string{"socketcall"}, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: 1, - Op: types.OpEqualTo, - }, - { - Index: 1, - Value: syscall.AF_INET6, - Op: types.OpEqualTo, - }, - }, - }, - { - Names: []string{"socketcall"}, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: 1, - Op: types.OpEqualTo, - }, - { - Index: 1, - Value: syscall.AF_NETLINK, - Op: types.OpEqualTo, - }, - }, - }, - { - Names: []string{"socketcall"}, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: 1, - Op: types.OpEqualTo, - }, - { - Index: 1, - Value: syscall.AF_PACKET, - Op: types.OpEqualTo, - }, - }, - }, { Names: []string{ "sync_file_range2", @@ -734,7 +612,6 @@ func DefaultProfile() *types.Seccomp { Names: []string{ "settimeofday", "stime", - "adjtimex", "clock_settime", }, Action: types.ActAllow, diff --git a/fn/vendor/github.com/docker/docker/project/RELEASE-CHECKLIST.md b/fn/vendor/github.com/docker/docker/project/RELEASE-CHECKLIST.md index 84848cae2..5c73b5826 100644 --- a/fn/vendor/github.com/docker/docker/project/RELEASE-CHECKLIST.md +++ b/fn/vendor/github.com/docker/docker/project/RELEASE-CHECKLIST.md @@ -312,16 +312,17 @@ echo "Linux 64bit tgz: https://test.docker.com/builds/Linux/x86_64/docker-${VERS echo "Windows 64bit client binary: 
https://test.docker.com/builds/Windows/x86_64/docker-${VERSION#v}.exe" echo "Windows 32bit client binary: https://test.docker.com/builds/Windows/i386/docker-${VERSION#v}.exe" ``` +### 13. Announce the release candidate -We recommend announcing the release candidate on: +The release candidate should be announced on: - IRC on #docker, #docker-dev, #docker-maintainers - In a comment on the pull request to notify subscribed people on GitHub - The [docker-dev](https://groups.google.com/forum/#!forum/docker-dev) group - The [docker-maintainers](https://groups.google.com/a/dockerproject.org/forum/#!forum/maintainers) group -- Any social media that can bring some attention to the release candidate +- (Optional) Any social media that can bring some attention to the release candidate -### 13. Iterate on successive release candidates +### 14. Iterate on successive release candidates Spend several days along with the community explicitly investing time and resources to try and break Docker in every possible way, documenting any @@ -368,10 +369,10 @@ git commit --amend git push -f $GITHUBUSER bump_$VERSION ``` -Repeat step 6 to tag the code, publish new binaries, announce availability, and +Repeat steps 6 to 14 to tag the code, publish new binaries, announce availability, and get help testing. -### 14. Finalize the bump branch +### 15. Finalize the bump branch When you're happy with the quality of a release candidate, you can move on and create the real thing. @@ -387,9 +388,9 @@ git commit --amend You will then repeat step 6 to publish the binaries to test -### 15. Get 2 other maintainers to validate the pull request +### 16. Get 2 other maintainers to validate the pull request -### 16. Build final rpms and debs +### 17. Build final rpms and debs ```bash docker build -t docker . @@ -400,7 +401,7 @@ docker run \ hack/make.sh binary build-deb build-rpm ``` -### 17. Publish final rpms and debs +### 18. 
Publish final rpms and debs With the rpms and debs you built from the last step you can release them on the same server, or ideally, move them to a dedicated release box via scp into @@ -424,14 +425,14 @@ docker run --rm -it --privileged \ hack/make.sh release-deb release-rpm sign-repos generate-index-listing ``` -### 18. Upload the changed repos to wherever you host +### 19. Upload the changed repos to wherever you host For example, above we bind mounted `/volumes/repos` as the storage for `DOCKER_RELEASE_DIR`. In this case `/volumes/repos/apt` can be synced with a specific s3 bucket for the apt repo and `/volumes/repos/yum` can be synced with a s3 bucket for the yum repo. -### 19. Publish final binaries +### 20. Publish final binaries Once they're tested and reasonably believed to be working, run against get.docker.com: @@ -449,9 +450,9 @@ docker run \ hack/release.sh ``` -### 20. Purge the cache! +### 21. Purge the cache! -### 21. Apply tag and create release +### 22. Apply tag and create release It's very important that we don't make the tag until after the official release is uploaded to get.docker.com! @@ -470,12 +471,12 @@ You can see examples in this two links: https://github.com/docker/docker/releases/tag/v1.8.0 https://github.com/docker/docker/releases/tag/v1.8.0-rc3 -### 22. Go to github to merge the `bump_$VERSION` branch into release +### 23. Go to github to merge the `bump_$VERSION` branch into release Don't forget to push that pretty blue button to delete the leftover branch afterwards! -### 23. Update the docs branch +### 24. Update the docs branch You will need to point the docs branch to the newly created release tag: @@ -494,7 +495,7 @@ distributed CDN system) is flushed. The `make docs-release` command will do this _if_ the `DISTRIBUTION_ID` is set correctly - this will take at least 15 minutes to run and you can check its progress with the CDN Cloudfront Chrome addon. -### 24. 
Create a new pull request to merge your bump commit back into master +### 25. Create a new pull request to merge your bump commit back into master ```bash git checkout master @@ -508,7 +509,7 @@ echo "https://github.com/$GITHUBUSER/docker/compare/docker:master...$GITHUBUSER: Again, get two maintainers to validate, then merge, then push that pretty blue button to delete your branch. -### 25. Rejoice and Evangelize! +### 26. Rejoice and Evangelize! Congratulations! You're done. diff --git a/fn/vendor/github.com/docker/docker/reference/store.go b/fn/vendor/github.com/docker/docker/reference/store.go index 8466e6e2c..5b68c437c 100644 --- a/fn/vendor/github.com/docker/docker/reference/store.go +++ b/fn/vendor/github.com/docker/docker/reference/store.go @@ -46,6 +46,9 @@ type store struct { // referencesByIDCache is a cache of references indexed by ID, to speed // up References. referencesByIDCache map[digest.Digest]map[string]reference.Named + // platform is the container target platform for this store (which may be + // different to the host operating system + platform string } // Repository maps tags to digests. The key is a stringified Reference, @@ -70,7 +73,7 @@ func (a lexicalAssociations) Less(i, j int) bool { // NewReferenceStore creates a new reference store, tied to a file path where // the set of references are serialized in JSON format. -func NewReferenceStore(jsonPath string) (Store, error) { +func NewReferenceStore(jsonPath, platform string) (Store, error) { abspath, err := filepath.Abs(jsonPath) if err != nil { return nil, err @@ -80,6 +83,7 @@ func NewReferenceStore(jsonPath string) (Store, error) { jsonPath: abspath, Repositories: make(map[string]repository), referencesByIDCache: make(map[digest.Digest]map[string]reference.Named), + platform: platform, } // Load the json file if it exists, otherwise create it. 
if err := store.reload(); os.IsNotExist(err) { @@ -217,7 +221,7 @@ func (store *store) Delete(ref reference.Named) (bool, error) { func (store *store) Get(ref reference.Named) (digest.Digest, error) { if canonical, ok := ref.(reference.Canonical); ok { // If reference contains both tag and digest, only - // lookup by digest as it takes precendent over + // lookup by digest as it takes precedence over // tag, until tag/digest combos are stored. if _, ok := ref.(reference.Tagged); ok { var err error diff --git a/fn/vendor/github.com/docker/docker/reference/store_test.go b/fn/vendor/github.com/docker/docker/reference/store_test.go index 8f0ff6304..2c796e76f 100644 --- a/fn/vendor/github.com/docker/docker/reference/store_test.go +++ b/fn/vendor/github.com/docker/docker/reference/store_test.go @@ -5,6 +5,7 @@ import ( "io/ioutil" "os" "path/filepath" + "runtime" "strings" "testing" @@ -40,7 +41,7 @@ func TestLoad(t *testing.T) { } jsonFile.Close() - store, err := NewReferenceStore(jsonFile.Name()) + store, err := NewReferenceStore(jsonFile.Name(), runtime.GOOS) if err != nil { t.Fatalf("error creating tag store: %v", err) } @@ -69,7 +70,7 @@ func TestSave(t *testing.T) { jsonFile.Close() defer os.RemoveAll(jsonFile.Name()) - store, err := NewReferenceStore(jsonFile.Name()) + store, err := NewReferenceStore(jsonFile.Name(), runtime.GOOS) if err != nil { t.Fatalf("error creating tag store: %v", err) } @@ -111,7 +112,7 @@ func TestAddDeleteGet(t *testing.T) { jsonFile.Close() defer os.RemoveAll(jsonFile.Name()) - store, err := NewReferenceStore(jsonFile.Name()) + store, err := NewReferenceStore(jsonFile.Name(), runtime.GOOS) if err != nil { t.Fatalf("error creating tag store: %v", err) } @@ -328,7 +329,7 @@ func TestInvalidTags(t *testing.T) { tmpDir, err := ioutil.TempDir("", "tag-store-test") defer os.RemoveAll(tmpDir) - store, err := NewReferenceStore(filepath.Join(tmpDir, "repositories.json")) + store, err := NewReferenceStore(filepath.Join(tmpDir, 
"repositories.json"), runtime.GOOS) if err != nil { t.Fatalf("error creating tag store: %v", err) } diff --git a/fn/vendor/github.com/docker/docker/registry/config.go b/fn/vendor/github.com/docker/docker/registry/config.go index 7b6703f57..182599e38 100644 --- a/fn/vendor/github.com/docker/docker/registry/config.go +++ b/fn/vendor/github.com/docker/docker/registry/config.go @@ -18,8 +18,9 @@ import ( // ServiceOptions holds command line options. type ServiceOptions struct { - Mirrors []string `json:"registry-mirrors,omitempty"` - InsecureRegistries []string `json:"insecure-registries,omitempty"` + AllowNondistributableArtifacts []string `json:"allow-nondistributable-artifacts,omitempty"` + Mirrors []string `json:"registry-mirrors,omitempty"` + InsecureRegistries []string `json:"insecure-registries,omitempty"` // V2Only controls access to legacy registries. If it is set to true via the // command line flag the daemon will not attempt to contact v1 legacy registries @@ -74,9 +75,11 @@ var lookupIP = net.LookupIP // InstallCliFlags adds command-line options to the top-level flag parser for // the current process. 
func (options *ServiceOptions) InstallCliFlags(flags *pflag.FlagSet) { + ana := opts.NewNamedListOptsRef("allow-nondistributable-artifacts", &options.AllowNondistributableArtifacts, ValidateIndexName) mirrors := opts.NewNamedListOptsRef("registry-mirrors", &options.Mirrors, ValidateMirror) insecureRegistries := opts.NewNamedListOptsRef("insecure-registries", &options.InsecureRegistries, ValidateIndexName) + flags.Var(ana, "allow-nondistributable-artifacts", "Allow push of nondistributable artifacts to registry") flags.Var(mirrors, "registry-mirror", "Preferred Docker registry mirror") flags.Var(insecureRegistries, "insecure-registry", "Enable insecure registry communication") @@ -95,12 +98,50 @@ func newServiceConfig(options ServiceOptions) *serviceConfig { V2Only: options.V2Only, } + config.LoadAllowNondistributableArtifacts(options.AllowNondistributableArtifacts) config.LoadMirrors(options.Mirrors) config.LoadInsecureRegistries(options.InsecureRegistries) return config } +// LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries into config. +func (config *serviceConfig) LoadAllowNondistributableArtifacts(registries []string) error { + cidrs := map[string]*registrytypes.NetIPNet{} + hostnames := map[string]bool{} + + for _, r := range registries { + if _, err := ValidateIndexName(r); err != nil { + return err + } + if validateNoScheme(r) != nil { + return fmt.Errorf("allow-nondistributable-artifacts registry %s should not contain '://'", r) + } + + if _, ipnet, err := net.ParseCIDR(r); err == nil { + // Valid CIDR. + cidrs[ipnet.String()] = (*registrytypes.NetIPNet)(ipnet) + } else if err := validateHostPort(r); err == nil { + // Must be `host:port` if not CIDR. 
+ hostnames[r] = true + } else { + return fmt.Errorf("allow-nondistributable-artifacts registry %s is not valid: %v", r, err) + } + } + + config.AllowNondistributableArtifactsCIDRs = make([]*(registrytypes.NetIPNet), 0) + for _, c := range cidrs { + config.AllowNondistributableArtifactsCIDRs = append(config.AllowNondistributableArtifactsCIDRs, c) + } + + config.AllowNondistributableArtifactsHostnames = make([]string, 0) + for h := range hostnames { + config.AllowNondistributableArtifactsHostnames = append(config.AllowNondistributableArtifactsHostnames, h) + } + + return nil +} + // LoadMirrors loads mirrors to config, after removing duplicates. // Returns an error if mirrors contains an invalid mirror. func (config *serviceConfig) LoadMirrors(mirrors []string) error { @@ -211,6 +252,25 @@ skip: return nil } +// allowNondistributableArtifacts returns true if the provided hostname is part of the list of registries +// that allow push of nondistributable artifacts. +// +// The list can contain elements with CIDR notation to specify a whole subnet. If the subnet contains an IP +// of the registry specified by hostname, true is returned. +// +// hostname should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name +// or an IP address. If it is a domain name, then it will be resolved to IP addresses for matching. If +// resolution fails, CIDR matching is not performed. +func allowNondistributableArtifacts(config *serviceConfig, hostname string) bool { + for _, h := range config.AllowNondistributableArtifactsHostnames { + if h == hostname { + return true + } + } + + return isCIDRMatch(config.AllowNondistributableArtifactsCIDRs, hostname) +} + // isSecureIndex returns false if the provided indexName is part of the list of insecure registries // Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. 
// @@ -229,10 +289,17 @@ func isSecureIndex(config *serviceConfig, indexName string) bool { return index.Secure } - host, _, err := net.SplitHostPort(indexName) + return !isCIDRMatch(config.InsecureRegistryCIDRs, indexName) +} + +// isCIDRMatch returns true if URLHost matches an element of cidrs. URLHost is a URL.Host (`host:port` or `host`) +// where the `host` part can be either a domain name or an IP address. If it is a domain name, then it will be +// resolved to IP addresses for matching. If resolution fails, false is returned. +func isCIDRMatch(cidrs []*registrytypes.NetIPNet, URLHost string) bool { + host, _, err := net.SplitHostPort(URLHost) if err != nil { - // assume indexName is of the form `host` without the port and go on. - host = indexName + // Assume URLHost is of the form `host` without the port and go on. + host = URLHost } addrs, err := lookupIP(host) @@ -249,15 +316,15 @@ func isSecureIndex(config *serviceConfig, indexName string) bool { // Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined. 
for _, addr := range addrs { - for _, ipnet := range config.InsecureRegistryCIDRs { + for _, ipnet := range cidrs { // check if the addr falls in the subnet if (*net.IPNet)(ipnet).Contains(addr) { - return false + return true } } } - return true + return false } // ValidateMirror validates an HTTP(S) registry mirror diff --git a/fn/vendor/github.com/docker/docker/registry/config_test.go b/fn/vendor/github.com/docker/docker/registry/config_test.go index b57e515b9..8cb7e5a54 100644 --- a/fn/vendor/github.com/docker/docker/registry/config_test.go +++ b/fn/vendor/github.com/docker/docker/registry/config_test.go @@ -1,10 +1,129 @@ package registry import ( + "reflect" + "sort" "strings" "testing" ) +func TestLoadAllowNondistributableArtifacts(t *testing.T) { + testCases := []struct { + registries []string + cidrStrs []string + hostnames []string + err string + }{ + { + registries: []string{"1.2.3.0/24"}, + cidrStrs: []string{"1.2.3.0/24"}, + }, + { + registries: []string{"2001:db8::/120"}, + cidrStrs: []string{"2001:db8::/120"}, + }, + { + registries: []string{"127.0.0.1"}, + hostnames: []string{"127.0.0.1"}, + }, + { + registries: []string{"127.0.0.1:8080"}, + hostnames: []string{"127.0.0.1:8080"}, + }, + { + registries: []string{"2001:db8::1"}, + hostnames: []string{"2001:db8::1"}, + }, + { + registries: []string{"[2001:db8::1]:80"}, + hostnames: []string{"[2001:db8::1]:80"}, + }, + { + registries: []string{"[2001:db8::1]:80"}, + hostnames: []string{"[2001:db8::1]:80"}, + }, + { + registries: []string{"1.2.3.0/24", "2001:db8::/120", "127.0.0.1", "127.0.0.1:8080"}, + cidrStrs: []string{"1.2.3.0/24", "2001:db8::/120"}, + hostnames: []string{"127.0.0.1", "127.0.0.1:8080"}, + }, + + { + registries: []string{"http://mytest.com"}, + err: "allow-nondistributable-artifacts registry http://mytest.com should not contain '://'", + }, + { + registries: []string{"https://mytest.com"}, + err: "allow-nondistributable-artifacts registry https://mytest.com should not contain '://'", + 
}, + { + registries: []string{"HTTP://mytest.com"}, + err: "allow-nondistributable-artifacts registry HTTP://mytest.com should not contain '://'", + }, + { + registries: []string{"svn://mytest.com"}, + err: "allow-nondistributable-artifacts registry svn://mytest.com should not contain '://'", + }, + { + registries: []string{"-invalid-registry"}, + err: "Cannot begin or end with a hyphen", + }, + { + registries: []string{`mytest-.com`}, + err: `allow-nondistributable-artifacts registry mytest-.com is not valid: invalid host "mytest-.com"`, + }, + { + registries: []string{`1200:0000:AB00:1234:0000:2552:7777:1313:8080`}, + err: `allow-nondistributable-artifacts registry 1200:0000:AB00:1234:0000:2552:7777:1313:8080 is not valid: invalid host "1200:0000:AB00:1234:0000:2552:7777:1313:8080"`, + }, + { + registries: []string{`mytest.com:500000`}, + err: `allow-nondistributable-artifacts registry mytest.com:500000 is not valid: invalid port "500000"`, + }, + { + registries: []string{`"mytest.com"`}, + err: `allow-nondistributable-artifacts registry "mytest.com" is not valid: invalid host "\"mytest.com\""`, + }, + { + registries: []string{`"mytest.com:5000"`}, + err: `allow-nondistributable-artifacts registry "mytest.com:5000" is not valid: invalid host "\"mytest.com"`, + }, + } + for _, testCase := range testCases { + config := newServiceConfig(ServiceOptions{}) + err := config.LoadAllowNondistributableArtifacts(testCase.registries) + if testCase.err == "" { + if err != nil { + t.Fatalf("expect no error, got '%s'", err) + } + + cidrStrs := []string{} + for _, c := range config.AllowNondistributableArtifactsCIDRs { + cidrStrs = append(cidrStrs, c.String()) + } + + sort.Strings(testCase.cidrStrs) + sort.Strings(cidrStrs) + if (len(testCase.cidrStrs) > 0 || len(cidrStrs) > 0) && !reflect.DeepEqual(testCase.cidrStrs, cidrStrs) { + t.Fatalf("expect AllowNondistributableArtifactsCIDRs to be '%+v', got '%+v'", testCase.cidrStrs, cidrStrs) + } + + sort.Strings(testCase.hostnames) + 
sort.Strings(config.AllowNondistributableArtifactsHostnames) + if (len(testCase.hostnames) > 0 || len(config.AllowNondistributableArtifactsHostnames) > 0) && !reflect.DeepEqual(testCase.hostnames, config.AllowNondistributableArtifactsHostnames) { + t.Fatalf("expect AllowNondistributableArtifactsHostnames to be '%+v', got '%+v'", testCase.hostnames, config.AllowNondistributableArtifactsHostnames) + } + } else { + if err == nil { + t.Fatalf("expect error '%s', got no error", testCase.err) + } + if !strings.Contains(err.Error(), testCase.err) { + t.Fatalf("expect error '%s', got '%s'", testCase.err, err) + } + } + } +} + func TestValidateMirror(t *testing.T) { valid := []string{ "http://mirror-1.com", diff --git a/fn/vendor/github.com/docker/docker/registry/config_unix.go b/fn/vendor/github.com/docker/docker/registry/config_unix.go index d692e8ef5..fdc39a1d6 100644 --- a/fn/vendor/github.com/docker/docker/registry/config_unix.go +++ b/fn/vendor/github.com/docker/docker/registry/config_unix.go @@ -21,5 +21,5 @@ func cleanPath(s string) string { // installCliPlatformFlags handles any platform specific flags for the service. 
func (options *ServiceOptions) installCliPlatformFlags(flags *pflag.FlagSet) { - flags.BoolVar(&options.V2Only, "disable-legacy-registry", false, "Disable contacting legacy registries") + flags.BoolVar(&options.V2Only, "disable-legacy-registry", true, "Disable contacting legacy registries") } diff --git a/fn/vendor/github.com/docker/docker/registry/endpoint_v1.go b/fn/vendor/github.com/docker/docker/registry/endpoint_v1.go index 6bcf8c935..c5ca961dd 100644 --- a/fn/vendor/github.com/docker/docker/registry/endpoint_v1.go +++ b/fn/vendor/github.com/docker/docker/registry/endpoint_v1.go @@ -175,7 +175,7 @@ func (e *V1Endpoint) Ping() (PingResult, error) { Standalone: true, } if err := json.Unmarshal(jsonString, &info); err != nil { - logrus.Debugf("Error unmarshalling the _ping PingResult: %s", err) + logrus.Debugf("Error unmarshaling the _ping PingResult: %s", err) // don't stop here. Just assume sane defaults } if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { diff --git a/fn/vendor/github.com/docker/docker/registry/registry_test.go b/fn/vendor/github.com/docker/docker/registry/registry_test.go index 1cbaaf4d4..d89c46c2c 100644 --- a/fn/vendor/github.com/docker/docker/registry/registry_test.go +++ b/fn/vendor/github.com/docker/docker/registry/registry_test.go @@ -811,6 +811,48 @@ func TestAddRequiredHeadersToRedirectedRequests(t *testing.T) { } } +func TestAllowNondistributableArtifacts(t *testing.T) { + tests := []struct { + addr string + registries []string + expected bool + }{ + {IndexName, nil, false}, + {"example.com", []string{}, false}, + {"example.com", []string{"example.com"}, true}, + {"localhost", []string{"localhost:5000"}, false}, + {"localhost:5000", []string{"localhost:5000"}, true}, + {"localhost", []string{"example.com"}, false}, + {"127.0.0.1:5000", []string{"127.0.0.1:5000"}, true}, + {"localhost", nil, false}, + {"localhost:5000", nil, false}, + {"127.0.0.1", nil, false}, + {"localhost", []string{"example.com"}, false}, + 
{"127.0.0.1", []string{"example.com"}, false}, + {"example.com", nil, false}, + {"example.com", []string{"example.com"}, true}, + {"127.0.0.1", []string{"example.com"}, false}, + {"127.0.0.1:5000", []string{"example.com"}, false}, + {"example.com:5000", []string{"42.42.0.0/16"}, true}, + {"example.com", []string{"42.42.0.0/16"}, true}, + {"example.com:5000", []string{"42.42.42.42/8"}, true}, + {"127.0.0.1:5000", []string{"127.0.0.0/8"}, true}, + {"42.42.42.42:5000", []string{"42.1.1.1/8"}, true}, + {"invalid.domain.com", []string{"42.42.0.0/16"}, false}, + {"invalid.domain.com", []string{"invalid.domain.com"}, true}, + {"invalid.domain.com:5000", []string{"invalid.domain.com"}, false}, + {"invalid.domain.com:5000", []string{"invalid.domain.com:5000"}, true}, + } + for _, tt := range tests { + config := newServiceConfig(ServiceOptions{ + AllowNondistributableArtifacts: tt.registries, + }) + if v := allowNondistributableArtifacts(config, tt.addr); v != tt.expected { + t.Errorf("allowNondistributableArtifacts failed for %q %v, expected %v got %v", tt.addr, tt.registries, tt.expected, v) + } + } +} + func TestIsSecureIndex(t *testing.T) { tests := []struct { addr string diff --git a/fn/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go b/fn/vendor/github.com/docker/docker/registry/resumable/resumablerequestreader.go similarity index 66% rename from fn/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go rename to fn/vendor/github.com/docker/docker/registry/resumable/resumablerequestreader.go index de488fb53..5403c7684 100644 --- a/fn/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go +++ b/fn/vendor/github.com/docker/docker/registry/resumable/resumablerequestreader.go @@ -1,4 +1,4 @@ -package httputils +package resumable import ( "fmt" @@ -9,7 +9,7 @@ import ( "github.com/Sirupsen/logrus" ) -type resumableRequestReader struct { +type requestReader struct { client *http.Client request *http.Request lastRange 
int64 @@ -20,22 +20,22 @@ type resumableRequestReader struct { waitDuration time.Duration } -// ResumableRequestReader makes it possible to resume reading a request's body transparently +// NewRequestReader makes it possible to resume reading a request's body transparently // maxfail is the number of times we retry to make requests again (not resumes) // totalsize is the total length of the body; auto detect if not provided -func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { - return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, waitDuration: 5 * time.Second} +func NewRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { + return &requestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, waitDuration: 5 * time.Second} } -// ResumableRequestReaderWithInitialResponse makes it possible to resume +// NewRequestReaderWithInitialResponse makes it possible to resume // reading the body of an already initiated request. 
-func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { - return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse, waitDuration: 5 * time.Second} +func NewRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { + return &requestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse, waitDuration: 5 * time.Second} } -func (r *resumableRequestReader) Read(p []byte) (n int, err error) { +func (r *requestReader) Read(p []byte) (n int, err error) { if r.client == nil || r.request == nil { - return 0, fmt.Errorf("client and request can't be nil\n") + return 0, fmt.Errorf("client and request can't be nil") } isFreshRequest := false if r.lastRange != 0 && r.currentResponse == nil { @@ -81,14 +81,14 @@ func (r *resumableRequestReader) Read(p []byte) (n int, err error) { return n, err } -func (r *resumableRequestReader) Close() error { +func (r *requestReader) Close() error { r.cleanUpResponse() r.client = nil r.request = nil return nil } -func (r *resumableRequestReader) cleanUpResponse() { +func (r *requestReader) cleanUpResponse() { if r.currentResponse != nil { r.currentResponse.Body.Close() r.currentResponse = nil diff --git a/fn/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go b/fn/vendor/github.com/docker/docker/registry/resumable/resumablerequestreader_test.go similarity index 69% rename from fn/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go rename to fn/vendor/github.com/docker/docker/registry/resumable/resumablerequestreader_test.go index 81cafae57..a632bc673 100644 --- a/fn/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go +++ 
b/fn/vendor/github.com/docker/docker/registry/resumable/resumablerequestreader_test.go @@ -1,7 +1,9 @@ -package httputils +package resumable import ( "fmt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "io" "io/ioutil" "net/http" @@ -21,28 +23,19 @@ func TestResumableRequestHeaderSimpleErrors(t *testing.T) { var req *http.Request req, err := http.NewRequest("GET", ts.URL, nil) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - expectedError := "client and request can't be nil\n" - resreq := &resumableRequestReader{} + resreq := &requestReader{} _, err = resreq.Read([]byte{}) - if err == nil || err.Error() != expectedError { - t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) - } + assert.EqualError(t, err, "client and request can't be nil") - resreq = &resumableRequestReader{ + resreq = &requestReader{ client: client, request: req, totalSize: -1, } - expectedError = "failed to auto detect content length" _, err = resreq.Read([]byte{}) - if err == nil || err.Error() != expectedError { - t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) - } - + assert.EqualError(t, err, "failed to auto detect content length") } // Not too much failures, bails out after some wait @@ -51,11 +44,9 @@ func TestResumableRequestHeaderNotTooMuchFailures(t *testing.T) { var badReq *http.Request badReq, err := http.NewRequest("GET", "I'm not an url", nil) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - resreq := &resumableRequestReader{ + resreq := &requestReader{ client: client, request: badReq, failures: 0, @@ -63,9 +54,8 @@ func TestResumableRequestHeaderNotTooMuchFailures(t *testing.T) { waitDuration: 10 * time.Millisecond, } read, err := resreq.Read([]byte{}) - if err != nil || read != 0 { - t.Fatalf("Expected no error and no byte read, got err:%v, read:%v.", err, read) - } + require.NoError(t, err) + assert.Equal(t, 0, read) } // Too much failures, returns the error @@ -74,11 +64,9 @@ 
func TestResumableRequestHeaderTooMuchFailures(t *testing.T) { var badReq *http.Request badReq, err := http.NewRequest("GET", "I'm not an url", nil) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - resreq := &resumableRequestReader{ + resreq := &requestReader{ client: client, request: badReq, failures: 0, @@ -88,9 +76,8 @@ func TestResumableRequestHeaderTooMuchFailures(t *testing.T) { expectedError := `Get I%27m%20not%20an%20url: unsupported protocol scheme ""` read, err := resreq.Read([]byte{}) - if err == nil || err.Error() != expectedError || read != 0 { - t.Fatalf("Expected the error '%s', got err:%v, read:%v.", expectedError, err, read) - } + assert.EqualError(t, err, expectedError) + assert.Equal(t, 0, read) } type errorReaderCloser struct{} @@ -105,9 +92,7 @@ func (errorReaderCloser) Read(p []byte) (n int, err error) { func TestResumableRequestReaderWithReadError(t *testing.T) { var req *http.Request req, err := http.NewRequest("GET", "", nil) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) client := &http.Client{} @@ -119,7 +104,7 @@ func TestResumableRequestReaderWithReadError(t *testing.T) { Body: errorReaderCloser{}, } - resreq := &resumableRequestReader{ + resreq := &requestReader{ client: client, request: req, currentResponse: response, @@ -130,21 +115,15 @@ func TestResumableRequestReaderWithReadError(t *testing.T) { buf := make([]byte, 1) read, err := resreq.Read(buf) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if read != 0 { - t.Fatalf("Expected to have read nothing, but read %v", read) - } + assert.Equal(t, 0, read) } func TestResumableRequestReaderWithEOFWith416Response(t *testing.T) { var req *http.Request req, err := http.NewRequest("GET", "", nil) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) client := &http.Client{} @@ -156,7 +135,7 @@ func TestResumableRequestReaderWithEOFWith416Response(t *testing.T) { Body: ioutil.NopCloser(strings.NewReader("")), } - resreq := 
&resumableRequestReader{ + resreq := &requestReader{ client: client, request: req, currentResponse: response, @@ -167,9 +146,7 @@ func TestResumableRequestReaderWithEOFWith416Response(t *testing.T) { buf := make([]byte, 1) _, err = resreq.Read(buf) - if err == nil || err != io.EOF { - t.Fatalf("Expected an io.EOF error, got %v", err) - } + assert.EqualError(t, err, io.EOF.Error()) } func TestResumableRequestReaderWithServerDoesntSupportByteRanges(t *testing.T) { @@ -182,29 +159,23 @@ func TestResumableRequestReaderWithServerDoesntSupportByteRanges(t *testing.T) { var req *http.Request req, err := http.NewRequest("GET", ts.URL, nil) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) client := &http.Client{} - resreq := &resumableRequestReader{ + resreq := &requestReader{ client: client, request: req, lastRange: 1, } defer resreq.Close() - expectedError := "the server doesn't support byte ranges" buf := make([]byte, 2) _, err = resreq.Read(buf) - if err == nil || err.Error() != expectedError { - t.Fatalf("Expected an error '%s', got %v", expectedError, err) - } + assert.EqualError(t, err, "the server doesn't support byte ranges") } func TestResumableRequestReaderWithZeroTotalSize(t *testing.T) { - srvtxt := "some response text data" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -214,30 +185,22 @@ func TestResumableRequestReaderWithZeroTotalSize(t *testing.T) { var req *http.Request req, err := http.NewRequest("GET", ts.URL, nil) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) client := &http.Client{} retries := uint32(5) - resreq := ResumableRequestReader(client, req, retries, 0) + resreq := NewRequestReader(client, req, retries, 0) defer resreq.Close() data, err := ioutil.ReadAll(resreq) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) resstr := strings.TrimSuffix(string(data), "\n") - - if resstr != srvtxt { - t.Error("resstr != srvtxt") - } + assert.Equal(t, srvtxt, resstr) } 
func TestResumableRequestReader(t *testing.T) { - srvtxt := "some response text data" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -247,31 +210,23 @@ func TestResumableRequestReader(t *testing.T) { var req *http.Request req, err := http.NewRequest("GET", ts.URL, nil) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) client := &http.Client{} retries := uint32(5) imgSize := int64(len(srvtxt)) - resreq := ResumableRequestReader(client, req, retries, imgSize) + resreq := NewRequestReader(client, req, retries, imgSize) defer resreq.Close() data, err := ioutil.ReadAll(resreq) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) resstr := strings.TrimSuffix(string(data), "\n") - - if resstr != srvtxt { - t.Error("resstr != srvtxt") - } + assert.Equal(t, srvtxt, resstr) } func TestResumableRequestReaderWithInitialResponse(t *testing.T) { - srvtxt := "some response text data" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -281,30 +236,21 @@ func TestResumableRequestReaderWithInitialResponse(t *testing.T) { var req *http.Request req, err := http.NewRequest("GET", ts.URL, nil) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) client := &http.Client{} retries := uint32(5) imgSize := int64(len(srvtxt)) res, err := client.Do(req) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - resreq := ResumableRequestReaderWithInitialResponse(client, req, retries, imgSize, res) + resreq := NewRequestReaderWithInitialResponse(client, req, retries, imgSize, res) defer resreq.Close() data, err := ioutil.ReadAll(resreq) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) resstr := strings.TrimSuffix(string(data), "\n") - - if resstr != srvtxt { - t.Error("resstr != srvtxt") - } + assert.Equal(t, srvtxt, resstr) } diff --git a/fn/vendor/github.com/docker/docker/registry/service.go b/fn/vendor/github.com/docker/docker/registry/service.go index 
56dabab75..34e8a13f9 100644 --- a/fn/vendor/github.com/docker/docker/registry/service.go +++ b/fn/vendor/github.com/docker/docker/registry/service.go @@ -31,6 +31,7 @@ type Service interface { Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) ServiceConfig() *registrytypes.ServiceConfig TLSConfig(hostname string) (*tls.Config, error) + LoadAllowNondistributableArtifacts([]string) error LoadMirrors([]string) error LoadInsecureRegistries([]string) error } @@ -56,13 +57,17 @@ func (s *DefaultService) ServiceConfig() *registrytypes.ServiceConfig { defer s.mu.Unlock() servConfig := registrytypes.ServiceConfig{ - InsecureRegistryCIDRs: make([]*(registrytypes.NetIPNet), 0), - IndexConfigs: make(map[string]*(registrytypes.IndexInfo)), - Mirrors: make([]string, 0), + AllowNondistributableArtifactsCIDRs: make([]*(registrytypes.NetIPNet), 0), + AllowNondistributableArtifactsHostnames: make([]string, 0), + InsecureRegistryCIDRs: make([]*(registrytypes.NetIPNet), 0), + IndexConfigs: make(map[string]*(registrytypes.IndexInfo)), + Mirrors: make([]string, 0), } // construct a new ServiceConfig which will not retrieve s.Config directly, // and look up items in s.config with mu locked + servConfig.AllowNondistributableArtifactsCIDRs = append(servConfig.AllowNondistributableArtifactsCIDRs, s.config.ServiceConfig.AllowNondistributableArtifactsCIDRs...) + servConfig.AllowNondistributableArtifactsHostnames = append(servConfig.AllowNondistributableArtifactsHostnames, s.config.ServiceConfig.AllowNondistributableArtifactsHostnames...) servConfig.InsecureRegistryCIDRs = append(servConfig.InsecureRegistryCIDRs, s.config.ServiceConfig.InsecureRegistryCIDRs...) 
for key, value := range s.config.ServiceConfig.IndexConfigs { @@ -74,6 +79,14 @@ func (s *DefaultService) ServiceConfig() *registrytypes.ServiceConfig { return &servConfig } +// LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries for Service. +func (s *DefaultService) LoadAllowNondistributableArtifacts(registries []string) error { + s.mu.Lock() + defer s.mu.Unlock() + + return s.config.LoadAllowNondistributableArtifacts(registries) +} + // LoadMirrors loads registry mirrors for Service func (s *DefaultService) LoadMirrors(mirrors []string) error { s.mu.Lock() @@ -235,12 +248,13 @@ func (s *DefaultService) ResolveRepository(name reference.Named) (*RepositoryInf // APIEndpoint represents a remote API endpoint type APIEndpoint struct { - Mirror bool - URL *url.URL - Version APIVersion - Official bool - TrimHostname bool - TLSConfig *tls.Config + Mirror bool + URL *url.URL + Version APIVersion + AllowNondistributableArtifacts bool + Official bool + TrimHostname bool + TLSConfig *tls.Config } // ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint diff --git a/fn/vendor/github.com/docker/docker/registry/service_v2.go b/fn/vendor/github.com/docker/docker/registry/service_v2.go index 228d745f8..68466f823 100644 --- a/fn/vendor/github.com/docker/docker/registry/service_v2.go +++ b/fn/vendor/github.com/docker/docker/registry/service_v2.go @@ -44,6 +44,8 @@ func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndp return endpoints, nil } + ana := allowNondistributableArtifacts(s.config, hostname) + tlsConfig, err = s.tlsConfig(hostname) if err != nil { return nil, err @@ -55,9 +57,10 @@ func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndp Scheme: "https", Host: hostname, }, - Version: APIVersion2, - TrimHostname: true, - TLSConfig: tlsConfig, + Version: APIVersion2, + AllowNondistributableArtifacts: ana, + TrimHostname: true, + TLSConfig: tlsConfig, }, } @@ -67,8 +70,9 @@ func (s 
*DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndp Scheme: "http", Host: hostname, }, - Version: APIVersion2, - TrimHostname: true, + Version: APIVersion2, + AllowNondistributableArtifacts: ana, + TrimHostname: true, // used to check if supposed to be secure via InsecureSkipVerify TLSConfig: tlsConfig, }) diff --git a/fn/vendor/github.com/docker/docker/registry/session.go b/fn/vendor/github.com/docker/docker/registry/session.go index c71e77803..9d7f32193 100644 --- a/fn/vendor/github.com/docker/docker/registry/session.go +++ b/fn/vendor/github.com/docker/docker/registry/session.go @@ -23,10 +23,11 @@ import ( "github.com/docker/distribution/registry/api/errcode" "github.com/docker/docker/api/types" registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/pkg/httputils" "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/registry/resumable" ) var ( @@ -226,7 +227,7 @@ func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) { if res.StatusCode == 401 { return nil, errcode.ErrorCodeUnauthorized.WithArgs() } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) + return nil, newJSONError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) } var history []string @@ -246,7 +247,7 @@ func (r *Session) LookupRemoteImage(imgID, registry string) error { } res.Body.Close() if res.StatusCode != 200 { - return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + return newJSONError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) } return nil } @@ -259,7 +260,7 @@ func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int64, err } defer res.Body.Close() if res.StatusCode != 200 { - return 
nil, -1, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + return nil, -1, newJSONError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) } // if the size header is not present, then set it to '-1' imageSize := int64(-1) @@ -313,7 +314,7 @@ func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 { logrus.Debug("server supports resume") - return httputils.ResumableRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil + return resumable.NewRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil } logrus.Debug("server doesn't support resume") return res.Body, nil @@ -444,13 +445,13 @@ func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, erro // TODO: Right now we're ignoring checksums in the response body. // In the future, we need to use them to check image validity. if res.StatusCode == 404 { - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) + return nil, newJSONError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) } else if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { logrus.Debugf("Error reading response body: %s", err) } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, reference.Path(name), errBody), res) + return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, reference.Path(name), errBody), res) } var endpoints []string @@ -537,12 +538,12 @@ func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, regist } defer res.Body.Close() if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") { - return httputils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res) + return newJSONError("HTTP code 401, Docker will not send auth 
headers over HTTP.", res) } if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { - return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + return newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) } var jsonBody map[string]string if err := json.Unmarshal(errBody, &jsonBody); err != nil { @@ -550,7 +551,7 @@ func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, regist } else if jsonBody["error"] == "Image already exists" { return ErrAlreadyExists } - return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res) + return newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res) } return nil } @@ -591,9 +592,9 @@ func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { - return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + return "", "", newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) } - return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res) + return "", "", newJSONError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res) } checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil)) @@ -619,7 +620,7 @@ func (r *Session) PushRegistryTag(remote reference.Named, revision, tag, registr } res.Body.Close() if res.StatusCode != 200 && res.StatusCode != 201 { - return 
httputils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, reference.Path(remote)), res) + return newJSONError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, reference.Path(remote)), res) } return nil } @@ -683,7 +684,7 @@ func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, if err != nil { logrus.Debugf("Error reading response body: %s", err) } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, reference.Path(remote), errBody), res) + return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, reference.Path(remote), errBody), res) } tokens = res.Header["X-Docker-Token"] logrus.Debugf("Auth token: %v", tokens) @@ -701,7 +702,7 @@ func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, if err != nil { logrus.Debugf("Error reading response body: %s", err) } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, reference.Path(remote), errBody), res) + return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, reference.Path(remote), errBody), res) } } @@ -750,25 +751,12 @@ func (r *Session) SearchRepositories(term string, limit int) (*registrytypes.Sea } defer res.Body.Close() if res.StatusCode != 200 { - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res) + return nil, newJSONError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res) } result := new(registrytypes.SearchResults) return result, json.NewDecoder(res.Body).Decode(result) } -// GetAuthConfig returns the authentication settings for a session -// TODO(tiborvass): remove this once registry client v2 is vendored -func (r *Session) GetAuthConfig(withPasswd bool) 
*types.AuthConfig { - password := "" - if withPasswd { - password = r.authConfig.Password - } - return &types.AuthConfig{ - Username: r.authConfig.Username, - Password: password, - } -} - func isTimeout(err error) bool { type timeout interface { Timeout() bool @@ -781,3 +769,10 @@ func isTimeout(err error) bool { t, ok := e.(timeout) return ok && t.Timeout() } + +func newJSONError(msg string, res *http.Response) error { + return &jsonmessage.JSONError{ + Message: msg, + Code: res.StatusCode, + } +} diff --git a/fn/vendor/github.com/docker/docker/reports/2017-05-01.md b/fn/vendor/github.com/docker/docker/reports/2017-05-01.md new file mode 100644 index 000000000..366f4fce7 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/reports/2017-05-01.md @@ -0,0 +1,35 @@ +# Development Report for May 01, 2017 + +This is the 1st report, since the Moby project was announced at DockerCon. Thank you to everyone that stayed an extra day to attend the summit on Thursday. + +## Daily Meeting + +A daily meeting is hosted on [slack](https://dockercommunity.slack.com/) every business day at 9am PST on the channel `#moby-project`. +During this meeting, we are talking about the [tasks](https://github.com/moby/moby/issues/32867) needed to be done for splitting moby and docker. + +## Topics discussed last week + +### The moby tool + +The moby tool currently lives at [https://github.com/moby/tool](https://github.com/moby/tool), it's only a temporary place and will soon be merged in [https://github.com/moby/moby](https://github.com/moby/moby). + +### The CLI split + +Ongoing work to split the Docker CLI into [https://github.com/docker/cli](https://github.com/docker/cli) is happening [here](https://github.com/moby/moby/pull/32694). +We are almost done, it should be merged soon. + +### Mailing list + +Slack works great for synchronous communication, but we need to place for async discussion. A mailing list is currently being setup. 
+ +### Find a good and non-confusing home for the remaining monolith + +Lots of discussion and progress made on this topic, see [here](https://github.com/moby/moby/issues/32871). The work will start this week. + +## Componentization + +So far only work on the builder happened regarding the componentization effort. + +### builder + +The builder dev report can be found [here](builder/2017-05-01.md) diff --git a/fn/vendor/github.com/docker/docker/reports/2017-05-08.md b/fn/vendor/github.com/docker/docker/reports/2017-05-08.md new file mode 100644 index 000000000..7f0333541 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/reports/2017-05-08.md @@ -0,0 +1,34 @@ +# Development Report for May 08, 2017 + +## Daily Meeting + +A daily meeting is hosted on [slack](https://dockercommunity.slack.com) every business day at 9am PST on the channel `#moby-project`. +During this meeting, we are talking about the [tasks](https://github.com/moby/moby/issues/32867) needed to be done for splitting moby and docker. + +## Topics discussed last week + +### The CLI split + +The Docker CLI was successfully moved to [https://github.com/docker/cli](https://github.com/docker/cli) last week thanks to @tiborvass +The Docker CLI is now compiled from the [Dockerfile](https://github.com/moby/moby/blob/a762ceace4e8c1c7ce4fb582789af9d8074be3e1/Dockerfile#L248) + +### Mailing list + +Discourse is available at [forums.mobyproject.org](https://forums.mobyproject.org/) thanks to @thaJeztah. mailing-list mode is enabled, so once you register there, you will received every new threads / messages via email. So far, 3 categories were created: Architecture, Meta & Support. The last step missing is to setup an email address to be able to start a new thread via email. + +### Find a place for `/pkg` + +Lots of discussion and progress made on this [topic](https://github.com/moby/moby/issues/32989) thanks to @dnephin. 
[Here is the list](https://gist.github.com/dnephin/35dc10f6b6b7017f058a71908b301d38) proposed to split/reorganize the pkgs. + +### Find a good and non-confusing home for the remaining monolith + +@cpuguy83 is leading the effort [here](https://github.com/moby/moby/pull/33022). It's still WIP but the way we are experimenting with is to reorganise directories within the moby/moby. + +## Componentization + +So far only work on the builder, by @tonistiigi, happened regarding the componentization effort. + +### builder + +The builder dev report can be found [here](builder/2017-05-08.md) + diff --git a/fn/vendor/github.com/docker/docker/reports/2017-05-15.md b/fn/vendor/github.com/docker/docker/reports/2017-05-15.md new file mode 100644 index 000000000..7556f9cc4 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/reports/2017-05-15.md @@ -0,0 +1,52 @@ +# Development Report for May 15, 2017 + +## Daily Meeting + +A daily meeting is hosted on [slack](https://dockercommunity.slack.com) every business day at 9am PST on the channel `#moby-project`. +During this meeting, we are talking about the [tasks](https://github.com/moby/moby/issues/32867) needed to be done for splitting moby and docker. + +## Topics discussed last week + +### The CLI split + +Work is in progress to move the "opts" package to the docker/cli repository. The package, was merged into the docker/cli +repository through [docker/cli#82](https://github.com/docker/cli/pull/82), preserving Git history, and parts that are not +used in Moby have been removed through [moby/moby#33198](https://github.com/moby/moby/pull/33198). + +### Find a good and non-confusing home for the remaining monolith + +Discussion on this topic is still ongoing, and possible approaches are looked into. 
The active discussion has moved +from GitHub to [https://forums.mobyproject.org/](https://forums.mobyproject.org/t/topic-find-a-good-an-non-confusing-home-for-the-remaining-monolith/37) + +### Find a place for `/pkg` + +Concerns were raised about moving packages to separate repositories, and it was decided to put some extra effort into +breaking up / removing existing packages that likely are not good candidates to become a standalone project. + +### Update integration-cli tests + +With the removal of the CLI from the moby repository, new pull requests will have to be tested using API tests instead +of using the CLI. Discussion took place whether or not these tests should use the API `client` package, or be completely +independent, and make raw HTTP calls. + +A topic was created on the forum to discuss options: [evolution of testing](https://forums.mobyproject.org/t/evolution-of-testing-moby/38) + + +### Proposal: split & containerize hack/validate + +[@AkihiroSuda](https://github.com/AkihiroSuda) is proposing to split and containerize the `hack/validate` script and +[started a topic on the forum](https://forums.mobyproject.org/t/proposal-split-containerize-hack-validate/32). An initial +proposal to add validation functionality to `vndr` (the vendoring tool in use) was rejected upstream, so alternative +approaches were discussed. + + +### Special Interest Groups + +A "SIG" category was created on the forums to provide a home for Special Interest Groups. The first SIG, [LinuxKit +Security](https://forums.mobyproject.org/t/about-the-linuxkit-security-category/44) was started (thanks +[@riyazdf](https://github.com/riyazdf)). 
+ + +### Builder + +The builder dev report can be found [here](builder/2017-05-15.md) diff --git a/fn/vendor/github.com/docker/docker/reports/2017-06-05.md b/fn/vendor/github.com/docker/docker/reports/2017-06-05.md new file mode 100644 index 000000000..8e2cc3c45 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/reports/2017-06-05.md @@ -0,0 +1,36 @@ +# Development Report for June 5, 2017 + +## Daily Meeting + +A daily meeting is hosted on [slack](https://dockercommunity.slack.com) every business day at 9am PST on the channel `#moby-project`. +Lots of discussion happened during this meeting to kickstart the project, but now that we have the forums, we see less activity there. +We are discussing the future of this meeting [here](https://forums.mobyproject.org/t/of-standups-future), we will possibily move the meeting +to weekly. + +## Topics discussed last week + +### The CLI split + +Thanks to @tiborvass, the man pages, docs and completion scripts were imported to `github.com/docker/cli` [last week](https://github.com/docker/cli/pull/147) +Once everything is finalised, we will remove them from `github.com/moby/moby` + +### Find a good and non-confusing home for the remaining monolith + +Discussion on this topic is still ongoing, and possible approaches are looked into. The active discussion has moved +from GitHub to [https://forums.mobyproject.org/](https://forums.mobyproject.org/t/topic-find-a-good-an-non-confusing-home-for-the-remaining-monolith) + + +### Find a place for `/pkg` + +Thanks to @dnephin this topic in on-going, you can follow progress [here](https://github.com/moby/moby/issues/32989) +Many pkgs were reorganised last week, and more to come this week. 
+ + +### Builder + +The builder dev report can be found [here](builder/2017-06-05.md) + + +### LinuxKit + +The LinuxKit dev report can be found [here](https://github.com/linuxkit/linuxkit/blob/master/reports/2017-06-03.md) \ No newline at end of file diff --git a/fn/vendor/github.com/docker/docker/reports/2017-06-12.md b/fn/vendor/github.com/docker/docker/reports/2017-06-12.md new file mode 100644 index 000000000..8aef38c6b --- /dev/null +++ b/fn/vendor/github.com/docker/docker/reports/2017-06-12.md @@ -0,0 +1,78 @@ +# Development Report for June 12, 2017 + +## Moby Summit + +The next Moby Summit will be at Docker HQ on June 19th, register [here](https://www.eventbrite.com/e/moby-summit-tickets-34483396768) + +## Daily Meeting + +### The CLI split + +Manpages and docs yaml files can now be generated on [docker/cli](https://github.com/docker/cli). +Man pages, docs and completion scripts will be removed next week thanks to @tiborvass + +### Find a good and non-confusing home for the remaining monolith + +Lot's of dicussion happened on the [forums](https://forums.mobyproject.org/t/topic-find-a-good-an-non-confusing-home-for-the-remaining-monolith) +We should expect to do those changes after the moby summit. We contacted github to work with them so we have a smooth move. + +### Moby tool + +`moby` tool docs were moved from [LinuxKit](https://github.com/linuxkit/linuxkit) to the [moby tool repo](https://github.com/moby/tool) thanks to @justincormack + +### Custom golang URLs + +More discussions on the [forums](https://forums.mobyproject.org/t/cutoms-golang-urls), no agreement for now. + +### Buildkit + +[Proposal](https://github.com/moby/moby/issues/32925) + +More updates to the [POC repo](https://github.com/tonistiigi/buildkit_poc). It now contains binaries for the daemon and client. Examples directory shows a way for invoking a build job by generating the internal low-level build graph definition with a helper binary(as there is not support for frontends yet). 
The grpc control server binary can be built in two versions, one that connects to containerD socket and other that doesn't have any external dependencies. + +If you have questions or want to help, stop by the issues section of that repo or the proposal in moby/moby. + +#### Typed Dockerfile parsing + +[PR](https://github.com/moby/moby/pull/33492) + +New PR that enables parsing Dockerfiles into typed structures so they can be preprocessed to eliminate unnecessary build stages and reused with different kinds of dispatchers. + +#### Long running session & incremental file sending + +[PR ](https://github.com/moby/moby/pull/32677) + +Same status as last week. The PR went through one pass of review from @dnephin and has been rebased again. Maintainers are encouraged to give this one a review so it can be included in `v17.07` release. + + +#### Quality: Dependency interface switch + +[Move file copying from the daemon to the builder](https://github.com/moby/moby/pull/33454) PR is waiting for a second review. + +#### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +[Provide advanced .dockeringore use-cases](https://github.com/moby/moby/issues/12886) [2](https://github.com/moby/moby/issues/12886#issuecomment-306247989) + +If you are interested in implementing any of them, leave a comment on the specific issues. 
+ +#### Builder features currently in code-review: + +[Warn/deprecate continuing on empty lines in `Dockerfile`](https://github.com/moby/moby/pull/29161) + +[Fix behavior of absolute paths in .dockerignore](https://github.com/moby/moby/pull/32088) + +#### Backlog + +[Build secrets](https://github.com/moby/moby/issues/33343) has not got much traction. If you want this feature to become a reality, please make yourself heard. diff --git a/fn/vendor/github.com/docker/docker/reports/2017-06-26.md b/fn/vendor/github.com/docker/docker/reports/2017-06-26.md new file mode 100644 index 000000000..e12533ae4 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/reports/2017-06-26.md @@ -0,0 +1,120 @@ +# Development Report for June 26, 2017 + +## Moby Summit + +The Moby Summit held in San Francisco was very active and well attended ([blog](http://mobyproject.org/blog/2017/06/26/moby-summit-recap/) / [linuxkit table notes](https://github.com/linuxkit/linuxkit/blob/master/reports/2017-06-19-summit.md) [#2090](https://github.com/linuxkit/linuxkit/pull/2090) [#2033](https://github.com/linuxkit/linuxkit/pull/2033) [@mgoelzer] [@justincormack]). + +## Container Engine + +Thanks to @fabiokung there is no container locks anymore on `docker ps` [#31273](https://github.com/moby/moby/pull/31273) + +## BuildKit + +[Repo](https://github.com/moby/buildkit) +[Proposal](https://github.com/moby/moby/issues/32925) + +New development repo is open at https://github.com/moby/buildkit + +The readme file provides examples how to get started. You can see an example of building BuildKit with BuildKit. + +There are lots of new issues opened as well to track the missing functionality. You are welcomed to help on any of them or discuss the design there. + +Last week most of the work was done on improving the `llb` client library for more complicated use cases and providing traces and interactive progress of executed build jobs. 
+ +The `llb` client package is a go library that helps you to generate the build definition graph. It uses chained methods to make it easy to describe what steps need to be running. Mounts can be added to the execution steps for defining multiple inputs or outputs. To prepare the graph, you just have to call `Marshal()` on a leaf node that will generate the protobuf definition for everything required to build that node. + +### Typed Dockerfile parsing + +[PR](https://github.com/moby/moby/pull/33492) + +This PR that enables parsing Dockerfiles into typed structures so they can be preprocessed to eliminate unnecessary build stages and reused with different kinds of dispatchers(eg. BuildKit). + +The PR had some review and updates in last week. Should be ready to code review soon. + +### Merged: Long running session & incremental file sending + +[PR](https://github.com/moby/moby/pull/32677) + +Incremental context sending PR was merged and is expected to land in `v17.07`. + +This feature experimental feature lets you skip sending the build context to the daemon on repeated builder invocations during development. Currently, this feature requires a CLI flag `--stream=true`. If this flag is used, one first builder invocation full build context is sent to the daemon. On a second attempt, only the changed files are transferred. + +Previous build context is saved in the build cache, and you can see how much space it takes form `docker system df`. Build cache will be automatically garbage collected and can also be manually cleared with `docker prune`. + +### Quality: Dependency interface switch + +[Move file copying from the daemon to the builder](https://github.com/moby/moby/pull/33454) PR was merged. 
+ + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +[Provide advanced .dockeringore use-cases](https://github.com/moby/moby/issues/12886) [2](https://github.com/moby/moby/issues/12886#issuecomment-306247989) + +If you are interested in implementing any of them, leave a comment on the specific issues. + +### Other builder PRs merged last week + +[Warn/deprecate continuing on empty lines in `Dockerfile`](https://github.com/moby/moby/pull/29161) + +[Fix behavior of absolute paths in .dockerignore](https://github.com/moby/moby/pull/32088) + +[fix copy —from conflict with force pull](https://github.com/moby/moby/pull/33735) + +### Builder features currently in code-review: + +[Fix handling of remote "git@" notation](https://github.com/moby/moby/pull/33696) + +[builder: Emit a BuildResult after squashing.](https://github.com/moby/moby/pull/33824) + +[Fix shallow git clone in docker-build](https://github.com/moby/moby/pull/33704) + +### Backlog + +[Build secrets](https://github.com/moby/moby/issues/33343) has not got much traction. If you want this feature to become a reality, please make yourself heard. + +## LinuxKit + +* **Kernel GPG verification:** The kernel compilation containers now verify the GPG and SHA256 + checksums before building the binaries. ([#2062](https://github.com/linuxkit/linuxkit/issues/2062) [#2083](https://github.com/linuxkit/linuxkit/issues/2083) [@mscribe] [@justincormack] [@rn] [@riyazdf]). 
+ The base Alpine build image now includes `gnupg` to support this feature ([#2091](https://github.com/linuxkit/linuxkit/issues/2091) [@riyazdf] [@rn]). + +* **Security SIG on Landlock:** The third Moby Security SIG focussed on the [Landlock](https://github.com/landlock-lsm) security module that provides unprivileged fine-grained sandboxing to applications. There are videos and forum links ([#2087](https://github.com/linuxkit/linuxkit/issues/2087) [#2089](https://github.com/linuxkit/linuxkit/issues/2089) [#2073](https://github.com/linuxkit/linuxkit/issues/2073) [@riyazdf]). + +* **Networking drivers now modules:** The kernels have been updated to 4.11.6/4.9.33/4.4.73, and many drivers are now loaded as modules to speed up boot-time ([#2095](https://github.com/linuxkit/linuxkit/issues/2095) [#2061](https://github.com/linuxkit/linuxkit/issues/2061) [@rn] [@justincormack] [@tych0]) + +- **Whaley important update:** The ASCII logo was updated and we fondly wave goodbye to the waves. ([#2084](https://github.com/linuxkit/linuxkit/issues/2084) [@thaJeztah] [@rn]) + +- **Containerised getty and sshd:** The login services now run in their own mount namespace, which was confusing people since they were expecting it to be on the host filesystem. This is now being addressed via a reminder in the `motd` upon login ([#2078](https://github.com/linuxkit/linuxkit/issues/2078) [#2097](https://github.com/linuxkit/linuxkit/issues/2097) [@deitch] [@ijc] [@justincormack] [@riyazdf] [@rn]) + +- **Hardened user copying:** The RFC on ensuring that we use a hardened kernel/userspace copying system was closed, as it is enabled by default on all our modern kernels and a regression test is included by default ([#2086](https://github.com/linuxkit/linuxkit/issues/2086) [@fntlnz] [@riyazdf]). + +- **Vultr provider:** There is an ongoing effort to add a metadata provider for [Vultr](http://vultr.com) ([#2101](https://github.com/linuxkit/linuxkit/issues/2101) [@furious-luke] [@justincormack]). 
+ +### Packages and Projects + +- Simplified Makefiles for packages ([#2080](https://github.com/linuxkit/linuxkit/issues/2080) [@justincormack] [@rn]) +- The MirageOS SDK is integrating many upstream changes from dependent libraries, for the DHCP client ([#2070](https://github.com/linuxkit/linuxkit/issues/2070) [#2072](https://github.com/linuxkit/linuxkit/issues/2072) [@samoht] [@talex5] [@avsm]). + +### Documentation and Tests + +- A comprehensive test suite for containerd is now integrated into LinuxKit tests ([#2062](https://github.com/linuxkit/linuxkit/issues/2062) [@AkihiroSuda] [@justincormack] [@rn]) +- Fix documentation links ([#2074](https://github.com/linuxkit/linuxkit/issues/2074) [@ndauten] [@justincormack]) +- Update RTF version ([#2077](https://github.com/linuxkit/linuxkit/issues/2077) [@justincormack]) +- tests: add build test for Docker for Mac blueprint ([#2093](https://github.com/linuxkit/linuxkit/issues/2093) [@riyazdf] [@MagnusS]) +- Disable Qemu EFI ISO test for now ([#2100](https://github.com/linuxkit/linuxkit/issues/2100) [@justincormack]) +- The CI whitelists and ACLs were updated ([linuxkit-ci#11](https://github.com/linuxkit/linuxkit-ce/issues/11) [linuxkit-ci#15](https://github.com/linuxkit/linuxkit-ce/issues/15) [linuxkit/linuxkit-ci#10](https://github.com/linuxkit/linuxkit-ce/issues/10) [@rn] [@justincormack]) +- Fix spelling errors ([#2079](https://github.com/linuxkit/linuxkit/issues/2079) [@ndauten]) +- Fix typo in dev report ([#2094](https://github.com/linuxkit/linuxkit/issues/2094) [@justincormack]) +- Fix dead Link to VMWare File ([#2082](https://github.com/linuxkit/linuxkit/issues/2082) [@davefreitag]) \ No newline at end of file diff --git a/fn/vendor/github.com/docker/docker/reports/builder/2017-05-01.md b/fn/vendor/github.com/docker/docker/reports/builder/2017-05-01.md new file mode 100644 index 000000000..73d1c4930 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/reports/builder/2017-05-01.md @@ -0,0 +1,47 @@ +# 
Development Report for May 01, 2017 + +### buildkit + +As part of the goals of [Moby](https://github.com/moby/moby#transitioning-to-moby) to split the current platform into reusable components and to provide a future vision for the builder component new [buildkit proposal](https://github.com/moby/moby/issues/32925) was opened with early design draft. + +Buildkit is a library providing the core essentials of running a build process using isolated sandboxed commands. It is designed for extensibility and customization. Buildkit supports multiple build declaration formats(frontends) and multiple ways for outputting build results(not just docker images). It doesn't make decisions for a specific worker, snapshot or exporter implementations. + +It is designed to help find the most efficient way to process build tasks and intelligently cache them for repeated invocations. + +### Quality: Dependency interface switch + +To improve quality and performance, a new [proposal was made for switching the dependency interface](https://github.com/moby/moby/issues/32904) for current builder package. That should fix the current problems with data leakage and conflicts caused by daemon state cleanup scripts. + +@dnephin is in progress of refactoring current builder code to logical areas as a preparation work for updating this interface. 
+ +Merged as part of this effort: + +- [Refactor Dockerfile.parser and directive](https://github.com/moby/moby/pull/32580) +- [Refactor builder dispatch state](https://github.com/moby/moby/pull/32600) +- [Use a bytes.Buffer for shell_words string concat](https://github.com/moby/moby/pull/32601) +- [Refactor `Builder.commit()`](https://github.com/moby/moby/pull/32772) +- [Remove b.escapeToken, create ShellLex](https://github.com/moby/moby/pull/32858) + +### New feature: Long running session + +PR for [adding long-running session between daemon and cli](https://github.com/moby/moby/pull/32677) that enabled advanced features like incremental context send, build credentials from the client, ssh forwarding etc. is looking for initial design review. It is currently open if features implemented on top of it would use a specific transport implementation on the wire or a generic interface(current implementation). @tonistiigi is working on adding persistent cache capabilities that are currently missing from that PR. It also needs to be figured out how the [cli split](https://github.com/moby/moby/pull/32694) will affect features like this. + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +These proposals have gotten mostly positive feedback for now. We will leave them open for a couple of more weeks and then decide what actions to take in a maintainers meeting. Also, if you are interested in implementing any of them, leave a comment on the specific issues. 
+ +### Other new builder features currently in code-review: + +[`docker build --iidfile` to capture the ID of the build result](https://github.com/moby/moby/pull/32406) + +[Allow builds from any git remote ref](https://github.com/moby/moby/pull/32502) + +### Backlog: + +[Build secrets](https://github.com/moby/moby/pull/30637) will be brought up again in next maintainer's meeting to evaluate how to move on with this, if any other proposals have changed the objective and if we should wait for swarm secrets to be available first. diff --git a/fn/vendor/github.com/docker/docker/reports/builder/2017-05-08.md b/fn/vendor/github.com/docker/docker/reports/builder/2017-05-08.md new file mode 100644 index 000000000..d9396ab76 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/reports/builder/2017-05-08.md @@ -0,0 +1,57 @@ +# Development Report for May 08, 2017 + + +### Quality: Dependency interface switch + +Proposal for [switching the dependency interface](https://github.com/moby/moby/issues/32904) for current builder package. That should fix the current problems with data leakage and conflicts caused by daemon state cleanup scripts. + +Merged as part of this effort: + +- [Move dispatch state to a new struct](https://github.com/moby/moby/pull/32952) +- [Cleanup unnecessary mutate then revert of b.runConfig](https://github.com/moby/moby/pull/32773) + +In review: +- [Refactor builder probe cache and container backend](https://github.com/moby/moby/pull/33061) +- [Expose GetImage interface for builder](https://github.com/moby/moby/pull/33054) + +### Merged: docker build --iidfile + +[`docker build --iidfile` to capture the ID of the build result](https://github.com/moby/moby/pull/32406). New option can be used by the CLI applications to get back the image ID of build result. API users can use the `Aux` messages in progress stream to also get the IDs for intermediate build stages, for example to share them for build cache. 
+ +### New feature: Long running session + +PR for [adding long-running session between daemon and cli](https://github.com/moby/moby/pull/32677) that enables advanced features like incremental context send, build credentials from the client, ssh forwarding etc. + +@simonferquel proposed a [grpc-only version of that interface](https://github.com/moby/moby/pull/33047) that should simplify the setup needed for describing new features for the session. Looking for design reviews. + +The feature also needs to be reworked after CLI split. + +### buildkit + +Not much progress [apart from some design discussion](https://github.com/moby/moby/issues/32925). Next step would be to open up a repo. + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +If you are interested in implementing any of them, leave a comment on the specific issues. + +### Other new builder features currently in code-review: + +[Allow builds from any git remote ref](https://github.com/moby/moby/pull/32502) + +[Fix a case where using FROM scratch as NAME would fail](https://github.com/moby/moby/pull/32997) + +### Backlog: + +[Build secrets](https://github.com/moby/moby/pull/30637) will be brought up again in next maintainer's meeting to evaluate how to move on with this, if any other proposals have changed the objective and if we should wait for swarm secrets to be available first. 
diff --git a/fn/vendor/github.com/docker/docker/reports/builder/2017-05-15.md b/fn/vendor/github.com/docker/docker/reports/builder/2017-05-15.md new file mode 100644 index 000000000..cfc742f3a --- /dev/null +++ b/fn/vendor/github.com/docker/docker/reports/builder/2017-05-15.md @@ -0,0 +1,64 @@ +# Development Report for May 15, 2017 + +### Multi-stage builds fixes coming in 17.06-rc1 + +Some bugs were discovered in new multi-stage build feature, release in 17.05. + +When using an image name directly in `COPY --from` without defining a build stage, the data associated with that image was not properly cleaned up. + +If a second was based on `scratch` image, the metadata from the previous stage didn't get reset, forcing the user to clear it manually with extra commands. + +Fixes for these are merged for the next release, everyone is welcomed to test it once `17.06-rc1` is out. + +- [Fix resetting image metadata between stages for scratch case](https://github.com/moby/moby/pull/33179) +- [Fix releasing implicit mounts](https://github.com/moby/moby/pull/33090) +- [Fix a case where using FROM scratch as NAME would fail](https://github.com/moby/moby/pull/32997) + + +### Quality: Dependency interface switch + +Work continues on making the builder dependency interface more stable. This week methods for getting access to source image were swapped out to a new version that keeps a reference to image data until build job has complete. 
+ +Merged as part of this effort: + +- [Expose GetImage interface for builder](https://github.com/moby/moby/pull/33054) + +In review: +- [Refactor builder probe cache and container backend](https://github.com/moby/moby/pull/33061) +- [Refactor COPY/ADD dispatchers](https://github.com/moby/moby/pull/33116) + + +### New feature: Long running session + +PR for [adding long-running session between daemon and cli](https://github.com/moby/moby/pull/32677) that enables advanced features like incremental context send, build credentials from the client, ssh forwarding etc. + +@simonferquel updated a [grpc-only version of that interface](https://github.com/moby/moby/pull/33047) and mostly seems that consensus was achieved for using only grpc transport. @tonistiigi finished up persistent cache layer and garbage collection for file transfers. The PR now needs to be split up because CLI has moved. Once that is done, the main PR should be ready for review early this week. + +### Merged: Specifying any remote ref in git checkout URLs + +Building from git sources now allows [specifying any remote ref](https://github.com/moby/moby/pull/32502). For example, to build a pull request from GitHub you can use: `docker build git://github.com/moby/moby#pull/32502/head`. + + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +If you are interested in implementing any of them, leave a comment on the specific issues. 
+ +### Other new builder features currently in code-review: + +- + +### Backlog: + +[Build secrets](https://github.com/moby/moby/pull/30637) will be brought up again in next maintainer's meeting to evaluate how to move on with this, if any other proposals have changed the objective and if we should wait for swarm secrets to be available first. diff --git a/fn/vendor/github.com/docker/docker/reports/builder/2017-05-22.md b/fn/vendor/github.com/docker/docker/reports/builder/2017-05-22.md new file mode 100644 index 000000000..29ecc6bb9 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/reports/builder/2017-05-22.md @@ -0,0 +1,47 @@ +# Development Report for May 22, 2017 + +### New feature: Long running session + +PR for [adding long-running session between daemon and cli](https://github.com/moby/moby/pull/32677) that enables advanced features like incremental context send, build credentials from the client, ssh forwarding etc. is ready for reviews. This is blocking many new features like token signing, not pulling unnecessary context files, exposing sources outside working directory etc. + + +### Quality: Dependency interface switch + +Work continues on making the builder dependency interface more stable. + +Merged as part of this effort this week: + +- [Refactor COPY/ADD dispatchers](https://github.com/moby/moby/pull/33116) + +In review: +- [Refactor builder probe cache and container backend](https://github.com/moby/moby/pull/33061) + +### Buildkit + +[Diff and snapshot services](https://github.com/containerd/containerd/pull/849) were added to containerd. This is a required dependency for [buildkit](https://github.com/moby/moby/issues/32925). + +### Proposals discussed in maintainers meeting + +New builder proposals were discussed in maintainers meeting. 
The decision was to give 2 more weeks for anyone to post feedback to [IMPORT/EXPORT commands](https://github.com/moby/moby/issues/32100) and [`RUN --mount`](https://github.com/moby/moby/issues/32507) and accept them for development if nothing significant comes up. + +Build secrets and its possible overlap with [--mount](https://github.com/moby/moby/issues/32507) was discussed as well. The decision was to create a [new issue](https://github.com/moby/moby/issues/33343)(as the [old PR](https://github.com/moby/moby/pull/30637) is closed) to track this and avoid it from blocking `--mount` implementation. + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +If you are interested in implementing any of them, leave a comment on the specific issues. + +### Other new builder features currently in code-review: + +- diff --git a/fn/vendor/github.com/docker/docker/reports/builder/2017-05-29.md b/fn/vendor/github.com/docker/docker/reports/builder/2017-05-29.md new file mode 100644 index 000000000..33043d9f3 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/reports/builder/2017-05-29.md @@ -0,0 +1,52 @@ +# Development Report for May 29, 2017 + +### New feature: Long running session + +PR for [adding long-running session between daemon and cli](https://github.com/moby/moby/pull/32677) that enables advanced features like incremental context send, build credentials from the client, ssh forwarding, etc. is ready for reviews. 
It is blocking many new features like the token signing, not pulling unnecessary context files, exposing sources outside working directory, etc. Maintainers are encouraged to give this one a review! + + +### Quality: Dependency interface switch + +Work continues on making the builder dependency interface more stable. + +Merged as part of this effort this week: + +- [Refactor builder probe cache and container backend](https://github.com/moby/moby/pull/33061) + +@dnephin continues working on the copy/export aspects of the interface. + +### Buildkit + +Some initial proof of concept code for [buildkit](https://github.com/moby/moby/issues/32925) has been pushed to https://github.com/tonistiigi/buildkit_poc . It's in a very early exploratory stage. Current development has been about providing concurrent references based access to the snapshot data that is backed by containerd. More info should follow in next weeks, including hopefully opening up an official repo. If you have questions or want to help, stop by the issues section of that repo or the proposal in moby/moby. + +### Proposals discussed in maintainers meeting + +Reminder from last week: New builder proposals were discussed in maintainers meeting. The decision was to give 2 more weeks for anyone to post feedback to [IMPORT/EXPORT commands](https://github.com/moby/moby/issues/32100) and [`RUN --mount`](https://github.com/moby/moby/issues/32507) and accept them for development if nothing significant comes up. + +New issue about [build secrets](https://github.com/moby/moby/issues/33343) has not got much traction. If you want this feature to become a reality please make yourself heard. 
+ +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +If you are interested in implementing any of them, leave a comment on the specific issues. + +### Other new builder features currently in code-review: + +[Fix canceling builder on chunked requests](https://github.com/moby/moby/pull/33363) + +[Fix parser directive refactoring](https://github.com/moby/moby/pull/33436) + +[Warn/deprecate continuing on empty lines in `Dockerfile`](https://github.com/moby/moby/pull/29161) + +[Fix behavior of absolute paths in .dockerignore](https://github.com/moby/moby/pull/32088) \ No newline at end of file diff --git a/fn/vendor/github.com/docker/docker/reports/builder/2017-06-05.md b/fn/vendor/github.com/docker/docker/reports/builder/2017-06-05.md new file mode 100644 index 000000000..3746c2639 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/reports/builder/2017-06-05.md @@ -0,0 +1,58 @@ +# Development Report for June 5, 2017 + +### New feature: Long running session + +Similarly to last week, the PR for [adding long-running session between daemon and cli](https://github.com/moby/moby/pull/32677) is waiting for reviews. It is blocking many new features like the token signing, not pulling unnecessary context files, exposing sources outside working directory, etc. Maintainers are encouraged to give this one a review so it can be included in `v17.07` release. 
+ + +### Quality: Dependency interface switch + +Work continues on making the builder dependency interface more stable. + +PRs currently in review as part of this effort: + +- [Move file copying from the daemon to the builder](https://github.com/moby/moby/pull/33454) + +This PR is the core of the update that removes the need to track active containers and instead of lets builder hold references to layers while it's running. + +Related to this, @simonferquel opened a [WIP PR](https://github.com/moby/moby/pull/33492) that introduces typed Dockerfile parsing. This enables making [decisions about dependencies](https://github.com/moby/moby/issues/32550#issuecomment-297867334) between build stages and reusing Dockerfile parsing as a buildkit frontend. + +### Buildkit + +Some initial proof of concept code for [buildkit](https://github.com/moby/moby/issues/32925) has been pushed to https://github.com/tonistiigi/buildkit_poc . It's in a very early exploratory stage. Current codebase includes libraries for getting concurrency safe references to containerd snapshots using a centralized cache management instance. There is a sample source implementation for pulling images to these snapshots and executing jobs with runc on top of them. There is also some utility code for concurrent execution and progress stream handling. More info should follow in next weeks, including hopefully opening up an official repo. If you have questions or want to help, stop by the issues section of that repo or the proposal in moby/moby. + +### Proposals discussed in maintainers meeting + +Reminder from last week: New builder proposals were discussed in maintainers meeting. The decision was to give two more weeks for anyone to post feedback to [IMPORT/EXPORT commands](https://github.com/moby/moby/issues/32100) and [`RUN --mount`](https://github.com/moby/moby/issues/32507) and accept them for development if nothing significant comes up. 
It is the last week to post your feedback on these proposals or the comments in them. You can also volunteer to implement them. + +A new issue about [build secrets](https://github.com/moby/moby/issues/33343) has not got much traction. If you want this feature to become a reality, please make yourself heard. + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +[Provide advanced .dockeringore use-cases](https://github.com/moby/moby/issues/12886) [2](https://github.com/moby/moby/issues/12886#issuecomment-306247989) + +If you are interested in implementing any of them, leave a comment on the specific issues. 
+ +### Other builder PRs merged last week + +[Fix canceling builder on chunked requests](https://github.com/moby/moby/pull/33363) + +[Fix parser directive refactoring](https://github.com/moby/moby/pull/33436) + +### Builder features currently in code-review: + +[Warn/deprecate continuing on empty lines in `Dockerfile`](https://github.com/moby/moby/pull/29161) + +[Fix behavior of absolute paths in .dockerignore](https://github.com/moby/moby/pull/32088) \ No newline at end of file diff --git a/fn/vendor/github.com/docker/docker/reports/builder/2017-06-12.md b/fn/vendor/github.com/docker/docker/reports/builder/2017-06-12.md new file mode 100644 index 000000000..df5d801e7 --- /dev/null +++ b/fn/vendor/github.com/docker/docker/reports/builder/2017-06-12.md @@ -0,0 +1,58 @@ +# Development Report for June 12, 2017 + + +### Buildkit + +[Proposal](https://github.com/moby/moby/issues/32925) + +More updates to the [POC repo](https://github.com/tonistiigi/buildkit_poc). It now contains binaries for the daemon and client. Examples directory shows a way for invoking a build job by generating the internal low-level build graph definition with a helper binary(as there is not support for frontends yet). The grpc control server binary can be built in two versions, one that connects to containerD socket and other that doesn't have any external dependencies. + +If you have questions or want to help, stop by the issues section of that repo or the proposal in moby/moby. + +### Typed Dockerfile parsing + +[PR](https://github.com/moby/moby/pull/33492) + +New PR that enables parsing Dockerfiles into typed structures so they can be preprocessed to eliminate unnecessary build stages and reused with different kinds of dispatchers. + +### Long running session & incremental file sending + +[PR ](https://github.com/moby/moby/pull/32677) + +Same status as last week. The PR went through one pass of review from @dnephin and has been rebased again. 
Maintainers are encouraged to give this one a review so it can be included in `v17.07` release. + + +### Quality: Dependency interface switch + +[Move file copying from the daemon to the builder](https://github.com/moby/moby/pull/33454) PR is waiting for a second review. + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +[Provide advanced .dockeringore use-cases](https://github.com/moby/moby/issues/12886) [2](https://github.com/moby/moby/issues/12886#issuecomment-306247989) + +If you are interested in implementing any of them, leave a comment on the specific issues. + +### Other builder PRs merged last week + + +### Builder features currently in code-review: + +[Warn/deprecate continuing on empty lines in `Dockerfile`](https://github.com/moby/moby/pull/29161) + +[Fix behavior of absolute paths in .dockerignore](https://github.com/moby/moby/pull/32088) + +### Backlog + +[Build secrets](https://github.com/moby/moby/issues/33343) has not got much traction. If you want this feature to become a reality, please make yourself heard. 
\ No newline at end of file diff --git a/fn/vendor/github.com/docker/docker/reports/builder/2017-06-26.md b/fn/vendor/github.com/docker/docker/reports/builder/2017-06-26.md new file mode 100644 index 000000000..e0ba95a7a --- /dev/null +++ b/fn/vendor/github.com/docker/docker/reports/builder/2017-06-26.md @@ -0,0 +1,78 @@ +# Development Report for June 26, 2017 + + +### BuildKit + +[Repo](https://github.com/moby/buildkit) +[Proposal](https://github.com/moby/moby/issues/32925) + +New development repo is open at https://github.com/moby/buildkit + +The readme file provides examples how to get started. You can see an example of building BuildKit with BuildKit. + +There are lots of new issues opened as well to track the missing functionality. You are welcomed to help on any of them or discuss the design there. + +Last week most of the work was done on improving the `llb` client library for more complicated use cases and providing traces and interactive progress of executed build jobs. + +The `llb` client package is a go library that helps you to generate the build definition graph. It uses chained methods to make it easy to describe what steps need to be running. Mounts can be added to the execution steps for defining multiple inputs or outputs. To prepare the graph, you just have to call `Marshal()` on a leaf node that will generate the protobuf definition for everything required to build that node. + +### Typed Dockerfile parsing + +[PR](https://github.com/moby/moby/pull/33492) + +This PR that enables parsing Dockerfiles into typed structures so they can be preprocessed to eliminate unnecessary build stages and reused with different kinds of dispatchers(eg. BuildKit). + +The PR had some review and updates in last week. Should be ready to code review soon. + +### Merged: Long running session & incremental file sending + +[PR](https://github.com/moby/moby/pull/32677) + +Incremental context sending PR was merged and is expected to land in `v17.07`. 
This experimental feature lets you skip sending the build context to the daemon on repeated builder invocations during development. Currently, this feature requires the CLI flag `--stream=true`. If this flag is used, on the first builder invocation the full build context is sent to the daemon. On a second attempt, only the changed files are transferred.
+ +### Other builder PRs merged last week + +[Warn/deprecate continuing on empty lines in `Dockerfile`](https://github.com/moby/moby/pull/29161) + +[Fix behavior of absolute paths in .dockerignore](https://github.com/moby/moby/pull/32088) + +[fix copy —from conflict with force pull](https://github.com/moby/moby/pull/33735) + +### Builder features currently in code-review: + +[Fix handling of remote "git@" notation](https://github.com/moby/moby/pull/33696) + +[builder: Emit a BuildResult after squashing.](https://github.com/moby/moby/pull/33824) + +[Fix shallow git clone in docker-build](https://github.com/moby/moby/pull/33704) + +### Backlog + +[Build secrets](https://github.com/moby/moby/issues/33343) has not got much traction. If you want this feature to become a reality, please make yourself heard. \ No newline at end of file diff --git a/fn/vendor/github.com/docker/docker/runconfig/config.go b/fn/vendor/github.com/docker/docker/runconfig/config.go index 43de4bb99..c9dc6e96e 100644 --- a/fn/vendor/github.com/docker/docker/runconfig/config.go +++ b/fn/vendor/github.com/docker/docker/runconfig/config.go @@ -79,6 +79,11 @@ func DecodeContainerConfig(src io.Reader) (*container.Config, *container.HostCon return nil, nil, nil, err } + // Validate ReadonlyRootfs + if err := validateReadonlyRootfs(hc); err != nil { + return nil, nil, nil, err + } + return w.Config, hc, w.NetworkingConfig, nil } diff --git a/fn/vendor/github.com/docker/docker/runconfig/config_test.go b/fn/vendor/github.com/docker/docker/runconfig/config_test.go index f1f9de595..83ec363a0 100644 --- a/fn/vendor/github.com/docker/docker/runconfig/config_test.go +++ b/fn/vendor/github.com/docker/docker/runconfig/config_test.go @@ -75,9 +75,9 @@ func TestDecodeContainerConfig(t *testing.T) { // as to what level of container isolation is supported. 
func TestDecodeContainerConfigIsolation(t *testing.T) { - // An invalid isolation level + // An Invalid isolation level if _, _, _, err := callDecodeContainerConfigIsolation("invalid"); err != nil { - if !strings.Contains(err.Error(), `invalid --isolation: "invalid"`) { + if !strings.Contains(err.Error(), `Invalid isolation: "invalid"`) { t.Fatal(err) } } @@ -99,7 +99,7 @@ func TestDecodeContainerConfigIsolation(t *testing.T) { } } else { if _, _, _, err := callDecodeContainerConfigIsolation("process"); err != nil { - if !strings.Contains(err.Error(), `invalid --isolation: "process"`) { + if !strings.Contains(err.Error(), `Invalid isolation: "process"`) { t.Fatal(err) } } @@ -112,7 +112,7 @@ func TestDecodeContainerConfigIsolation(t *testing.T) { } } else { if _, _, _, err := callDecodeContainerConfigIsolation("hyperv"); err != nil { - if !strings.Contains(err.Error(), `invalid --isolation: "hyperv"`) { + if !strings.Contains(err.Error(), `Invalid isolation: "hyperv"`) { t.Fatal(err) } } diff --git a/fn/vendor/github.com/docker/docker/runconfig/hostconfig.go b/fn/vendor/github.com/docker/docker/runconfig/hostconfig.go index e8eede150..24aed1935 100644 --- a/fn/vendor/github.com/docker/docker/runconfig/hostconfig.go +++ b/fn/vendor/github.com/docker/docker/runconfig/hostconfig.go @@ -45,7 +45,7 @@ func validateNetContainerMode(c *container.Config, hc *container.HostConfig) err parts := strings.Split(string(hc.NetworkMode), ":") if parts[0] == "container" { if len(parts) < 2 || parts[1] == "" { - return fmt.Errorf("--net: invalid net mode: invalid container format container:") + return fmt.Errorf("Invalid network mode: invalid container format container:") } } diff --git a/fn/vendor/github.com/docker/docker/runconfig/hostconfig_test.go b/fn/vendor/github.com/docker/docker/runconfig/hostconfig_test.go index 7f39cf521..a6a3eef7c 100644 --- a/fn/vendor/github.com/docker/docker/runconfig/hostconfig_test.go +++ 
b/fn/vendor/github.com/docker/docker/runconfig/hostconfig_test.go @@ -167,11 +167,11 @@ func TestPidModeTest(t *testing.T) { func TestRestartPolicy(t *testing.T) { restartPolicies := map[container.RestartPolicy][]bool{ // none, always, failure - container.RestartPolicy{}: {true, false, false}, - container.RestartPolicy{Name: "something", MaximumRetryCount: 0}: {false, false, false}, - container.RestartPolicy{Name: "no", MaximumRetryCount: 0}: {true, false, false}, - container.RestartPolicy{Name: "always", MaximumRetryCount: 0}: {false, true, false}, - container.RestartPolicy{Name: "on-failure", MaximumRetryCount: 0}: {false, false, true}, + {}: {true, false, false}, + {Name: "something", MaximumRetryCount: 0}: {false, false, false}, + {Name: "no", MaximumRetryCount: 0}: {true, false, false}, + {Name: "always", MaximumRetryCount: 0}: {false, true, false}, + {Name: "on-failure", MaximumRetryCount: 0}: {false, false, true}, } for restartPolicy, state := range restartPolicies { if restartPolicy.IsNone() != state[0] { diff --git a/fn/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go b/fn/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go index 9af32b8a6..55df5da3f 100644 --- a/fn/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go +++ b/fn/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go @@ -55,7 +55,7 @@ func validateIsolation(hc *container.HostConfig) error { return nil } if !hc.Isolation.IsValid() { - return fmt.Errorf("invalid --isolation: %q - %s only supports 'default'", hc.Isolation, runtime.GOOS) + return fmt.Errorf("Invalid isolation: %q - %s only supports 'default'", hc.Isolation, runtime.GOOS) } return nil } @@ -68,11 +68,11 @@ func validateQoS(hc *container.HostConfig) error { } if hc.IOMaximumBandwidth != 0 { - return fmt.Errorf("invalid QoS settings: %s does not support --io-maxbandwidth", runtime.GOOS) + return fmt.Errorf("Invalid QoS settings: %s does not support configuration of maximum bandwidth", runtime.GOOS) 
} if hc.IOMaximumIOps != 0 { - return fmt.Errorf("invalid QoS settings: %s does not support --io-maxiops", runtime.GOOS) + return fmt.Errorf("Invalid QoS settings: %s does not support configuration of maximum IOPs", runtime.GOOS) } return nil } @@ -86,15 +86,15 @@ func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { } if hc.Resources.CPURealtimePeriod > 0 && !si.CPURealtimePeriod { - return fmt.Errorf("invalid --cpu-rt-period: Your kernel does not support cgroup rt period") + return fmt.Errorf("Your kernel does not support cgroup cpu real-time period") } if hc.Resources.CPURealtimeRuntime > 0 && !si.CPURealtimeRuntime { - return fmt.Errorf("invalid --cpu-rt-runtime: Your kernel does not support cgroup rt runtime") + return fmt.Errorf("Your kernel does not support cgroup cpu real-time runtime") } if hc.Resources.CPURealtimePeriod != 0 && hc.Resources.CPURealtimeRuntime != 0 && hc.Resources.CPURealtimeRuntime > hc.Resources.CPURealtimePeriod { - return fmt.Errorf("invalid --cpu-rt-runtime: rt runtime cannot be higher than rt period") + return fmt.Errorf("cpu real-time runtime cannot be higher than cpu real-time period") } return nil } @@ -103,3 +103,8 @@ func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { func validatePrivileged(hc *container.HostConfig) error { return nil } + +// validateReadonlyRootfs performs platform specific validation of the ReadonlyRootfs setting +func validateReadonlyRootfs(hc *container.HostConfig) error { + return nil +} diff --git a/fn/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go b/fn/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go index 63bc7523b..5eb956d1b 100644 --- a/fn/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go +++ b/fn/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go @@ -31,7 +31,7 @@ func validateNetMode(c *container.Config, hc *container.HostConfig) error { } if hc.NetworkMode.IsContainer() && 
hc.Isolation.IsHyperV() { - return fmt.Errorf("net mode --net=container: unsupported for hyperv isolation") + return fmt.Errorf("Using the network stack of another container is not supported while using Hyper-V Containers") } return nil @@ -46,7 +46,7 @@ func validateIsolation(hc *container.HostConfig) error { return nil } if !hc.Isolation.IsValid() { - return fmt.Errorf("invalid --isolation: %q. Windows supports 'default', 'process', or 'hyperv'", hc.Isolation) + return fmt.Errorf("Invalid isolation: %q. Windows supports 'default', 'process', or 'hyperv'", hc.Isolation) } return nil } @@ -63,10 +63,10 @@ func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { return nil } if hc.Resources.CPURealtimePeriod != 0 { - return fmt.Errorf("invalid --cpu-rt-period: Windows does not support this feature") + return fmt.Errorf("Windows does not support CPU real-time period") } if hc.Resources.CPURealtimeRuntime != 0 { - return fmt.Errorf("invalid --cpu-rt-runtime: Windows does not support this feature") + return fmt.Errorf("Windows does not support CPU real-time runtime") } return nil } @@ -78,7 +78,19 @@ func validatePrivileged(hc *container.HostConfig) error { return nil } if hc.Privileged { - return fmt.Errorf("invalid --privileged: Windows does not support this feature") + return fmt.Errorf("Windows does not support privileged mode") + } + return nil +} + +// validateReadonlyRootfs performs platform specific validation of the ReadonlyRootfs setting +func validateReadonlyRootfs(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if hc.ReadonlyRootfs { + return fmt.Errorf("Windows does not support root filesystem in read-only mode") } return nil } diff --git a/fn/vendor/github.com/docker/docker/runconfig/hostconfig_windows_test.go b/fn/vendor/github.com/docker/docker/runconfig/hostconfig_windows_test.go index 174a65fcb..b780dc05d 100644 --- 
a/fn/vendor/github.com/docker/docker/runconfig/hostconfig_windows_test.go +++ b/fn/vendor/github.com/docker/docker/runconfig/hostconfig_windows_test.go @@ -9,7 +9,7 @@ import ( ) func TestValidatePrivileged(t *testing.T) { - expected := "invalid --privileged: Windows does not support this feature" + expected := "Windows does not support privileged mode" err := validatePrivileged(&container.HostConfig{Privileged: true}) if err == nil || err.Error() != expected { t.Fatalf("Expected %s", expected) diff --git a/fn/vendor/github.com/docker/docker/runconfig/opts/envfile.go b/fn/vendor/github.com/docker/docker/runconfig/opts/envfile.go deleted file mode 100644 index f72379921..000000000 --- a/fn/vendor/github.com/docker/docker/runconfig/opts/envfile.go +++ /dev/null @@ -1,81 +0,0 @@ -package opts - -import ( - "bufio" - "bytes" - "fmt" - "os" - "strings" - "unicode" - "unicode/utf8" -) - -// ParseEnvFile reads a file with environment variables enumerated by lines -// -// ``Environment variable names used by the utilities in the Shell and -// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase -// letters, digits, and the '_' (underscore) from the characters defined in -// Portable Character Set and do not begin with a digit. *But*, other -// characters may be permitted by an implementation; applications shall -// tolerate the presence of such names.'' -// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html -// -// As of #16585, it's up to application inside docker to validate or not -// environment variables, that's why we just strip leading whitespace and -// nothing more. 
-func ParseEnvFile(filename string) ([]string, error) { - fh, err := os.Open(filename) - if err != nil { - return []string{}, err - } - defer fh.Close() - - lines := []string{} - scanner := bufio.NewScanner(fh) - currentLine := 0 - utf8bom := []byte{0xEF, 0xBB, 0xBF} - for scanner.Scan() { - scannedBytes := scanner.Bytes() - if !utf8.Valid(scannedBytes) { - return []string{}, fmt.Errorf("env file %s contains invalid utf8 bytes at line %d: %v", filename, currentLine+1, scannedBytes) - } - // We trim UTF8 BOM - if currentLine == 0 { - scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) - } - // trim the line from all leading whitespace first - line := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace) - currentLine++ - // line is not empty, and not starting with '#' - if len(line) > 0 && !strings.HasPrefix(line, "#") { - data := strings.SplitN(line, "=", 2) - - // trim the front of a variable, but nothing else - variable := strings.TrimLeft(data[0], whiteSpaces) - if strings.ContainsAny(variable, whiteSpaces) { - return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)} - } - - if len(data) > 1 { - - // pass the value through, no trimming - lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1])) - } else { - // if only a pass-through variable is given, clean it up. 
- lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line))) - } - } - } - return lines, scanner.Err() -} - -var whiteSpaces = " \t" - -// ErrBadEnvVariable typed error for bad environment variable -type ErrBadEnvVariable struct { - msg string -} - -func (e ErrBadEnvVariable) Error() string { - return fmt.Sprintf("poorly formatted environment: %s", e.msg) -} diff --git a/fn/vendor/github.com/docker/docker/runconfig/opts/envfile_test.go b/fn/vendor/github.com/docker/docker/runconfig/opts/envfile_test.go deleted file mode 100644 index f3faabe3c..000000000 --- a/fn/vendor/github.com/docker/docker/runconfig/opts/envfile_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package opts - -import ( - "bufio" - "fmt" - "io/ioutil" - "os" - "reflect" - "strings" - "testing" -) - -func tmpFileWithContent(content string, t *testing.T) string { - tmpFile, err := ioutil.TempFile("", "envfile-test") - if err != nil { - t.Fatal(err) - } - defer tmpFile.Close() - - tmpFile.WriteString(content) - return tmpFile.Name() -} - -// Test ParseEnvFile for a file with a few well formatted lines -func TestParseEnvFileGoodFile(t *testing.T) { - content := `foo=bar - baz=quux -# comment - -_foobar=foobaz -with.dots=working -and_underscore=working too -` - // Adding a newline + a line with pure whitespace. - // This is being done like this instead of the block above - // because it's common for editors to trim trailing whitespace - // from lines, which becomes annoying since that's the - // exact thing we need to test. 
- content += "\n \t " - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - lines, err := ParseEnvFile(tmpFile) - if err != nil { - t.Fatal(err) - } - - expectedLines := []string{ - "foo=bar", - "baz=quux", - "_foobar=foobaz", - "with.dots=working", - "and_underscore=working too", - } - - if !reflect.DeepEqual(lines, expectedLines) { - t.Fatal("lines not equal to expectedLines") - } -} - -// Test ParseEnvFile for an empty file -func TestParseEnvFileEmptyFile(t *testing.T) { - tmpFile := tmpFileWithContent("", t) - defer os.Remove(tmpFile) - - lines, err := ParseEnvFile(tmpFile) - if err != nil { - t.Fatal(err) - } - - if len(lines) != 0 { - t.Fatal("lines not empty; expected empty") - } -} - -// Test ParseEnvFile for a non existent file -func TestParseEnvFileNonExistentFile(t *testing.T) { - _, err := ParseEnvFile("foo_bar_baz") - if err == nil { - t.Fatal("ParseEnvFile succeeded; expected failure") - } - if _, ok := err.(*os.PathError); !ok { - t.Fatalf("Expected a PathError, got [%v]", err) - } -} - -// Test ParseEnvFile for a badly formatted file -func TestParseEnvFileBadlyFormattedFile(t *testing.T) { - content := `foo=bar - f =quux -` - - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - _, err := ParseEnvFile(tmpFile) - if err == nil { - t.Fatalf("Expected an ErrBadEnvVariable, got nothing") - } - if _, ok := err.(ErrBadEnvVariable); !ok { - t.Fatalf("Expected an ErrBadEnvVariable, got [%v]", err) - } - expectedMessage := "poorly formatted environment: variable 'f ' has white spaces" - if err.Error() != expectedMessage { - t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) - } -} - -// Test ParseEnvFile for a file with a line exceeding bufio.MaxScanTokenSize -func TestParseEnvFileLineTooLongFile(t *testing.T) { - content := strings.Repeat("a", bufio.MaxScanTokenSize+42) - content = fmt.Sprint("foo=", content) - - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - _, err := 
ParseEnvFile(tmpFile) - if err == nil { - t.Fatal("ParseEnvFile succeeded; expected failure") - } -} - -// ParseEnvFile with a random file, pass through -func TestParseEnvFileRandomFile(t *testing.T) { - content := `first line -another invalid line` - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - _, err := ParseEnvFile(tmpFile) - if err == nil { - t.Fatalf("Expected an ErrBadEnvVariable, got nothing") - } - if _, ok := err.(ErrBadEnvVariable); !ok { - t.Fatalf("Expected an ErrBadEnvVariable, got [%v]", err) - } - expectedMessage := "poorly formatted environment: variable 'first line' has white spaces" - if err.Error() != expectedMessage { - t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) - } -} diff --git a/fn/vendor/github.com/docker/docker/runconfig/opts/parse.go b/fn/vendor/github.com/docker/docker/runconfig/opts/parse.go index a88ea1385..a7f1b79f1 100644 --- a/fn/vendor/github.com/docker/docker/runconfig/opts/parse.go +++ b/fn/vendor/github.com/docker/docker/runconfig/opts/parse.go @@ -1,30 +1,9 @@ package opts import ( - "fmt" - "strconv" "strings" - - "github.com/docker/docker/api/types/container" ) -// ReadKVStrings reads a file of line terminated key=value pairs, and overrides any keys -// present in the file with additional pairs specified in the override parameter -func ReadKVStrings(files []string, override []string) ([]string, error) { - envVariables := []string{} - for _, ef := range files { - parsedVars, err := ParseEnvFile(ef) - if err != nil { - return nil, err - } - envVariables = append(envVariables, parsedVars...) - } - // parse the '-e' and '--env' after, to allow override - envVariables = append(envVariables, override...) 
- - return envVariables, nil -} - // ConvertKVStringsToMap converts ["key=value"] to {"key":"value"} func ConvertKVStringsToMap(values []string) map[string]string { result := make(map[string]string, len(values)) @@ -39,49 +18,3 @@ func ConvertKVStringsToMap(values []string) map[string]string { return result } - -// ConvertKVStringsToMapWithNil converts ["key=value"] to {"key":"value"} -// but set unset keys to nil - meaning the ones with no "=" in them. -// We use this in cases where we need to distinguish between -// FOO= and FOO -// where the latter case just means FOO was mentioned but not given a value -func ConvertKVStringsToMapWithNil(values []string) map[string]*string { - result := make(map[string]*string, len(values)) - for _, value := range values { - kv := strings.SplitN(value, "=", 2) - if len(kv) == 1 { - result[kv[0]] = nil - } else { - result[kv[0]] = &kv[1] - } - } - - return result -} - -// ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect -func ParseRestartPolicy(policy string) (container.RestartPolicy, error) { - p := container.RestartPolicy{} - - if policy == "" { - return p, nil - } - - parts := strings.Split(policy, ":") - - if len(parts) > 2 { - return p, fmt.Errorf("invalid restart policy format") - } - if len(parts) == 2 { - count, err := strconv.Atoi(parts[1]) - if err != nil { - return p, fmt.Errorf("maximum retry count must be an integer") - } - - p.MaximumRetryCount = count - } - - p.Name = parts[0] - - return p, nil -} diff --git a/fn/vendor/github.com/docker/docker/vendor.conf b/fn/vendor/github.com/docker/docker/vendor.conf index bd8fe31af..20460d5cc 100644 --- a/fn/vendor/github.com/docker/docker/vendor.conf +++ b/fn/vendor/github.com/docker/docker/vendor.conf @@ -1,44 +1,48 @@ # the following lines are in sorted order, FYI github.com/Azure/go-ansiterm 388960b655244e76e24c75f48631564eaefade62 -github.com/Microsoft/hcsshim v0.5.13 -# TODO: get rid of this fork once PR 
https://github.com/Microsoft/go-winio/pull/43 is merged -github.com/Microsoft/go-winio 7c7d6b461cb10872c1138a0d7f3acf9a41b5c353 https://github.com/dgageot/go-winio.git +github.com/Microsoft/hcsshim v0.5.25 +github.com/Microsoft/go-winio v0.4.2 github.com/Sirupsen/logrus v0.11.0 -github.com/davecgh/go-spew 6d212800a42e8ab5c146b8ace3490ee17e5225f9 +github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git github.com/gorilla/context v1.1 github.com/gorilla/mux v1.1 +github.com/jhowardmsft/opengcs v0.0.7 github.com/kr/pty 5cf931ef8f github.com/mattn/go-shellwords v1.0.3 github.com/tchap/go-patricia v2.2.6 github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 -# forked golang.org/x/net package includes a patch for lazy loading trace templates -golang.org/x/net c427ad74c6d7a814201695e9ffde0c5d400a7674 +golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6 golang.org/x/sys 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9 github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1 -github.com/docker/go-connections e15c02316c12de00874640cd76311849de2aeed5 +github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756 +github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987 +github.com/pmezard/go-difflib v1.0.0 github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5 github.com/imdario/mergo 0.2.1 +golang.org/x/sync de49d9dcd27d4f764488181bea099dfe6179bcf0 #get libnetwork packages -github.com/docker/libnetwork 5d4e5de2f9962c2de8a7872128e2cc09dfdd99aa +github.com/docker/libnetwork 6426d1e66f33c0b0c8bb135b7ee547447f54d043 github.com/docker/go-events 18b43f1bc85d9cdd42c05a6cd2d444c7a200a894 github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80 github.com/armon/go-metrics 
eb0af217e5e9747e41dd5303755356b62d28e3ec github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b -github.com/hashicorp/memberlist 88ac4de0d1a0ca6def284b571342db3b777a4c37 +github.com/hashicorp/memberlist v0.1.0 +github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372 +github.com/hashicorp/go-sockaddr acd314c5781ea706c710d9ea70069fd2e110d61d github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870 github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25 -github.com/vishvananda/netlink 1e86b2bee5b6a7d377e4c02bb7f98209d6a7297c +github.com/vishvananda/netlink bd6d5de5ccef2d66b0a26177928d0d8895d7f969 github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060 github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374 github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d -github.com/coreos/etcd 824277cb3a577a0e8c829ca9ec557b973fe06d20 +github.com/coreos/etcd ea5389a79f40206170582c1ea076191b8622cb8e https://github.com/aaronlehmann/etcd # for https://github.com/coreos/etcd/pull/7830 github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065 github.com/hashicorp/consul v0.5.2 github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904 @@ -53,23 +57,20 @@ github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa github.com/pborman/uuid v1.0 -# get desired notary commit, might also need to be updated in Dockerfile -github.com/docker/notary v0.4.2 - -google.golang.org/grpc v1.0.4 -github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f -github.com/docker/go v1.5.1-1-1-gbaf439e -github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c +google.golang.org/grpc v1.3.0 # When updating, also update RUNC_COMMIT in hack/dockerfile/binaries-commits 
accordingly -github.com/opencontainers/runc 9c2d8d184e5da67c95d601382adf14862e4f2228 https://github.com/docker/runc.git # libcontainer -github.com/opencontainers/runtime-spec 1c7c27d043c2a5e513a44084d2b10d77d1402b8c # specs -github.com/seccomp/libseccomp-golang v0.9.0 +github.com/opencontainers/runc 2d41c047c83e09a6d61d464906feb2a2f3c52aa4 https://github.com/docker/runc +github.com/opencontainers/image-spec f03dbe35d449c54915d235f1a3cf8f585a24babe +github.com/opencontainers/runtime-spec d42f1eb741e6361e858d83fc75aa6893b66292c4 # specs + +github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 + # libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json) github.com/coreos/go-systemd v4 github.com/godbus/dbus v4.0.0 github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852 -github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93 +github.com/golang/protobuf 7a211bcf3bce0e3f1d74f9894916e6f116ae83b4 # gelf logging driver deps github.com/Graylog2/go-gelf 7029da823dad4ef3a876df61065156acb703b2ea @@ -95,18 +96,17 @@ golang.org/x/oauth2 96382aa079b72d8c014eb0c50f6c223d1e6a2de0 google.golang.org/api 3cc2e591b550923a2c5f0ab5a803feda924d5823 cloud.google.com/go 9d965e63e8cceb1b5d7977a202f0fcb8866d6525 github.com/googleapis/gax-go da06d194a00e19ce00d9011a13931c3f6f6887c7 -google.golang.org/genproto b3e7c2fb04031add52c4817f53f43757ccbf9c18 - -# native credentials -github.com/docker/docker-credential-helpers v0.5.0 +google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 # containerd -github.com/docker/containerd 9048e5e50717ea4497b757314bad98ea3763c145 +github.com/containerd/containerd 3addd840653146c90a254301d6c3a663c7fd6429 github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4 +github.com/stevvooe/continuity cd7a8e21e2b6f84799f5dd4b65faf49c8d3ee02d +github.com/tonistiigi/fsutil 0ac4c11b053b9c5c7c47558f81f96c7100ce50fb # cluster -github.com/docker/swarmkit 
ae52d9de97b91eee978bc2fe411bc85b33eb82dd -github.com/gogo/protobuf 8d70fb3182befc465c4a1eac8ad4d38ff49778e2 +github.com/docker/swarmkit 79381d0840be27f8b3f5c667b348a4467d866eeb +github.com/gogo/protobuf v0.4 github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2 @@ -129,15 +129,9 @@ github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f github.com/spf13/cobra v1.5.1 https://github.com/dnephin/cobra.git github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7 github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.com/ijc25/Gotty # metrics -github.com/docker/go-metrics 86138d05f285fd9737a99bee2d9be30866b59d72 +github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18 -# composefile -github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715 -github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a -github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45 -github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d -gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6 +github.com/opencontainers/selinux v1.0.0-rc1 diff --git a/fn/vendor/github.com/docker/docker/volume/drivers/adapter.go b/fn/vendor/github.com/docker/docker/volume/drivers/adapter.go index 62ef7dfe6..304c81bc0 100644 --- a/fn/vendor/github.com/docker/docker/volume/drivers/adapter.go +++ b/fn/vendor/github.com/docker/docker/volume/drivers/adapter.go @@ -4,6 +4,7 @@ import ( "errors" "path/filepath" "strings" + "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/volume" @@ -82,6 +83,7 @@ func (a *volumeDriverAdapter) Get(name string) (volume.Volume, 
error) { name: v.Name, driverName: a.Name(), eMount: v.Mountpoint, + createdAt: v.CreatedAt, status: v.Status, baseHostPath: a.baseHostPath, }, nil @@ -100,7 +102,7 @@ func (a *volumeDriverAdapter) getCapabilities() volume.Capability { if err != nil { // `GetCapabilities` is a not a required endpoint. // On error assume it's a local-only driver - logrus.Warnf("Volume driver %s returned an error while trying to query its capabilities, using default capabilties: %v", a.name, err) + logrus.Warnf("Volume driver %s returned an error while trying to query its capabilities, using default capabilities: %v", a.name, err) return volume.Capability{Scope: volume.LocalScope} } @@ -124,13 +126,15 @@ type volumeAdapter struct { name string baseHostPath string driverName string - eMount string // ephemeral host volume path + eMount string // ephemeral host volume path + createdAt time.Time // time the directory was created status map[string]interface{} } type proxyVolume struct { Name string Mountpoint string + CreatedAt time.Time Status map[string]interface{} } @@ -168,6 +172,9 @@ func (a *volumeAdapter) Unmount(id string) error { return err } +func (a *volumeAdapter) CreatedAt() (time.Time, error) { + return a.createdAt, nil +} func (a *volumeAdapter) Status() map[string]interface{} { out := make(map[string]interface{}, len(a.status)) for k, v := range a.status { diff --git a/fn/vendor/github.com/docker/docker/volume/local/local.go b/fn/vendor/github.com/docker/docker/volume/local/local.go index 6631423bb..43ba1e1db 100644 --- a/fn/vendor/github.com/docker/docker/volume/local/local.go +++ b/fn/vendor/github.com/docker/docker/volume/local/local.go @@ -55,10 +55,10 @@ type activeMount struct { // New instantiates a new Root instance with the provided scope. Scope // is the base path that the Root instance uses to store its // volumes. The base path is created here if it does not exist. 
-func New(scope string, rootUID, rootGID int) (*Root, error) { +func New(scope string, rootIDs idtools.IDPair) (*Root, error) { rootDirectory := filepath.Join(scope, volumesPathName) - if err := idtools.MkdirAllAs(rootDirectory, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChown(rootDirectory, 0700, rootIDs); err != nil { return nil, err } @@ -66,8 +66,7 @@ func New(scope string, rootUID, rootGID int) (*Root, error) { scope: scope, path: rootDirectory, volumes: make(map[string]*localVolume), - rootUID: rootUID, - rootGID: rootGID, + rootIDs: rootIDs, } dirs, err := ioutil.ReadDir(rootDirectory) @@ -125,8 +124,7 @@ type Root struct { scope string path string volumes map[string]*localVolume - rootUID int - rootGID int + rootIDs idtools.IDPair } // List lists all the volumes @@ -167,7 +165,7 @@ func (r *Root) Create(name string, opts map[string]string) (volume.Volume, error } path := r.DataPath(name) - if err := idtools.MkdirAllAs(path, 0755, r.rootUID, r.rootGID); err != nil { + if err := idtools.MkdirAllAndChown(path, 0755, r.rootIDs); err != nil { if os.IsExist(err) { return nil, fmt.Errorf("volume already exists under %s", filepath.Dir(path)) } diff --git a/fn/vendor/github.com/docker/docker/volume/local/local_test.go b/fn/vendor/github.com/docker/docker/volume/local/local_test.go index f5a519b88..2353391aa 100644 --- a/fn/vendor/github.com/docker/docker/volume/local/local_test.go +++ b/fn/vendor/github.com/docker/docker/volume/local/local_test.go @@ -9,6 +9,7 @@ import ( "strings" "testing" + "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" ) @@ -40,7 +41,7 @@ func TestRemove(t *testing.T) { } defer os.RemoveAll(rootDir) - r, err := New(rootDir, 0, 0) + r, err := New(rootDir, idtools.IDPair{UID: 0, GID: 0}) if err != nil { t.Fatal(err) } @@ -82,7 +83,7 @@ func TestInitializeWithVolumes(t *testing.T) { } defer os.RemoveAll(rootDir) - r, err := New(rootDir, 0, 0) + r, err := New(rootDir, idtools.IDPair{UID: 0, GID: 
0}) if err != nil { t.Fatal(err) } @@ -92,7 +93,7 @@ func TestInitializeWithVolumes(t *testing.T) { t.Fatal(err) } - r, err = New(rootDir, 0, 0) + r, err = New(rootDir, idtools.IDPair{UID: 0, GID: 0}) if err != nil { t.Fatal(err) } @@ -114,7 +115,7 @@ func TestCreate(t *testing.T) { } defer os.RemoveAll(rootDir) - r, err := New(rootDir, 0, 0) + r, err := New(rootDir, idtools.IDPair{UID: 0, GID: 0}) if err != nil { t.Fatal(err) } @@ -151,7 +152,7 @@ func TestCreate(t *testing.T) { } } - r, err = New(rootDir, 0, 0) + r, err = New(rootDir, idtools.IDPair{UID: 0, GID: 0}) if err != nil { t.Fatal(err) } @@ -189,7 +190,7 @@ func TestCreateWithOpts(t *testing.T) { } defer os.RemoveAll(rootDir) - r, err := New(rootDir, 0, 0) + r, err := New(rootDir, idtools.IDPair{UID: 0, GID: 0}) if err != nil { t.Fatal(err) } @@ -270,7 +271,7 @@ func TestCreateWithOpts(t *testing.T) { t.Fatal("expected mount to still be active") } - r, err = New(rootDir, 0, 0) + r, err = New(rootDir, idtools.IDPair{UID: 0, GID: 0}) if err != nil { t.Fatal(err) } @@ -292,7 +293,7 @@ func TestRealodNoOpts(t *testing.T) { } defer os.RemoveAll(rootDir) - r, err := New(rootDir, 0, 0) + r, err := New(rootDir, idtools.IDPair{UID: 0, GID: 0}) if err != nil { t.Fatal(err) } @@ -320,7 +321,7 @@ func TestRealodNoOpts(t *testing.T) { t.Fatal(err) } - r, err = New(rootDir, 0, 0) + r, err = New(rootDir, idtools.IDPair{UID: 0, GID: 0}) if err != nil { t.Fatal(err) } diff --git a/fn/vendor/github.com/docker/docker/volume/local/local_unix.go b/fn/vendor/github.com/docker/docker/volume/local/local_unix.go index fb08862ce..5bba5b706 100644 --- a/fn/vendor/github.com/docker/docker/volume/local/local_unix.go +++ b/fn/vendor/github.com/docker/docker/volume/local/local_unix.go @@ -8,8 +8,11 @@ package local import ( "fmt" "net" + "os" "path/filepath" "strings" + "syscall" + "time" "github.com/pkg/errors" @@ -85,3 +88,12 @@ func (v *localVolume) mount() error { err := mount.Mount(v.opts.MountDevice, v.path, v.opts.MountType, 
mountOpts) return errors.Wrapf(err, "error while mounting volume with options: %s", v.opts) } + +func (v *localVolume) CreatedAt() (time.Time, error) { + fileInfo, err := os.Stat(v.path) + if err != nil { + return time.Time{}, err + } + sec, nsec := fileInfo.Sys().(*syscall.Stat_t).Ctim.Unix() + return time.Unix(sec, nsec), nil +} diff --git a/fn/vendor/github.com/docker/docker/volume/local/local_windows.go b/fn/vendor/github.com/docker/docker/volume/local/local_windows.go index 1bdb368a0..6f5d2223a 100644 --- a/fn/vendor/github.com/docker/docker/volume/local/local_windows.go +++ b/fn/vendor/github.com/docker/docker/volume/local/local_windows.go @@ -5,8 +5,11 @@ package local import ( "fmt" + "os" "path/filepath" "strings" + "syscall" + "time" ) type optsConfig struct{} @@ -32,3 +35,12 @@ func setOpts(v *localVolume, opts map[string]string) error { func (v *localVolume) mount() error { return nil } + +func (v *localVolume) CreatedAt() (time.Time, error) { + fileInfo, err := os.Stat(v.path) + if err != nil { + return time.Time{}, err + } + ft := fileInfo.Sys().(*syscall.Win32FileAttributeData).CreationTime + return time.Unix(0, ft.Nanoseconds()), nil +} diff --git a/fn/vendor/github.com/docker/docker/volume/store/store_test.go b/fn/vendor/github.com/docker/docker/volume/store/store_test.go index c94237ac3..f5f00255a 100644 --- a/fn/vendor/github.com/docker/docker/volume/store/store_test.go +++ b/fn/vendor/github.com/docker/docker/volume/store/store_test.go @@ -212,7 +212,7 @@ func TestDerefMultipleOfSameRef(t *testing.T) { if err != nil { t.Fatal(err) } - + defer os.RemoveAll(dir) s, err := New(dir) if err != nil { t.Fatal(err) diff --git a/fn/vendor/github.com/docker/docker/volume/testutils/testutils.go b/fn/vendor/github.com/docker/docker/volume/testutils/testutils.go index 2dbac02fd..359d92382 100644 --- a/fn/vendor/github.com/docker/docker/volume/testutils/testutils.go +++ b/fn/vendor/github.com/docker/docker/volume/testutils/testutils.go @@ -2,6 +2,7 @@ package 
testutils import ( "fmt" + "time" "github.com/docker/docker/volume" ) @@ -24,9 +25,12 @@ func (NoopVolume) Mount(_ string) (string, error) { return "noop", nil } // Unmount unmounts the volume from the container func (NoopVolume) Unmount(_ string) error { return nil } -// Status proivdes low-level details about the volume +// Status provides low-level details about the volume func (NoopVolume) Status() map[string]interface{} { return nil } +// CreatedAt provides the time the volume (directory) was created at +func (NoopVolume) CreatedAt() (time.Time, error) { return time.Now(), nil } + // FakeVolume is a fake volume with a random name type FakeVolume struct { name string @@ -53,9 +57,12 @@ func (FakeVolume) Mount(_ string) (string, error) { return "fake", nil } // Unmount unmounts the volume from the container func (FakeVolume) Unmount(_ string) error { return nil } -// Status proivdes low-level details about the volume +// Status provides low-level details about the volume func (FakeVolume) Status() map[string]interface{} { return nil } +// CreatedAt provides the time the volume (directory) was created at +func (FakeVolume) CreatedAt() (time.Time, error) { return time.Now(), nil } + // FakeDriver is a driver that generates fake volumes type FakeDriver struct { name string diff --git a/fn/vendor/github.com/docker/docker/volume/volume.go b/fn/vendor/github.com/docker/docker/volume/volume.go index d73e2d511..8598d4cb8 100644 --- a/fn/vendor/github.com/docker/docker/volume/volume.go +++ b/fn/vendor/github.com/docker/docker/volume/volume.go @@ -6,11 +6,12 @@ import ( "path/filepath" "strings" "syscall" + "time" mounttypes "github.com/docker/docker/api/types/mount" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/stringid" - "github.com/opencontainers/runc/libcontainer/label" + "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" ) @@ -64,6 +65,8 @@ type Volume interface { Mount(id string) (string, error) // Unmount unmounts 
the volume when it is no longer in use. Unmount(id string) error + // CreatedAt returns Volume Creation time + CreatedAt() (time.Time, error) // Status returns low-level status information about a volume Status() map[string]interface{} } @@ -120,22 +123,48 @@ type MountPoint struct { // Sepc is a copy of the API request that created this mount. Spec mounttypes.Mount + + // Track usage of this mountpoint + // Specifically needed for containers which are running and calls to `docker cp` + // because both these actions require mounting the volumes. + active int +} + +// Cleanup frees resources used by the mountpoint +func (m *MountPoint) Cleanup() error { + if m.Volume == nil || m.ID == "" { + return nil + } + + if err := m.Volume.Unmount(m.ID); err != nil { + return errors.Wrapf(err, "error unmounting volume %s", m.Volume.Name()) + } + + m.active-- + if m.active == 0 { + m.ID = "" + } + return nil } // Setup sets up a mount point by either mounting the volume if it is // configured, or creating the source directory if supplied. -func (m *MountPoint) Setup(mountLabel string, rootUID, rootGID int) (path string, err error) { +// The, optional, checkFun parameter allows doing additional checking +// before creating the source directory on the host. 
+func (m *MountPoint) Setup(mountLabel string, rootIDs idtools.IDPair, checkFun func(m *MountPoint) error) (path string, err error) { defer func() { - if err == nil { - if label.RelabelNeeded(m.Mode) { - if err = label.Relabel(m.Source, mountLabel, label.IsShared(m.Mode)); err != nil { - path = "" - err = errors.Wrapf(err, "error setting label on mount source '%s'", m.Source) - return - } - } + if err != nil || !label.RelabelNeeded(m.Mode) { + return + } + + err = label.Relabel(m.Source, mountLabel, label.IsShared(m.Mode)) + if err == syscall.ENOTSUP { + err = nil + } + if err != nil { + path = "" + err = errors.Wrapf(err, "error setting label on mount source '%s'", m.Source) } - return }() if m.Volume != nil { @@ -147,17 +176,29 @@ func (m *MountPoint) Setup(mountLabel string, rootUID, rootGID int) (path string if err != nil { return "", errors.Wrapf(err, "error while mounting volume '%s'", m.Source) } + m.ID = id + m.active++ return path, nil } + if len(m.Source) == 0 { return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined") } + // system.MkdirAll() produces an error if m.Source exists and is a file (not a directory), if m.Type == mounttypes.TypeBind { + // Before creating the source directory on the host, invoke checkFun if it's not nil. One of + // the use case is to forbid creating the daemon socket as a directory if the daemon is in + // the process of shutting down. 
+ if checkFun != nil { + if err := checkFun(m); err != nil { + return "", err + } + } // idtools.MkdirAllNewAs() produces an error if m.Source exists and is a file (not a directory) // also, makes sure that if the directory is created, the correct remapped rootUID/rootGID will own it - if err := idtools.MkdirAllNewAs(m.Source, 0755, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChownNew(m.Source, 0755, rootIDs); err != nil { if perr, ok := err.(*os.PathError); ok { if perr.Err != syscall.ENOTDIR { return "", errors.Wrapf(err, "error while creating mount source path '%s'", m.Source) @@ -311,10 +352,12 @@ func ParseMountSpec(cfg mounttypes.Mount, options ...func(*validateOpts)) (*Moun } case mounttypes.TypeBind: mp.Source = clean(convertSlash(cfg.Source)) - if cfg.BindOptions != nil { - if len(cfg.BindOptions.Propagation) > 0 { - mp.Propagation = cfg.BindOptions.Propagation - } + if cfg.BindOptions != nil && len(cfg.BindOptions.Propagation) > 0 { + mp.Propagation = cfg.BindOptions.Propagation + } else { + // If user did not specify a propagation mode, get + // default propagation mode. + mp.Propagation = DefaultPropagationMode } case mounttypes.TypeTmpfs: // NOP diff --git a/fn/vendor/github.com/docker/docker/volume/volume_linux.go b/fn/vendor/github.com/docker/docker/volume/volume_linux.go index d4b4d800b..fdf7b63e4 100644 --- a/fn/vendor/github.com/docker/docker/volume/volume_linux.go +++ b/fn/vendor/github.com/docker/docker/volume/volume_linux.go @@ -26,7 +26,7 @@ func ConvertTmpfsOptions(opt *mounttypes.TmpfsOptions, readOnly bool) (string, e // okay, since API is that way anyways. // we do this by finding the suffix that divides evenly into the - // value, returing the value itself, with no suffix, if it fails. + // value, returning the value itself, with no suffix, if it fails. // // For the most part, we don't enforce any semantic to this values. 
// The operating system will usually align this and enforce minimum diff --git a/fn/vendor/github.com/docker/docker/volume/volume_test.go b/fn/vendor/github.com/docker/docker/volume/volume_test.go index 426e6e5c1..5c3e0e381 100644 --- a/fn/vendor/github.com/docker/docker/volume/volume_test.go +++ b/fn/vendor/github.com/docker/docker/volume/volume_test.go @@ -229,10 +229,10 @@ func TestParseMountSpec(t *testing.T) { defer os.RemoveAll(testDir) cases := []c{ - {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath, ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath}}, - {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, RW: true}}, - {mount.Mount{Type: mount.TypeBind, Source: testDir + string(os.PathSeparator), Target: testDestinationPath, ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath}}, - {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath + string(os.PathSeparator), ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath}}, + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath, ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: DefaultPropagationMode}}, + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, RW: true, Propagation: DefaultPropagationMode}}, + {mount.Mount{Type: mount.TypeBind, Source: testDir + string(os.PathSeparator), Target: testDestinationPath, ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: DefaultPropagationMode}}, + {mount.Mount{Type: mount.TypeBind, Source: testDir, 
Target: testDestinationPath + string(os.PathSeparator), ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: DefaultPropagationMode}}, {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath}, MountPoint{Type: mount.TypeVolume, Destination: testDestinationPath, RW: true, CopyData: DefaultCopyMode}}, {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath + string(os.PathSeparator)}, MountPoint{Type: mount.TypeVolume, Destination: testDestinationPath, RW: true, CopyData: DefaultCopyMode}}, } diff --git a/fn/vendor/github.com/go-resty/resty/.travis.yml b/fn/vendor/github.com/go-resty/resty/.travis.yml index c8c4ba293..8972c4cb5 100644 --- a/fn/vendor/github.com/go-resty/resty/.travis.yml +++ b/fn/vendor/github.com/go-resty/resty/.travis.yml @@ -17,7 +17,8 @@ go: - tip install: - - go get -v ./... + - go get -v -t ./... + - go get -v golang.org/x/tools/cmd/cover script: - go test ./... -coverprofile=coverage.txt -covermode=atomic diff --git a/fn/vendor/github.com/go-resty/resty/README.md b/fn/vendor/github.com/go-resty/resty/README.md index 6865cfbdb..bc6783ebf 100644 --- a/fn/vendor/github.com/go-resty/resty/README.md +++ b/fn/vendor/github.com/go-resty/resty/README.md @@ -38,6 +38,7 @@ Go Resty first released on Sep 15, 2015 then go-resty grew gradually as a very h * Cookies for your request and CookieJar support * SRV Record based request instead of Host URL * Client settings like `Timeout`, `RedirectPolicy`, `Proxy`, `TLSClientConfig`, `Transport`, etc. 
+* Optionally allows GET request with payload, see [SetAllowGetMethodPayload](https://godoc.org/github.com/go-resty/resty#Client.SetOutputDirectory#Client.SetAllowGetMethodPayload) * resty design * Have client level settings & options and also override at Request level if you want to * Request and Response middlewares @@ -511,6 +512,12 @@ resty.SetRESTMode() resty.SetHTTPMode() ``` +#### Allow GET request with Payload +```go +// Allow GET request with Payload. This is disabled by default. +resty.SetAllowGetMethodPayload(true) +``` + #### Wanna Multiple Clients ```go // Here you go! diff --git a/fn/vendor/github.com/go-resty/resty/client.go b/fn/vendor/github.com/go-resty/resty/client.go index 187d71ba3..f03190f71 100644 --- a/fn/vendor/github.com/go-resty/resty/client.go +++ b/fn/vendor/github.com/go-resty/resty/client.go @@ -71,21 +71,22 @@ var ( // Client type is used for HTTP/RESTful global values // for all request raised from the client type Client struct { - HostURL string - QueryParam url.Values - FormData url.Values - Header http.Header - UserInfo *User - Token string - Cookies []*http.Cookie - Error reflect.Type - Debug bool - DisableWarn bool - Log *log.Logger - RetryCount int - RetryWaitTime time.Duration - RetryMaxWaitTime time.Duration - RetryConditions []RetryConditionFunc + HostURL string + QueryParam url.Values + FormData url.Values + Header http.Header + UserInfo *User + Token string + Cookies []*http.Cookie + Error reflect.Type + Debug bool + DisableWarn bool + AllowGetMethodPayload bool + Log *log.Logger + RetryCount int + RetryWaitTime time.Duration + RetryMaxWaitTime time.Duration + RetryConditions []RetryConditionFunc httpClient *http.Client transport *http.Transport @@ -373,6 +374,15 @@ func (c *Client) SetDisableWarn(d bool) *Client { return c } +// SetAllowGetMethodPayload method allows the GET method with payload on `go-resty` client. +// For example: go-resty allows the user sends request with a payload on HTTP GET method. 
+// resty.SetAllowGetMethodPayload(true) +// +func (c *Client) SetAllowGetMethodPayload(a bool) *Client { + c.AllowGetMethodPayload = a + return c +} + // SetLogger method sets given writer for logging go-resty request and response details. // Default is os.Stderr // file, _ := os.OpenFile("/Users/jeeva/go-resty.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) @@ -851,8 +861,8 @@ func getPointer(v interface{}) interface{} { return reflect.New(vv.Type()).Interface() } -func isPayloadSupported(m string) bool { - return (m == MethodPost || m == MethodPut || m == MethodDelete || m == MethodPatch) +func isPayloadSupported(m string, allowMethodGet bool) bool { + return (m == MethodPost || m == MethodPut || m == MethodDelete || m == MethodPatch || (allowMethodGet && m == MethodGet)) } func typeOf(i interface{}) reflect.Type { diff --git a/fn/vendor/github.com/go-resty/resty/client_test.go b/fn/vendor/github.com/go-resty/resty/client_test.go index 588a11c50..b408e613a 100644 --- a/fn/vendor/github.com/go-resty/resty/client_test.go +++ b/fn/vendor/github.com/go-resty/resty/client_test.go @@ -288,7 +288,7 @@ func TestClientOptions(t *testing.T) { SetRetryCount(3) assertEqual(t, 3, DefaultClient.RetryCount) - + rwt := time.Duration(1000) * time.Millisecond SetRetryWaitTime(rwt) assertEqual(t, rwt, DefaultClient.RetryWaitTime) @@ -324,6 +324,9 @@ func TestClientOptions(t *testing.T) { SetDebug(true) assertEqual(t, DefaultClient.Debug, true) + SetAllowGetMethodPayload(true) + assertEqual(t, DefaultClient.AllowGetMethodPayload, true) + SetScheme("http") assertEqual(t, DefaultClient.scheme, "http") @@ -344,3 +347,19 @@ func TestClientPreRequestHook(t *testing.T) { return nil }) } + +func TestClientAllowsGetMethodPayload(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + + c := dc() + c.SetAllowGetMethodPayload(true) + c.SetPreRequestHook(func(*Client, *Request) error { return nil }) // for coverage + + payload := "test-payload" + resp, err := 
c.R().SetBody(payload).Get(ts.URL + "/get-method-payload-test") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, payload, resp.String()) +} diff --git a/fn/vendor/github.com/go-resty/resty/default.go b/fn/vendor/github.com/go-resty/resty/default.go index 2afd0f3d2..7677dfaf5 100644 --- a/fn/vendor/github.com/go-resty/resty/default.go +++ b/fn/vendor/github.com/go-resty/resty/default.go @@ -149,6 +149,11 @@ func SetDebug(d bool) *Client { return DefaultClient.SetDebug(d) } +// SetAllowGetMethodPayload method allows the GET method with payload. See `Client.SetAllowGetMethodPayload` for more information. +func SetAllowGetMethodPayload(a bool) *Client { + return DefaultClient.SetAllowGetMethodPayload(a) +} + // SetRetryCount method sets the retry count. See `Client.SetRetryCount` for more information. func SetRetryCount(count int) *Client { return DefaultClient.SetRetryCount(count) diff --git a/fn/vendor/github.com/go-resty/resty/middleware.go b/fn/vendor/github.com/go-resty/resty/middleware.go index 99fd6ec2c..536df8628 100644 --- a/fn/vendor/github.com/go-resty/resty/middleware.go +++ b/fn/vendor/github.com/go-resty/resty/middleware.go @@ -91,8 +91,7 @@ func parseRequestHeader(c *Client, r *Request) error { } func parseRequestBody(c *Client, r *Request) (err error) { - if isPayloadSupported(r.Method) { - + if isPayloadSupported(r.Method, c.AllowGetMethodPayload) { // Handling Multipart if r.isMultiPart && !(r.Method == MethodPatch) { if err = handleMultipart(c, r); err != nil { diff --git a/fn/vendor/github.com/go-resty/resty/request.go b/fn/vendor/github.com/go-resty/resty/request.go index 2cb62e14c..7335cbccb 100644 --- a/fn/vendor/github.com/go-resty/resty/request.go +++ b/fn/vendor/github.com/go-resty/resty/request.go @@ -439,7 +439,7 @@ func (r *Request) Execute(method, url string) (*Response, error) { func (r *Request) fmtBodyString() (body string) { body = "***** NO CONTENT *****" - if isPayloadSupported(r.Method) { + 
if isPayloadSupported(r.Method, r.client.AllowGetMethodPayload) { // multipart or form-data if r.isMultiPart || r.isFormData { body = string(r.bodyBuf.Bytes()) diff --git a/fn/vendor/github.com/go-resty/resty/resty_test.go b/fn/vendor/github.com/go-resty/resty/resty_test.go index df3bb8f2f..119062e30 100644 --- a/fn/vendor/github.com/go-resty/resty/resty_test.go +++ b/fn/vendor/github.com/go-resty/resty/resty_test.go @@ -1187,6 +1187,12 @@ func createGetServer(t *testing.T) *httptest.Server { w.Header().Set("Content-Type", "image/png") w.Header().Set("Content-Length", strconv.Itoa(len(fileBytes))) _, _ = w.Write(fileBytes) + } else if r.URL.Path == "/get-method-payload-test" { + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Error: could not read get body: %s", err.Error()) + } + _, _ = w.Write(body) } } }) diff --git a/fn/vendor/golang.org/x/net/bpf/asm.go b/fn/vendor/golang.org/x/net/bpf/asm.go new file mode 100644 index 000000000..15e21b181 --- /dev/null +++ b/fn/vendor/golang.org/x/net/bpf/asm.go @@ -0,0 +1,41 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import "fmt" + +// Assemble converts insts into raw instructions suitable for loading +// into a BPF virtual machine. +// +// Currently, no optimization is attempted, the assembled program flow +// is exactly as provided. +func Assemble(insts []Instruction) ([]RawInstruction, error) { + ret := make([]RawInstruction, len(insts)) + var err error + for i, inst := range insts { + ret[i], err = inst.Assemble() + if err != nil { + return nil, fmt.Errorf("assembling instruction %d: %s", i+1, err) + } + } + return ret, nil +} + +// Disassemble attempts to parse raw back into +// Instructions. Unrecognized RawInstructions are assumed to be an +// extension not implemented by this package, and are passed through +// unchanged to the output. 
The allDecoded value reports whether insts +// contains no RawInstructions. +func Disassemble(raw []RawInstruction) (insts []Instruction, allDecoded bool) { + insts = make([]Instruction, len(raw)) + allDecoded = true + for i, r := range raw { + insts[i] = r.Disassemble() + if _, ok := insts[i].(RawInstruction); ok { + allDecoded = false + } + } + return insts, allDecoded +} diff --git a/fn/vendor/golang.org/x/net/bpf/constants.go b/fn/vendor/golang.org/x/net/bpf/constants.go new file mode 100644 index 000000000..b89ca3523 --- /dev/null +++ b/fn/vendor/golang.org/x/net/bpf/constants.go @@ -0,0 +1,218 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +// A Register is a register of the BPF virtual machine. +type Register uint16 + +const ( + // RegA is the accumulator register. RegA is always the + // destination register of ALU operations. + RegA Register = iota + // RegX is the indirection register, used by LoadIndirect + // operations. + RegX +) + +// An ALUOp is an arithmetic or logic operation. +type ALUOp uint16 + +// ALU binary operation types. +const ( + ALUOpAdd ALUOp = iota << 4 + ALUOpSub + ALUOpMul + ALUOpDiv + ALUOpOr + ALUOpAnd + ALUOpShiftLeft + ALUOpShiftRight + aluOpNeg // Not exported because it's the only unary ALU operation, and gets its own instruction type. + ALUOpMod + ALUOpXor +) + +// A JumpTest is a comparison operator used in conditional jumps. +type JumpTest uint16 + +// Supported operators for conditional jumps. 
+const ( + // K == A + JumpEqual JumpTest = iota + // K != A + JumpNotEqual + // K > A + JumpGreaterThan + // K < A + JumpLessThan + // K >= A + JumpGreaterOrEqual + // K <= A + JumpLessOrEqual + // K & A != 0 + JumpBitsSet + // K & A == 0 + JumpBitsNotSet +) + +// An Extension is a function call provided by the kernel that +// performs advanced operations that are expensive or impossible +// within the BPF virtual machine. +// +// Extensions are only implemented by the Linux kernel. +// +// TODO: should we prune this list? Some of these extensions seem +// either broken or near-impossible to use correctly, whereas other +// (len, random, ifindex) are quite useful. +type Extension int + +// Extension functions available in the Linux kernel. +const ( + // extOffset is the negative maximum number of instructions used + // to load instructions by overloading the K argument. + extOffset = -0x1000 + // ExtLen returns the length of the packet. + ExtLen Extension = 1 + // ExtProto returns the packet's L3 protocol type. + ExtProto Extension = 0 + // ExtType returns the packet's type (skb->pkt_type in the kernel) + // + // TODO: better documentation. How nice an API do we want to + // provide for these esoteric extensions? + ExtType Extension = 4 + // ExtPayloadOffset returns the offset of the packet payload, or + // the first protocol header that the kernel does not know how to + // parse. + ExtPayloadOffset Extension = 52 + // ExtInterfaceIndex returns the index of the interface on which + // the packet was received. + ExtInterfaceIndex Extension = 8 + // ExtNetlinkAttr returns the netlink attribute of type X at + // offset A. + ExtNetlinkAttr Extension = 12 + // ExtNetlinkAttrNested returns the nested netlink attribute of + // type X at offset A. + ExtNetlinkAttrNested Extension = 16 + // ExtMark returns the packet's mark value. + ExtMark Extension = 20 + // ExtQueue returns the packet's assigned hardware queue. 
+ ExtQueue Extension = 24 + // ExtLinkLayerType returns the packet's hardware address type + // (e.g. Ethernet, Infiniband). + ExtLinkLayerType Extension = 28 + // ExtRXHash returns the packets receive hash. + // + // TODO: figure out what this rxhash actually is. + ExtRXHash Extension = 32 + // ExtCPUID returns the ID of the CPU processing the current + // packet. + ExtCPUID Extension = 36 + // ExtVLANTag returns the packet's VLAN tag. + ExtVLANTag Extension = 44 + // ExtVLANTagPresent returns non-zero if the packet has a VLAN + // tag. + // + // TODO: I think this might be a lie: it reads bit 0x1000 of the + // VLAN header, which changed meaning in recent revisions of the + // spec - this extension may now return meaningless information. + ExtVLANTagPresent Extension = 48 + // ExtVLANProto returns 0x8100 if the frame has a VLAN header, + // 0x88a8 if the frame has a "Q-in-Q" double VLAN header, or some + // other value if no VLAN information is present. + ExtVLANProto Extension = 60 + // ExtRand returns a uniformly random uint32. + ExtRand Extension = 56 +) + +// The following gives names to various bit patterns used in opcode construction. 
+ +const ( + opMaskCls uint16 = 0x7 + // opClsLoad masks + opMaskLoadDest = 0x01 + opMaskLoadWidth = 0x18 + opMaskLoadMode = 0xe0 + // opClsALU + opMaskOperandSrc = 0x08 + opMaskOperator = 0xf0 + // opClsJump + opMaskJumpConst = 0x0f + opMaskJumpCond = 0xf0 +) + +const ( + // +---------------+-----------------+---+---+---+ + // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 0 | + // +---------------+-----------------+---+---+---+ + opClsLoadA uint16 = iota + // +---------------+-----------------+---+---+---+ + // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 1 | + // +---------------+-----------------+---+---+---+ + opClsLoadX + // +---+---+---+---+---+---+---+---+ + // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | + // +---+---+---+---+---+---+---+---+ + opClsStoreA + // +---+---+---+---+---+---+---+---+ + // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | + // +---+---+---+---+---+---+---+---+ + opClsStoreX + // +---------------+-----------------+---+---+---+ + // | Operator (4b) | OperandSrc (1b) | 1 | 0 | 0 | + // +---------------+-----------------+---+---+---+ + opClsALU + // +-----------------------------+---+---+---+---+ + // | TestOperator (4b) | 0 | 1 | 0 | 1 | + // +-----------------------------+---+---+---+---+ + opClsJump + // +---+-------------------------+---+---+---+---+ + // | 0 | 0 | 0 | RetSrc (1b) | 0 | 1 | 1 | 0 | + // +---+-------------------------+---+---+---+---+ + opClsReturn + // +---+-------------------------+---+---+---+---+ + // | 0 | 0 | 0 | TXAorTAX (1b) | 0 | 1 | 1 | 1 | + // +---+-------------------------+---+---+---+---+ + opClsMisc +) + +const ( + opAddrModeImmediate uint16 = iota << 5 + opAddrModeAbsolute + opAddrModeIndirect + opAddrModeScratch + opAddrModePacketLen // actually an extension, not an addressing mode. 
+ opAddrModeMemShift +) + +const ( + opLoadWidth4 uint16 = iota << 3 + opLoadWidth2 + opLoadWidth1 +) + +// Operator defined by ALUOp* + +const ( + opALUSrcConstant uint16 = iota << 3 + opALUSrcX +) + +const ( + opJumpAlways = iota << 4 + opJumpEqual + opJumpGT + opJumpGE + opJumpSet +) + +const ( + opRetSrcConstant uint16 = iota << 4 + opRetSrcA +) + +const ( + opMiscTAX = 0x00 + opMiscTXA = 0x80 +) diff --git a/fn/vendor/golang.org/x/net/bpf/doc.go b/fn/vendor/golang.org/x/net/bpf/doc.go new file mode 100644 index 000000000..ae62feb53 --- /dev/null +++ b/fn/vendor/golang.org/x/net/bpf/doc.go @@ -0,0 +1,82 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + +Package bpf implements marshaling and unmarshaling of programs for the +Berkeley Packet Filter virtual machine, and provides a Go implementation +of the virtual machine. + +BPF's main use is to specify a packet filter for network taps, so that +the kernel doesn't have to expensively copy every packet it sees to +userspace. However, it's been repurposed to other areas where running +user code in-kernel is needed. For example, Linux's seccomp uses BPF +to apply security policies to system calls. For simplicity, this +documentation refers only to packets, but other uses of BPF have their +own data payloads. + +BPF programs run in a restricted virtual machine. It has almost no +access to kernel functions, and while conditional branches are +allowed, they can only jump forwards, to guarantee that there are no +infinite loops. + +The virtual machine + +The BPF VM is an accumulator machine. Its main register, called +register A, is an implicit source and destination in all arithmetic +and logic operations. The machine also has 16 scratch registers for +temporary storage, and an indirection register (register X) for +indirect memory access. All registers are 32 bits wide. 
+ +Each run of a BPF program is given one packet, which is placed in the +VM's read-only "main memory". LoadAbsolute and LoadIndirect +instructions can fetch up to 32 bits at a time into register A for +examination. + +The goal of a BPF program is to produce and return a verdict (uint32), +which tells the kernel what to do with the packet. In the context of +packet filtering, the returned value is the number of bytes of the +packet to forward to userspace, or 0 to ignore the packet. Other +contexts like seccomp define their own return values. + +In order to simplify programs, attempts to read past the end of the +packet terminate the program execution with a verdict of 0 (ignore +packet). This means that the vast majority of BPF programs don't need +to do any explicit bounds checking. + +In addition to the bytes of the packet, some BPF programs have access +to extensions, which are essentially calls to kernel utility +functions. Currently, the only extensions supported by this package +are the Linux packet filter extensions. + +Examples + +This packet filter selects all ARP packets. + + bpf.Assemble([]bpf.Instruction{ + // Load "EtherType" field from the ethernet header. + bpf.LoadAbsolute{Off: 12, Size: 2}, + // Skip over the next instruction if EtherType is not ARP. + bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1}, + // Verdict is "send up to 4k of the packet to userspace." + bpf.RetConstant{Val: 4096}, + // Verdict is "ignore packet." + bpf.RetConstant{Val: 0}, + }) + +This packet filter captures a random 1% sample of traffic. + + bpf.Assemble([]bpf.Instruction{ + // Get a 32-bit random number from the Linux kernel. + bpf.LoadExtension{Num: bpf.ExtRand}, + // 1% dice roll? + bpf.JumpIf{Cond: bpf.JumpLessThan, Val: 2^32/100, SkipFalse: 1}, + // Capture. + bpf.RetConstant{Val: 4096}, + // Ignore. 
+ bpf.RetConstant{Val: 0}, + }) + +*/ +package bpf // import "golang.org/x/net/bpf" diff --git a/fn/vendor/golang.org/x/net/bpf/instructions.go b/fn/vendor/golang.org/x/net/bpf/instructions.go new file mode 100644 index 000000000..3b4fd0891 --- /dev/null +++ b/fn/vendor/golang.org/x/net/bpf/instructions.go @@ -0,0 +1,704 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import "fmt" + +// An Instruction is one instruction executed by the BPF virtual +// machine. +type Instruction interface { + // Assemble assembles the Instruction into a RawInstruction. + Assemble() (RawInstruction, error) +} + +// A RawInstruction is a raw BPF virtual machine instruction. +type RawInstruction struct { + // Operation to execute. + Op uint16 + // For conditional jump instructions, the number of instructions + // to skip if the condition is true/false. + Jt uint8 + Jf uint8 + // Constant parameter. The meaning depends on the Op. + K uint32 +} + +// Assemble implements the Instruction Assemble method. +func (ri RawInstruction) Assemble() (RawInstruction, error) { return ri, nil } + +// Disassemble parses ri into an Instruction and returns it. If ri is +// not recognized by this package, ri itself is returned. 
+func (ri RawInstruction) Disassemble() Instruction { + switch ri.Op & opMaskCls { + case opClsLoadA, opClsLoadX: + reg := Register(ri.Op & opMaskLoadDest) + sz := 0 + switch ri.Op & opMaskLoadWidth { + case opLoadWidth4: + sz = 4 + case opLoadWidth2: + sz = 2 + case opLoadWidth1: + sz = 1 + default: + return ri + } + switch ri.Op & opMaskLoadMode { + case opAddrModeImmediate: + if sz != 4 { + return ri + } + return LoadConstant{Dst: reg, Val: ri.K} + case opAddrModeScratch: + if sz != 4 || ri.K > 15 { + return ri + } + return LoadScratch{Dst: reg, N: int(ri.K)} + case opAddrModeAbsolute: + if ri.K > extOffset+0xffffffff { + return LoadExtension{Num: Extension(-extOffset + ri.K)} + } + return LoadAbsolute{Size: sz, Off: ri.K} + case opAddrModeIndirect: + return LoadIndirect{Size: sz, Off: ri.K} + case opAddrModePacketLen: + if sz != 4 { + return ri + } + return LoadExtension{Num: ExtLen} + case opAddrModeMemShift: + return LoadMemShift{Off: ri.K} + default: + return ri + } + + case opClsStoreA: + if ri.Op != opClsStoreA || ri.K > 15 { + return ri + } + return StoreScratch{Src: RegA, N: int(ri.K)} + + case opClsStoreX: + if ri.Op != opClsStoreX || ri.K > 15 { + return ri + } + return StoreScratch{Src: RegX, N: int(ri.K)} + + case opClsALU: + switch op := ALUOp(ri.Op & opMaskOperator); op { + case ALUOpAdd, ALUOpSub, ALUOpMul, ALUOpDiv, ALUOpOr, ALUOpAnd, ALUOpShiftLeft, ALUOpShiftRight, ALUOpMod, ALUOpXor: + if ri.Op&opMaskOperandSrc != 0 { + return ALUOpX{Op: op} + } + return ALUOpConstant{Op: op, Val: ri.K} + case aluOpNeg: + return NegateA{} + default: + return ri + } + + case opClsJump: + if ri.Op&opMaskJumpConst != opClsJump { + return ri + } + switch ri.Op & opMaskJumpCond { + case opJumpAlways: + return Jump{Skip: ri.K} + case opJumpEqual: + if ri.Jt == 0 { + return JumpIf{ + Cond: JumpNotEqual, + Val: ri.K, + SkipTrue: ri.Jf, + SkipFalse: 0, + } + } + return JumpIf{ + Cond: JumpEqual, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, + } + case opJumpGT: + 
if ri.Jt == 0 { + return JumpIf{ + Cond: JumpLessOrEqual, + Val: ri.K, + SkipTrue: ri.Jf, + SkipFalse: 0, + } + } + return JumpIf{ + Cond: JumpGreaterThan, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, + } + case opJumpGE: + if ri.Jt == 0 { + return JumpIf{ + Cond: JumpLessThan, + Val: ri.K, + SkipTrue: ri.Jf, + SkipFalse: 0, + } + } + return JumpIf{ + Cond: JumpGreaterOrEqual, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, + } + case opJumpSet: + return JumpIf{ + Cond: JumpBitsSet, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, + } + default: + return ri + } + + case opClsReturn: + switch ri.Op { + case opClsReturn | opRetSrcA: + return RetA{} + case opClsReturn | opRetSrcConstant: + return RetConstant{Val: ri.K} + default: + return ri + } + + case opClsMisc: + switch ri.Op { + case opClsMisc | opMiscTAX: + return TAX{} + case opClsMisc | opMiscTXA: + return TXA{} + default: + return ri + } + + default: + panic("unreachable") // switch is exhaustive on the bit pattern + } +} + +// LoadConstant loads Val into register Dst. +type LoadConstant struct { + Dst Register + Val uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadConstant) Assemble() (RawInstruction, error) { + return assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val) +} + +// String returns the the instruction in assembler notation. +func (a LoadConstant) String() string { + switch a.Dst { + case RegA: + return fmt.Sprintf("ld #%d", a.Val) + case RegX: + return fmt.Sprintf("ldx #%d", a.Val) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadScratch loads scratch[N] into register Dst. +type LoadScratch struct { + Dst Register + N int // 0-15 +} + +// Assemble implements the Instruction Assemble method. 
+func (a LoadScratch) Assemble() (RawInstruction, error) { + if a.N < 0 || a.N > 15 { + return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N) + } + return assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N)) +} + +// String returns the the instruction in assembler notation. +func (a LoadScratch) String() string { + switch a.Dst { + case RegA: + return fmt.Sprintf("ld M[%d]", a.N) + case RegX: + return fmt.Sprintf("ldx M[%d]", a.N) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadAbsolute loads packet[Off:Off+Size] as an integer value into +// register A. +type LoadAbsolute struct { + Off uint32 + Size int // 1, 2 or 4 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadAbsolute) Assemble() (RawInstruction, error) { + return assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off) +} + +// String returns the the instruction in assembler notation. +func (a LoadAbsolute) String() string { + switch a.Size { + case 1: // byte + return fmt.Sprintf("ldb [%d]", a.Off) + case 2: // half word + return fmt.Sprintf("ldh [%d]", a.Off) + case 4: // word + if a.Off > extOffset+0xffffffff { + return LoadExtension{Num: Extension(a.Off + 0x1000)}.String() + } + return fmt.Sprintf("ld [%d]", a.Off) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadIndirect loads packet[X+Off:X+Off+Size] as an integer value +// into register A. +type LoadIndirect struct { + Off uint32 + Size int // 1, 2 or 4 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadIndirect) Assemble() (RawInstruction, error) { + return assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off) +} + +// String returns the the instruction in assembler notation. 
+func (a LoadIndirect) String() string { + switch a.Size { + case 1: // byte + return fmt.Sprintf("ldb [x + %d]", a.Off) + case 2: // half word + return fmt.Sprintf("ldh [x + %d]", a.Off) + case 4: // word + return fmt.Sprintf("ld [x + %d]", a.Off) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadMemShift multiplies the first 4 bits of the byte at packet[Off] +// by 4 and stores the result in register X. +// +// This instruction is mainly useful to load into X the length of an +// IPv4 packet header in a single instruction, rather than have to do +// the arithmetic on the header's first byte by hand. +type LoadMemShift struct { + Off uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadMemShift) Assemble() (RawInstruction, error) { + return assembleLoad(RegX, 1, opAddrModeMemShift, a.Off) +} + +// String returns the the instruction in assembler notation. +func (a LoadMemShift) String() string { + return fmt.Sprintf("ldx 4*([%d]&0xf)", a.Off) +} + +// LoadExtension invokes a linux-specific extension and stores the +// result in register A. +type LoadExtension struct { + Num Extension +} + +// Assemble implements the Instruction Assemble method. +func (a LoadExtension) Assemble() (RawInstruction, error) { + if a.Num == ExtLen { + return assembleLoad(RegA, 4, opAddrModePacketLen, 0) + } + return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(extOffset+a.Num)) +} + +// String returns the the instruction in assembler notation. 
+func (a LoadExtension) String() string { + switch a.Num { + case ExtLen: + return "ld #len" + case ExtProto: + return "ld #proto" + case ExtType: + return "ld #type" + case ExtPayloadOffset: + return "ld #poff" + case ExtInterfaceIndex: + return "ld #ifidx" + case ExtNetlinkAttr: + return "ld #nla" + case ExtNetlinkAttrNested: + return "ld #nlan" + case ExtMark: + return "ld #mark" + case ExtQueue: + return "ld #queue" + case ExtLinkLayerType: + return "ld #hatype" + case ExtRXHash: + return "ld #rxhash" + case ExtCPUID: + return "ld #cpu" + case ExtVLANTag: + return "ld #vlan_tci" + case ExtVLANTagPresent: + return "ld #vlan_avail" + case ExtVLANProto: + return "ld #vlan_tpid" + case ExtRand: + return "ld #rand" + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// StoreScratch stores register Src into scratch[N]. +type StoreScratch struct { + Src Register + N int // 0-15 +} + +// Assemble implements the Instruction Assemble method. +func (a StoreScratch) Assemble() (RawInstruction, error) { + if a.N < 0 || a.N > 15 { + return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N) + } + var op uint16 + switch a.Src { + case RegA: + op = opClsStoreA + case RegX: + op = opClsStoreX + default: + return RawInstruction{}, fmt.Errorf("invalid source register %v", a.Src) + } + + return RawInstruction{ + Op: op, + K: uint32(a.N), + }, nil +} + +// String returns the the instruction in assembler notation. +func (a StoreScratch) String() string { + switch a.Src { + case RegA: + return fmt.Sprintf("st M[%d]", a.N) + case RegX: + return fmt.Sprintf("stx M[%d]", a.N) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// ALUOpConstant executes A = A Val. +type ALUOpConstant struct { + Op ALUOp + Val uint32 +} + +// Assemble implements the Instruction Assemble method. 
+func (a ALUOpConstant) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsALU | opALUSrcConstant | uint16(a.Op), + K: a.Val, + }, nil +} + +// String returns the the instruction in assembler notation. +func (a ALUOpConstant) String() string { + switch a.Op { + case ALUOpAdd: + return fmt.Sprintf("add #%d", a.Val) + case ALUOpSub: + return fmt.Sprintf("sub #%d", a.Val) + case ALUOpMul: + return fmt.Sprintf("mul #%d", a.Val) + case ALUOpDiv: + return fmt.Sprintf("div #%d", a.Val) + case ALUOpMod: + return fmt.Sprintf("mod #%d", a.Val) + case ALUOpAnd: + return fmt.Sprintf("and #%d", a.Val) + case ALUOpOr: + return fmt.Sprintf("or #%d", a.Val) + case ALUOpXor: + return fmt.Sprintf("xor #%d", a.Val) + case ALUOpShiftLeft: + return fmt.Sprintf("lsh #%d", a.Val) + case ALUOpShiftRight: + return fmt.Sprintf("rsh #%d", a.Val) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// ALUOpX executes A = A X +type ALUOpX struct { + Op ALUOp +} + +// Assemble implements the Instruction Assemble method. +func (a ALUOpX) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsALU | opALUSrcX | uint16(a.Op), + }, nil +} + +// String returns the the instruction in assembler notation. +func (a ALUOpX) String() string { + switch a.Op { + case ALUOpAdd: + return "add x" + case ALUOpSub: + return "sub x" + case ALUOpMul: + return "mul x" + case ALUOpDiv: + return "div x" + case ALUOpMod: + return "mod x" + case ALUOpAnd: + return "and x" + case ALUOpOr: + return "or x" + case ALUOpXor: + return "xor x" + case ALUOpShiftLeft: + return "lsh x" + case ALUOpShiftRight: + return "rsh x" + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// NegateA executes A = -A. +type NegateA struct{} + +// Assemble implements the Instruction Assemble method. 
+func (a NegateA) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsALU | uint16(aluOpNeg), + }, nil +} + +// String returns the the instruction in assembler notation. +func (a NegateA) String() string { + return fmt.Sprintf("neg") +} + +// Jump skips the following Skip instructions in the program. +type Jump struct { + Skip uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a Jump) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsJump | opJumpAlways, + K: a.Skip, + }, nil +} + +// String returns the the instruction in assembler notation. +func (a Jump) String() string { + return fmt.Sprintf("ja %d", a.Skip) +} + +// JumpIf skips the following Skip instructions in the program if A +// Val is true. +type JumpIf struct { + Cond JumpTest + Val uint32 + SkipTrue uint8 + SkipFalse uint8 +} + +// Assemble implements the Instruction Assemble method. +func (a JumpIf) Assemble() (RawInstruction, error) { + var ( + cond uint16 + flip bool + ) + switch a.Cond { + case JumpEqual: + cond = opJumpEqual + case JumpNotEqual: + cond, flip = opJumpEqual, true + case JumpGreaterThan: + cond = opJumpGT + case JumpLessThan: + cond, flip = opJumpGE, true + case JumpGreaterOrEqual: + cond = opJumpGE + case JumpLessOrEqual: + cond, flip = opJumpGT, true + case JumpBitsSet: + cond = opJumpSet + case JumpBitsNotSet: + cond, flip = opJumpSet, true + default: + return RawInstruction{}, fmt.Errorf("unknown JumpTest %v", a.Cond) + } + jt, jf := a.SkipTrue, a.SkipFalse + if flip { + jt, jf = jf, jt + } + return RawInstruction{ + Op: opClsJump | cond, + Jt: jt, + Jf: jf, + K: a.Val, + }, nil +} + +// String returns the the instruction in assembler notation. 
+func (a JumpIf) String() string { + switch a.Cond { + // K == A + case JumpEqual: + return conditionalJump(a, "jeq", "jneq") + // K != A + case JumpNotEqual: + return fmt.Sprintf("jneq #%d,%d", a.Val, a.SkipTrue) + // K > A + case JumpGreaterThan: + return conditionalJump(a, "jgt", "jle") + // K < A + case JumpLessThan: + return fmt.Sprintf("jlt #%d,%d", a.Val, a.SkipTrue) + // K >= A + case JumpGreaterOrEqual: + return conditionalJump(a, "jge", "jlt") + // K <= A + case JumpLessOrEqual: + return fmt.Sprintf("jle #%d,%d", a.Val, a.SkipTrue) + // K & A != 0 + case JumpBitsSet: + if a.SkipFalse > 0 { + return fmt.Sprintf("jset #%d,%d,%d", a.Val, a.SkipTrue, a.SkipFalse) + } + return fmt.Sprintf("jset #%d,%d", a.Val, a.SkipTrue) + // K & A == 0, there is no assembler instruction for JumpBitNotSet, use JumpBitSet and invert skips + case JumpBitsNotSet: + return JumpIf{Cond: JumpBitsSet, SkipTrue: a.SkipFalse, SkipFalse: a.SkipTrue, Val: a.Val}.String() + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +func conditionalJump(inst JumpIf, positiveJump, negativeJump string) string { + if inst.SkipTrue > 0 { + if inst.SkipFalse > 0 { + return fmt.Sprintf("%s #%d,%d,%d", positiveJump, inst.Val, inst.SkipTrue, inst.SkipFalse) + } + return fmt.Sprintf("%s #%d,%d", positiveJump, inst.Val, inst.SkipTrue) + } + return fmt.Sprintf("%s #%d,%d", negativeJump, inst.Val, inst.SkipFalse) +} + +// RetA exits the BPF program, returning the value of register A. +type RetA struct{} + +// Assemble implements the Instruction Assemble method. +func (a RetA) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsReturn | opRetSrcA, + }, nil +} + +// String returns the the instruction in assembler notation. +func (a RetA) String() string { + return fmt.Sprintf("ret a") +} + +// RetConstant exits the BPF program, returning a constant value. +type RetConstant struct { + Val uint32 +} + +// Assemble implements the Instruction Assemble method. 
+func (a RetConstant) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsReturn | opRetSrcConstant, + K: a.Val, + }, nil +} + +// String returns the the instruction in assembler notation. +func (a RetConstant) String() string { + return fmt.Sprintf("ret #%d", a.Val) +} + +// TXA copies the value of register X to register A. +type TXA struct{} + +// Assemble implements the Instruction Assemble method. +func (a TXA) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsMisc | opMiscTXA, + }, nil +} + +// String returns the the instruction in assembler notation. +func (a TXA) String() string { + return fmt.Sprintf("txa") +} + +// TAX copies the value of register A to register X. +type TAX struct{} + +// Assemble implements the Instruction Assemble method. +func (a TAX) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsMisc | opMiscTAX, + }, nil +} + +// String returns the the instruction in assembler notation. +func (a TAX) String() string { + return fmt.Sprintf("tax") +} + +func assembleLoad(dst Register, loadSize int, mode uint16, k uint32) (RawInstruction, error) { + var ( + cls uint16 + sz uint16 + ) + switch dst { + case RegA: + cls = opClsLoadA + case RegX: + cls = opClsLoadX + default: + return RawInstruction{}, fmt.Errorf("invalid target register %v", dst) + } + switch loadSize { + case 1: + sz = opLoadWidth1 + case 2: + sz = opLoadWidth2 + case 4: + sz = opLoadWidth4 + default: + return RawInstruction{}, fmt.Errorf("invalid load byte length %d", sz) + } + return RawInstruction{ + Op: cls | sz | mode, + K: k, + }, nil +} diff --git a/fn/vendor/golang.org/x/net/bpf/instructions_test.go b/fn/vendor/golang.org/x/net/bpf/instructions_test.go new file mode 100644 index 000000000..dde474aba --- /dev/null +++ b/fn/vendor/golang.org/x/net/bpf/instructions_test.go @@ -0,0 +1,525 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import ( + "fmt" + "io/ioutil" + "reflect" + "strconv" + "strings" + "testing" +) + +// This is a direct translation of the program in +// testdata/all_instructions.txt. +var allInstructions = []Instruction{ + LoadConstant{Dst: RegA, Val: 42}, + LoadConstant{Dst: RegX, Val: 42}, + + LoadScratch{Dst: RegA, N: 3}, + LoadScratch{Dst: RegX, N: 3}, + + LoadAbsolute{Off: 42, Size: 1}, + LoadAbsolute{Off: 42, Size: 2}, + LoadAbsolute{Off: 42, Size: 4}, + + LoadIndirect{Off: 42, Size: 1}, + LoadIndirect{Off: 42, Size: 2}, + LoadIndirect{Off: 42, Size: 4}, + + LoadMemShift{Off: 42}, + + LoadExtension{Num: ExtLen}, + LoadExtension{Num: ExtProto}, + LoadExtension{Num: ExtType}, + LoadExtension{Num: ExtRand}, + + StoreScratch{Src: RegA, N: 3}, + StoreScratch{Src: RegX, N: 3}, + + ALUOpConstant{Op: ALUOpAdd, Val: 42}, + ALUOpConstant{Op: ALUOpSub, Val: 42}, + ALUOpConstant{Op: ALUOpMul, Val: 42}, + ALUOpConstant{Op: ALUOpDiv, Val: 42}, + ALUOpConstant{Op: ALUOpOr, Val: 42}, + ALUOpConstant{Op: ALUOpAnd, Val: 42}, + ALUOpConstant{Op: ALUOpShiftLeft, Val: 42}, + ALUOpConstant{Op: ALUOpShiftRight, Val: 42}, + ALUOpConstant{Op: ALUOpMod, Val: 42}, + ALUOpConstant{Op: ALUOpXor, Val: 42}, + + ALUOpX{Op: ALUOpAdd}, + ALUOpX{Op: ALUOpSub}, + ALUOpX{Op: ALUOpMul}, + ALUOpX{Op: ALUOpDiv}, + ALUOpX{Op: ALUOpOr}, + ALUOpX{Op: ALUOpAnd}, + ALUOpX{Op: ALUOpShiftLeft}, + ALUOpX{Op: ALUOpShiftRight}, + ALUOpX{Op: ALUOpMod}, + ALUOpX{Op: ALUOpXor}, + + NegateA{}, + + Jump{Skip: 10}, + JumpIf{Cond: JumpEqual, Val: 42, SkipTrue: 8, SkipFalse: 9}, + JumpIf{Cond: JumpNotEqual, Val: 42, SkipTrue: 8}, + JumpIf{Cond: JumpLessThan, Val: 42, SkipTrue: 7}, + JumpIf{Cond: JumpLessOrEqual, Val: 42, SkipTrue: 6}, + JumpIf{Cond: JumpGreaterThan, Val: 42, SkipTrue: 4, SkipFalse: 5}, + JumpIf{Cond: JumpGreaterOrEqual, Val: 42, SkipTrue: 3, SkipFalse: 4}, + JumpIf{Cond: JumpBitsSet, Val: 
42, SkipTrue: 2, SkipFalse: 3}, + + TAX{}, + TXA{}, + + RetA{}, + RetConstant{Val: 42}, +} +var allInstructionsExpected = "testdata/all_instructions.bpf" + +// Check that we produce the same output as the canonical bpf_asm +// linux kernel tool. +func TestInterop(t *testing.T) { + out, err := Assemble(allInstructions) + if err != nil { + t.Fatalf("assembly of allInstructions program failed: %s", err) + } + t.Logf("Assembled program is %d instructions long", len(out)) + + bs, err := ioutil.ReadFile(allInstructionsExpected) + if err != nil { + t.Fatalf("reading %s: %s", allInstructionsExpected, err) + } + // First statement is the number of statements, last statement is + // empty. We just ignore both and rely on slice length. + stmts := strings.Split(string(bs), ",") + if len(stmts)-2 != len(out) { + t.Fatalf("test program lengths don't match: %s has %d, Go implementation has %d", allInstructionsExpected, len(stmts)-2, len(allInstructions)) + } + + for i, stmt := range stmts[1 : len(stmts)-2] { + nums := strings.Split(stmt, " ") + if len(nums) != 4 { + t.Fatalf("malformed instruction %d in %s: %s", i+1, allInstructionsExpected, stmt) + } + + actual := out[i] + + op, err := strconv.ParseUint(nums[0], 10, 16) + if err != nil { + t.Fatalf("malformed opcode %s in instruction %d of %s", nums[0], i+1, allInstructionsExpected) + } + if actual.Op != uint16(op) { + t.Errorf("opcode mismatch on instruction %d (%#v): got 0x%02x, want 0x%02x", i+1, allInstructions[i], actual.Op, op) + } + + jt, err := strconv.ParseUint(nums[1], 10, 8) + if err != nil { + t.Fatalf("malformed jt offset %s in instruction %d of %s", nums[1], i+1, allInstructionsExpected) + } + if actual.Jt != uint8(jt) { + t.Errorf("jt mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.Jt, jt) + } + + jf, err := strconv.ParseUint(nums[2], 10, 8) + if err != nil { + t.Fatalf("malformed jf offset %s in instruction %d of %s", nums[2], i+1, allInstructionsExpected) + } + if actual.Jf != 
uint8(jf) { + t.Errorf("jf mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.Jf, jf) + } + + k, err := strconv.ParseUint(nums[3], 10, 32) + if err != nil { + t.Fatalf("malformed constant %s in instruction %d of %s", nums[3], i+1, allInstructionsExpected) + } + if actual.K != uint32(k) { + t.Errorf("constant mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.K, k) + } + } +} + +// Check that assembly and disassembly match each other. +func TestAsmDisasm(t *testing.T) { + prog1, err := Assemble(allInstructions) + if err != nil { + t.Fatalf("assembly of allInstructions program failed: %s", err) + } + t.Logf("Assembled program is %d instructions long", len(prog1)) + + got, allDecoded := Disassemble(prog1) + if !allDecoded { + t.Errorf("Disassemble(Assemble(allInstructions)) produced unrecognized instructions:") + for i, inst := range got { + if r, ok := inst.(RawInstruction); ok { + t.Logf(" insn %d, %#v --> %#v", i+1, allInstructions[i], r) + } + } + } + + if len(allInstructions) != len(got) { + t.Fatalf("disassembly changed program size: %d insns before, %d insns after", len(allInstructions), len(got)) + } + if !reflect.DeepEqual(allInstructions, got) { + t.Errorf("program mutated by disassembly:") + for i := range got { + if !reflect.DeepEqual(allInstructions[i], got[i]) { + t.Logf(" insn %d, s: %#v, p1: %#v, got: %#v", i+1, allInstructions[i], prog1[i], got[i]) + } + } + } +} + +type InvalidInstruction struct{} + +func (a InvalidInstruction) Assemble() (RawInstruction, error) { + return RawInstruction{}, fmt.Errorf("Invalid Instruction") +} + +func (a InvalidInstruction) String() string { + return fmt.Sprintf("unknown instruction: %#v", a) +} + +func TestString(t *testing.T) { + testCases := []struct { + instruction Instruction + assembler string + }{ + { + instruction: LoadConstant{Dst: RegA, Val: 42}, + assembler: "ld #42", + }, + { + instruction: LoadConstant{Dst: RegX, Val: 42}, + assembler: 
"ldx #42", + }, + { + instruction: LoadConstant{Dst: 0xffff, Val: 42}, + assembler: "unknown instruction: bpf.LoadConstant{Dst:0xffff, Val:0x2a}", + }, + { + instruction: LoadScratch{Dst: RegA, N: 3}, + assembler: "ld M[3]", + }, + { + instruction: LoadScratch{Dst: RegX, N: 3}, + assembler: "ldx M[3]", + }, + { + instruction: LoadScratch{Dst: 0xffff, N: 3}, + assembler: "unknown instruction: bpf.LoadScratch{Dst:0xffff, N:3}", + }, + { + instruction: LoadAbsolute{Off: 42, Size: 1}, + assembler: "ldb [42]", + }, + { + instruction: LoadAbsolute{Off: 42, Size: 2}, + assembler: "ldh [42]", + }, + { + instruction: LoadAbsolute{Off: 42, Size: 4}, + assembler: "ld [42]", + }, + { + instruction: LoadAbsolute{Off: 42, Size: -1}, + assembler: "unknown instruction: bpf.LoadAbsolute{Off:0x2a, Size:-1}", + }, + { + instruction: LoadIndirect{Off: 42, Size: 1}, + assembler: "ldb [x + 42]", + }, + { + instruction: LoadIndirect{Off: 42, Size: 2}, + assembler: "ldh [x + 42]", + }, + { + instruction: LoadIndirect{Off: 42, Size: 4}, + assembler: "ld [x + 42]", + }, + { + instruction: LoadIndirect{Off: 42, Size: -1}, + assembler: "unknown instruction: bpf.LoadIndirect{Off:0x2a, Size:-1}", + }, + { + instruction: LoadMemShift{Off: 42}, + assembler: "ldx 4*([42]&0xf)", + }, + { + instruction: LoadExtension{Num: ExtLen}, + assembler: "ld #len", + }, + { + instruction: LoadExtension{Num: ExtProto}, + assembler: "ld #proto", + }, + { + instruction: LoadExtension{Num: ExtType}, + assembler: "ld #type", + }, + { + instruction: LoadExtension{Num: ExtPayloadOffset}, + assembler: "ld #poff", + }, + { + instruction: LoadExtension{Num: ExtInterfaceIndex}, + assembler: "ld #ifidx", + }, + { + instruction: LoadExtension{Num: ExtNetlinkAttr}, + assembler: "ld #nla", + }, + { + instruction: LoadExtension{Num: ExtNetlinkAttrNested}, + assembler: "ld #nlan", + }, + { + instruction: LoadExtension{Num: ExtMark}, + assembler: "ld #mark", + }, + { + instruction: LoadExtension{Num: ExtQueue}, + assembler: "ld 
#queue", + }, + { + instruction: LoadExtension{Num: ExtLinkLayerType}, + assembler: "ld #hatype", + }, + { + instruction: LoadExtension{Num: ExtRXHash}, + assembler: "ld #rxhash", + }, + { + instruction: LoadExtension{Num: ExtCPUID}, + assembler: "ld #cpu", + }, + { + instruction: LoadExtension{Num: ExtVLANTag}, + assembler: "ld #vlan_tci", + }, + { + instruction: LoadExtension{Num: ExtVLANTagPresent}, + assembler: "ld #vlan_avail", + }, + { + instruction: LoadExtension{Num: ExtVLANProto}, + assembler: "ld #vlan_tpid", + }, + { + instruction: LoadExtension{Num: ExtRand}, + assembler: "ld #rand", + }, + { + instruction: LoadAbsolute{Off: 0xfffff038, Size: 4}, + assembler: "ld #rand", + }, + { + instruction: LoadExtension{Num: 0xfff}, + assembler: "unknown instruction: bpf.LoadExtension{Num:4095}", + }, + { + instruction: StoreScratch{Src: RegA, N: 3}, + assembler: "st M[3]", + }, + { + instruction: StoreScratch{Src: RegX, N: 3}, + assembler: "stx M[3]", + }, + { + instruction: StoreScratch{Src: 0xffff, N: 3}, + assembler: "unknown instruction: bpf.StoreScratch{Src:0xffff, N:3}", + }, + { + instruction: ALUOpConstant{Op: ALUOpAdd, Val: 42}, + assembler: "add #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpSub, Val: 42}, + assembler: "sub #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpMul, Val: 42}, + assembler: "mul #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpDiv, Val: 42}, + assembler: "div #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpOr, Val: 42}, + assembler: "or #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpAnd, Val: 42}, + assembler: "and #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpShiftLeft, Val: 42}, + assembler: "lsh #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpShiftRight, Val: 42}, + assembler: "rsh #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpMod, Val: 42}, + assembler: "mod #42", + }, + { + instruction: ALUOpConstant{Op: ALUOpXor, Val: 42}, + assembler: "xor #42", + }, + { + instruction: 
ALUOpConstant{Op: 0xffff, Val: 42}, + assembler: "unknown instruction: bpf.ALUOpConstant{Op:0xffff, Val:0x2a}", + }, + { + instruction: ALUOpX{Op: ALUOpAdd}, + assembler: "add x", + }, + { + instruction: ALUOpX{Op: ALUOpSub}, + assembler: "sub x", + }, + { + instruction: ALUOpX{Op: ALUOpMul}, + assembler: "mul x", + }, + { + instruction: ALUOpX{Op: ALUOpDiv}, + assembler: "div x", + }, + { + instruction: ALUOpX{Op: ALUOpOr}, + assembler: "or x", + }, + { + instruction: ALUOpX{Op: ALUOpAnd}, + assembler: "and x", + }, + { + instruction: ALUOpX{Op: ALUOpShiftLeft}, + assembler: "lsh x", + }, + { + instruction: ALUOpX{Op: ALUOpShiftRight}, + assembler: "rsh x", + }, + { + instruction: ALUOpX{Op: ALUOpMod}, + assembler: "mod x", + }, + { + instruction: ALUOpX{Op: ALUOpXor}, + assembler: "xor x", + }, + { + instruction: ALUOpX{Op: 0xffff}, + assembler: "unknown instruction: bpf.ALUOpX{Op:0xffff}", + }, + { + instruction: NegateA{}, + assembler: "neg", + }, + { + instruction: Jump{Skip: 10}, + assembler: "ja 10", + }, + { + instruction: JumpIf{Cond: JumpEqual, Val: 42, SkipTrue: 8, SkipFalse: 9}, + assembler: "jeq #42,8,9", + }, + { + instruction: JumpIf{Cond: JumpEqual, Val: 42, SkipTrue: 8}, + assembler: "jeq #42,8", + }, + { + instruction: JumpIf{Cond: JumpEqual, Val: 42, SkipFalse: 8}, + assembler: "jneq #42,8", + }, + { + instruction: JumpIf{Cond: JumpNotEqual, Val: 42, SkipTrue: 8}, + assembler: "jneq #42,8", + }, + { + instruction: JumpIf{Cond: JumpLessThan, Val: 42, SkipTrue: 7}, + assembler: "jlt #42,7", + }, + { + instruction: JumpIf{Cond: JumpLessOrEqual, Val: 42, SkipTrue: 6}, + assembler: "jle #42,6", + }, + { + instruction: JumpIf{Cond: JumpGreaterThan, Val: 42, SkipTrue: 4, SkipFalse: 5}, + assembler: "jgt #42,4,5", + }, + { + instruction: JumpIf{Cond: JumpGreaterThan, Val: 42, SkipTrue: 4}, + assembler: "jgt #42,4", + }, + { + instruction: JumpIf{Cond: JumpGreaterOrEqual, Val: 42, SkipTrue: 3, SkipFalse: 4}, + assembler: "jge #42,3,4", + }, + { + 
instruction: JumpIf{Cond: JumpGreaterOrEqual, Val: 42, SkipTrue: 3}, + assembler: "jge #42,3", + }, + { + instruction: JumpIf{Cond: JumpBitsSet, Val: 42, SkipTrue: 2, SkipFalse: 3}, + assembler: "jset #42,2,3", + }, + { + instruction: JumpIf{Cond: JumpBitsSet, Val: 42, SkipTrue: 2}, + assembler: "jset #42,2", + }, + { + instruction: JumpIf{Cond: JumpBitsNotSet, Val: 42, SkipTrue: 2, SkipFalse: 3}, + assembler: "jset #42,3,2", + }, + { + instruction: JumpIf{Cond: JumpBitsNotSet, Val: 42, SkipTrue: 2}, + assembler: "jset #42,0,2", + }, + { + instruction: JumpIf{Cond: 0xffff, Val: 42, SkipTrue: 1, SkipFalse: 2}, + assembler: "unknown instruction: bpf.JumpIf{Cond:0xffff, Val:0x2a, SkipTrue:0x1, SkipFalse:0x2}", + }, + { + instruction: TAX{}, + assembler: "tax", + }, + { + instruction: TXA{}, + assembler: "txa", + }, + { + instruction: RetA{}, + assembler: "ret a", + }, + { + instruction: RetConstant{Val: 42}, + assembler: "ret #42", + }, + // Invalid instruction + { + instruction: InvalidInstruction{}, + assembler: "unknown instruction: bpf.InvalidInstruction{}", + }, + } + + for _, testCase := range testCases { + if input, ok := testCase.instruction.(fmt.Stringer); ok { + got := input.String() + if got != testCase.assembler { + t.Errorf("String did not return expected assembler notation, expected: %s, got: %s", testCase.assembler, got) + } + } else { + t.Errorf("Instruction %#v is not a fmt.Stringer", testCase.instruction) + } + } +} diff --git a/fn/vendor/golang.org/x/net/bpf/setter.go b/fn/vendor/golang.org/x/net/bpf/setter.go new file mode 100644 index 000000000..43e35f0ac --- /dev/null +++ b/fn/vendor/golang.org/x/net/bpf/setter.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +// A Setter is a type which can attach a compiled BPF filter to itself. 
+type Setter interface { + SetBPF(filter []RawInstruction) error +} diff --git a/fn/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf b/fn/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf new file mode 100644 index 000000000..f87144064 --- /dev/null +++ b/fn/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf @@ -0,0 +1 @@ +50,0 0 0 42,1 0 0 42,96 0 0 3,97 0 0 3,48 0 0 42,40 0 0 42,32 0 0 42,80 0 0 42,72 0 0 42,64 0 0 42,177 0 0 42,128 0 0 0,32 0 0 4294963200,32 0 0 4294963204,32 0 0 4294963256,2 0 0 3,3 0 0 3,4 0 0 42,20 0 0 42,36 0 0 42,52 0 0 42,68 0 0 42,84 0 0 42,100 0 0 42,116 0 0 42,148 0 0 42,164 0 0 42,12 0 0 0,28 0 0 0,44 0 0 0,60 0 0 0,76 0 0 0,92 0 0 0,108 0 0 0,124 0 0 0,156 0 0 0,172 0 0 0,132 0 0 0,5 0 0 10,21 8 9 42,21 0 8 42,53 0 7 42,37 0 6 42,37 4 5 42,53 3 4 42,69 2 3 42,7 0 0 0,135 0 0 0,22 0 0 0,6 0 0 0, diff --git a/fn/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt b/fn/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt new file mode 100644 index 000000000..304550155 --- /dev/null +++ b/fn/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt @@ -0,0 +1,79 @@ +# This filter is compiled to all_instructions.bpf by the `bpf_asm` +# tool, which can be found in the linux kernel source tree under +# tools/net. 
+ +# Load immediate +ld #42 +ldx #42 + +# Load scratch +ld M[3] +ldx M[3] + +# Load absolute +ldb [42] +ldh [42] +ld [42] + +# Load indirect +ldb [x + 42] +ldh [x + 42] +ld [x + 42] + +# Load IPv4 header length +ldx 4*([42]&0xf) + +# Run extension function +ld #len +ld #proto +ld #type +ld #rand + +# Store scratch +st M[3] +stx M[3] + +# A constant +add #42 +sub #42 +mul #42 +div #42 +or #42 +and #42 +lsh #42 +rsh #42 +mod #42 +xor #42 + +# A X +add x +sub x +mul x +div x +or x +and x +lsh x +rsh x +mod x +xor x + +# !A +neg + +# Jumps +ja end +jeq #42,prev,end +jne #42,end +jlt #42,end +jle #42,end +jgt #42,prev,end +jge #42,prev,end +jset #42,prev,end + +# Register transfers +tax +txa + +# Returns +prev: ret a +end: ret #42 diff --git a/fn/vendor/golang.org/x/net/bpf/vm.go b/fn/vendor/golang.org/x/net/bpf/vm.go new file mode 100644 index 000000000..4c656f1e1 --- /dev/null +++ b/fn/vendor/golang.org/x/net/bpf/vm.go @@ -0,0 +1,140 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import ( + "errors" + "fmt" +) + +// A VM is an emulated BPF virtual machine. +type VM struct { + filter []Instruction +} + +// NewVM returns a new VM using the input BPF program. 
+func NewVM(filter []Instruction) (*VM, error) { + if len(filter) == 0 { + return nil, errors.New("one or more Instructions must be specified") + } + + for i, ins := range filter { + check := len(filter) - (i + 1) + switch ins := ins.(type) { + // Check for out-of-bounds jumps in instructions + case Jump: + if check <= int(ins.Skip) { + return nil, fmt.Errorf("cannot jump %d instructions; jumping past program bounds", ins.Skip) + } + case JumpIf: + if check <= int(ins.SkipTrue) { + return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue) + } + if check <= int(ins.SkipFalse) { + return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse) + } + // Check for division or modulus by zero + case ALUOpConstant: + if ins.Val != 0 { + break + } + + switch ins.Op { + case ALUOpDiv, ALUOpMod: + return nil, errors.New("cannot divide by zero using ALUOpConstant") + } + // Check for unknown extensions + case LoadExtension: + switch ins.Num { + case ExtLen: + default: + return nil, fmt.Errorf("extension %d not implemented", ins.Num) + } + } + } + + // Make sure last instruction is a return instruction + switch filter[len(filter)-1].(type) { + case RetA, RetConstant: + default: + return nil, errors.New("BPF program must end with RetA or RetConstant") + } + + // Though our VM works using disassembled instructions, we + // attempt to assemble the input filter anyway to ensure it is compatible + // with an operating system VM. + _, err := Assemble(filter) + + return &VM{ + filter: filter, + }, err +} + +// Run runs the VM's BPF program against the input bytes. +// Run returns the number of bytes accepted by the BPF program, and any errors +// which occurred while processing the program. 
+func (v *VM) Run(in []byte) (int, error) { + var ( + // Registers of the virtual machine + regA uint32 + regX uint32 + regScratch [16]uint32 + + // OK is true if the program should continue processing the next + // instruction, or false if not, causing the loop to break + ok = true + ) + + // TODO(mdlayher): implement: + // - NegateA: + // - would require a change from uint32 registers to int32 + // registers + + // TODO(mdlayher): add interop tests that check signedness of ALU + // operations against kernel implementation, and make sure Go + // implementation matches behavior + + for i := 0; i < len(v.filter) && ok; i++ { + ins := v.filter[i] + + switch ins := ins.(type) { + case ALUOpConstant: + regA = aluOpConstant(ins, regA) + case ALUOpX: + regA, ok = aluOpX(ins, regA, regX) + case Jump: + i += int(ins.Skip) + case JumpIf: + jump := jumpIf(ins, regA) + i += jump + case LoadAbsolute: + regA, ok = loadAbsolute(ins, in) + case LoadConstant: + regA, regX = loadConstant(ins, regA, regX) + case LoadExtension: + regA = loadExtension(ins, in) + case LoadIndirect: + regA, ok = loadIndirect(ins, in, regX) + case LoadMemShift: + regX, ok = loadMemShift(ins, in) + case LoadScratch: + regA, regX = loadScratch(ins, regScratch, regA, regX) + case RetA: + return int(regA), nil + case RetConstant: + return int(ins.Val), nil + case StoreScratch: + regScratch = storeScratch(ins, regScratch, regA, regX) + case TAX: + regX = regA + case TXA: + regA = regX + default: + return 0, fmt.Errorf("unknown Instruction at index %d: %T", i, ins) + } + } + + return 0, nil +} diff --git a/fn/vendor/golang.org/x/net/bpf/vm_aluop_test.go b/fn/vendor/golang.org/x/net/bpf/vm_aluop_test.go new file mode 100644 index 000000000..16678244a --- /dev/null +++ b/fn/vendor/golang.org/x/net/bpf/vm_aluop_test.go @@ -0,0 +1,512 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bpf_test + +import ( + "testing" + + "golang.org/x/net/bpf" +) + +func TestVMALUOpAdd(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpAdd, + Val: 3, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 8, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 3, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpSub(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.TAX{}, + bpf.ALUOpX{ + Op: bpf.ALUOpSub, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpMul(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpMul, + Val: 2, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 6, 2, 3, 4, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpDiv(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + 
bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpDiv, + Val: 2, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 20, 2, 3, 4, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpDivByZeroALUOpConstant(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.ALUOpConstant{ + Op: bpf.ALUOpDiv, + Val: 0, + }, + bpf.RetA{}, + }) + if errStr(err) != "cannot divide by zero using ALUOpConstant" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMALUOpDivByZeroALUOpX(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + // Load byte 0 into X + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.TAX{}, + // Load byte 1 into A + bpf.LoadAbsolute{ + Off: 9, + Size: 1, + }, + // Attempt to perform 1/0 + bpf.ALUOpX{ + Op: bpf.ALUOpDiv, + }, + // Return 4 bytes if program does not terminate + bpf.LoadConstant{ + Val: 12, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 3, 4, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpOr(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 2, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpOr, + Val: 0x01, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 
0xff, 0xff, 0xff, 0xff, + 0x00, 0x10, 0x03, 0x04, + 0x05, 0x06, 0x07, 0x08, + 0x09, 0xff, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 9, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpAnd(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 2, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpAnd, + Val: 0x0019, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xaa, 0x09, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpShiftLeft(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpShiftLeft, + Val: 0x01, + }, + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 0x02, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x01, 0xaa, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpShiftRight(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpShiftRight, + Val: 0x01, + }, + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 0x04, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + 
bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x08, 0xff, 0xff, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpMod(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpMod, + Val: 20, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 30, 0, 0, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpModByZeroALUOpConstant(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpMod, + Val: 0, + }, + bpf.RetA{}, + }) + if errStr(err) != "cannot divide by zero using ALUOpConstant" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMALUOpModByZeroALUOpX(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + // Load byte 0 into X + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.TAX{}, + // Load byte 1 into A + bpf.LoadAbsolute{ + Off: 9, + Size: 1, + }, + // Attempt to perform 1%0 + bpf.ALUOpX{ + Op: bpf.ALUOpMod, + }, + // Return 4 bytes if program does not terminate + bpf.LoadConstant{ + Val: 12, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 
3, 4, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpXor(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpXor, + Val: 0x0a, + }, + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 0x01, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x0b, 0x00, 0x00, 0x00, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpUnknown(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpAdd, + Val: 1, + }, + // Verify that an unknown operation is a no-op + bpf.ALUOpConstant{ + Op: 100, + }, + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 0x02, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} diff --git a/fn/vendor/golang.org/x/net/bpf/vm_bpf_test.go b/fn/vendor/golang.org/x/net/bpf/vm_bpf_test.go new file mode 100644 index 000000000..77fa8fe4a --- /dev/null +++ 
b/fn/vendor/golang.org/x/net/bpf/vm_bpf_test.go @@ -0,0 +1,192 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf_test + +import ( + "net" + "runtime" + "testing" + "time" + + "golang.org/x/net/bpf" + "golang.org/x/net/ipv4" +) + +// A virtualMachine is a BPF virtual machine which can process an +// input packet against a BPF program and render a verdict. +type virtualMachine interface { + Run(in []byte) (int, error) +} + +// canUseOSVM indicates if the OS BPF VM is available on this platform. +func canUseOSVM() bool { + // OS BPF VM can only be used on platforms where x/net/ipv4 supports + // attaching a BPF program to a socket. + switch runtime.GOOS { + case "linux": + return true + } + + return false +} + +// All BPF tests against both the Go VM and OS VM are assumed to +// be used with a UDP socket. As a result, the entire contents +// of a UDP datagram is sent through the BPF program, but only +// the body after the UDP header will ever be returned in output. + +// testVM sets up a Go BPF VM, and if available, a native OS BPF VM +// for integration testing. +func testVM(t *testing.T, filter []bpf.Instruction) (virtualMachine, func(), error) { + goVM, err := bpf.NewVM(filter) + if err != nil { + // Some tests expect an error, so this error must be returned + // instead of fatally exiting the test + return nil, nil, err + } + + mvm := &multiVirtualMachine{ + goVM: goVM, + + t: t, + } + + // If available, add the OS VM for tests which verify that both the Go + // VM and OS VM have exactly the same output for the same input program + // and packet. + done := func() {} + if canUseOSVM() { + osVM, osVMDone := testOSVM(t, filter) + done = func() { osVMDone() } + mvm.osVM = osVM + } + + return mvm, done, nil +} + +// udpHeaderLen is the length of a UDP header. 
+const udpHeaderLen = 8 + +// A multiVirtualMachine is a virtualMachine which can call out to both the Go VM +// and the native OS VM, if the OS VM is available. +type multiVirtualMachine struct { + goVM virtualMachine + osVM virtualMachine + + t *testing.T +} + +func (mvm *multiVirtualMachine) Run(in []byte) (int, error) { + if len(in) < udpHeaderLen { + mvm.t.Fatalf("input must be at least length of UDP header (%d), got: %d", + udpHeaderLen, len(in)) + } + + // All tests have a UDP header as part of input, because the OS VM + // packets always will. For the Go VM, this output is trimmed before + // being sent back to tests. + goOut, goErr := mvm.goVM.Run(in) + if goOut >= udpHeaderLen { + goOut -= udpHeaderLen + } + + // If Go output is larger than the size of the packet, packet filtering + // interop tests must trim the output bytes to the length of the packet. + // The BPF VM should not do this on its own, as other uses of it do + // not trim the output byte count. + trim := len(in) - udpHeaderLen + if goOut > trim { + goOut = trim + } + + // When the OS VM is not available, process using the Go VM alone + if mvm.osVM == nil { + return goOut, goErr + } + + // The OS VM will apply its own UDP header, so remove the pseudo header + // that the Go VM needs. + osOut, err := mvm.osVM.Run(in[udpHeaderLen:]) + if err != nil { + mvm.t.Fatalf("error while running OS VM: %v", err) + } + + // Verify both VMs return same number of bytes + var mismatch bool + if goOut != osOut { + mismatch = true + mvm.t.Logf("output byte count does not match:\n- go: %v\n- os: %v", goOut, osOut) + } + + if mismatch { + mvm.t.Fatal("Go BPF and OS BPF packet outputs do not match") + } + + return goOut, goErr +} + +// An osVirtualMachine is a virtualMachine which uses the OS's BPF VM for +// processing BPF programs. 
+type osVirtualMachine struct { + l net.PacketConn + s net.Conn +} + +// testOSVM creates a virtualMachine which uses the OS's BPF VM by injecting +// packets into a UDP listener with a BPF program attached to it. +func testOSVM(t *testing.T, filter []bpf.Instruction) (virtualMachine, func()) { + l, err := net.ListenPacket("udp4", "127.0.0.1:0") + if err != nil { + t.Fatalf("failed to open OS VM UDP listener: %v", err) + } + + prog, err := bpf.Assemble(filter) + if err != nil { + t.Fatalf("failed to compile BPF program: %v", err) + } + + p := ipv4.NewPacketConn(l) + if err = p.SetBPF(prog); err != nil { + t.Fatalf("failed to attach BPF program to listener: %v", err) + } + + s, err := net.Dial("udp4", l.LocalAddr().String()) + if err != nil { + t.Fatalf("failed to dial connection to listener: %v", err) + } + + done := func() { + _ = s.Close() + _ = l.Close() + } + + return &osVirtualMachine{ + l: l, + s: s, + }, done +} + +// Run sends the input bytes into the OS's BPF VM and returns its verdict. +func (vm *osVirtualMachine) Run(in []byte) (int, error) { + go func() { + _, _ = vm.s.Write(in) + }() + + vm.l.SetDeadline(time.Now().Add(50 * time.Millisecond)) + + var b [512]byte + n, _, err := vm.l.ReadFrom(b[:]) + if err != nil { + // A timeout indicates that BPF filtered out the packet, and thus, + // no input should be returned. + if nerr, ok := err.(net.Error); ok && nerr.Timeout() { + return n, nil + } + + return n, err + } + + return n, nil +} diff --git a/fn/vendor/golang.org/x/net/bpf/vm_extension_test.go b/fn/vendor/golang.org/x/net/bpf/vm_extension_test.go new file mode 100644 index 000000000..7a48c82f3 --- /dev/null +++ b/fn/vendor/golang.org/x/net/bpf/vm_extension_test.go @@ -0,0 +1,49 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bpf_test + +import ( + "testing" + + "golang.org/x/net/bpf" +) + +func TestVMLoadExtensionNotImplemented(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadExtension{ + Num: 100, + }, + bpf.RetA{}, + }) + if errStr(err) != "extension 100 not implemented" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMLoadExtensionExtLen(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadExtension{ + Num: bpf.ExtLen, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} diff --git a/fn/vendor/golang.org/x/net/bpf/vm_instructions.go b/fn/vendor/golang.org/x/net/bpf/vm_instructions.go new file mode 100644 index 000000000..516f9462b --- /dev/null +++ b/fn/vendor/golang.org/x/net/bpf/vm_instructions.go @@ -0,0 +1,174 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bpf + +import ( + "encoding/binary" + "fmt" +) + +func aluOpConstant(ins ALUOpConstant, regA uint32) uint32 { + return aluOpCommon(ins.Op, regA, ins.Val) +} + +func aluOpX(ins ALUOpX, regA uint32, regX uint32) (uint32, bool) { + // Guard against division or modulus by zero by terminating + // the program, as the OS BPF VM does + if regX == 0 { + switch ins.Op { + case ALUOpDiv, ALUOpMod: + return 0, false + } + } + + return aluOpCommon(ins.Op, regA, regX), true +} + +func aluOpCommon(op ALUOp, regA uint32, value uint32) uint32 { + switch op { + case ALUOpAdd: + return regA + value + case ALUOpSub: + return regA - value + case ALUOpMul: + return regA * value + case ALUOpDiv: + // Division by zero not permitted by NewVM and aluOpX checks + return regA / value + case ALUOpOr: + return regA | value + case ALUOpAnd: + return regA & value + case ALUOpShiftLeft: + return regA << value + case ALUOpShiftRight: + return regA >> value + case ALUOpMod: + // Modulus by zero not permitted by NewVM and aluOpX checks + return regA % value + case ALUOpXor: + return regA ^ value + default: + return regA + } +} + +func jumpIf(ins JumpIf, value uint32) int { + var ok bool + inV := uint32(ins.Val) + + switch ins.Cond { + case JumpEqual: + ok = value == inV + case JumpNotEqual: + ok = value != inV + case JumpGreaterThan: + ok = value > inV + case JumpLessThan: + ok = value < inV + case JumpGreaterOrEqual: + ok = value >= inV + case JumpLessOrEqual: + ok = value <= inV + case JumpBitsSet: + ok = (value & inV) != 0 + case JumpBitsNotSet: + ok = (value & inV) == 0 + } + + if ok { + return int(ins.SkipTrue) + } + + return int(ins.SkipFalse) +} + +func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) { + offset := int(ins.Off) + size := int(ins.Size) + + return loadCommon(in, offset, size) +} + +func loadConstant(ins LoadConstant, regA uint32, regX uint32) (uint32, uint32) { + switch ins.Dst { + case RegA: + regA = ins.Val + case RegX: + regX = ins.Val + } + + return regA, 
regX +} + +func loadExtension(ins LoadExtension, in []byte) uint32 { + switch ins.Num { + case ExtLen: + return uint32(len(in)) + default: + panic(fmt.Sprintf("unimplemented extension: %d", ins.Num)) + } +} + +func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) { + offset := int(ins.Off) + int(regX) + size := int(ins.Size) + + return loadCommon(in, offset, size) +} + +func loadMemShift(ins LoadMemShift, in []byte) (uint32, bool) { + offset := int(ins.Off) + + if !inBounds(len(in), offset, 0) { + return 0, false + } + + // Mask off high 4 bits and multiply low 4 bits by 4 + return uint32(in[offset]&0x0f) * 4, true +} + +func inBounds(inLen int, offset int, size int) bool { + return offset+size <= inLen +} + +func loadCommon(in []byte, offset int, size int) (uint32, bool) { + if !inBounds(len(in), offset, size) { + return 0, false + } + + switch size { + case 1: + return uint32(in[offset]), true + case 2: + return uint32(binary.BigEndian.Uint16(in[offset : offset+size])), true + case 4: + return uint32(binary.BigEndian.Uint32(in[offset : offset+size])), true + default: + panic(fmt.Sprintf("invalid load size: %d", size)) + } +} + +func loadScratch(ins LoadScratch, regScratch [16]uint32, regA uint32, regX uint32) (uint32, uint32) { + switch ins.Dst { + case RegA: + regA = regScratch[ins.N] + case RegX: + regX = regScratch[ins.N] + } + + return regA, regX +} + +func storeScratch(ins StoreScratch, regScratch [16]uint32, regA uint32, regX uint32) [16]uint32 { + switch ins.Src { + case RegA: + regScratch[ins.N] = regA + case RegX: + regScratch[ins.N] = regX + } + + return regScratch +} diff --git a/fn/vendor/golang.org/x/net/bpf/vm_jump_test.go b/fn/vendor/golang.org/x/net/bpf/vm_jump_test.go new file mode 100644 index 000000000..e0a3a988b --- /dev/null +++ b/fn/vendor/golang.org/x/net/bpf/vm_jump_test.go @@ -0,0 +1,380 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf_test + +import ( + "testing" + + "golang.org/x/net/bpf" +) + +func TestVMJumpOne(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.Jump{ + Skip: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpOutOfProgram(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.Jump{ + Skip: 1, + }, + bpf.RetA{}, + }) + if errStr(err) != "cannot jump 1 instructions; jumping past program bounds" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMJumpIfTrueOutOfProgram(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.JumpIf{ + Cond: bpf.JumpEqual, + SkipTrue: 2, + }, + bpf.RetA{}, + }) + if errStr(err) != "cannot jump 2 instructions in true case; jumping past program bounds" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMJumpIfFalseOutOfProgram(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.JumpIf{ + Cond: bpf.JumpEqual, + SkipFalse: 3, + }, + bpf.RetA{}, + }) + if errStr(err) != "cannot jump 3 instructions in false case; jumping past program bounds" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMJumpIfEqual(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 1, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + 
t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfNotEqual(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.JumpIf{ + Cond: bpf.JumpNotEqual, + Val: 1, + SkipFalse: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfGreaterThan(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 4, + }, + bpf.JumpIf{ + Cond: bpf.JumpGreaterThan, + Val: 0x00010202, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 12, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfLessThan(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 4, + }, + bpf.JumpIf{ + Cond: bpf.JumpLessThan, + Val: 0xff010203, + SkipTrue: 1, + }, + bpf.RetConstant{ + 
Val: 0, + }, + bpf.RetConstant{ + Val: 12, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfGreaterOrEqual(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 4, + }, + bpf.JumpIf{ + Cond: bpf.JumpGreaterOrEqual, + Val: 0x00010203, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 12, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfLessOrEqual(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 4, + }, + bpf.JumpIf{ + Cond: bpf.JumpLessOrEqual, + Val: 0xff010203, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 12, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfBitsSet(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 
2, + }, + bpf.JumpIf{ + Cond: bpf.JumpBitsSet, + Val: 0x1122, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 10, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x01, 0x02, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfBitsNotSet(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 2, + }, + bpf.JumpIf{ + Cond: bpf.JumpBitsNotSet, + Val: 0x1221, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 10, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x01, 0x02, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} diff --git a/fn/vendor/golang.org/x/net/bpf/vm_load_test.go b/fn/vendor/golang.org/x/net/bpf/vm_load_test.go new file mode 100644 index 000000000..04578b66b --- /dev/null +++ b/fn/vendor/golang.org/x/net/bpf/vm_load_test.go @@ -0,0 +1,246 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bpf_test + +import ( + "net" + "testing" + + "golang.org/x/net/bpf" + "golang.org/x/net/ipv4" +) + +func TestVMLoadAbsoluteOffsetOutOfBounds(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 100, + Size: 2, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMLoadAbsoluteOffsetPlusSizeOutOfBounds(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 2, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMLoadAbsoluteBadInstructionSize(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Size: 5, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid load byte length 0" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMLoadConstantOK(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadConstant{ + Dst: bpf.RegX, + Val: 9, + }, + bpf.TXA{}, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if 
want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMLoadIndirectOutOfBounds(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadIndirect{ + Off: 100, + Size: 1, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMLoadMemShiftOutOfBounds(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadMemShift{ + Off: 100, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +const ( + dhcp4Port = 53 +) + +func TestVMLoadMemShiftLoadIndirectNoResult(t *testing.T) { + vm, in, done := testDHCPv4(t) + defer done() + + // Append mostly empty UDP header with incorrect DHCPv4 port + in = append(in, []byte{ + 0, 0, + 0, dhcp4Port + 1, + 0, 0, + 0, 0, + }...) 
+ + out, err := vm.Run(in) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMLoadMemShiftLoadIndirectOK(t *testing.T) { + vm, in, done := testDHCPv4(t) + defer done() + + // Append mostly empty UDP header with correct DHCPv4 port + in = append(in, []byte{ + 0, 0, + 0, dhcp4Port, + 0, 0, + 0, 0, + }...) + + out, err := vm.Run(in) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := len(in)-8, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func testDHCPv4(t *testing.T) (virtualMachine, []byte, func()) { + // DHCPv4 test data courtesy of David Anderson: + // https://github.com/google/netboot/blob/master/dhcp4/conn_linux.go#L59-L70 + vm, done, err := testVM(t, []bpf.Instruction{ + // Load IPv4 packet length + bpf.LoadMemShift{Off: 8}, + // Get UDP dport + bpf.LoadIndirect{Off: 8 + 2, Size: 2}, + // Correct dport? + bpf.JumpIf{Cond: bpf.JumpEqual, Val: dhcp4Port, SkipFalse: 1}, + // Accept + bpf.RetConstant{Val: 1500}, + // Ignore + bpf.RetConstant{Val: 0}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + + // Minimal requirements to make a valid IPv4 header + h := &ipv4.Header{ + Len: ipv4.HeaderLen, + Src: net.IPv4(192, 168, 1, 1), + Dst: net.IPv4(192, 168, 1, 2), + } + hb, err := h.Marshal() + if err != nil { + t.Fatalf("failed to marshal IPv4 header: %v", err) + } + + hb = append([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + }, hb...) + + return vm, hb, done +} diff --git a/fn/vendor/golang.org/x/net/bpf/vm_ret_test.go b/fn/vendor/golang.org/x/net/bpf/vm_ret_test.go new file mode 100644 index 000000000..2d86eae3e --- /dev/null +++ b/fn/vendor/golang.org/x/net/bpf/vm_ret_test.go @@ -0,0 +1,115 @@ +// Copyright 2016 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf_test + +import ( + "testing" + + "golang.org/x/net/bpf" +) + +func TestVMRetA(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 9, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMRetALargerThanInput(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 2, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 255, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMRetConstant(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMRetConstantLargerThanInput(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.RetConstant{ + Val: 16, + }, + }) + if err != nil { 
+ t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} diff --git a/fn/vendor/golang.org/x/net/bpf/vm_scratch_test.go b/fn/vendor/golang.org/x/net/bpf/vm_scratch_test.go new file mode 100644 index 000000000..e600e3c28 --- /dev/null +++ b/fn/vendor/golang.org/x/net/bpf/vm_scratch_test.go @@ -0,0 +1,247 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf_test + +import ( + "testing" + + "golang.org/x/net/bpf" +) + +func TestVMStoreScratchInvalidScratchRegisterTooSmall(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.StoreScratch{ + Src: bpf.RegA, + N: -1, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid scratch slot -1" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMStoreScratchInvalidScratchRegisterTooLarge(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.StoreScratch{ + Src: bpf.RegA, + N: 16, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid scratch slot 16" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMStoreScratchUnknownSourceRegister(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.StoreScratch{ + Src: 100, + N: 0, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid source register 100" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMLoadScratchInvalidScratchRegisterTooSmall(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadScratch{ + Dst: bpf.RegX, + N: -1, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling 
instruction 1: invalid scratch slot -1" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMLoadScratchInvalidScratchRegisterTooLarge(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadScratch{ + Dst: bpf.RegX, + N: 16, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid scratch slot 16" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMLoadScratchUnknownDestinationRegister(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadScratch{ + Dst: 100, + N: 0, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid target register 100" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMStoreScratchLoadScratchOneValue(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + // Load byte 255 + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + // Copy to X and store in scratch[0] + bpf.TAX{}, + bpf.StoreScratch{ + Src: bpf.RegX, + N: 0, + }, + // Load byte 1 + bpf.LoadAbsolute{ + Off: 9, + Size: 1, + }, + // Overwrite 1 with 255 from scratch[0] + bpf.LoadScratch{ + Dst: bpf.RegA, + N: 0, + }, + // Return 255 + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 255, 1, 2, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 3, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMStoreScratchLoadScratchMultipleValues(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + // Load byte 10 + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + // Store in scratch[0] + bpf.StoreScratch{ + Src: bpf.RegA, + N: 0, + }, + // Load byte 20 + bpf.LoadAbsolute{ + Off: 9, + Size: 1, + }, + // Store in scratch[1] + bpf.StoreScratch{ + Src: bpf.RegA, + N: 1, + }, + // Load byte 30 + bpf.LoadAbsolute{ + 
Off: 10, + Size: 1, + }, + // Store in scratch[2] + bpf.StoreScratch{ + Src: bpf.RegA, + N: 2, + }, + // Load byte 1 + bpf.LoadAbsolute{ + Off: 11, + Size: 1, + }, + // Store in scratch[3] + bpf.StoreScratch{ + Src: bpf.RegA, + N: 3, + }, + // Load in byte 10 to X + bpf.LoadScratch{ + Dst: bpf.RegX, + N: 0, + }, + // Copy X -> A + bpf.TXA{}, + // Verify value is 10 + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 10, + SkipTrue: 1, + }, + // Fail test if incorrect + bpf.RetConstant{ + Val: 0, + }, + // Load in byte 20 to A + bpf.LoadScratch{ + Dst: bpf.RegA, + N: 1, + }, + // Verify value is 20 + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 20, + SkipTrue: 1, + }, + // Fail test if incorrect + bpf.RetConstant{ + Val: 0, + }, + // Load in byte 30 to A + bpf.LoadScratch{ + Dst: bpf.RegA, + N: 2, + }, + // Verify value is 30 + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 30, + SkipTrue: 1, + }, + // Fail test if incorrect + bpf.RetConstant{ + Val: 0, + }, + // Return first two bytes on success + bpf.RetConstant{ + Val: 10, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 10, 20, 30, 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} diff --git a/fn/vendor/golang.org/x/net/bpf/vm_test.go b/fn/vendor/golang.org/x/net/bpf/vm_test.go new file mode 100644 index 000000000..6bd4dd5c3 --- /dev/null +++ b/fn/vendor/golang.org/x/net/bpf/vm_test.go @@ -0,0 +1,144 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bpf_test + +import ( + "fmt" + "testing" + + "golang.org/x/net/bpf" +) + +var _ bpf.Instruction = unknown{} + +type unknown struct{} + +func (unknown) Assemble() (bpf.RawInstruction, error) { + return bpf.RawInstruction{}, nil +} + +func TestVMUnknownInstruction(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadConstant{ + Dst: bpf.RegA, + Val: 100, + }, + // Should terminate the program with an error immediately + unknown{}, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + defer done() + + _, err = vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, + }) + if errStr(err) != "unknown Instruction at index 1: bpf_test.unknown" { + t.Fatalf("unexpected error while running program: %v", err) + } +} + +func TestVMNoReturnInstruction(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadConstant{ + Dst: bpf.RegA, + Val: 1, + }, + }) + if errStr(err) != "BPF program must end with RetA or RetConstant" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMNoInputInstructions(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{}) + if errStr(err) != "one or more Instructions must be specified" { + t.Fatalf("unexpected error: %v", err) + } +} + +// ExampleNewVM demonstrates usage of a VM, using an Ethernet frame +// as input and checking its EtherType to determine if it should be accepted. +func ExampleNewVM() { + // Offset | Length | Comment + // ------------------------- + // 00 | 06 | Ethernet destination MAC address + // 06 | 06 | Ethernet source MAC address + // 12 | 02 | Ethernet EtherType + const ( + etOff = 12 + etLen = 2 + + etARP = 0x0806 + ) + + // Set up a VM to filter traffic based on if its EtherType + // matches the ARP EtherType. 
+ vm, err := bpf.NewVM([]bpf.Instruction{ + // Load EtherType value from Ethernet header + bpf.LoadAbsolute{ + Off: etOff, + Size: etLen, + }, + // If EtherType is equal to the ARP EtherType, jump to allow + // packet to be accepted + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: etARP, + SkipTrue: 1, + }, + // EtherType does not match the ARP EtherType + bpf.RetConstant{ + Val: 0, + }, + // EtherType matches the ARP EtherType, accept up to 1500 + // bytes of packet + bpf.RetConstant{ + Val: 1500, + }, + }) + if err != nil { + panic(fmt.Sprintf("failed to load BPF program: %v", err)) + } + + // Create an Ethernet frame with the ARP EtherType for testing + frame := []byte{ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, + 0x08, 0x06, + // Payload omitted for brevity + } + + // Run our VM's BPF program using the Ethernet frame as input + out, err := vm.Run(frame) + if err != nil { + panic(fmt.Sprintf("failed to accept Ethernet frame: %v", err)) + } + + // BPF VM can return a byte count greater than the number of input + // bytes, so trim the output to match the input byte length + if out > len(frame) { + out = len(frame) + } + + fmt.Printf("out: %d bytes", out) + + // Output: + // out: 14 bytes +} + +// errStr returns the string representation of an error, or +// "" if it is nil. +func errStr(err error) string { + if err == nil { + return "" + } + + return err.Error() +} diff --git a/fn/vendor/golang.org/x/net/context/context.go b/fn/vendor/golang.org/x/net/context/context.go index 77b64d0c6..f143ed6a1 100644 --- a/fn/vendor/golang.org/x/net/context/context.go +++ b/fn/vendor/golang.org/x/net/context/context.go @@ -7,7 +7,7 @@ // and between processes. // // Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must +// servers should accept a Context. 
The chain of function calls between must // propagate the Context, optionally replacing it with a modified copy created // using WithDeadline, WithTimeout, WithCancel, or WithValue. // @@ -16,14 +16,14 @@ // propagation: // // Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first +// explicitly to each function that needs it. The Context should be the first // parameter, typically named ctx: // // func DoSomething(ctx context.Context, arg Arg) error { // // ... use ctx ... // } // -// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// Do not pass a nil Context, even if a function permits it. Pass context.TODO // if you are unsure about which Context to use. // // Use context Values only for request-scoped data that transits processes and @@ -36,12 +36,7 @@ // Contexts. package context // import "golang.org/x/net/context" -import ( - "errors" - "fmt" - "sync" - "time" -) +import "time" // A Context carries a deadline, a cancelation signal, and other values across // API boundaries. @@ -49,13 +44,13 @@ import ( // Context's methods may be called by multiple goroutines simultaneously. type Context interface { // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. Deadline() (deadline time.Time, ok bool) // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. 
// // WithCancel arranges for Done to be closed when cancel is called; // WithDeadline arranges for Done to be closed when the deadline @@ -66,7 +61,7 @@ type Context interface { // // // Stream generates values with DoSomething and sends them to out // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out <-chan Value) error { + // func Stream(ctx context.Context, out chan<- Value) error { // for { // v, err := DoSomething(ctx) // if err != nil { @@ -84,24 +79,24 @@ type Context interface { // a Done channel for cancelation. Done() <-chan struct{} - // Err returns a non-nil error value after Done is closed. Err returns + // Err returns a non-nil error value after Done is closed. Err returns // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. + // context's deadline passed. No other values for Err are defined. // After Done is closed, successive calls to Err return the same value. Err() error // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with + // if no value is associated with key. Successive calls to Value with // the same key returns the same result. // // Use context values only for request-scoped data that transits // processes and API boundaries, not for passing optional parameters to // functions. // - // A key identifies a specific value in a Context. Functions that wish + // A key identifies a specific value in a Context. Functions that wish // to store values in Context typically allocate a key in a global // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; + // Context.Value. A key can be any type that supports equality; // packages should define keys as an unexported type to avoid // collisions. 
// @@ -120,7 +115,7 @@ type Context interface { // // This prevents collisions with keys defined in other packages. // type key int // - // // userKey is the key for user.User values in Contexts. It is + // // userKey is the key for user.User values in Contexts. It is // // unexported; clients use user.NewContext and user.FromContext // // instead of using this key directly. // var userKey key = 0 @@ -138,57 +133,15 @@ type Context interface { Value(key interface{}) interface{} } -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - // Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, +// values, and has no deadline. It is typically used by the main function, // initialization, and tests, and as the top-level Context for incoming // requests. func Background() Context { return background } -// TODO returns a non-nil, empty Context. Code should use context.TODO when +// TODO returns a non-nil, empty Context. 
Code should use context.TODO when // it's unclear which Context to use or it is not yet available (because the // surrounding function has not yet been extended to accept a Context // parameter). TODO is recognized by static analysis tools that determine @@ -201,247 +154,3 @@ func TODO() Context { // A CancelFunc does not wait for the work to stop. // After the first call, subsequent calls to a CancelFunc do nothing. type CancelFunc func() - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, &c) - return &c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) cancelCtx { - return cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. 
-func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return &c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. 
- child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - cancelCtx - timer *time.Timer // Under cancelCtx.mu. 
- - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. 
-type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/fn/vendor/golang.org/x/net/context/context_test.go b/fn/vendor/golang.org/x/net/context/context_test.go index 05345fc5e..62844131b 100644 --- a/fn/vendor/golang.org/x/net/context/context_test.go +++ b/fn/vendor/golang.org/x/net/context/context_test.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build !go1.7 + package context import ( @@ -241,45 +243,51 @@ func testDeadline(c Context, wait time.Duration, t *testing.T) { } func TestDeadline(t *testing.T) { - c, _ := WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) + t.Parallel() + const timeUnit = 500 * time.Millisecond + c, _ := WithDeadline(Background(), time.Now().Add(1*timeUnit)) if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { t.Errorf("c.String() = %q want prefix %q", got, prefix) } - testDeadline(c, 200*time.Millisecond, t) + testDeadline(c, 2*timeUnit, t) - c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) + c, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit)) o := otherContext{c} - testDeadline(o, 200*time.Millisecond, t) + testDeadline(o, 2*timeUnit, t) - c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) + c, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit)) o = otherContext{c} - c, _ = WithDeadline(o, time.Now().Add(300*time.Millisecond)) - testDeadline(c, 200*time.Millisecond, t) + c, _ = WithDeadline(o, time.Now().Add(3*timeUnit)) + testDeadline(c, 2*timeUnit, t) } func TestTimeout(t *testing.T) { - c, _ := WithTimeout(Background(), 100*time.Millisecond) + t.Parallel() + const 
timeUnit = 500 * time.Millisecond + c, _ := WithTimeout(Background(), 1*timeUnit) if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { t.Errorf("c.String() = %q want prefix %q", got, prefix) } - testDeadline(c, 200*time.Millisecond, t) + testDeadline(c, 2*timeUnit, t) - c, _ = WithTimeout(Background(), 100*time.Millisecond) + c, _ = WithTimeout(Background(), 1*timeUnit) o := otherContext{c} - testDeadline(o, 200*time.Millisecond, t) + testDeadline(o, 2*timeUnit, t) - c, _ = WithTimeout(Background(), 100*time.Millisecond) + c, _ = WithTimeout(Background(), 1*timeUnit) o = otherContext{c} - c, _ = WithTimeout(o, 300*time.Millisecond) - testDeadline(c, 200*time.Millisecond, t) + c, _ = WithTimeout(o, 3*timeUnit) + testDeadline(c, 2*timeUnit, t) } func TestCanceledTimeout(t *testing.T) { - c, _ := WithTimeout(Background(), 200*time.Millisecond) + t.Parallel() + const timeUnit = 500 * time.Millisecond + c, _ := WithTimeout(Background(), 2*timeUnit) o := otherContext{c} - c, cancel := WithTimeout(o, 400*time.Millisecond) + c, cancel := WithTimeout(o, 4*timeUnit) cancel() - time.Sleep(100 * time.Millisecond) // let cancelation propagate + time.Sleep(1 * timeUnit) // let cancelation propagate select { case <-c.Done(): default: @@ -375,7 +383,7 @@ func TestAllocs(t *testing.T) { <-c.Done() }, limit: 8, - gccgoLimit: 15, + gccgoLimit: 16, }, { desc: "WithCancel(bg)", @@ -401,7 +409,7 @@ func TestAllocs(t *testing.T) { limit := test.limit if runtime.Compiler == "gccgo" { // gccgo does not yet do escape analysis. - // TOOD(iant): Remove this when gccgo does do escape analysis. + // TODO(iant): Remove this when gccgo does do escape analysis. 
limit = test.gccgoLimit } if n := testing.AllocsPerRun(100, test.f); n > limit { diff --git a/fn/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go b/fn/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go deleted file mode 100644 index e3170e333..000000000 --- a/fn/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.5 - -package ctxhttp - -import "net/http" - -func canceler(client *http.Client, req *http.Request) func() { - // TODO(djd): Respect any existing value of req.Cancel. - ch := make(chan struct{}) - req.Cancel = ch - - return func() { - close(ch) - } -} diff --git a/fn/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go b/fn/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go deleted file mode 100644 index 56bcbadb8..000000000 --- a/fn/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.5 - -package ctxhttp - -import "net/http" - -type requestCanceler interface { - CancelRequest(*http.Request) -} - -func canceler(client *http.Client, req *http.Request) func() { - rc, ok := client.Transport.(requestCanceler) - if !ok { - return func() {} - } - return func() { - rc.CancelRequest(req) - } -} diff --git a/fn/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/fn/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go index 62620d4eb..606cf1f97 100644 --- a/fn/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go +++ b/fn/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -1,7 +1,9 @@ -// Copyright 2015 The Go Authors. All rights reserved. +// Copyright 2016 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build go1.7 + // Package ctxhttp provides helper functions for performing context-aware HTTP requests. package ctxhttp // import "golang.org/x/net/context/ctxhttp" @@ -14,71 +16,28 @@ import ( "golang.org/x/net/context" ) -func nop() {} - -var ( - testHookContextDoneBeforeHeaders = nop - testHookDoReturned = nop - testHookDidBodyClose = nop -) - -// Do sends an HTTP request with the provided http.Client and returns an HTTP response. +// Do sends an HTTP request with the provided http.Client and returns +// an HTTP response. +// // If the client is nil, http.DefaultClient is used. -// If the context is canceled or times out, ctx.Err() will be returned. +// +// The provided ctx must be non-nil. If it is canceled or times out, +// ctx.Err() will be returned. func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { if client == nil { client = http.DefaultClient } - - // Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go. - cancel := canceler(client, req) - - type responseAndError struct { - resp *http.Response - err error - } - result := make(chan responseAndError, 1) - - go func() { - resp, err := client.Do(req) - testHookDoReturned() - result <- responseAndError{resp, err} - }() - - var resp *http.Response - - select { - case <-ctx.Done(): - testHookContextDoneBeforeHeaders() - cancel() - // Clean up after the goroutine calling client.Do: - go func() { - if r := <-result; r.resp != nil { - testHookDidBodyClose() - r.resp.Body.Close() - } - }() - return nil, ctx.Err() - case r := <-result: - var err error - resp, err = r.resp, r.err - if err != nil { - return resp, err - } - } - - c := make(chan struct{}) - go func() { + resp, err := client.Do(req.WithContext(ctx)) + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. 
+ if err != nil { select { case <-ctx.Done(): - cancel() - case <-c: - // The response's Body is closed. + err = ctx.Err() + default: } - }() - resp.Body = ¬ifyingReader{resp.Body, c} - - return resp, nil + } + return resp, err } // Get issues a GET request via the Do function. @@ -113,28 +72,3 @@ func Post(ctx context.Context, client *http.Client, url string, bodyType string, func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) } - -// notifyingReader is an io.ReadCloser that closes the notify channel after -// Close is called or a Read fails on the underlying ReadCloser. -type notifyingReader struct { - io.ReadCloser - notify chan<- struct{} -} - -func (r *notifyingReader) Read(p []byte) (int, error) { - n, err := r.ReadCloser.Read(p) - if err != nil && r.notify != nil { - close(r.notify) - r.notify = nil - } - return n, err -} - -func (r *notifyingReader) Close() error { - err := r.ReadCloser.Close() - if r.notify != nil { - close(r.notify) - r.notify = nil - } - return err -} diff --git a/fn/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go b/fn/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go new file mode 100644 index 000000000..72411b1b6 --- /dev/null +++ b/fn/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go @@ -0,0 +1,29 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !plan9,go1.7 + +package ctxhttp + +import ( + "io" + "net/http" + "net/http/httptest" + "testing" + + "context" +) + +func TestGo17Context(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "ok") + })) + defer ts.Close() + ctx := context.Background() + resp, err := Get(ctx, http.DefaultClient, ts.URL) + if resp == nil || err != nil { + t.Fatalf("error received from client: %v %v", err, resp) + } + resp.Body.Close() +} diff --git a/fn/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go b/fn/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go new file mode 100644 index 000000000..926870cc2 --- /dev/null +++ b/fn/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go @@ -0,0 +1,147 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package ctxhttp // import "golang.org/x/net/context/ctxhttp" + +import ( + "io" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +func nop() {} + +var ( + testHookContextDoneBeforeHeaders = nop + testHookDoReturned = nop + testHookDidBodyClose = nop +) + +// Do sends an HTTP request with the provided http.Client and returns an HTTP response. +// If the client is nil, http.DefaultClient is used. +// If the context is canceled or times out, ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + // TODO(djd): Respect any existing value of req.Cancel. + cancel := make(chan struct{}) + req.Cancel = cancel + + type responseAndError struct { + resp *http.Response + err error + } + result := make(chan responseAndError, 1) + + // Make local copies of test hooks closed over by goroutines below. + // Prevents data races in tests. 
+ testHookDoReturned := testHookDoReturned + testHookDidBodyClose := testHookDidBodyClose + + go func() { + resp, err := client.Do(req) + testHookDoReturned() + result <- responseAndError{resp, err} + }() + + var resp *http.Response + + select { + case <-ctx.Done(): + testHookContextDoneBeforeHeaders() + close(cancel) + // Clean up after the goroutine calling client.Do: + go func() { + if r := <-result; r.resp != nil { + testHookDidBodyClose() + r.resp.Body.Close() + } + }() + return nil, ctx.Err() + case r := <-result: + var err error + resp, err = r.resp, r.err + if err != nil { + return resp, err + } + } + + c := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + close(cancel) + case <-c: + // The response's Body is closed. + } + }() + resp.Body = ¬ifyingReader{resp.Body, c} + + return resp, nil +} + +// Get issues a GET request via the Do function. +func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. 
+func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// notifyingReader is an io.ReadCloser that closes the notify channel after +// Close is called or a Read fails on the underlying ReadCloser. +type notifyingReader struct { + io.ReadCloser + notify chan<- struct{} +} + +func (r *notifyingReader) Read(p []byte) (int, error) { + n, err := r.ReadCloser.Read(p) + if err != nil && r.notify != nil { + close(r.notify) + r.notify = nil + } + return n, err +} + +func (r *notifyingReader) Close() error { + err := r.ReadCloser.Close() + if r.notify != nil { + close(r.notify) + r.notify = nil + } + return err +} diff --git a/fn/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go b/fn/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go new file mode 100644 index 000000000..9159cf022 --- /dev/null +++ b/fn/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go @@ -0,0 +1,79 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !plan9,!go1.7 + +package ctxhttp + +import ( + "net" + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" + + "golang.org/x/net/context" +) + +// golang.org/issue/14065 +func TestClosesResponseBodyOnCancel(t *testing.T) { + defer func() { testHookContextDoneBeforeHeaders = nop }() + defer func() { testHookDoReturned = nop }() + defer func() { testHookDidBodyClose = nop }() + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) + defer ts.Close() + + ctx, cancel := context.WithCancel(context.Background()) + + // closed when Do enters select case <-ctx.Done() + enteredDonePath := make(chan struct{}) + + testHookContextDoneBeforeHeaders = func() { + close(enteredDonePath) + } + + testHookDoReturned = func() { + // We now have the result (the Flush'd headers) at least, + // so we can cancel the request. + cancel() + + // But block the client.Do goroutine from sending + // until Do enters into the <-ctx.Done() path, since + // otherwise if both channels are readable, select + // picks a random one. 
+ <-enteredDonePath + } + + sawBodyClose := make(chan struct{}) + testHookDidBodyClose = func() { close(sawBodyClose) } + + tr := &http.Transport{} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + req, _ := http.NewRequest("GET", ts.URL, nil) + _, doErr := Do(ctx, c, req) + + select { + case <-sawBodyClose: + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for body to close") + } + + if doErr != ctx.Err() { + t.Errorf("Do error = %v; want %v", doErr, ctx.Err()) + } +} + +type noteCloseConn struct { + net.Conn + onceClose sync.Once + closefn func() +} + +func (c *noteCloseConn) Close() error { + c.onceClose.Do(c.closefn) + return c.Conn.Close() +} diff --git a/fn/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go b/fn/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go index 77c25ba7e..1e4155180 100644 --- a/fn/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go +++ b/fn/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go @@ -7,11 +7,10 @@ package ctxhttp import ( + "io" "io/ioutil" - "net" "net/http" "net/http/httptest" - "sync" "testing" "time" @@ -23,59 +22,62 @@ const ( requestBody = "ok" ) +func okHandler(w http.ResponseWriter, r *http.Request) { + time.Sleep(requestDuration) + io.WriteString(w, requestBody) +} + func TestNoTimeout(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(okHandler)) + defer ts.Close() + ctx := context.Background() - resp, err := doRequest(ctx) - - if resp == nil || err != nil { - t.Fatalf("error received from client: %v %v", err, resp) + res, err := Get(ctx, nil, ts.URL) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if string(slurp) != requestBody { + t.Errorf("body = %q; want %q", slurp, requestBody) } } -func TestCancel(t *testing.T) { +func TestCancelBeforeHeaders(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) - go func() { - 
time.Sleep(requestDuration / 2) + + blockServer := make(chan struct{}) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { cancel() - }() + <-blockServer + io.WriteString(w, requestBody) + })) + defer ts.Close() + defer close(blockServer) - resp, err := doRequest(ctx) - - if resp != nil || err == nil { - t.Fatalf("expected error, didn't get one. resp: %v", resp) + res, err := Get(ctx, nil, ts.URL) + if err == nil { + res.Body.Close() + t.Fatal("Get returned unexpected nil error") } - if err != ctx.Err() { - t.Fatalf("expected error from context but got: %v", err) - } -} - -func TestCancelAfterRequest(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - - resp, err := doRequest(ctx) - - // Cancel before reading the body. - // Request.Body should still be readable after the context is canceled. - cancel() - - b, err := ioutil.ReadAll(resp.Body) - if err != nil || string(b) != requestBody { - t.Fatalf("could not read body: %q %v", b, err) + if err != context.Canceled { + t.Errorf("err = %v; want %v", err, context.Canceled) } } func TestCancelAfterHangingRequest(t *testing.T) { - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) w.(http.Flusher).Flush() <-w.(http.CloseNotifier).CloseNotify() - }) - - serv := httptest.NewServer(handler) - defer serv.Close() + })) + defer ts.Close() ctx, cancel := context.WithCancel(context.Background()) - resp, err := Get(ctx, nil, serv.URL) + resp, err := Get(ctx, nil, ts.URL) if err != nil { t.Fatalf("unexpected error in Get: %v", err) } @@ -101,76 +103,3 @@ func TestCancelAfterHangingRequest(t *testing.T) { case <-done: } } - -func doRequest(ctx context.Context) (*http.Response, error) { - var okHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - time.Sleep(requestDuration) - w.Write([]byte(requestBody)) - 
}) - - serv := httptest.NewServer(okHandler) - defer serv.Close() - - return Get(ctx, nil, serv.URL) -} - -// golang.org/issue/14065 -func TestClosesResponseBodyOnCancel(t *testing.T) { - defer func() { testHookContextDoneBeforeHeaders = nop }() - defer func() { testHookDoReturned = nop }() - defer func() { testHookDidBodyClose = nop }() - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) - defer ts.Close() - - ctx, cancel := context.WithCancel(context.Background()) - - // closed when Do enters select case <-ctx.Done() - enteredDonePath := make(chan struct{}) - - testHookContextDoneBeforeHeaders = func() { - close(enteredDonePath) - } - - testHookDoReturned = func() { - // We now have the result (the Flush'd headers) at least, - // so we can cancel the request. - cancel() - - // But block the client.Do goroutine from sending - // until Do enters into the <-ctx.Done() path, since - // otherwise if both channels are readable, select - // picks a random one. - <-enteredDonePath - } - - sawBodyClose := make(chan struct{}) - testHookDidBodyClose = func() { close(sawBodyClose) } - - tr := &http.Transport{} - defer tr.CloseIdleConnections() - c := &http.Client{Transport: tr} - req, _ := http.NewRequest("GET", ts.URL, nil) - _, doErr := Do(ctx, c, req) - - select { - case <-sawBodyClose: - case <-time.After(5 * time.Second): - t.Fatal("timeout waiting for body to close") - } - - if doErr != ctx.Err() { - t.Errorf("Do error = %v; want %v", doErr, ctx.Err()) - } -} - -type noteCloseConn struct { - net.Conn - onceClose sync.Once - closefn func() -} - -func (c *noteCloseConn) Close() error { - c.onceClose.Do(c.closefn) - return c.Conn.Close() -} diff --git a/fn/vendor/golang.org/x/net/context/go17.go b/fn/vendor/golang.org/x/net/context/go17.go new file mode 100644 index 000000000..d20f52b7d --- /dev/null +++ b/fn/vendor/golang.org/x/net/context/go17.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, CancelFunc(f) +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, CancelFunc(f) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/fn/vendor/golang.org/x/net/context/pre_go17.go b/fn/vendor/golang.org/x/net/context/pre_go17.go new file mode 100644 index 000000000..0f35592df --- /dev/null +++ b/fn/vendor/golang.org/x/net/context/pre_go17.go @@ -0,0 +1,300 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. 
+type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. 
+func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. 
+ + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. 
+ return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/fn/vendor/golang.org/x/net/context/withtimeout_test.go b/fn/vendor/golang.org/x/net/context/withtimeout_test.go index a6754dc36..e6f56691d 100644 --- a/fn/vendor/golang.org/x/net/context/withtimeout_test.go +++ b/fn/vendor/golang.org/x/net/context/withtimeout_test.go @@ -11,16 +11,21 @@ import ( "golang.org/x/net/context" ) +// This example passes a context with a timeout to tell a blocking function that +// it should abandon its work after the timeout elapses. func ExampleWithTimeout() { // Pass a context with a timeout to tell a blocking function that it // should abandon its work after the timeout elapses. 
- ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + select { - case <-time.After(200 * time.Millisecond): + case <-time.After(1 * time.Second): fmt.Println("overslept") case <-ctx.Done(): fmt.Println(ctx.Err()) // prints "context deadline exceeded" } + // Output: // context deadline exceeded } diff --git a/fn/vendor/golang.org/x/net/dict/dict.go b/fn/vendor/golang.org/x/net/dict/dict.go index 58fef89e0..93e65c03c 100644 --- a/fn/vendor/golang.org/x/net/dict/dict.go +++ b/fn/vendor/golang.org/x/net/dict/dict.go @@ -1,4 +1,4 @@ -// Copyright 2010 The Go Authors. All rights reserved. +// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/fn/vendor/golang.org/x/net/dns/dnsmessage/example_test.go b/fn/vendor/golang.org/x/net/dns/dnsmessage/example_test.go new file mode 100644 index 000000000..5415c2d3a --- /dev/null +++ b/fn/vendor/golang.org/x/net/dns/dnsmessage/example_test.go @@ -0,0 +1,132 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package dnsmessage_test + +import ( + "fmt" + "net" + "strings" + + "golang.org/x/net/dns/dnsmessage" +) + +func mustNewName(name string) dnsmessage.Name { + n, err := dnsmessage.NewName(name) + if err != nil { + panic(err) + } + return n +} + +func ExampleParser() { + msg := dnsmessage.Message{ + Header: dnsmessage.Header{Response: true, Authoritative: true}, + Questions: []dnsmessage.Question{ + { + Name: mustNewName("foo.bar.example.com."), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + { + Name: mustNewName("bar.example.com."), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + }, + Answers: []dnsmessage.Resource{ + { + dnsmessage.ResourceHeader{ + Name: mustNewName("foo.bar.example.com."), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + &dnsmessage.AResource{[4]byte{127, 0, 0, 1}}, + }, + { + dnsmessage.ResourceHeader{ + Name: mustNewName("bar.example.com."), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + &dnsmessage.AResource{[4]byte{127, 0, 0, 2}}, + }, + }, + } + + buf, err := msg.Pack() + if err != nil { + panic(err) + } + + wantName := "bar.example.com." 
+ + var p dnsmessage.Parser + if _, err := p.Start(buf); err != nil { + panic(err) + } + + for { + q, err := p.Question() + if err == dnsmessage.ErrSectionDone { + break + } + if err != nil { + panic(err) + } + + if q.Name.String() != wantName { + continue + } + + fmt.Println("Found question for name", wantName) + if err := p.SkipAllQuestions(); err != nil { + panic(err) + } + break + } + + var gotIPs []net.IP + for { + h, err := p.AnswerHeader() + if err == dnsmessage.ErrSectionDone { + break + } + if err != nil { + panic(err) + } + + if (h.Type != dnsmessage.TypeA && h.Type != dnsmessage.TypeAAAA) || h.Class != dnsmessage.ClassINET { + continue + } + + if !strings.EqualFold(h.Name.String(), wantName) { + if err := p.SkipAnswer(); err != nil { + panic(err) + } + continue + } + + switch h.Type { + case dnsmessage.TypeA: + r, err := p.AResource() + if err != nil { + panic(err) + } + gotIPs = append(gotIPs, r.A[:]) + case dnsmessage.TypeAAAA: + r, err := p.AAAAResource() + if err != nil { + panic(err) + } + gotIPs = append(gotIPs, r.AAAA[:]) + } + } + + fmt.Printf("Found A/AAAA records for name %s: %v\n", wantName, gotIPs) + + // Output: + // Found question for name bar.example.com. + // Found A/AAAA records for name bar.example.com.: [127.0.0.2] +} diff --git a/fn/vendor/golang.org/x/net/dns/dnsmessage/message.go b/fn/vendor/golang.org/x/net/dns/dnsmessage/message.go new file mode 100644 index 000000000..19b260dea --- /dev/null +++ b/fn/vendor/golang.org/x/net/dns/dnsmessage/message.go @@ -0,0 +1,1997 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package dnsmessage provides a mostly RFC 1035 compliant implementation of +// DNS message packing and unpacking. +// +// This implementation is designed to minimize heap allocations and avoid +// unnecessary packing and unpacking as much as possible. 
+package dnsmessage + +import ( + "errors" +) + +// Packet formats + +// A Type is a type of DNS request and response. +type Type uint16 + +// A Class is a type of network. +type Class uint16 + +// An OpCode is a DNS operation code. +type OpCode uint16 + +// An RCode is a DNS response status code. +type RCode uint16 + +// Wire constants. +const ( + // ResourceHeader.Type and Question.Type + TypeA Type = 1 + TypeNS Type = 2 + TypeCNAME Type = 5 + TypeSOA Type = 6 + TypePTR Type = 12 + TypeMX Type = 15 + TypeTXT Type = 16 + TypeAAAA Type = 28 + TypeSRV Type = 33 + + // Question.Type + TypeWKS Type = 11 + TypeHINFO Type = 13 + TypeMINFO Type = 14 + TypeAXFR Type = 252 + TypeALL Type = 255 + + // ResourceHeader.Class and Question.Class + ClassINET Class = 1 + ClassCSNET Class = 2 + ClassCHAOS Class = 3 + ClassHESIOD Class = 4 + + // Question.Class + ClassANY Class = 255 + + // Message.Rcode + RCodeSuccess RCode = 0 + RCodeFormatError RCode = 1 + RCodeServerFailure RCode = 2 + RCodeNameError RCode = 3 + RCodeNotImplemented RCode = 4 + RCodeRefused RCode = 5 +) + +var ( + // ErrNotStarted indicates that the prerequisite information isn't + // available yet because the previous records haven't been appropriately + // parsed, skipped or finished. + ErrNotStarted = errors.New("parsing/packing of this type isn't available yet") + + // ErrSectionDone indicated that all records in the section have been + // parsed or finished. 
+ ErrSectionDone = errors.New("parsing/packing of this section has completed") + + errBaseLen = errors.New("insufficient data for base length type") + errCalcLen = errors.New("insufficient data for calculated length type") + errReserved = errors.New("segment prefix is reserved") + errTooManyPtr = errors.New("too many pointers (>10)") + errInvalidPtr = errors.New("invalid pointer") + errNilResouceBody = errors.New("nil resource body") + errResourceLen = errors.New("insufficient data for resource body length") + errSegTooLong = errors.New("segment length too long") + errZeroSegLen = errors.New("zero length segment") + errResTooLong = errors.New("resource length too long") + errTooManyQuestions = errors.New("too many Questions to pack (>65535)") + errTooManyAnswers = errors.New("too many Answers to pack (>65535)") + errTooManyAuthorities = errors.New("too many Authorities to pack (>65535)") + errTooManyAdditionals = errors.New("too many Additionals to pack (>65535)") + errNonCanonicalName = errors.New("name is not in canonical format (it must end with a .)") +) + +// Internal constants. +const ( + // packStartingCap is the default initial buffer size allocated during + // packing. + // + // The starting capacity doesn't matter too much, but most DNS responses + // Will be <= 512 bytes as it is the limit for DNS over UDP. + packStartingCap = 512 + + // uint16Len is the length (in bytes) of a uint16. + uint16Len = 2 + + // uint32Len is the length (in bytes) of a uint32. + uint32Len = 4 + + // headerLen is the length (in bytes) of a DNS header. + // + // A header is comprised of 6 uint16s and no padding. + headerLen = 6 * uint16Len +) + +type nestedError struct { + // s is the current level's error message. + s string + + // err is the nested error. + err error +} + +// nestedError implements error.Error. +func (e *nestedError) Error() string { + return e.s + ": " + e.err.Error() +} + +// Header is a representation of a DNS message header. 
+type Header struct { + ID uint16 + Response bool + OpCode OpCode + Authoritative bool + Truncated bool + RecursionDesired bool + RecursionAvailable bool + RCode RCode +} + +func (m *Header) pack() (id uint16, bits uint16) { + id = m.ID + bits = uint16(m.OpCode)<<11 | uint16(m.RCode) + if m.RecursionAvailable { + bits |= headerBitRA + } + if m.RecursionDesired { + bits |= headerBitRD + } + if m.Truncated { + bits |= headerBitTC + } + if m.Authoritative { + bits |= headerBitAA + } + if m.Response { + bits |= headerBitQR + } + return +} + +// Message is a representation of a DNS message. +type Message struct { + Header + Questions []Question + Answers []Resource + Authorities []Resource + Additionals []Resource +} + +type section uint8 + +const ( + sectionNotStarted section = iota + sectionHeader + sectionQuestions + sectionAnswers + sectionAuthorities + sectionAdditionals + sectionDone + + headerBitQR = 1 << 15 // query/response (response=1) + headerBitAA = 1 << 10 // authoritative + headerBitTC = 1 << 9 // truncated + headerBitRD = 1 << 8 // recursion desired + headerBitRA = 1 << 7 // recursion available +) + +var sectionNames = map[section]string{ + sectionHeader: "header", + sectionQuestions: "Question", + sectionAnswers: "Answer", + sectionAuthorities: "Authority", + sectionAdditionals: "Additional", +} + +// header is the wire format for a DNS message header. 
+type header struct { + id uint16 + bits uint16 + questions uint16 + answers uint16 + authorities uint16 + additionals uint16 +} + +func (h *header) count(sec section) uint16 { + switch sec { + case sectionQuestions: + return h.questions + case sectionAnswers: + return h.answers + case sectionAuthorities: + return h.authorities + case sectionAdditionals: + return h.additionals + } + return 0 +} + +func (h *header) pack(msg []byte) []byte { + msg = packUint16(msg, h.id) + msg = packUint16(msg, h.bits) + msg = packUint16(msg, h.questions) + msg = packUint16(msg, h.answers) + msg = packUint16(msg, h.authorities) + return packUint16(msg, h.additionals) +} + +func (h *header) unpack(msg []byte, off int) (int, error) { + newOff := off + var err error + if h.id, newOff, err = unpackUint16(msg, newOff); err != nil { + return off, &nestedError{"id", err} + } + if h.bits, newOff, err = unpackUint16(msg, newOff); err != nil { + return off, &nestedError{"bits", err} + } + if h.questions, newOff, err = unpackUint16(msg, newOff); err != nil { + return off, &nestedError{"questions", err} + } + if h.answers, newOff, err = unpackUint16(msg, newOff); err != nil { + return off, &nestedError{"answers", err} + } + if h.authorities, newOff, err = unpackUint16(msg, newOff); err != nil { + return off, &nestedError{"authorities", err} + } + if h.additionals, newOff, err = unpackUint16(msg, newOff); err != nil { + return off, &nestedError{"additionals", err} + } + return newOff, nil +} + +func (h *header) header() Header { + return Header{ + ID: h.id, + Response: (h.bits & headerBitQR) != 0, + OpCode: OpCode(h.bits>>11) & 0xF, + Authoritative: (h.bits & headerBitAA) != 0, + Truncated: (h.bits & headerBitTC) != 0, + RecursionDesired: (h.bits & headerBitRD) != 0, + RecursionAvailable: (h.bits & headerBitRA) != 0, + RCode: RCode(h.bits & 0xF), + } +} + +// A Resource is a DNS resource record. 
+type Resource struct { + Header ResourceHeader + Body ResourceBody +} + +// A ResourceBody is a DNS resource record minus the header. +type ResourceBody interface { + // pack packs a Resource except for its header. + pack(msg []byte, compression map[string]int) ([]byte, error) + + // realType returns the actual type of the Resource. This is used to + // fill in the header Type field. + realType() Type +} + +func (r *Resource) pack(msg []byte, compression map[string]int) ([]byte, error) { + if r.Body == nil { + return msg, errNilResouceBody + } + oldMsg := msg + r.Header.Type = r.Body.realType() + msg, length, err := r.Header.pack(msg, compression) + if err != nil { + return msg, &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + msg, err = r.Body.pack(msg, compression) + if err != nil { + return msg, &nestedError{"content", err} + } + if err := r.Header.fixLen(msg, length, preLen); err != nil { + return oldMsg, err + } + return msg, nil +} + +// A Parser allows incrementally parsing a DNS message. +// +// When parsing is started, the Header is parsed. Next, each Question can be +// either parsed or skipped. Alternatively, all Questions can be skipped at +// once. When all Questions have been parsed, attempting to parse Questions +// will return (nil, nil) and attempting to skip Questions will return +// (true, nil). After all Questions have been either parsed or skipped, all +// Answers, Authorities and Additionals can be either parsed or skipped in the +// same way, and each type of Resource must be fully parsed or skipped before +// proceeding to the next type of Resource. +// +// Note that there is no requirement to fully skip or parse the message. +type Parser struct { + msg []byte + header header + + section section + off int + index int + resHeaderValid bool + resHeader ResourceHeader +} + +// Start parses the header and enables the parsing of Questions. 
+func (p *Parser) Start(msg []byte) (Header, error) { + if p.msg != nil { + *p = Parser{} + } + p.msg = msg + var err error + if p.off, err = p.header.unpack(msg, 0); err != nil { + return Header{}, &nestedError{"unpacking header", err} + } + p.section = sectionQuestions + return p.header.header(), nil +} + +func (p *Parser) checkAdvance(sec section) error { + if p.section < sec { + return ErrNotStarted + } + if p.section > sec { + return ErrSectionDone + } + p.resHeaderValid = false + if p.index == int(p.header.count(sec)) { + p.index = 0 + p.section++ + return ErrSectionDone + } + return nil +} + +func (p *Parser) resource(sec section) (Resource, error) { + var r Resource + var err error + r.Header, err = p.resourceHeader(sec) + if err != nil { + return r, err + } + p.resHeaderValid = false + r.Body, p.off, err = unpackResourceBody(p.msg, p.off, r.Header) + if err != nil { + return Resource{}, &nestedError{"unpacking " + sectionNames[sec], err} + } + p.index++ + return r, nil +} + +func (p *Parser) resourceHeader(sec section) (ResourceHeader, error) { + if p.resHeaderValid { + return p.resHeader, nil + } + if err := p.checkAdvance(sec); err != nil { + return ResourceHeader{}, err + } + var hdr ResourceHeader + off, err := hdr.unpack(p.msg, p.off) + if err != nil { + return ResourceHeader{}, err + } + p.resHeaderValid = true + p.resHeader = hdr + p.off = off + return hdr, nil +} + +func (p *Parser) skipResource(sec section) error { + if p.resHeaderValid { + newOff := p.off + int(p.resHeader.Length) + if newOff > len(p.msg) { + return errResourceLen + } + p.off = newOff + p.resHeaderValid = false + p.index++ + return nil + } + if err := p.checkAdvance(sec); err != nil { + return err + } + var err error + p.off, err = skipResource(p.msg, p.off) + if err != nil { + return &nestedError{"skipping: " + sectionNames[sec], err} + } + p.index++ + return nil +} + +// Question parses a single Question. 
+func (p *Parser) Question() (Question, error) { + if err := p.checkAdvance(sectionQuestions); err != nil { + return Question{}, err + } + var name Name + off, err := name.unpack(p.msg, p.off) + if err != nil { + return Question{}, &nestedError{"unpacking Question.Name", err} + } + typ, off, err := unpackType(p.msg, off) + if err != nil { + return Question{}, &nestedError{"unpacking Question.Type", err} + } + class, off, err := unpackClass(p.msg, off) + if err != nil { + return Question{}, &nestedError{"unpacking Question.Class", err} + } + p.off = off + p.index++ + return Question{name, typ, class}, nil +} + +// AllQuestions parses all Questions. +func (p *Parser) AllQuestions() ([]Question, error) { + qs := make([]Question, 0, p.header.questions) + for { + q, err := p.Question() + if err == ErrSectionDone { + return qs, nil + } + if err != nil { + return nil, err + } + qs = append(qs, q) + } +} + +// SkipQuestion skips a single Question. +func (p *Parser) SkipQuestion() error { + if err := p.checkAdvance(sectionQuestions); err != nil { + return err + } + off, err := skipName(p.msg, p.off) + if err != nil { + return &nestedError{"skipping Question Name", err} + } + if off, err = skipType(p.msg, off); err != nil { + return &nestedError{"skipping Question Type", err} + } + if off, err = skipClass(p.msg, off); err != nil { + return &nestedError{"skipping Question Class", err} + } + p.off = off + p.index++ + return nil +} + +// SkipAllQuestions skips all Questions. +func (p *Parser) SkipAllQuestions() error { + for { + if err := p.SkipQuestion(); err == ErrSectionDone { + return nil + } else if err != nil { + return err + } + } +} + +// AnswerHeader parses a single Answer ResourceHeader. +func (p *Parser) AnswerHeader() (ResourceHeader, error) { + return p.resourceHeader(sectionAnswers) +} + +// Answer parses a single Answer Resource. +func (p *Parser) Answer() (Resource, error) { + return p.resource(sectionAnswers) +} + +// AllAnswers parses all Answer Resources. 
+func (p *Parser) AllAnswers() ([]Resource, error) { + as := make([]Resource, 0, p.header.answers) + for { + a, err := p.Answer() + if err == ErrSectionDone { + return as, nil + } + if err != nil { + return nil, err + } + as = append(as, a) + } +} + +// SkipAnswer skips a single Answer Resource. +func (p *Parser) SkipAnswer() error { + return p.skipResource(sectionAnswers) +} + +// SkipAllAnswers skips all Answer Resources. +func (p *Parser) SkipAllAnswers() error { + for { + if err := p.SkipAnswer(); err == ErrSectionDone { + return nil + } else if err != nil { + return err + } + } +} + +// AuthorityHeader parses a single Authority ResourceHeader. +func (p *Parser) AuthorityHeader() (ResourceHeader, error) { + return p.resourceHeader(sectionAuthorities) +} + +// Authority parses a single Authority Resource. +func (p *Parser) Authority() (Resource, error) { + return p.resource(sectionAuthorities) +} + +// AllAuthorities parses all Authority Resources. +func (p *Parser) AllAuthorities() ([]Resource, error) { + as := make([]Resource, 0, p.header.authorities) + for { + a, err := p.Authority() + if err == ErrSectionDone { + return as, nil + } + if err != nil { + return nil, err + } + as = append(as, a) + } +} + +// SkipAuthority skips a single Authority Resource. +func (p *Parser) SkipAuthority() error { + return p.skipResource(sectionAuthorities) +} + +// SkipAllAuthorities skips all Authority Resources. +func (p *Parser) SkipAllAuthorities() error { + for { + if err := p.SkipAuthority(); err == ErrSectionDone { + return nil + } else if err != nil { + return err + } + } +} + +// AdditionalHeader parses a single Additional ResourceHeader. +func (p *Parser) AdditionalHeader() (ResourceHeader, error) { + return p.resourceHeader(sectionAdditionals) +} + +// Additional parses a single Additional Resource. +func (p *Parser) Additional() (Resource, error) { + return p.resource(sectionAdditionals) +} + +// AllAdditionals parses all Additional Resources. 
+func (p *Parser) AllAdditionals() ([]Resource, error) { + as := make([]Resource, 0, p.header.additionals) + for { + a, err := p.Additional() + if err == ErrSectionDone { + return as, nil + } + if err != nil { + return nil, err + } + as = append(as, a) + } +} + +// SkipAdditional skips a single Additional Resource. +func (p *Parser) SkipAdditional() error { + return p.skipResource(sectionAdditionals) +} + +// SkipAllAdditionals skips all Additional Resources. +func (p *Parser) SkipAllAdditionals() error { + for { + if err := p.SkipAdditional(); err == ErrSectionDone { + return nil + } else if err != nil { + return err + } + } +} + +// CNAMEResource parses a single CNAMEResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) CNAMEResource() (CNAMEResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeCNAME { + return CNAMEResource{}, ErrNotStarted + } + r, err := unpackCNAMEResource(p.msg, p.off) + if err != nil { + return CNAMEResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// MXResource parses a single MXResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) MXResource() (MXResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeMX { + return MXResource{}, ErrNotStarted + } + r, err := unpackMXResource(p.msg, p.off) + if err != nil { + return MXResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// NSResource parses a single NSResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. 
+func (p *Parser) NSResource() (NSResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeNS { + return NSResource{}, ErrNotStarted + } + r, err := unpackNSResource(p.msg, p.off) + if err != nil { + return NSResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// PTRResource parses a single PTRResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) PTRResource() (PTRResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypePTR { + return PTRResource{}, ErrNotStarted + } + r, err := unpackPTRResource(p.msg, p.off) + if err != nil { + return PTRResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// SOAResource parses a single SOAResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) SOAResource() (SOAResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeSOA { + return SOAResource{}, ErrNotStarted + } + r, err := unpackSOAResource(p.msg, p.off) + if err != nil { + return SOAResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// TXTResource parses a single TXTResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) TXTResource() (TXTResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeTXT { + return TXTResource{}, ErrNotStarted + } + r, err := unpackTXTResource(p.msg, p.off, p.resHeader.Length) + if err != nil { + return TXTResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// SRVResource parses a single SRVResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. 
+func (p *Parser) SRVResource() (SRVResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeSRV { + return SRVResource{}, ErrNotStarted + } + r, err := unpackSRVResource(p.msg, p.off) + if err != nil { + return SRVResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// AResource parses a single AResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) AResource() (AResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeA { + return AResource{}, ErrNotStarted + } + r, err := unpackAResource(p.msg, p.off) + if err != nil { + return AResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// AAAAResource parses a single AAAAResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) AAAAResource() (AAAAResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeAAAA { + return AAAAResource{}, ErrNotStarted + } + r, err := unpackAAAAResource(p.msg, p.off) + if err != nil { + return AAAAResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + +// Unpack parses a full Message. +func (m *Message) Unpack(msg []byte) error { + var p Parser + var err error + if m.Header, err = p.Start(msg); err != nil { + return err + } + if m.Questions, err = p.AllQuestions(); err != nil { + return err + } + if m.Answers, err = p.AllAnswers(); err != nil { + return err + } + if m.Authorities, err = p.AllAuthorities(); err != nil { + return err + } + if m.Additionals, err = p.AllAdditionals(); err != nil { + return err + } + return nil +} + +// Pack packs a full Message. +func (m *Message) Pack() ([]byte, error) { + // Validate the lengths. 
It is very unlikely that anyone will try to + // pack more than 65535 of any particular type, but it is possible and + // we should fail gracefully. + if len(m.Questions) > int(^uint16(0)) { + return nil, errTooManyQuestions + } + if len(m.Answers) > int(^uint16(0)) { + return nil, errTooManyAnswers + } + if len(m.Authorities) > int(^uint16(0)) { + return nil, errTooManyAuthorities + } + if len(m.Additionals) > int(^uint16(0)) { + return nil, errTooManyAdditionals + } + + var h header + h.id, h.bits = m.Header.pack() + + h.questions = uint16(len(m.Questions)) + h.answers = uint16(len(m.Answers)) + h.authorities = uint16(len(m.Authorities)) + h.additionals = uint16(len(m.Additionals)) + + msg := make([]byte, 0, packStartingCap) + + msg = h.pack(msg) + + // RFC 1035 allows (but does not require) compression for packing. RFC + // 1035 requires unpacking implementations to support compression, so + // unconditionally enabling it is fine. + // + // DNS lookups are typically done over UDP, and RFC 1035 states that UDP + // DNS packets can be a maximum of 512 bytes long. Without compression, + // many DNS response packets are over this limit, so enabling + // compression will help ensure compliance. 
+ compression := map[string]int{} + + for i := range m.Questions { + var err error + if msg, err = m.Questions[i].pack(msg, compression); err != nil { + return nil, &nestedError{"packing Question", err} + } + } + for i := range m.Answers { + var err error + if msg, err = m.Answers[i].pack(msg, compression); err != nil { + return nil, &nestedError{"packing Answer", err} + } + } + for i := range m.Authorities { + var err error + if msg, err = m.Authorities[i].pack(msg, compression); err != nil { + return nil, &nestedError{"packing Authority", err} + } + } + for i := range m.Additionals { + var err error + if msg, err = m.Additionals[i].pack(msg, compression); err != nil { + return nil, &nestedError{"packing Additional", err} + } + } + + return msg, nil +} + +// A Builder allows incrementally packing a DNS message. +type Builder struct { + msg []byte + header header + section section + compression map[string]int +} + +// Start initializes the builder. +// +// buf is optional (nil is fine), but if provided, Start takes ownership of buf. +func (b *Builder) Start(buf []byte, h Header) { + b.StartWithoutCompression(buf, h) + b.compression = map[string]int{} +} + +// StartWithoutCompression initializes the builder with compression disabled. +// +// This avoids compression related allocations, but can result in larger message +// sizes. Be careful with this mode as it can cause messages to exceed the UDP +// size limit. +// +// buf is optional (nil is fine), but if provided, Start takes ownership of buf. 
+func (b *Builder) StartWithoutCompression(buf []byte, h Header) { + *b = Builder{msg: buf} + b.header.id, b.header.bits = h.pack() + if cap(b.msg) < headerLen { + b.msg = make([]byte, 0, packStartingCap) + } + b.msg = b.msg[:headerLen] + b.section = sectionHeader +} + +func (b *Builder) startCheck(s section) error { + if b.section <= sectionNotStarted { + return ErrNotStarted + } + if b.section > s { + return ErrSectionDone + } + return nil +} + +// StartQuestions prepares the builder for packing Questions. +func (b *Builder) StartQuestions() error { + if err := b.startCheck(sectionQuestions); err != nil { + return err + } + b.section = sectionQuestions + return nil +} + +// StartAnswers prepares the builder for packing Answers. +func (b *Builder) StartAnswers() error { + if err := b.startCheck(sectionAnswers); err != nil { + return err + } + b.section = sectionAnswers + return nil +} + +// StartAuthorities prepares the builder for packing Authorities. +func (b *Builder) StartAuthorities() error { + if err := b.startCheck(sectionAuthorities); err != nil { + return err + } + b.section = sectionAuthorities + return nil +} + +// StartAdditionals prepares the builder for packing Additionals. +func (b *Builder) StartAdditionals() error { + if err := b.startCheck(sectionAdditionals); err != nil { + return err + } + b.section = sectionAdditionals + return nil +} + +func (b *Builder) incrementSectionCount() error { + var count *uint16 + var err error + switch b.section { + case sectionQuestions: + count = &b.header.questions + err = errTooManyQuestions + case sectionAnswers: + count = &b.header.answers + err = errTooManyAnswers + case sectionAuthorities: + count = &b.header.authorities + err = errTooManyAuthorities + case sectionAdditionals: + count = &b.header.additionals + err = errTooManyAdditionals + } + if *count == ^uint16(0) { + return err + } + *count++ + return nil +} + +// Question adds a single Question. 
+func (b *Builder) Question(q Question) error { + if b.section < sectionQuestions { + return ErrNotStarted + } + if b.section > sectionQuestions { + return ErrSectionDone + } + msg, err := q.pack(b.msg, b.compression) + if err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +func (b *Builder) checkResourceSection() error { + if b.section < sectionAnswers { + return ErrNotStarted + } + if b.section > sectionAdditionals { + return ErrSectionDone + } + return nil +} + +// CNAMEResource adds a single CNAMEResource. +func (b *Builder) CNAMEResource(h ResourceHeader, r CNAMEResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression); err != nil { + return &nestedError{"CNAMEResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// MXResource adds a single MXResource. +func (b *Builder) MXResource(h ResourceHeader, r MXResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression); err != nil { + return &nestedError{"MXResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// NSResource adds a single NSResource. 
+func (b *Builder) NSResource(h ResourceHeader, r NSResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression); err != nil { + return &nestedError{"NSResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// PTRResource adds a single PTRResource. +func (b *Builder) PTRResource(h ResourceHeader, r PTRResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression); err != nil { + return &nestedError{"PTRResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// SOAResource adds a single SOAResource. +func (b *Builder) SOAResource(h ResourceHeader, r SOAResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression); err != nil { + return &nestedError{"SOAResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// TXTResource adds a single TXTResource. 
+func (b *Builder) TXTResource(h ResourceHeader, r TXTResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression); err != nil { + return &nestedError{"TXTResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// SRVResource adds a single SRVResource. +func (b *Builder) SRVResource(h ResourceHeader, r SRVResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression); err != nil { + return &nestedError{"SRVResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// AResource adds a single AResource. +func (b *Builder) AResource(h ResourceHeader, r AResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression); err != nil { + return &nestedError{"AResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// AAAAResource adds a single AAAAResource. 
+func (b *Builder) AAAAResource(h ResourceHeader, r AAAAResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression); err != nil { + return &nestedError{"AAAAResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + +// Finish ends message building and generates a binary packet. +func (b *Builder) Finish() ([]byte, error) { + if b.section < sectionHeader { + return nil, ErrNotStarted + } + b.section = sectionDone + b.header.pack(b.msg[:0]) + return b.msg, nil +} + +// A ResourceHeader is the header of a DNS resource record. There are +// many types of DNS resource records, but they all share the same header. +type ResourceHeader struct { + // Name is the domain name for which this resource record pertains. + Name Name + + // Type is the type of DNS resource record. + // + // This field will be set automatically during packing. + Type Type + + // Class is the class of network to which this DNS resource record + // pertains. + Class Class + + // TTL is the length of time (measured in seconds) which this resource + // record is valid for (time to live). All Resources in a set should + // have the same TTL (RFC 2181 Section 5.2). + TTL uint32 + + // Length is the length of data in the resource record after the header. + // + // This field will be set automatically during packing. + Length uint16 +} + +// pack packs all of the fields in a ResourceHeader except for the length. The +// length bytes are returned as a slice so they can be filled in after the rest +// of the Resource has been packed. 
+func (h *ResourceHeader) pack(oldMsg []byte, compression map[string]int) (msg []byte, length []byte, err error) { + msg = oldMsg + if msg, err = h.Name.pack(msg, compression); err != nil { + return oldMsg, nil, &nestedError{"Name", err} + } + msg = packType(msg, h.Type) + msg = packClass(msg, h.Class) + msg = packUint32(msg, h.TTL) + lenBegin := len(msg) + msg = packUint16(msg, h.Length) + return msg, msg[lenBegin : lenBegin+uint16Len], nil +} + +func (h *ResourceHeader) unpack(msg []byte, off int) (int, error) { + newOff := off + var err error + if newOff, err = h.Name.unpack(msg, newOff); err != nil { + return off, &nestedError{"Name", err} + } + if h.Type, newOff, err = unpackType(msg, newOff); err != nil { + return off, &nestedError{"Type", err} + } + if h.Class, newOff, err = unpackClass(msg, newOff); err != nil { + return off, &nestedError{"Class", err} + } + if h.TTL, newOff, err = unpackUint32(msg, newOff); err != nil { + return off, &nestedError{"TTL", err} + } + if h.Length, newOff, err = unpackUint16(msg, newOff); err != nil { + return off, &nestedError{"Length", err} + } + return newOff, nil +} + +func (h *ResourceHeader) fixLen(msg []byte, length []byte, preLen int) error { + conLen := len(msg) - preLen + if conLen > int(^uint16(0)) { + return errResTooLong + } + + // Fill in the length now that we know how long the content is. 
+ packUint16(length[:0], uint16(conLen)) + h.Length = uint16(conLen) + + return nil +} + +func skipResource(msg []byte, off int) (int, error) { + newOff, err := skipName(msg, off) + if err != nil { + return off, &nestedError{"Name", err} + } + if newOff, err = skipType(msg, newOff); err != nil { + return off, &nestedError{"Type", err} + } + if newOff, err = skipClass(msg, newOff); err != nil { + return off, &nestedError{"Class", err} + } + if newOff, err = skipUint32(msg, newOff); err != nil { + return off, &nestedError{"TTL", err} + } + length, newOff, err := unpackUint16(msg, newOff) + if err != nil { + return off, &nestedError{"Length", err} + } + if newOff += int(length); newOff > len(msg) { + return off, errResourceLen + } + return newOff, nil +} + +func packUint16(msg []byte, field uint16) []byte { + return append(msg, byte(field>>8), byte(field)) +} + +func unpackUint16(msg []byte, off int) (uint16, int, error) { + if off+uint16Len > len(msg) { + return 0, off, errBaseLen + } + return uint16(msg[off])<<8 | uint16(msg[off+1]), off + uint16Len, nil +} + +func skipUint16(msg []byte, off int) (int, error) { + if off+uint16Len > len(msg) { + return off, errBaseLen + } + return off + uint16Len, nil +} + +func packType(msg []byte, field Type) []byte { + return packUint16(msg, uint16(field)) +} + +func unpackType(msg []byte, off int) (Type, int, error) { + t, o, err := unpackUint16(msg, off) + return Type(t), o, err +} + +func skipType(msg []byte, off int) (int, error) { + return skipUint16(msg, off) +} + +func packClass(msg []byte, field Class) []byte { + return packUint16(msg, uint16(field)) +} + +func unpackClass(msg []byte, off int) (Class, int, error) { + c, o, err := unpackUint16(msg, off) + return Class(c), o, err +} + +func skipClass(msg []byte, off int) (int, error) { + return skipUint16(msg, off) +} + +func packUint32(msg []byte, field uint32) []byte { + return append( + msg, + byte(field>>24), + byte(field>>16), + byte(field>>8), + byte(field), + ) +} + 
+func unpackUint32(msg []byte, off int) (uint32, int, error) { + if off+uint32Len > len(msg) { + return 0, off, errBaseLen + } + v := uint32(msg[off])<<24 | uint32(msg[off+1])<<16 | uint32(msg[off+2])<<8 | uint32(msg[off+3]) + return v, off + uint32Len, nil +} + +func skipUint32(msg []byte, off int) (int, error) { + if off+uint32Len > len(msg) { + return off, errBaseLen + } + return off + uint32Len, nil +} + +func packText(msg []byte, field string) []byte { + for len(field) > 0 { + l := len(field) + if l > 255 { + l = 255 + } + msg = append(msg, byte(l)) + msg = append(msg, field[:l]...) + field = field[l:] + } + return msg +} + +func unpackText(msg []byte, off int) (string, int, error) { + if off >= len(msg) { + return "", off, errBaseLen + } + beginOff := off + 1 + endOff := beginOff + int(msg[off]) + if endOff > len(msg) { + return "", off, errCalcLen + } + return string(msg[beginOff:endOff]), endOff, nil +} + +func skipText(msg []byte, off int) (int, error) { + if off >= len(msg) { + return off, errBaseLen + } + endOff := off + 1 + int(msg[off]) + if endOff > len(msg) { + return off, errCalcLen + } + return endOff, nil +} + +func packBytes(msg []byte, field []byte) []byte { + return append(msg, field...) +} + +func unpackBytes(msg []byte, off int, field []byte) (int, error) { + newOff := off + len(field) + if newOff > len(msg) { + return off, errBaseLen + } + copy(field, msg[off:newOff]) + return newOff, nil +} + +func skipBytes(msg []byte, off int, field []byte) (int, error) { + newOff := off + len(field) + if newOff > len(msg) { + return off, errBaseLen + } + return newOff, nil +} + +const nameLen = 255 + +// A Name is a non-encoded domain name. It is used instead of strings to avoid +// allocations. +type Name struct { + Data [nameLen]byte + Length uint8 +} + +// NewName creates a new Name from a string. 
+func NewName(name string) (Name, error) { + if len([]byte(name)) > nameLen { + return Name{}, errCalcLen + } + n := Name{Length: uint8(len(name))} + copy(n.Data[:], []byte(name)) + return n, nil +} + +func (n Name) String() string { + return string(n.Data[:n.Length]) +} + +// pack packs a domain name. +// +// Domain names are a sequence of counted strings split at the dots. They end +// with a zero-length string. Compression can be used to reuse domain suffixes. +// +// The compression map will be updated with new domain suffixes. If compression +// is nil, compression will not be used. +func (n *Name) pack(msg []byte, compression map[string]int) ([]byte, error) { + oldMsg := msg + + // Add a trailing dot to canonicalize name. + if n.Length == 0 || n.Data[n.Length-1] != '.' { + return oldMsg, errNonCanonicalName + } + + // Allow root domain. + if n.Data[0] == '.' && n.Length == 1 { + return append(msg, 0), nil + } + + // Emit sequence of counted strings, chopping at dots. + for i, begin := 0, 0; i < int(n.Length); i++ { + // Check for the end of the segment. + if n.Data[i] == '.' { + // The two most significant bits have special meaning. + // It isn't allowed for segments to be long enough to + // need them. + if i-begin >= 1<<6 { + return oldMsg, errSegTooLong + } + + // Segments must have a non-zero length. + if i-begin == 0 { + return oldMsg, errZeroSegLen + } + + msg = append(msg, byte(i-begin)) + + for j := begin; j < i; j++ { + msg = append(msg, n.Data[j]) + } + + begin = i + 1 + continue + } + + // We can only compress domain suffixes starting with a new + // segment. A pointer is two bytes with the two most significant + // bits set to 1 to indicate that it is a pointer. + if (i == 0 || n.Data[i-1] == '.') && compression != nil { + if ptr, ok := compression[string(n.Data[i:])]; ok { + // Hit. Emit a pointer instead of the rest of + // the domain. + return append(msg, byte(ptr>>8|0xC0), byte(ptr)), nil + } + + // Miss. 
Add the suffix to the compression table if the + // offset can be stored in the available 14 bytes. + if len(msg) <= int(^uint16(0)>>2) { + compression[string(n.Data[i:])] = len(msg) + } + } + } + return append(msg, 0), nil +} + +// unpack unpacks a domain name. +func (n *Name) unpack(msg []byte, off int) (int, error) { + // currOff is the current working offset. + currOff := off + + // newOff is the offset where the next record will start. Pointers lead + // to data that belongs to other names and thus doesn't count towards to + // the usage of this name. + newOff := off + + // ptr is the number of pointers followed. + var ptr int + + // Name is a slice representation of the name data. + name := n.Data[:0] + +Loop: + for { + if currOff >= len(msg) { + return off, errBaseLen + } + c := int(msg[currOff]) + currOff++ + switch c & 0xC0 { + case 0x00: // String segment + if c == 0x00 { + // A zero length signals the end of the name. + break Loop + } + endOff := currOff + c + if endOff > len(msg) { + return off, errCalcLen + } + name = append(name, msg[currOff:endOff]...) + name = append(name, '.') + currOff = endOff + case 0xC0: // Pointer + if currOff >= len(msg) { + return off, errInvalidPtr + } + c1 := msg[currOff] + currOff++ + if ptr == 0 { + newOff = currOff + } + // Don't follow too many pointers, maybe there's a loop. + if ptr++; ptr > 10 { + return off, errTooManyPtr + } + currOff = (c^0xC0)<<8 | int(c1) + default: + // Prefixes 0x80 and 0x40 are reserved. + return off, errReserved + } + } + if len(name) == 0 { + name = append(name, '.') + } + if len(name) > len(n.Data) { + return off, errCalcLen + } + n.Length = uint8(len(name)) + if ptr == 0 { + newOff = currOff + } + return newOff, nil +} + +func skipName(msg []byte, off int) (int, error) { + // newOff is the offset where the next record will start. Pointers lead + // to data that belongs to other names and thus doesn't count towards to + // the usage of this name. 
+ newOff := off + +Loop: + for { + if newOff >= len(msg) { + return off, errBaseLen + } + c := int(msg[newOff]) + newOff++ + switch c & 0xC0 { + case 0x00: + if c == 0x00 { + // A zero length signals the end of the name. + break Loop + } + // literal string + newOff += c + if newOff > len(msg) { + return off, errCalcLen + } + case 0xC0: + // Pointer to somewhere else in msg. + + // Pointers are two bytes. + newOff++ + + // Don't follow the pointer as the data here has ended. + break Loop + default: + // Prefixes 0x80 and 0x40 are reserved. + return off, errReserved + } + } + + return newOff, nil +} + +// A Question is a DNS query. +type Question struct { + Name Name + Type Type + Class Class +} + +func (q *Question) pack(msg []byte, compression map[string]int) ([]byte, error) { + msg, err := q.Name.pack(msg, compression) + if err != nil { + return msg, &nestedError{"Name", err} + } + msg = packType(msg, q.Type) + return packClass(msg, q.Class), nil +} + +func unpackResourceBody(msg []byte, off int, hdr ResourceHeader) (ResourceBody, int, error) { + var ( + r ResourceBody + err error + name string + ) + switch hdr.Type { + case TypeA: + var rb AResource + rb, err = unpackAResource(msg, off) + r = &rb + name = "A" + case TypeNS: + var rb NSResource + rb, err = unpackNSResource(msg, off) + r = &rb + name = "NS" + case TypeCNAME: + var rb CNAMEResource + rb, err = unpackCNAMEResource(msg, off) + r = &rb + name = "CNAME" + case TypeSOA: + var rb SOAResource + rb, err = unpackSOAResource(msg, off) + r = &rb + name = "SOA" + case TypePTR: + var rb PTRResource + rb, err = unpackPTRResource(msg, off) + r = &rb + name = "PTR" + case TypeMX: + var rb MXResource + rb, err = unpackMXResource(msg, off) + r = &rb + name = "MX" + case TypeTXT: + var rb TXTResource + rb, err = unpackTXTResource(msg, off, hdr.Length) + r = &rb + name = "TXT" + case TypeAAAA: + var rb AAAAResource + rb, err = unpackAAAAResource(msg, off) + r = &rb + name = "AAAA" + case TypeSRV: + var rb SRVResource 
+ rb, err = unpackSRVResource(msg, off) + r = &rb + name = "SRV" + } + if err != nil { + return nil, off, &nestedError{name + " record", err} + } + if r == nil { + return nil, off, errors.New("invalid resource type: " + string(hdr.Type+'0')) + } + return r, off + int(hdr.Length), nil +} + +// A CNAMEResource is a CNAME Resource record. +type CNAMEResource struct { + CNAME Name +} + +func (r *CNAMEResource) realType() Type { + return TypeCNAME +} + +func (r *CNAMEResource) pack(msg []byte, compression map[string]int) ([]byte, error) { + return r.CNAME.pack(msg, compression) +} + +func unpackCNAMEResource(msg []byte, off int) (CNAMEResource, error) { + var cname Name + if _, err := cname.unpack(msg, off); err != nil { + return CNAMEResource{}, err + } + return CNAMEResource{cname}, nil +} + +// An MXResource is an MX Resource record. +type MXResource struct { + Pref uint16 + MX Name +} + +func (r *MXResource) realType() Type { + return TypeMX +} + +func (r *MXResource) pack(msg []byte, compression map[string]int) ([]byte, error) { + oldMsg := msg + msg = packUint16(msg, r.Pref) + msg, err := r.MX.pack(msg, compression) + if err != nil { + return oldMsg, &nestedError{"MXResource.MX", err} + } + return msg, nil +} + +func unpackMXResource(msg []byte, off int) (MXResource, error) { + pref, off, err := unpackUint16(msg, off) + if err != nil { + return MXResource{}, &nestedError{"Pref", err} + } + var mx Name + if _, err := mx.unpack(msg, off); err != nil { + return MXResource{}, &nestedError{"MX", err} + } + return MXResource{pref, mx}, nil +} + +// An NSResource is an NS Resource record. 
+type NSResource struct { + NS Name +} + +func (r *NSResource) realType() Type { + return TypeNS +} + +func (r *NSResource) pack(msg []byte, compression map[string]int) ([]byte, error) { + return r.NS.pack(msg, compression) +} + +func unpackNSResource(msg []byte, off int) (NSResource, error) { + var ns Name + if _, err := ns.unpack(msg, off); err != nil { + return NSResource{}, err + } + return NSResource{ns}, nil +} + +// A PTRResource is a PTR Resource record. +type PTRResource struct { + PTR Name +} + +func (r *PTRResource) realType() Type { + return TypePTR +} + +func (r *PTRResource) pack(msg []byte, compression map[string]int) ([]byte, error) { + return r.PTR.pack(msg, compression) +} + +func unpackPTRResource(msg []byte, off int) (PTRResource, error) { + var ptr Name + if _, err := ptr.unpack(msg, off); err != nil { + return PTRResource{}, err + } + return PTRResource{ptr}, nil +} + +// An SOAResource is an SOA Resource record. +type SOAResource struct { + NS Name + MBox Name + Serial uint32 + Refresh uint32 + Retry uint32 + Expire uint32 + + // MinTTL the is the default TTL of Resources records which did not + // contain a TTL value and the TTL of negative responses. 
(RFC 2308 + // Section 4) + MinTTL uint32 +} + +func (r *SOAResource) realType() Type { + return TypeSOA +} + +func (r *SOAResource) pack(msg []byte, compression map[string]int) ([]byte, error) { + oldMsg := msg + msg, err := r.NS.pack(msg, compression) + if err != nil { + return oldMsg, &nestedError{"SOAResource.NS", err} + } + msg, err = r.MBox.pack(msg, compression) + if err != nil { + return oldMsg, &nestedError{"SOAResource.MBox", err} + } + msg = packUint32(msg, r.Serial) + msg = packUint32(msg, r.Refresh) + msg = packUint32(msg, r.Retry) + msg = packUint32(msg, r.Expire) + return packUint32(msg, r.MinTTL), nil +} + +func unpackSOAResource(msg []byte, off int) (SOAResource, error) { + var ns Name + off, err := ns.unpack(msg, off) + if err != nil { + return SOAResource{}, &nestedError{"NS", err} + } + var mbox Name + if off, err = mbox.unpack(msg, off); err != nil { + return SOAResource{}, &nestedError{"MBox", err} + } + serial, off, err := unpackUint32(msg, off) + if err != nil { + return SOAResource{}, &nestedError{"Serial", err} + } + refresh, off, err := unpackUint32(msg, off) + if err != nil { + return SOAResource{}, &nestedError{"Refresh", err} + } + retry, off, err := unpackUint32(msg, off) + if err != nil { + return SOAResource{}, &nestedError{"Retry", err} + } + expire, off, err := unpackUint32(msg, off) + if err != nil { + return SOAResource{}, &nestedError{"Expire", err} + } + minTTL, _, err := unpackUint32(msg, off) + if err != nil { + return SOAResource{}, &nestedError{"MinTTL", err} + } + return SOAResource{ns, mbox, serial, refresh, retry, expire, minTTL}, nil +} + +// A TXTResource is a TXT Resource record. +type TXTResource struct { + Txt string // Not a domain name. 
+} + +func (r *TXTResource) realType() Type { + return TypeTXT +} + +func (r *TXTResource) pack(msg []byte, compression map[string]int) ([]byte, error) { + return packText(msg, r.Txt), nil +} + +func unpackTXTResource(msg []byte, off int, length uint16) (TXTResource, error) { + var txt string + for n := uint16(0); n < length; { + var t string + var err error + if t, off, err = unpackText(msg, off); err != nil { + return TXTResource{}, &nestedError{"text", err} + } + // Check if we got too many bytes. + if length-n < uint16(len(t))+1 { + return TXTResource{}, errCalcLen + } + n += uint16(len(t)) + 1 + txt += t + } + return TXTResource{txt}, nil +} + +// An SRVResource is an SRV Resource record. +type SRVResource struct { + Priority uint16 + Weight uint16 + Port uint16 + Target Name // Not compressed as per RFC 2782. +} + +func (r *SRVResource) realType() Type { + return TypeSRV +} + +func (r *SRVResource) pack(msg []byte, compression map[string]int) ([]byte, error) { + oldMsg := msg + msg = packUint16(msg, r.Priority) + msg = packUint16(msg, r.Weight) + msg = packUint16(msg, r.Port) + msg, err := r.Target.pack(msg, nil) + if err != nil { + return oldMsg, &nestedError{"SRVResource.Target", err} + } + return msg, nil +} + +func unpackSRVResource(msg []byte, off int) (SRVResource, error) { + priority, off, err := unpackUint16(msg, off) + if err != nil { + return SRVResource{}, &nestedError{"Priority", err} + } + weight, off, err := unpackUint16(msg, off) + if err != nil { + return SRVResource{}, &nestedError{"Weight", err} + } + port, off, err := unpackUint16(msg, off) + if err != nil { + return SRVResource{}, &nestedError{"Port", err} + } + var target Name + if _, err := target.unpack(msg, off); err != nil { + return SRVResource{}, &nestedError{"Target", err} + } + return SRVResource{priority, weight, port, target}, nil +} + +// An AResource is an A Resource record. 
+type AResource struct { + A [4]byte +} + +func (r *AResource) realType() Type { + return TypeA +} + +func (r *AResource) pack(msg []byte, compression map[string]int) ([]byte, error) { + return packBytes(msg, r.A[:]), nil +} + +func unpackAResource(msg []byte, off int) (AResource, error) { + var a [4]byte + if _, err := unpackBytes(msg, off, a[:]); err != nil { + return AResource{}, err + } + return AResource{a}, nil +} + +// An AAAAResource is an AAAA Resource record. +type AAAAResource struct { + AAAA [16]byte +} + +func (r *AAAAResource) realType() Type { + return TypeAAAA +} + +func (r *AAAAResource) pack(msg []byte, compression map[string]int) ([]byte, error) { + return packBytes(msg, r.AAAA[:]), nil +} + +func unpackAAAAResource(msg []byte, off int) (AAAAResource, error) { + var aaaa [16]byte + if _, err := unpackBytes(msg, off, aaaa[:]); err != nil { + return AAAAResource{}, err + } + return AAAAResource{aaaa}, nil +} diff --git a/fn/vendor/golang.org/x/net/dns/dnsmessage/message_test.go b/fn/vendor/golang.org/x/net/dns/dnsmessage/message_test.go new file mode 100644 index 000000000..0f98daa7e --- /dev/null +++ b/fn/vendor/golang.org/x/net/dns/dnsmessage/message_test.go @@ -0,0 +1,1009 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package dnsmessage + +import ( + "bytes" + "fmt" + "reflect" + "testing" +) + +func mustNewName(name string) Name { + n, err := NewName(name) + if err != nil { + panic(err) + } + return n +} + +func (m *Message) String() string { + s := fmt.Sprintf("Message: %#v\n", &m.Header) + if len(m.Questions) > 0 { + s += "-- Questions\n" + for _, q := range m.Questions { + s += fmt.Sprintf("%#v\n", q) + } + } + if len(m.Answers) > 0 { + s += "-- Answers\n" + for _, a := range m.Answers { + s += fmt.Sprintf("%#v\n", a) + } + } + if len(m.Authorities) > 0 { + s += "-- Authorities\n" + for _, ns := range m.Authorities { + s += fmt.Sprintf("%#v\n", ns) + } + } + if len(m.Additionals) > 0 { + s += "-- Additionals\n" + for _, e := range m.Additionals { + s += fmt.Sprintf("%#v\n", e) + } + } + return s +} + +func TestNameString(t *testing.T) { + want := "foo" + name := mustNewName(want) + if got := fmt.Sprint(name); got != want { + t.Errorf("got fmt.Sprint(%#v) = %s, want = %s", name, got, want) + } +} + +func TestQuestionPackUnpack(t *testing.T) { + want := Question{ + Name: mustNewName("."), + Type: TypeA, + Class: ClassINET, + } + buf, err := want.pack(make([]byte, 1, 50), map[string]int{}) + if err != nil { + t.Fatal("Packing failed:", err) + } + var p Parser + p.msg = buf + p.header.questions = 1 + p.section = sectionQuestions + p.off = 1 + got, err := p.Question() + if err != nil { + t.Fatalf("Unpacking failed: %v\n%s", err, string(buf[1:])) + } + if p.off != len(buf) { + t.Errorf("Unpacked different amount than packed: got n = %d, want = %d", p.off, len(buf)) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("Got = %+v, want = %+v", got, want) + } +} + +func TestName(t *testing.T) { + tests := []string{ + "", + ".", + "google..com", + "google.com", + "google..com.", + "google.com.", + ".google.com.", + "www..google.com.", + "www.google.com.", + } + + for _, test := range tests { + n, err := NewName(test) + if err != nil { + t.Errorf("Creating name for %q: %v", test, 
err) + continue + } + if ns := n.String(); ns != test { + t.Errorf("Got %#v.String() = %q, want = %q", n, ns, test) + continue + } + } +} + +func TestNamePackUnpack(t *testing.T) { + tests := []struct { + in string + want string + err error + }{ + {"", "", errNonCanonicalName}, + {".", ".", nil}, + {"google..com", "", errNonCanonicalName}, + {"google.com", "", errNonCanonicalName}, + {"google..com.", "", errZeroSegLen}, + {"google.com.", "google.com.", nil}, + {".google.com.", "", errZeroSegLen}, + {"www..google.com.", "", errZeroSegLen}, + {"www.google.com.", "www.google.com.", nil}, + } + + for _, test := range tests { + in := mustNewName(test.in) + want := mustNewName(test.want) + buf, err := in.pack(make([]byte, 0, 30), map[string]int{}) + if err != test.err { + t.Errorf("Packing of %q: got err = %v, want err = %v", test.in, err, test.err) + continue + } + if test.err != nil { + continue + } + var got Name + n, err := got.unpack(buf, 0) + if err != nil { + t.Errorf("Unpacking for %q failed: %v", test.in, err) + continue + } + if n != len(buf) { + t.Errorf( + "Unpacked different amount than packed for %q: got n = %d, want = %d", + test.in, + n, + len(buf), + ) + } + if got != want { + t.Errorf("Unpacking packing of %q: got = %#v, want = %#v", test.in, got, want) + } + } +} + +func checkErrorPrefix(err error, prefix string) bool { + e, ok := err.(*nestedError) + return ok && e.s == prefix +} + +func TestHeaderUnpackError(t *testing.T) { + wants := []string{ + "id", + "bits", + "questions", + "answers", + "authorities", + "additionals", + } + var buf []byte + var h header + for _, want := range wants { + n, err := h.unpack(buf, 0) + if n != 0 || !checkErrorPrefix(err, want) { + t.Errorf("got h.unpack([%d]byte, 0) = %d, %v, want = 0, %s", len(buf), n, err, want) + } + buf = append(buf, 0, 0) + } +} + +func TestParserStart(t *testing.T) { + const want = "unpacking header" + var p Parser + for i := 0; i <= 1; i++ { + _, err := p.Start([]byte{}) + if 
!checkErrorPrefix(err, want) { + t.Errorf("got p.Start(nil) = _, %v, want = _, %s", err, want) + } + } +} + +func TestResourceNotStarted(t *testing.T) { + tests := []struct { + name string + fn func(*Parser) error + }{ + {"CNAMEResource", func(p *Parser) error { _, err := p.CNAMEResource(); return err }}, + {"MXResource", func(p *Parser) error { _, err := p.MXResource(); return err }}, + {"NSResource", func(p *Parser) error { _, err := p.NSResource(); return err }}, + {"PTRResource", func(p *Parser) error { _, err := p.PTRResource(); return err }}, + {"SOAResource", func(p *Parser) error { _, err := p.SOAResource(); return err }}, + {"TXTResource", func(p *Parser) error { _, err := p.TXTResource(); return err }}, + {"SRVResource", func(p *Parser) error { _, err := p.SRVResource(); return err }}, + {"AResource", func(p *Parser) error { _, err := p.AResource(); return err }}, + {"AAAAResource", func(p *Parser) error { _, err := p.AAAAResource(); return err }}, + } + + for _, test := range tests { + if err := test.fn(&Parser{}); err != ErrNotStarted { + t.Errorf("got _, %v = p.%s(), want = _, %v", err, test.name, ErrNotStarted) + } + } +} + +func TestDNSPackUnpack(t *testing.T) { + wants := []Message{ + { + Questions: []Question{ + { + Name: mustNewName("."), + Type: TypeAAAA, + Class: ClassINET, + }, + }, + Answers: []Resource{}, + Authorities: []Resource{}, + Additionals: []Resource{}, + }, + largeTestMsg(), + } + for i, want := range wants { + b, err := want.Pack() + if err != nil { + t.Fatalf("%d: packing failed: %v", i, err) + } + var got Message + err = got.Unpack(b) + if err != nil { + t.Fatalf("%d: unpacking failed: %v", i, err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("%d: got = %+v, want = %+v", i, &got, &want) + } + } +} + +func TestSkipAll(t *testing.T) { + msg := largeTestMsg() + buf, err := msg.Pack() + if err != nil { + t.Fatal("Packing large test message:", err) + } + var p Parser + if _, err := p.Start(buf); err != nil { + t.Fatal(err) + } 
+ + tests := []struct { + name string + f func() error + }{ + {"SkipAllQuestions", p.SkipAllQuestions}, + {"SkipAllAnswers", p.SkipAllAnswers}, + {"SkipAllAuthorities", p.SkipAllAuthorities}, + {"SkipAllAdditionals", p.SkipAllAdditionals}, + } + for _, test := range tests { + for i := 1; i <= 3; i++ { + if err := test.f(); err != nil { + t.Errorf("Call #%d to %s(): %v", i, test.name, err) + } + } + } +} + +func TestSkipNotStarted(t *testing.T) { + var p Parser + + tests := []struct { + name string + f func() error + }{ + {"SkipAllQuestions", p.SkipAllQuestions}, + {"SkipAllAnswers", p.SkipAllAnswers}, + {"SkipAllAuthorities", p.SkipAllAuthorities}, + {"SkipAllAdditionals", p.SkipAllAdditionals}, + } + for _, test := range tests { + if err := test.f(); err != ErrNotStarted { + t.Errorf("Got %s() = %v, want = %v", test.name, err, ErrNotStarted) + } + } +} + +func TestTooManyRecords(t *testing.T) { + const recs = int(^uint16(0)) + 1 + tests := []struct { + name string + msg Message + want error + }{ + { + "Questions", + Message{ + Questions: make([]Question, recs), + }, + errTooManyQuestions, + }, + { + "Answers", + Message{ + Answers: make([]Resource, recs), + }, + errTooManyAnswers, + }, + { + "Authorities", + Message{ + Authorities: make([]Resource, recs), + }, + errTooManyAuthorities, + }, + { + "Additionals", + Message{ + Additionals: make([]Resource, recs), + }, + errTooManyAdditionals, + }, + } + + for _, test := range tests { + if _, got := test.msg.Pack(); got != test.want { + t.Errorf("Packing %d %s: got = %v, want = %v", recs, test.name, got, test.want) + } + } +} + +func TestVeryLongTxt(t *testing.T) { + want := Resource{ + ResourceHeader{ + Name: mustNewName("foo.bar.example.com."), + Type: TypeTXT, + Class: ClassINET, + }, + &TXTResource{loremIpsum}, + } + buf, err := want.pack(make([]byte, 0, 8000), map[string]int{}) + if err != nil { + t.Fatal("Packing failed:", err) + } + var got Resource + off, err := got.Header.unpack(buf, 0) + if err != nil { + 
t.Fatal("Unpacking ResourceHeader failed:", err) + } + body, n, err := unpackResourceBody(buf, off, got.Header) + if err != nil { + t.Fatal("Unpacking failed:", err) + } + got.Body = body + if n != len(buf) { + t.Errorf("Unpacked different amount than packed: got n = %d, want = %d", n, len(buf)) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("Got = %#v, want = %#v", got, want) + } +} + +func TestStartError(t *testing.T) { + tests := []struct { + name string + fn func(*Builder) error + }{ + {"Questions", func(b *Builder) error { return b.StartQuestions() }}, + {"Answers", func(b *Builder) error { return b.StartAnswers() }}, + {"Authorities", func(b *Builder) error { return b.StartAuthorities() }}, + {"Additionals", func(b *Builder) error { return b.StartAdditionals() }}, + } + + envs := []struct { + name string + fn func() *Builder + want error + }{ + {"sectionNotStarted", func() *Builder { return &Builder{section: sectionNotStarted} }, ErrNotStarted}, + {"sectionDone", func() *Builder { return &Builder{section: sectionDone} }, ErrSectionDone}, + } + + for _, env := range envs { + for _, test := range tests { + if got := test.fn(env.fn()); got != env.want { + t.Errorf("got Builder{%s}.Start%s = %v, want = %v", env.name, test.name, got, env.want) + } + } + } +} + +func TestBuilderResourceError(t *testing.T) { + tests := []struct { + name string + fn func(*Builder) error + }{ + {"CNAMEResource", func(b *Builder) error { return b.CNAMEResource(ResourceHeader{}, CNAMEResource{}) }}, + {"MXResource", func(b *Builder) error { return b.MXResource(ResourceHeader{}, MXResource{}) }}, + {"NSResource", func(b *Builder) error { return b.NSResource(ResourceHeader{}, NSResource{}) }}, + {"PTRResource", func(b *Builder) error { return b.PTRResource(ResourceHeader{}, PTRResource{}) }}, + {"SOAResource", func(b *Builder) error { return b.SOAResource(ResourceHeader{}, SOAResource{}) }}, + {"TXTResource", func(b *Builder) error { return b.TXTResource(ResourceHeader{}, 
TXTResource{}) }}, + {"SRVResource", func(b *Builder) error { return b.SRVResource(ResourceHeader{}, SRVResource{}) }}, + {"AResource", func(b *Builder) error { return b.AResource(ResourceHeader{}, AResource{}) }}, + {"AAAAResource", func(b *Builder) error { return b.AAAAResource(ResourceHeader{}, AAAAResource{}) }}, + } + + envs := []struct { + name string + fn func() *Builder + want error + }{ + {"sectionNotStarted", func() *Builder { return &Builder{section: sectionNotStarted} }, ErrNotStarted}, + {"sectionHeader", func() *Builder { return &Builder{section: sectionHeader} }, ErrNotStarted}, + {"sectionQuestions", func() *Builder { return &Builder{section: sectionQuestions} }, ErrNotStarted}, + {"sectionDone", func() *Builder { return &Builder{section: sectionDone} }, ErrSectionDone}, + } + + for _, env := range envs { + for _, test := range tests { + if got := test.fn(env.fn()); got != env.want { + t.Errorf("got Builder{%s}.%s = %v, want = %v", env.name, test.name, got, env.want) + } + } + } +} + +func TestFinishError(t *testing.T) { + var b Builder + want := ErrNotStarted + if _, got := b.Finish(); got != want { + t.Errorf("got Builder{}.Finish() = %v, want = %v", got, want) + } +} + +func TestBuilder(t *testing.T) { + msg := largeTestMsg() + want, err := msg.Pack() + if err != nil { + t.Fatal("Packing without builder:", err) + } + + var b Builder + b.Start(nil, msg.Header) + + if err := b.StartQuestions(); err != nil { + t.Fatal("b.StartQuestions():", err) + } + for _, q := range msg.Questions { + if err := b.Question(q); err != nil { + t.Fatalf("b.Question(%#v): %v", q, err) + } + } + + if err := b.StartAnswers(); err != nil { + t.Fatal("b.StartAnswers():", err) + } + for _, a := range msg.Answers { + switch a.Header.Type { + case TypeA: + if err := b.AResource(a.Header, *a.Body.(*AResource)); err != nil { + t.Fatalf("b.AResource(%#v): %v", a, err) + } + case TypeNS: + if err := b.NSResource(a.Header, *a.Body.(*NSResource)); err != nil { + 
t.Fatalf("b.NSResource(%#v): %v", a, err) + } + case TypeCNAME: + if err := b.CNAMEResource(a.Header, *a.Body.(*CNAMEResource)); err != nil { + t.Fatalf("b.CNAMEResource(%#v): %v", a, err) + } + case TypeSOA: + if err := b.SOAResource(a.Header, *a.Body.(*SOAResource)); err != nil { + t.Fatalf("b.SOAResource(%#v): %v", a, err) + } + case TypePTR: + if err := b.PTRResource(a.Header, *a.Body.(*PTRResource)); err != nil { + t.Fatalf("b.PTRResource(%#v): %v", a, err) + } + case TypeMX: + if err := b.MXResource(a.Header, *a.Body.(*MXResource)); err != nil { + t.Fatalf("b.MXResource(%#v): %v", a, err) + } + case TypeTXT: + if err := b.TXTResource(a.Header, *a.Body.(*TXTResource)); err != nil { + t.Fatalf("b.TXTResource(%#v): %v", a, err) + } + case TypeAAAA: + if err := b.AAAAResource(a.Header, *a.Body.(*AAAAResource)); err != nil { + t.Fatalf("b.AAAAResource(%#v): %v", a, err) + } + case TypeSRV: + if err := b.SRVResource(a.Header, *a.Body.(*SRVResource)); err != nil { + t.Fatalf("b.SRVResource(%#v): %v", a, err) + } + } + } + + if err := b.StartAuthorities(); err != nil { + t.Fatal("b.StartAuthorities():", err) + } + for _, a := range msg.Authorities { + if err := b.NSResource(a.Header, *a.Body.(*NSResource)); err != nil { + t.Fatalf("b.NSResource(%#v): %v", a, err) + } + } + + if err := b.StartAdditionals(); err != nil { + t.Fatal("b.StartAdditionals():", err) + } + for _, a := range msg.Additionals { + if err := b.TXTResource(a.Header, *a.Body.(*TXTResource)); err != nil { + t.Fatalf("b.TXTResource(%#v): %v", a, err) + } + } + + got, err := b.Finish() + if err != nil { + t.Fatal("b.Finish():", err) + } + if !bytes.Equal(got, want) { + t.Fatalf("Got from Builder: %#v\nwant = %#v", got, want) + } +} + +func TestResourcePack(t *testing.T) { + for _, tt := range []struct { + m Message + err error + }{ + { + Message{ + Questions: []Question{ + { + Name: mustNewName("."), + Type: TypeAAAA, + Class: ClassINET, + }, + }, + Answers: []Resource{{ResourceHeader{}, nil}}, + }, + 
&nestedError{"packing Answer", errNilResouceBody}, + }, + { + Message{ + Questions: []Question{ + { + Name: mustNewName("."), + Type: TypeAAAA, + Class: ClassINET, + }, + }, + Authorities: []Resource{{ResourceHeader{}, (*NSResource)(nil)}}, + }, + &nestedError{"packing Authority", + &nestedError{"ResourceHeader", + &nestedError{"Name", errNonCanonicalName}, + }, + }, + }, + { + Message{ + Questions: []Question{ + { + Name: mustNewName("."), + Type: TypeA, + Class: ClassINET, + }, + }, + Additionals: []Resource{{ResourceHeader{}, nil}}, + }, + &nestedError{"packing Additional", errNilResouceBody}, + }, + } { + _, err := tt.m.Pack() + if !reflect.DeepEqual(err, tt.err) { + t.Errorf("got %v for %v; want %v", err, tt.m, tt.err) + } + } +} + +func BenchmarkParsing(b *testing.B) { + b.ReportAllocs() + + name := mustNewName("foo.bar.example.com.") + msg := Message{ + Header: Header{Response: true, Authoritative: true}, + Questions: []Question{ + { + Name: name, + Type: TypeA, + Class: ClassINET, + }, + }, + Answers: []Resource{ + { + ResourceHeader{ + Name: name, + Class: ClassINET, + }, + &AResource{[4]byte{}}, + }, + { + ResourceHeader{ + Name: name, + Class: ClassINET, + }, + &AAAAResource{[16]byte{}}, + }, + { + ResourceHeader{ + Name: name, + Class: ClassINET, + }, + &CNAMEResource{name}, + }, + { + ResourceHeader{ + Name: name, + Class: ClassINET, + }, + &NSResource{name}, + }, + }, + } + + buf, err := msg.Pack() + if err != nil { + b.Fatal("msg.Pack():", err) + } + + for i := 0; i < b.N; i++ { + var p Parser + if _, err := p.Start(buf); err != nil { + b.Fatal("p.Start(buf):", err) + } + + for { + _, err := p.Question() + if err == ErrSectionDone { + break + } + if err != nil { + b.Fatal("p.Question():", err) + } + } + + for { + h, err := p.AnswerHeader() + if err == ErrSectionDone { + break + } + if err != nil { + panic(err) + } + + switch h.Type { + case TypeA: + if _, err := p.AResource(); err != nil { + b.Fatal("p.AResource():", err) + } + case TypeAAAA: + if _, 
err := p.AAAAResource(); err != nil { + b.Fatal("p.AAAAResource():", err) + } + case TypeCNAME: + if _, err := p.CNAMEResource(); err != nil { + b.Fatal("p.CNAMEResource():", err) + } + case TypeNS: + if _, err := p.NSResource(); err != nil { + b.Fatal("p.NSResource():", err) + } + default: + b.Fatalf("unknown type: %T", h) + } + } + } +} + +func BenchmarkBuilding(b *testing.B) { + b.ReportAllocs() + + name := mustNewName("foo.bar.example.com.") + buf := make([]byte, 0, packStartingCap) + + for i := 0; i < b.N; i++ { + var bld Builder + bld.StartWithoutCompression(buf, Header{Response: true, Authoritative: true}) + + if err := bld.StartQuestions(); err != nil { + b.Fatal("bld.StartQuestions():", err) + } + q := Question{ + Name: name, + Type: TypeA, + Class: ClassINET, + } + if err := bld.Question(q); err != nil { + b.Fatalf("bld.Question(%+v): %v", q, err) + } + + hdr := ResourceHeader{ + Name: name, + Class: ClassINET, + } + if err := bld.StartAnswers(); err != nil { + b.Fatal("bld.StartQuestions():", err) + } + + ar := AResource{[4]byte{}} + if err := bld.AResource(hdr, ar); err != nil { + b.Fatalf("bld.AResource(%+v, %+v): %v", hdr, ar, err) + } + + aaar := AAAAResource{[16]byte{}} + if err := bld.AAAAResource(hdr, aaar); err != nil { + b.Fatalf("bld.AAAAResource(%+v, %+v): %v", hdr, aaar, err) + } + + cnr := CNAMEResource{name} + if err := bld.CNAMEResource(hdr, cnr); err != nil { + b.Fatalf("bld.CNAMEResource(%+v, %+v): %v", hdr, cnr, err) + } + + nsr := NSResource{name} + if err := bld.NSResource(hdr, nsr); err != nil { + b.Fatalf("bld.NSResource(%+v, %+v): %v", hdr, nsr, err) + } + + if _, err := bld.Finish(); err != nil { + b.Fatal("bld.Finish():", err) + } + } +} + +func largeTestMsg() Message { + name := mustNewName("foo.bar.example.com.") + return Message{ + Header: Header{Response: true, Authoritative: true}, + Questions: []Question{ + { + Name: name, + Type: TypeA, + Class: ClassINET, + }, + }, + Answers: []Resource{ + { + ResourceHeader{ + Name: 
name, + Type: TypeA, + Class: ClassINET, + }, + &AResource{[4]byte{127, 0, 0, 1}}, + }, + { + ResourceHeader{ + Name: name, + Type: TypeA, + Class: ClassINET, + }, + &AResource{[4]byte{127, 0, 0, 2}}, + }, + { + ResourceHeader{ + Name: name, + Type: TypeAAAA, + Class: ClassINET, + }, + &AAAAResource{[16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}}, + }, + { + ResourceHeader{ + Name: name, + Type: TypeCNAME, + Class: ClassINET, + }, + &CNAMEResource{mustNewName("alias.example.com.")}, + }, + { + ResourceHeader{ + Name: name, + Type: TypeSOA, + Class: ClassINET, + }, + &SOAResource{ + NS: mustNewName("ns1.example.com."), + MBox: mustNewName("mb.example.com."), + Serial: 1, + Refresh: 2, + Retry: 3, + Expire: 4, + MinTTL: 5, + }, + }, + { + ResourceHeader{ + Name: name, + Type: TypePTR, + Class: ClassINET, + }, + &PTRResource{mustNewName("ptr.example.com.")}, + }, + { + ResourceHeader{ + Name: name, + Type: TypeMX, + Class: ClassINET, + }, + &MXResource{ + 7, + mustNewName("mx.example.com."), + }, + }, + { + ResourceHeader{ + Name: name, + Type: TypeSRV, + Class: ClassINET, + }, + &SRVResource{ + 8, + 9, + 11, + mustNewName("srv.example.com."), + }, + }, + }, + Authorities: []Resource{ + { + ResourceHeader{ + Name: name, + Type: TypeNS, + Class: ClassINET, + }, + &NSResource{mustNewName("ns1.example.com.")}, + }, + { + ResourceHeader{ + Name: name, + Type: TypeNS, + Class: ClassINET, + }, + &NSResource{mustNewName("ns2.example.com.")}, + }, + }, + Additionals: []Resource{ + { + ResourceHeader{ + Name: name, + Type: TypeTXT, + Class: ClassINET, + }, + &TXTResource{"So Long, and Thanks for All the Fish"}, + }, + { + ResourceHeader{ + Name: name, + Type: TypeTXT, + Class: ClassINET, + }, + &TXTResource{"Hamster Huey and the Gooey Kablooie"}, + }, + }, + } +} + +const loremIpsum = ` +Lorem ipsum dolor sit amet, nec enim antiopam id, an ullum choro +nonumes qui, pro eu debet honestatis mediocritatem. No alia enim eos, +magna signiferumque ex vis. 
Mei no aperiri dissentias, cu vel quas +regione. Malorum quaeque vim ut, eum cu semper aliquid invidunt, ei +nam ipsum assentior. + +Nostrum appellantur usu no, vis ex probatus adipiscing. Cu usu illum +facilis eleifend. Iusto conceptam complectitur vim id. Tale omnesque +no usu, ei oblique sadipscing vim. At nullam voluptua usu, mei laudem +reformidans et. Qui ei eros porro reformidans, ius suas veritus +torquatos ex. Mea te facer alterum consequat. + +Soleat torquatos democritum sed et, no mea congue appareat, facer +aliquam nec in. Has te ipsum tritani. At justo dicta option nec, movet +phaedrum ad nam. Ea detracto verterem liberavisse has, delectus +suscipiantur in mei. Ex nam meliore complectitur. Ut nam omnis +honestatis quaerendum, ea mea nihil affert detracto, ad vix rebum +mollis. + +Ut epicurei praesent neglegentur pri, prima fuisset intellegebat ad +vim. An habemus comprehensam usu, at enim dignissim pro. Eam reque +vivendum adipisci ea. Vel ne odio choro minimum. Sea admodum +dissentiet ex. Mundi tamquam evertitur ius cu. Homero postea iisque ut +pro, vel ne saepe senserit consetetur. + +Nulla utamur facilisis ius ea, in viderer diceret pertinax eum. Mei no +enim quodsi facilisi, ex sed aeterno appareat mediocritatem, eum +sententiae deterruisset ut. At suas timeam euismod cum, offendit +appareat interpretaris ne vix. Vel ea civibus albucius, ex vim quidam +accusata intellegebat, noluisse instructior sea id. Nec te nonumes +habemus appellantur, quis dignissim vituperata eu nam. + +At vix apeirian patrioque vituperatoribus, an usu agam assum. Debet +iisque an mea. Per eu dicant ponderum accommodare. Pri alienum +placerat senserit an, ne eum ferri abhorreant vituperatoribus. Ut mea +eligendi disputationi. Ius no tation everti impedit, ei magna quidam +mediocritatem pri. + +Legendos perpetua iracundia ne usu, no ius ullum epicurei intellegam, +ad modus epicuri lucilius eam. In unum quaerendum usu. Ne diam paulo +has, ea veri virtute sed. 
Alia honestatis conclusionemque mea eu, ut +iudico albucius his. + +Usu essent probatus eu, sed omnis dolor delicatissimi ex. No qui augue +dissentias dissentiet. Laudem recteque no usu, vel an velit noluisse, +an sed utinam eirmod appetere. Ne mea fuisset inimicus ocurreret. At +vis dicant abhorreant, utinam forensibus nec ne, mei te docendi +consequat. Brute inermis persecuti cum id. Ut ipsum munere propriae +usu, dicit graeco disputando id has. + +Eros dolore quaerendum nam ei. Timeam ornatus inciderint pro id. Nec +torquatos sadipscing ei, ancillae molestie per in. Malis principes duo +ea, usu liber postulant ei. + +Graece timeam voluptatibus eu eam. Alia probatus quo no, ea scripta +feugiat duo. Congue option meliore ex qui, noster invenire appellantur +ea vel. Eu exerci legendos vel. Consetetur repudiandae vim ut. Vix an +probo minimum, et nam illud falli tempor. + +Cum dico signiferumque eu. Sed ut regione maiorum, id veritus insolens +tacimates vix. Eu mel sint tamquam lucilius, duo no oporteat +tacimates. Atqui augue concludaturque vix ei, id mel utroque menandri. + +Ad oratio blandit aliquando pro. Vis et dolorum rationibus +philosophia, ad cum nulla molestie. Hinc fuisset adversarium eum et, +ne qui nisl verear saperet, vel te quaestio forensibus. Per odio +option delenit an. Alii placerat has no, in pri nihil platonem +cotidieque. Est ut elit copiosae scaevola, debet tollit maluisset sea +an. + +Te sea hinc debet pericula, liber ridens fabulas cu sed, quem mutat +accusam mea et. Elitr labitur albucius et pri, an labore feugait mel. +Velit zril melius usu ea. Ad stet putent interpretaris qui. Mel no +error volumus scripserit. In pro paulo iudico, quo ei dolorem +verterem, affert fabellas dissentiet ea vix. + +Vis quot deserunt te. Error aliquid detraxit eu usu, vis alia eruditi +salutatus cu. Est nostrud bonorum an, ei usu alii salutatus. Vel at +nisl primis, eum ex aperiri noluisse reformidans. 
Ad veri velit +utroque vis, ex equidem detraxit temporibus has. + +Inermis appareat usu ne. Eros placerat periculis mea ad, in dictas +pericula pro. Errem postulant at usu, ea nec amet ornatus mentitum. Ad +mazim graeco eum, vel ex percipit volutpat iudicabit, sit ne delicata +interesset. Mel sapientem prodesset abhorreant et, oblique suscipit +eam id. + +An maluisset disputando mea, vidit mnesarchum pri et. Malis insolens +inciderint no sea. Ea persius maluisset vix, ne vim appellantur +instructior, consul quidam definiebas pri id. Cum integre feugiat +pericula in, ex sed persius similique, mel ne natum dicit percipitur. + +Primis discere ne pri, errem putent definitionem at vis. Ei mel dolore +neglegentur, mei tincidunt percipitur ei. Pro ad simul integre +rationibus. Eu vel alii honestatis definitiones, mea no nonumy +reprehendunt. + +Dicta appareat legendos est cu. Eu vel congue dicunt omittam, no vix +adhuc minimum constituam, quot noluisse id mel. Eu quot sale mutat +duo, ex nisl munere invenire duo. Ne nec ullum utamur. Pro alterum +debitis nostrum no, ut vel aliquid vivendo. + +Aliquip fierent praesent quo ne, id sit audiam recusabo delicatissimi. +Usu postulant incorrupte cu. At pro dicit tibique intellegam, cibo +dolore impedit id eam, et aeque feugait assentior has. Quando sensibus +nec ex. Possit sensibus pri ad, unum mutat periculis cu vix. + +Mundi tibique vix te, duo simul partiendo qualisque id, est at vidit +sonet tempor. No per solet aeterno deseruisse. Petentium salutandi +definiebas pri cu. Munere vivendum est in. Ei justo congue eligendi +vis, modus offendit omittantur te mel. + +Integre voluptaria in qui, sit habemus tractatos constituam no. Utinam +melius conceptam est ne, quo in minimum apeirian delicata, ut ius +porro recusabo. Dicant expetenda vix no, ludus scripserit sed ex, eu +his modo nostro. Ut etiam sonet his, quodsi inciderint philosophia te +per. Nullam lobortis eu cum, vix an sonet efficiendi repudiandae. 
Vis +ad idque fabellas intellegebat. + +Eum commodo senserit conclusionemque ex. Sed forensibus sadipscing ut, +mei in facer delicata periculis, sea ne hinc putent cetero. Nec ne +alia corpora invenire, alia prima soleat te cum. Eleifend posidonium +nam at. + +Dolorum indoctum cu quo, ex dolor legendos recteque eam, cu pri zril +discere. Nec civibus officiis dissentiunt ex, est te liber ludus +elaboraret. Cum ea fabellas invenire. Ex vim nostrud eripuit +comprehensam, nam te inermis delectus, saepe inermis senserit. +` diff --git a/fn/vendor/golang.org/x/net/http2/ciphers.go b/fn/vendor/golang.org/x/net/http2/ciphers.go new file mode 100644 index 000000000..698860b77 --- /dev/null +++ b/fn/vendor/golang.org/x/net/http2/ciphers.go @@ -0,0 +1,641 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +// A list of the possible cipher suite ids. Taken from +// http://www.iana.org/assignments/tls-parameters/tls-parameters.txt + +const ( + cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000 + cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001 + cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002 + cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003 + cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004 + cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005 + cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006 + cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007 + cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008 + cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009 + cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A + cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B + cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C + cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D + cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E + cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F + cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010 + 
cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011 + cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012 + cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013 + cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014 + cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015 + cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016 + cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017 + cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018 + cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019 + cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A + cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B + // Reserved uint16 = 0x001C-1D + cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F + cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020 + cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021 + cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022 + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023 + cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024 + cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025 + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026 + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027 + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028 + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029 + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B + cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C + cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D + cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E + cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030 + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031 + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032 + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033 + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034 + cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035 + 
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036 + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037 + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038 + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039 + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A + cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C + cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040 + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041 + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042 + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045 + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046 + // Reserved uint16 = 0x0047-4F + // Reserved uint16 = 0x0050-58 + // Reserved uint16 = 0x0059-5C + // Unassigned uint16 = 0x005D-5F + // Reserved uint16 = 0x0060-66 + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067 + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068 + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069 + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D + // Unassigned uint16 = 0x006E-83 + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085 + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 
0x0089 + cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A + cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B + cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C + cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D + cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E + cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090 + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091 + cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092 + cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093 + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094 + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095 + cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096 + cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097 + cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098 + cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099 + cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A + cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B + cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C + cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D + cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E + cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F + cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0 + cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1 + cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2 + cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3 + cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4 + cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5 + cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6 + cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7 + cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8 + cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9 + cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA + cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB + 
cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC + cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD + cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE + cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF + cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0 + cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1 + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2 + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3 + cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4 + cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5 + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6 + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7 + cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8 + cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9 + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1 + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5 + // Unassigned uint16 = 0x00C6-FE + cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF + // Unassigned uint16 = 0x01-55,* + cipher_TLS_FALLBACK_SCSV uint16 = 0x5600 + // Unassigned uint16 = 0x5601 - 0xC000 + cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001 + cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002 + cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003 + 
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004 + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005 + cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006 + cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007 + cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008 + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009 + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A + cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B + cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C + cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F + cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010 + cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011 + cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012 + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013 + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014 + cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015 + cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016 + cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017 + cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018 + cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019 + cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A + cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B + cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C + cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D + cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E + cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F + cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020 + cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021 + cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022 + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023 + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024 + 
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025 + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026 + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027 + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028 + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029 + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C + cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D + cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E + cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F + cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030 + cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031 + cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032 + cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033 + cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034 + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035 + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036 + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037 + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038 + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039 + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B + cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C + cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D + cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E + cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F + cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040 + cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041 + cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042 + cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043 + cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044 + 
cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045 + cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046 + cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047 + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048 + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049 + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D + cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E + cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F + cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050 + cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051 + cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052 + cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053 + cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054 + cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055 + cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056 + cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057 + cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058 + cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059 + cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A + cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060 + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061 + cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062 + cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063 + cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 
0xC064 + cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065 + cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066 + cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067 + cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068 + cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069 + cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A + cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B + cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C + cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D + cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E + cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F + cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070 + cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077 + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078 + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079 + cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A + cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081 + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 
0xC083 + cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D + cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E + cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093 + cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094 + cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099 + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B + cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C + cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D + cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E + cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F + cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0 + cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1 + cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2 + 
cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3 + cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4 + cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5 + cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6 + cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7 + cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8 + cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9 + cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA + cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF + // Unassigned uint16 = 0xC0B0-FF + // Unassigned uint16 = 0xC1-CB,* + // Unassigned uint16 = 0xCC00-A7 + cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8 + cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9 + cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA + cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB + cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC + cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD + cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE +) + +// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. +// References: +// https://tools.ietf.org/html/rfc7540#appendix-A +// Reject cipher suites from Appendix A. 
+// "This list includes those cipher suites that do not +// offer an ephemeral key exchange and those that are +// based on the TLS null, stream or block cipher type" +func isBadCipher(cipher uint16) bool { + switch cipher { + case cipher_TLS_NULL_WITH_NULL_NULL, + cipher_TLS_RSA_WITH_NULL_MD5, + cipher_TLS_RSA_WITH_NULL_SHA, + cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_RSA_WITH_RC4_128_MD5, + cipher_TLS_RSA_WITH_RC4_128_SHA, + cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5, + cipher_TLS_RSA_WITH_IDEA_CBC_SHA, + cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_RSA_WITH_DES_CBC_SHA, + cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_DSS_WITH_DES_CBC_SHA, + cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_RSA_WITH_DES_CBC_SHA, + cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_DH_anon_WITH_RC4_128_MD5, + cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_anon_WITH_DES_CBC_SHA, + cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_KRB5_WITH_DES_CBC_SHA, + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_KRB5_WITH_RC4_128_SHA, + cipher_TLS_KRB5_WITH_IDEA_CBC_SHA, + cipher_TLS_KRB5_WITH_DES_CBC_MD5, + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5, + cipher_TLS_KRB5_WITH_RC4_128_MD5, + cipher_TLS_KRB5_WITH_IDEA_CBC_MD5, + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5, + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5, + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_PSK_WITH_NULL_SHA, + 
cipher_TLS_DHE_PSK_WITH_NULL_SHA, + cipher_TLS_RSA_PSK_WITH_NULL_SHA, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA, + cipher_TLS_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_NULL_SHA256, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_PSK_WITH_RC4_128_SHA, + cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_RC4_128_SHA, + cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA, + 
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_RC4_128_SHA, + cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA, + cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DH_anon_WITH_SEED_CBC_SHA, + cipher_TLS_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384, + cipher_TLS_PSK_WITH_AES_128_GCM_SHA256, + cipher_TLS_PSK_WITH_AES_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384, + cipher_TLS_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_PSK_WITH_NULL_SHA256, + cipher_TLS_PSK_WITH_NULL_SHA384, + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_NULL_SHA256, + cipher_TLS_DHE_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_NULL_SHA256, + cipher_TLS_RSA_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256, + 
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV, + cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA, + cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA, + cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_NULL_SHA, + cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA, + cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_NULL_SHA, + cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_NULL_SHA, + cipher_TLS_ECDH_anon_WITH_RC4_128_SHA, + cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256, + 
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256, + 
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256, + 
cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_AES_128_CCM, + cipher_TLS_RSA_WITH_AES_256_CCM, + cipher_TLS_RSA_WITH_AES_128_CCM_8, + cipher_TLS_RSA_WITH_AES_256_CCM_8, + cipher_TLS_PSK_WITH_AES_128_CCM, + cipher_TLS_PSK_WITH_AES_256_CCM, + cipher_TLS_PSK_WITH_AES_128_CCM_8, + cipher_TLS_PSK_WITH_AES_256_CCM_8: + return true + default: + return false + } +} diff --git a/fn/vendor/golang.org/x/net/http2/ciphers_test.go b/fn/vendor/golang.org/x/net/http2/ciphers_test.go new file mode 100644 index 000000000..25aead013 --- /dev/null +++ b/fn/vendor/golang.org/x/net/http2/ciphers_test.go @@ -0,0 +1,309 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package http2 + +import "testing" + +func TestIsBadCipherBad(t *testing.T) { + for _, c := range badCiphers { + if !isBadCipher(c) { + t.Errorf("Wrong result for isBadCipher(%d), want true", c) + } + } +} + +// verify we don't give false positives on ciphers not on blacklist +func TestIsBadCipherGood(t *testing.T) { + goodCiphers := map[uint16]string{ + cipher_TLS_DHE_RSA_WITH_AES_256_CCM: "cipher_TLS_DHE_RSA_WITH_AES_256_CCM", + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM: "cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM", + cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256: "cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256", + } + for c, name := range goodCiphers { + if isBadCipher(c) { + t.Errorf("Wrong result for isBadCipher(%d) %s, want false", c, name) + } + } +} + +// copied from https://http2.github.io/http2-spec/#BadCipherSuites, +var badCiphers = []uint16{ + cipher_TLS_NULL_WITH_NULL_NULL, + cipher_TLS_RSA_WITH_NULL_MD5, + cipher_TLS_RSA_WITH_NULL_SHA, + cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_RSA_WITH_RC4_128_MD5, + cipher_TLS_RSA_WITH_RC4_128_SHA, + cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5, + cipher_TLS_RSA_WITH_IDEA_CBC_SHA, + cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_RSA_WITH_DES_CBC_SHA, + cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_DSS_WITH_DES_CBC_SHA, + cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_RSA_WITH_DES_CBC_SHA, + cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_DH_anon_WITH_RC4_128_MD5, + cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_anon_WITH_DES_CBC_SHA, + 
cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_KRB5_WITH_DES_CBC_SHA, + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_KRB5_WITH_RC4_128_SHA, + cipher_TLS_KRB5_WITH_IDEA_CBC_SHA, + cipher_TLS_KRB5_WITH_DES_CBC_MD5, + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5, + cipher_TLS_KRB5_WITH_RC4_128_MD5, + cipher_TLS_KRB5_WITH_IDEA_CBC_MD5, + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5, + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5, + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_PSK_WITH_NULL_SHA, + cipher_TLS_DHE_PSK_WITH_NULL_SHA, + cipher_TLS_RSA_PSK_WITH_NULL_SHA, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA, + cipher_TLS_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_NULL_SHA256, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, + 
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_PSK_WITH_RC4_128_SHA, + cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_RC4_128_SHA, + cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_RC4_128_SHA, + cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA, + cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DH_anon_WITH_SEED_CBC_SHA, + cipher_TLS_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384, + cipher_TLS_PSK_WITH_AES_128_GCM_SHA256, + cipher_TLS_PSK_WITH_AES_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384, + cipher_TLS_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_PSK_WITH_NULL_SHA256, + cipher_TLS_PSK_WITH_NULL_SHA384, + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_NULL_SHA256, + cipher_TLS_DHE_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256, 
+ cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_NULL_SHA256, + cipher_TLS_RSA_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV, + cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA, + cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA, + cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_NULL_SHA, + cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA, + cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_NULL_SHA, + cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_NULL_SHA, + cipher_TLS_ECDH_anon_WITH_RC4_128_SHA, + cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA, + 
cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384, 
+ cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256, + 
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_AES_128_CCM, + cipher_TLS_RSA_WITH_AES_256_CCM, + cipher_TLS_RSA_WITH_AES_128_CCM_8, + cipher_TLS_RSA_WITH_AES_256_CCM_8, + cipher_TLS_PSK_WITH_AES_128_CCM, + cipher_TLS_PSK_WITH_AES_256_CCM, + cipher_TLS_PSK_WITH_AES_128_CCM_8, + cipher_TLS_PSK_WITH_AES_256_CCM_8, +} diff --git a/fn/vendor/golang.org/x/net/http2/client_conn_pool.go b/fn/vendor/golang.org/x/net/http2/client_conn_pool.go index 772ea5e92..bdf5652b0 100644 --- a/fn/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/fn/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -18,6 +18,18 @@ type ClientConnPool interface { MarkDead(*ClientConn) } +// clientConnPoolIdleCloser is the interface implemented by ClientConnPool +// implementations which can close their idle connections. 
+type clientConnPoolIdleCloser interface { + ClientConnPool + closeIdleConnections() +} + +var ( + _ clientConnPoolIdleCloser = (*clientConnPool)(nil) + _ clientConnPoolIdleCloser = noDialClientConnPool{} +) + // TODO: use singleflight for dialing and addConnCalls? type clientConnPool struct { t *Transport @@ -40,7 +52,16 @@ const ( noDialOnMiss = false ) -func (p *clientConnPool) getClientConn(_ *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { +func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { + if isConnectionCloseRequest(req) && dialOnMiss { + // It gets its own connection. + const singleUse = true + cc, err := p.t.dialClientConn(addr, singleUse) + if err != nil { + return nil, err + } + return cc, nil + } p.mu.Lock() for _, cc := range p.conns[addr] { if cc.CanTakeNewRequest() { @@ -83,7 +104,8 @@ func (p *clientConnPool) getStartDialLocked(addr string) *dialCall { // run in its own goroutine. func (c *dialCall) dial(addr string) { - c.res, c.err = c.p.t.dialClientConn(addr) + const singleUse = false // shared conn + c.res, c.err = c.p.t.dialClientConn(addr, singleUse) close(c.done) c.p.mu.Lock() @@ -223,3 +245,12 @@ func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { } return out } + +// noDialClientConnPool is an implementation of http2.ClientConnPool +// which never dials. We let the HTTP/1.1 client dial and use its TLS +// connection instead. 
+type noDialClientConnPool struct{ *clientConnPool } + +func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { + return p.getClientConn(req, addr, noDialOnMiss) +} diff --git a/fn/vendor/golang.org/x/net/http2/configure_transport.go b/fn/vendor/golang.org/x/net/http2/configure_transport.go index daa17f5d4..b65fc6d42 100644 --- a/fn/vendor/golang.org/x/net/http2/configure_transport.go +++ b/fn/vendor/golang.org/x/net/http2/configure_transport.go @@ -32,7 +32,7 @@ func configureTransport(t1 *http.Transport) (*Transport, error) { t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") } upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { - addr := authorityAddr(authority) + addr := authorityAddr("https", authority) if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { go c.Close() return erringRoundTripper{err} @@ -56,7 +56,7 @@ func configureTransport(t1 *http.Transport) (*Transport, error) { } // registerHTTPSProtocol calls Transport.RegisterProtocol but -// convering panics into errors. +// converting panics into errors. func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) { defer func() { if e := recover(); e != nil { @@ -67,15 +67,6 @@ func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) return nil } -// noDialClientConnPool is an implementation of http2.ClientConnPool -// which never dials. We let the HTTP/1.1 client dial and use its TLS -// connection instead. -type noDialClientConnPool struct{ *clientConnPool } - -func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { - return p.getClientConn(req, addr, noDialOnMiss) -} - // noDialH2RoundTripper is a RoundTripper which only tries to complete the request // if there's already has a cached connection to the host. 
type noDialH2RoundTripper struct{ t *Transport } diff --git a/fn/vendor/golang.org/x/net/http2/databuffer.go b/fn/vendor/golang.org/x/net/http2/databuffer.go new file mode 100644 index 000000000..a3067f8de --- /dev/null +++ b/fn/vendor/golang.org/x/net/http2/databuffer.go @@ -0,0 +1,146 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "fmt" + "sync" +) + +// Buffer chunks are allocated from a pool to reduce pressure on GC. +// The maximum wasted space per dataBuffer is 2x the largest size class, +// which happens when the dataBuffer has multiple chunks and there is +// one unread byte in both the first and last chunks. We use a few size +// classes to minimize overheads for servers that typically receive very +// small request bodies. +// +// TODO: Benchmark to determine if the pools are necessary. The GC may have +// improved enough that we can instead allocate chunks like this: +// make([]byte, max(16<<10, expectedBytesRemaining)) +var ( + dataChunkSizeClasses = []int{ + 1 << 10, + 2 << 10, + 4 << 10, + 8 << 10, + 16 << 10, + } + dataChunkPools = [...]sync.Pool{ + {New: func() interface{} { return make([]byte, 1<<10) }}, + {New: func() interface{} { return make([]byte, 2<<10) }}, + {New: func() interface{} { return make([]byte, 4<<10) }}, + {New: func() interface{} { return make([]byte, 8<<10) }}, + {New: func() interface{} { return make([]byte, 16<<10) }}, + } +) + +func getDataBufferChunk(size int64) []byte { + i := 0 + for ; i < len(dataChunkSizeClasses)-1; i++ { + if size <= int64(dataChunkSizeClasses[i]) { + break + } + } + return dataChunkPools[i].Get().([]byte) +} + +func putDataBufferChunk(p []byte) { + for i, n := range dataChunkSizeClasses { + if len(p) == n { + dataChunkPools[i].Put(p) + return + } + } + panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) +} + +// dataBuffer is an io.ReadWriter 
backed by a list of data chunks. +// Each dataBuffer is used to read DATA frames on a single stream. +// The buffer is divided into chunks so the server can limit the +// total memory used by a single connection without limiting the +// request body size on any single stream. +type dataBuffer struct { + chunks [][]byte + r int // next byte to read is chunks[0][r] + w int // next byte to write is chunks[len(chunks)-1][w] + size int // total buffered bytes + expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0) +} + +var errReadEmpty = errors.New("read from empty dataBuffer") + +// Read copies bytes from the buffer into p. +// It is an error to read when no data is available. +func (b *dataBuffer) Read(p []byte) (int, error) { + if b.size == 0 { + return 0, errReadEmpty + } + var ntotal int + for len(p) > 0 && b.size > 0 { + readFrom := b.bytesFromFirstChunk() + n := copy(p, readFrom) + p = p[n:] + ntotal += n + b.r += n + b.size -= n + // If the first chunk has been consumed, advance to the next chunk. + if b.r == len(b.chunks[0]) { + putDataBufferChunk(b.chunks[0]) + end := len(b.chunks) - 1 + copy(b.chunks[:end], b.chunks[1:]) + b.chunks[end] = nil + b.chunks = b.chunks[:end] + b.r = 0 + } + } + return ntotal, nil +} + +func (b *dataBuffer) bytesFromFirstChunk() []byte { + if len(b.chunks) == 1 { + return b.chunks[0][b.r:b.w] + } + return b.chunks[0][b.r:] +} + +// Len returns the number of bytes of the unread portion of the buffer. +func (b *dataBuffer) Len() int { + return b.size +} + +// Write appends p to the buffer. +func (b *dataBuffer) Write(p []byte) (int, error) { + ntotal := len(p) + for len(p) > 0 { + // If the last chunk is empty, allocate a new chunk. Try to allocate + // enough to fully copy p plus any additional bytes we expect to + // receive. However, this may allocate less than len(p). 
+ want := int64(len(p)) + if b.expected > want { + want = b.expected + } + chunk := b.lastChunkOrAlloc(want) + n := copy(chunk[b.w:], p) + p = p[n:] + b.w += n + b.size += n + b.expected -= int64(n) + } + return ntotal, nil +} + +func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte { + if len(b.chunks) != 0 { + last := b.chunks[len(b.chunks)-1] + if b.w < len(last) { + return last + } + } + chunk := getDataBufferChunk(want) + b.chunks = append(b.chunks, chunk) + b.w = 0 + return chunk +} diff --git a/fn/vendor/golang.org/x/net/http2/databuffer_test.go b/fn/vendor/golang.org/x/net/http2/databuffer_test.go new file mode 100644 index 000000000..028e12e52 --- /dev/null +++ b/fn/vendor/golang.org/x/net/http2/databuffer_test.go @@ -0,0 +1,157 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package http2 + +import ( + "bytes" + "fmt" + "reflect" + "testing" +) + +func fmtDataChunk(chunk []byte) string { + out := "" + var last byte + var count int + for _, c := range chunk { + if c != last { + if count > 0 { + out += fmt.Sprintf(" x %d ", count) + count = 0 + } + out += string([]byte{c}) + last = c + } + count++ + } + if count > 0 { + out += fmt.Sprintf(" x %d", count) + } + return out +} + +func fmtDataChunks(chunks [][]byte) string { + var out string + for _, chunk := range chunks { + out += fmt.Sprintf("{%q}", fmtDataChunk(chunk)) + } + return out +} + +func testDataBuffer(t *testing.T, wantBytes []byte, setup func(t *testing.T) *dataBuffer) { + // Run setup, then read the remaining bytes from the dataBuffer and check + // that they match wantBytes. We use different read sizes to check corner + // cases in Read. 
+ for _, readSize := range []int{1, 2, 1 * 1024, 32 * 1024} { + t.Run(fmt.Sprintf("ReadSize=%d", readSize), func(t *testing.T) { + b := setup(t) + buf := make([]byte, readSize) + var gotRead bytes.Buffer + for { + n, err := b.Read(buf) + gotRead.Write(buf[:n]) + if err == errReadEmpty { + break + } + if err != nil { + t.Fatalf("error after %v bytes: %v", gotRead.Len(), err) + } + } + if got, want := gotRead.Bytes(), wantBytes; !bytes.Equal(got, want) { + t.Errorf("FinalRead=%q, want %q", fmtDataChunk(got), fmtDataChunk(want)) + } + }) + } +} + +func TestDataBufferAllocation(t *testing.T) { + writes := [][]byte{ + bytes.Repeat([]byte("a"), 1*1024-1), + []byte("a"), + bytes.Repeat([]byte("b"), 4*1024-1), + []byte("b"), + bytes.Repeat([]byte("c"), 8*1024-1), + []byte("c"), + bytes.Repeat([]byte("d"), 16*1024-1), + []byte("d"), + bytes.Repeat([]byte("e"), 32*1024), + } + var wantRead bytes.Buffer + for _, p := range writes { + wantRead.Write(p) + } + + testDataBuffer(t, wantRead.Bytes(), func(t *testing.T) *dataBuffer { + b := &dataBuffer{} + for _, p := range writes { + if n, err := b.Write(p); n != len(p) || err != nil { + t.Fatalf("Write(%q x %d)=%v,%v want %v,nil", p[:1], len(p), n, err, len(p)) + } + } + want := [][]byte{ + bytes.Repeat([]byte("a"), 1*1024), + bytes.Repeat([]byte("b"), 4*1024), + bytes.Repeat([]byte("c"), 8*1024), + bytes.Repeat([]byte("d"), 16*1024), + bytes.Repeat([]byte("e"), 16*1024), + bytes.Repeat([]byte("e"), 16*1024), + } + if !reflect.DeepEqual(b.chunks, want) { + t.Errorf("dataBuffer.chunks\ngot: %s\nwant: %s", fmtDataChunks(b.chunks), fmtDataChunks(want)) + } + return b + }) +} + +func TestDataBufferAllocationWithExpected(t *testing.T) { + writes := [][]byte{ + bytes.Repeat([]byte("a"), 1*1024), // allocates 16KB + bytes.Repeat([]byte("b"), 14*1024), + bytes.Repeat([]byte("c"), 15*1024), // allocates 16KB more + bytes.Repeat([]byte("d"), 2*1024), + bytes.Repeat([]byte("e"), 1*1024), // overflows 32KB expectation, allocates just 1KB + } 
+ var wantRead bytes.Buffer + for _, p := range writes { + wantRead.Write(p) + } + + testDataBuffer(t, wantRead.Bytes(), func(t *testing.T) *dataBuffer { + b := &dataBuffer{expected: 32 * 1024} + for _, p := range writes { + if n, err := b.Write(p); n != len(p) || err != nil { + t.Fatalf("Write(%q x %d)=%v,%v want %v,nil", p[:1], len(p), n, err, len(p)) + } + } + want := [][]byte{ + append(bytes.Repeat([]byte("a"), 1*1024), append(bytes.Repeat([]byte("b"), 14*1024), bytes.Repeat([]byte("c"), 1*1024)...)...), + append(bytes.Repeat([]byte("c"), 14*1024), bytes.Repeat([]byte("d"), 2*1024)...), + bytes.Repeat([]byte("e"), 1*1024), + } + if !reflect.DeepEqual(b.chunks, want) { + t.Errorf("dataBuffer.chunks\ngot: %s\nwant: %s", fmtDataChunks(b.chunks), fmtDataChunks(want)) + } + return b + }) +} + +func TestDataBufferWriteAfterPartialRead(t *testing.T) { + testDataBuffer(t, []byte("cdxyz"), func(t *testing.T) *dataBuffer { + b := &dataBuffer{} + if n, err := b.Write([]byte("abcd")); n != 4 || err != nil { + t.Fatalf("Write(\"abcd\")=%v,%v want 4,nil", n, err) + } + p := make([]byte, 2) + if n, err := b.Read(p); n != 2 || err != nil || !bytes.Equal(p, []byte("ab")) { + t.Fatalf("Read()=%q,%v,%v want \"ab\",2,nil", p, n, err) + } + if n, err := b.Write([]byte("xyz")); n != 3 || err != nil { + t.Fatalf("Write(\"xyz\")=%v,%v want 3,nil", n, err) + } + return b + }) +} diff --git a/fn/vendor/golang.org/x/net/http2/errors.go b/fn/vendor/golang.org/x/net/http2/errors.go index f320b2c09..71f2c4631 100644 --- a/fn/vendor/golang.org/x/net/http2/errors.go +++ b/fn/vendor/golang.org/x/net/http2/errors.go @@ -4,7 +4,10 @@ package http2 -import "fmt" +import ( + "errors" + "fmt" +) // An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec. 
type ErrCode uint32 @@ -61,9 +64,17 @@ func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: type StreamError struct { StreamID uint32 Code ErrCode + Cause error // optional additional detail +} + +func streamError(id uint32, code ErrCode) StreamError { + return StreamError{StreamID: id, Code: code} } func (e StreamError) Error() string { + if e.Cause != nil { + return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause) + } return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) } @@ -76,15 +87,47 @@ type goAwayFlowError struct{} func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } -// connErrorReason wraps a ConnectionError with an informative error about why it occurs. - +// connError represents an HTTP/2 ConnectionError error code, along +// with a string (for debugging) explaining why. +// // Errors of this type are only returned by the frame parser functions -// and converted into ConnectionError(ErrCodeProtocol). +// and converted into ConnectionError(Code), after stashing away +// the Reason into the Framer's errDetail field, accessible via +// the (*Framer).ErrorDetail method. 
type connError struct { - Code ErrCode - Reason string + Code ErrCode // the ConnectionError error code + Reason string // additional reason } func (e connError) Error() string { return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) } + +type pseudoHeaderError string + +func (e pseudoHeaderError) Error() string { + return fmt.Sprintf("invalid pseudo-header %q", string(e)) +} + +type duplicatePseudoHeaderError string + +func (e duplicatePseudoHeaderError) Error() string { + return fmt.Sprintf("duplicate pseudo-header %q", string(e)) +} + +type headerFieldNameError string + +func (e headerFieldNameError) Error() string { + return fmt.Sprintf("invalid header field name %q", string(e)) +} + +type headerFieldValueError string + +func (e headerFieldValueError) Error() string { + return fmt.Sprintf("invalid header field value %q", string(e)) +} + +var ( + errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") + errPseudoAfterRegular = errors.New("pseudo header field after regular") +) diff --git a/fn/vendor/golang.org/x/net/http2/fixed_buffer.go b/fn/vendor/golang.org/x/net/http2/fixed_buffer.go deleted file mode 100644 index 47da0f0bf..000000000 --- a/fn/vendor/golang.org/x/net/http2/fixed_buffer.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" -) - -// fixedBuffer is an io.ReadWriter backed by a fixed size buffer. -// It never allocates, but moves old data as new data is written. -type fixedBuffer struct { - buf []byte - r, w int -} - -var ( - errReadEmpty = errors.New("read from empty fixedBuffer") - errWriteFull = errors.New("write on full fixedBuffer") -) - -// Read copies bytes from the buffer into p. -// It is an error to read when no data is available. 
-func (b *fixedBuffer) Read(p []byte) (n int, err error) { - if b.r == b.w { - return 0, errReadEmpty - } - n = copy(p, b.buf[b.r:b.w]) - b.r += n - if b.r == b.w { - b.r = 0 - b.w = 0 - } - return n, nil -} - -// Len returns the number of bytes of the unread portion of the buffer. -func (b *fixedBuffer) Len() int { - return b.w - b.r -} - -// Write copies bytes from p into the buffer. -// It is an error to write more data than the buffer can hold. -func (b *fixedBuffer) Write(p []byte) (n int, err error) { - // Slide existing data to beginning. - if b.r > 0 && len(p) > len(b.buf)-b.w { - copy(b.buf, b.buf[b.r:b.w]) - b.w -= b.r - b.r = 0 - } - - // Write new data. - n = copy(b.buf[b.w:], p) - b.w += n - if n < len(p) { - err = errWriteFull - } - return n, err -} diff --git a/fn/vendor/golang.org/x/net/http2/fixed_buffer_test.go b/fn/vendor/golang.org/x/net/http2/fixed_buffer_test.go deleted file mode 100644 index f5432f8d8..000000000 --- a/fn/vendor/golang.org/x/net/http2/fixed_buffer_test.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package http2 - -import ( - "reflect" - "testing" -) - -var bufferReadTests = []struct { - buf fixedBuffer - read, wn int - werr error - wp []byte - wbuf fixedBuffer -}{ - { - fixedBuffer{[]byte{'a', 0}, 0, 1}, - 5, 1, nil, []byte{'a'}, - fixedBuffer{[]byte{'a', 0}, 0, 0}, - }, - { - fixedBuffer{[]byte{0, 'a'}, 1, 2}, - 5, 1, nil, []byte{'a'}, - fixedBuffer{[]byte{0, 'a'}, 0, 0}, - }, - { - fixedBuffer{[]byte{'a', 'b'}, 0, 2}, - 1, 1, nil, []byte{'a'}, - fixedBuffer{[]byte{'a', 'b'}, 1, 2}, - }, - { - fixedBuffer{[]byte{}, 0, 0}, - 5, 0, errReadEmpty, []byte{}, - fixedBuffer{[]byte{}, 0, 0}, - }, -} - -func TestBufferRead(t *testing.T) { - for i, tt := range bufferReadTests { - read := make([]byte, tt.read) - n, err := tt.buf.Read(read) - if n != tt.wn { - t.Errorf("#%d: wn = %d want %d", i, n, tt.wn) - continue - } - if err != tt.werr { - t.Errorf("#%d: werr = %v want %v", i, err, tt.werr) - continue - } - read = read[:n] - if !reflect.DeepEqual(read, tt.wp) { - t.Errorf("#%d: read = %+v want %+v", i, read, tt.wp) - } - if !reflect.DeepEqual(tt.buf, tt.wbuf) { - t.Errorf("#%d: buf = %+v want %+v", i, tt.buf, tt.wbuf) - } - } -} - -var bufferWriteTests = []struct { - buf fixedBuffer - write, wn int - werr error - wbuf fixedBuffer -}{ - { - buf: fixedBuffer{ - buf: []byte{}, - }, - wbuf: fixedBuffer{ - buf: []byte{}, - }, - }, - { - buf: fixedBuffer{ - buf: []byte{1, 'a'}, - }, - write: 1, - wn: 1, - wbuf: fixedBuffer{ - buf: []byte{0, 'a'}, - w: 1, - }, - }, - { - buf: fixedBuffer{ - buf: []byte{'a', 1}, - r: 1, - w: 1, - }, - write: 2, - wn: 2, - wbuf: fixedBuffer{ - buf: []byte{0, 0}, - w: 2, - }, - }, - { - buf: fixedBuffer{ - buf: []byte{}, - }, - write: 5, - werr: errWriteFull, - wbuf: fixedBuffer{ - buf: []byte{}, - }, - }, -} - -func TestBufferWrite(t *testing.T) { - for i, tt := range bufferWriteTests { - n, err := tt.buf.Write(make([]byte, tt.write)) - if n != tt.wn { - t.Errorf("#%d: wrote %d bytes; want %d", i, n, tt.wn) - continue - } - if err != 
tt.werr { - t.Errorf("#%d: error = %v; want %v", i, err, tt.werr) - continue - } - if !reflect.DeepEqual(tt.buf, tt.wbuf) { - t.Errorf("#%d: buf = %+v; want %+v", i, tt.buf, tt.wbuf) - } - } -} diff --git a/fn/vendor/golang.org/x/net/http2/frame.go b/fn/vendor/golang.org/x/net/http2/frame.go index e1e837cc8..3b1489072 100644 --- a/fn/vendor/golang.org/x/net/http2/frame.go +++ b/fn/vendor/golang.org/x/net/http2/frame.go @@ -11,7 +11,11 @@ import ( "fmt" "io" "log" + "strings" "sync" + + "golang.org/x/net/http2/hpack" + "golang.org/x/net/lex/httplex" ) const frameHeaderLen = 9 @@ -118,7 +122,7 @@ var flagName = map[FrameType]map[Flags]string{ // a frameParser parses a frame given its FrameHeader and payload // bytes. The length of payload will always equal fh.Length (which // might be 0). -type frameParser func(fh FrameHeader, payload []byte) (Frame, error) +type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) var frameParsers = map[FrameType]frameParser{ FrameData: parseDataFrame, @@ -261,7 +265,7 @@ type Frame interface { type Framer struct { r io.Reader lastFrame Frame - errReason string + errDetail error // lastHeaderStream is non-zero if the last frame was an // unfinished HEADERS/CONTINUATION. @@ -293,18 +297,41 @@ type Framer struct { // to return non-compliant frames or frame orders. // This is for testing and permits using the Framer to test // other HTTP/2 implementations' conformance to the spec. + // It is not compatible with ReadMetaHeaders. AllowIllegalReads bool + // ReadMetaHeaders if non-nil causes ReadFrame to merge + // HEADERS and CONTINUATION frames together and return + // MetaHeadersFrame instead. + ReadMetaHeaders *hpack.Decoder + + // MaxHeaderListSize is the http2 MAX_HEADER_LIST_SIZE. + // It's used only if ReadMetaHeaders is set; 0 means a sane default + // (currently 16MB) + // If the limit is hit, MetaHeadersFrame.Truncated is set true. 
+ MaxHeaderListSize uint32 + // TODO: track which type of frame & with which flags was sent - // last. Then return an error (unless AllowIllegalWrites) if + // last. Then return an error (unless AllowIllegalWrites) if // we're in the middle of a header block and a // non-Continuation or Continuation on a different stream is // attempted to be written. - logReads bool + logReads, logWrites bool - debugFramer *Framer // only use for logging written writes - debugFramerBuf *bytes.Buffer + debugFramer *Framer // only use for logging written writes + debugFramerBuf *bytes.Buffer + debugReadLoggerf func(string, ...interface{}) + debugWriteLoggerf func(string, ...interface{}) + + frameCache *frameCache // nil if frames aren't reused (default) +} + +func (fr *Framer) maxHeaderListSize() uint32 { + if fr.MaxHeaderListSize == 0 { + return 16 << 20 // sane default, per docs + } + return fr.MaxHeaderListSize } func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) { @@ -332,7 +359,7 @@ func (f *Framer) endWrite() error { byte(length>>16), byte(length>>8), byte(length)) - if logFrameWrites { + if f.logWrites { f.logWrite() } @@ -355,10 +382,10 @@ func (f *Framer) logWrite() { f.debugFramerBuf.Write(f.wbuf) fr, err := f.debugFramer.ReadFrame() if err != nil { - log.Printf("http2: Framer %p: failed to decode just-written frame", f) + f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f) return } - log.Printf("http2: Framer %p: wrote %v", f, summarizeFrame(fr)) + f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, summarizeFrame(fr)) } func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) } @@ -373,12 +400,36 @@ const ( maxFrameSize = 1<<24 - 1 ) +// SetReuseFrames allows the Framer to reuse Frames. +// If called on a Framer, Frames returned by calls to ReadFrame are only +// valid until the next call to ReadFrame. 
+func (fr *Framer) SetReuseFrames() { + if fr.frameCache != nil { + return + } + fr.frameCache = &frameCache{} +} + +type frameCache struct { + dataFrame DataFrame +} + +func (fc *frameCache) getDataFrame() *DataFrame { + if fc == nil { + return &DataFrame{} + } + return &fc.dataFrame +} + // NewFramer returns a Framer that writes frames to w and reads them from r. func NewFramer(w io.Writer, r io.Reader) *Framer { fr := &Framer{ - w: w, - r: r, - logReads: logFrameReads, + w: w, + r: r, + logReads: logFrameReads, + logWrites: logFrameWrites, + debugReadLoggerf: log.Printf, + debugWriteLoggerf: log.Printf, } fr.getReadBuf = func(size uint32) []byte { if cap(fr.readBuf) >= int(size) { @@ -402,6 +453,17 @@ func (fr *Framer) SetMaxReadFrameSize(v uint32) { fr.maxReadSize = v } +// ErrorDetail returns a more detailed error of the last error +// returned by Framer.ReadFrame. For instance, if ReadFrame +// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail +// will say exactly what was invalid. ErrorDetail is not guaranteed +// to return a non-nil value and like the rest of the http2 package, +// its return value is not protected by an API compatibility promise. +// ErrorDetail is reset after the next call to ReadFrame. +func (fr *Framer) ErrorDetail() error { + return fr.errDetail +} + // ErrFrameTooLarge is returned from Framer.ReadFrame when the peer // sends a frame that is larger than declared with SetMaxReadFrameSize. var ErrFrameTooLarge = errors.New("http2: frame too large") @@ -420,9 +482,10 @@ func terminalReadFrameError(err error) bool { // // If the frame is larger than previously set with SetMaxReadFrameSize, the // returned error is ErrFrameTooLarge. Other errors may be of type -// ConnectionError, StreamError, or anything else from from the underlying +// ConnectionError, StreamError, or anything else from the underlying // reader. 
func (fr *Framer) ReadFrame() (Frame, error) { + fr.errDetail = nil if fr.lastFrame != nil { fr.lastFrame.invalidate() } @@ -437,7 +500,7 @@ func (fr *Framer) ReadFrame() (Frame, error) { if _, err := io.ReadFull(fr.r, payload); err != nil { return nil, err } - f, err := typeFrameParser(fh.Type)(fh, payload) + f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload) if err != nil { if ce, ok := err.(connError); ok { return nil, fr.connError(ce.Code, ce.Reason) @@ -448,7 +511,10 @@ func (fr *Framer) ReadFrame() (Frame, error) { return nil, err } if fr.logReads { - log.Printf("http2: Framer %p: read %v", fr, summarizeFrame(f)) + fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f)) + } + if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil { + return fr.readMetaFrame(f.(*HeadersFrame)) } return f, nil } @@ -458,7 +524,7 @@ func (fr *Framer) ReadFrame() (Frame, error) { // to the peer before hanging up on them. This might help others debug // their implementations. func (fr *Framer) connError(code ErrCode, reason string) error { - fr.errReason = reason + fr.errDetail = errors.New(reason) return ConnectionError(code) } @@ -522,7 +588,7 @@ func (f *DataFrame) Data() []byte { return f.data } -func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { +func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) { if fh.StreamID == 0 { // DATA frames MUST be associated with a stream. If a // DATA frame is received whose stream identifier @@ -531,9 +597,9 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { // PROTOCOL_ERROR. 
return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} } - f := &DataFrame{ - FrameHeader: fh, - } + f := fc.getDataFrame() + f.FrameHeader = fh + var padSize byte if fh.Flags.Has(FlagDataPadded) { var err error @@ -553,7 +619,16 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { return f, nil } -var errStreamID = errors.New("invalid streamid") +var ( + errStreamID = errors.New("invalid stream ID") + errDepStreamID = errors.New("invalid dependent stream ID") + errPadLength = errors.New("pad length too large") + errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled") +) + +func validStreamIDOrZero(streamID uint32) bool { + return streamID&(1<<31) == 0 +} func validStreamID(streamID uint32) bool { return streamID != 0 && streamID&(1<<31) == 0 @@ -562,18 +637,51 @@ func validStreamID(streamID uint32) bool { // WriteData writes a DATA frame. // // It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. +// It is the caller's responsibility not to violate the maximum frame size +// and to not call other Write methods concurrently. func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { - // TODO: ignoring padding for now. will add when somebody cares. + return f.WriteDataPadded(streamID, endStream, data, nil) +} + +// WriteDataPadded writes a DATA frame with optional padding. +// +// If pad is nil, the padding bit is not sent. +// The length of pad must not exceed 255 bytes. +// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility not to violate the maximum frame size +// and to not call other Write methods concurrently. 
+func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error { if !validStreamID(streamID) && !f.AllowIllegalWrites { return errStreamID } + if len(pad) > 0 { + if len(pad) > 255 { + return errPadLength + } + if !f.AllowIllegalWrites { + for _, b := range pad { + if b != 0 { + // "Padding octets MUST be set to zero when sending." + return errPadBytes + } + } + } + } var flags Flags if endStream { flags |= FlagDataEndStream } + if pad != nil { + flags |= FlagDataPadded + } f.startWrite(FrameData, flags, streamID) + if pad != nil { + f.wbuf = append(f.wbuf, byte(len(pad))) + } f.wbuf = append(f.wbuf, data...) + f.wbuf = append(f.wbuf, pad...) return f.endWrite() } @@ -587,10 +695,10 @@ type SettingsFrame struct { p []byte } -func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { // When this (ACK 0x1) bit is set, the payload of the - // SETTINGS frame MUST be empty. Receipt of a + // SETTINGS frame MUST be empty. Receipt of a // SETTINGS frame with the ACK flag set and a length // field value other than 0 MUST be treated as a // connection error (Section 5.4.1) of type @@ -599,7 +707,7 @@ func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) { } if fh.StreamID != 0 { // SETTINGS frames always apply to a connection, - // never a single stream. The stream identifier for a + // never a single stream. The stream identifier for a // SETTINGS frame MUST be zero (0x0). If an endpoint // receives a SETTINGS frame whose stream identifier // field is anything other than 0x0, the endpoint MUST @@ -669,7 +777,7 @@ func (f *Framer) WriteSettings(settings ...Setting) error { return f.endWrite() } -// WriteSettings writes an empty SETTINGS frame with the ACK bit set. +// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set. 
// // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. @@ -689,7 +797,7 @@ type PingFrame struct { func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } -func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) { +func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { if len(payload) != 8 { return nil, ConnectionError(ErrCodeFrameSize) } @@ -729,7 +837,7 @@ func (f *GoAwayFrame) DebugData() []byte { return f.debugData } -func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if fh.StreamID != 0 { return nil, ConnectionError(ErrCodeProtocol) } @@ -769,7 +877,7 @@ func (f *UnknownFrame) Payload() []byte { return f.p } -func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { return &UnknownFrame{fh, p}, nil } @@ -780,7 +888,7 @@ type WindowUpdateFrame struct { Increment uint32 // never read with high bit set } -func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if len(p) != 4 { return nil, ConnectionError(ErrCodeFrameSize) } @@ -795,7 +903,7 @@ func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) { if fh.StreamID == 0 { return nil, ConnectionError(ErrCodeProtocol) } - return nil, StreamError{fh.StreamID, ErrCodeProtocol} + return nil, streamError(fh.StreamID, ErrCodeProtocol) } return &WindowUpdateFrame{ FrameHeader: fh, @@ -845,12 +953,12 @@ func (f *HeadersFrame) HasPriority() bool { return f.FrameHeader.Flags.Has(FlagHeadersPriority) } -func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) { +func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { hf := &HeadersFrame{ 
FrameHeader: fh, } if fh.StreamID == 0 { - // HEADERS frames MUST be associated with a stream. If a HEADERS frame + // HEADERS frames MUST be associated with a stream. If a HEADERS frame // is received whose stream identifier field is 0x0, the recipient MUST // respond with a connection error (Section 5.4.1) of type // PROTOCOL_ERROR. @@ -876,7 +984,7 @@ func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) { } } if len(p)-int(padLength) <= 0 { - return nil, StreamError{fh.StreamID, ErrCodeProtocol} + return nil, streamError(fh.StreamID, ErrCodeProtocol) } hf.headerFragBuf = p[:len(p)-int(padLength)] return hf, nil @@ -940,8 +1048,8 @@ func (f *Framer) WriteHeaders(p HeadersFrameParam) error { } if !p.Priority.IsZero() { v := p.Priority.StreamDep - if !validStreamID(v) && !f.AllowIllegalWrites { - return errors.New("invalid dependent stream id") + if !validStreamIDOrZero(v) && !f.AllowIllegalWrites { + return errDepStreamID } if p.Priority.Exclusive { v |= 1 << 31 @@ -972,7 +1080,7 @@ type PriorityParam struct { Exclusive bool // Weight is the stream's zero-indexed weight. It should be - // set together with StreamDep, or neither should be set. Per + // set together with StreamDep, or neither should be set. Per // the spec, "Add one to the value to obtain a weight between // 1 and 256." 
Weight uint8 @@ -982,7 +1090,7 @@ func (p PriorityParam) IsZero() bool { return p == PriorityParam{} } -func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) { +func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { if fh.StreamID == 0 { return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} } @@ -1009,6 +1117,9 @@ func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error { if !validStreamID(streamID) && !f.AllowIllegalWrites { return errStreamID } + if !validStreamIDOrZero(p.StreamDep) { + return errDepStreamID + } f.startWrite(FramePriority, 0, streamID) v := p.StreamDep if p.Exclusive { @@ -1026,7 +1137,7 @@ type RSTStreamFrame struct { ErrCode ErrCode } -func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if len(p) != 4 { return nil, ConnectionError(ErrCodeFrameSize) } @@ -1056,7 +1167,7 @@ type ContinuationFrame struct { headerFragBuf []byte } -func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if fh.StreamID == 0 { return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} } @@ -1106,7 +1217,7 @@ func (f *PushPromiseFrame) HeadersEnded() bool { return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) } -func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) { +func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { pp := &PushPromiseFrame{ FrameHeader: fh, } @@ -1225,6 +1336,205 @@ type headersEnder interface { HeadersEnded() bool } +type headersOrContinuation interface { + headersEnder + HeaderBlockFragment() []byte +} + +// A MetaHeadersFrame is the representation of one HEADERS frame and +// zero or more contiguous CONTINUATION frames and the decoding of +// their HPACK-encoded contents. 
+// +// This type of frame does not appear on the wire and is only returned +// by the Framer when Framer.ReadMetaHeaders is set. +type MetaHeadersFrame struct { + *HeadersFrame + + // Fields are the fields contained in the HEADERS and + // CONTINUATION frames. The underlying slice is owned by the + // Framer and must not be retained after the next call to + // ReadFrame. + // + // Fields are guaranteed to be in the correct http2 order and + // not have unknown pseudo header fields or invalid header + // field names or values. Required pseudo header fields may be + // missing, however. Use the MetaHeadersFrame.Pseudo accessor + // method access pseudo headers. + Fields []hpack.HeaderField + + // Truncated is whether the max header list size limit was hit + // and Fields is incomplete. The hpack decoder state is still + // valid, however. + Truncated bool +} + +// PseudoValue returns the given pseudo header field's value. +// The provided pseudo field should not contain the leading colon. +func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string { + for _, hf := range mh.Fields { + if !hf.IsPseudo() { + return "" + } + if hf.Name[1:] == pseudo { + return hf.Value + } + } + return "" +} + +// RegularFields returns the regular (non-pseudo) header fields of mh. +// The caller does not own the returned slice. +func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[i:] + } + } + return nil +} + +// PseudoFields returns the pseudo header fields of mh. +// The caller does not own the returned slice. 
+func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[:i] + } + } + return mh.Fields +} + +func (mh *MetaHeadersFrame) checkPseudos() error { + var isRequest, isResponse bool + pf := mh.PseudoFields() + for i, hf := range pf { + switch hf.Name { + case ":method", ":path", ":scheme", ":authority": + isRequest = true + case ":status": + isResponse = true + default: + return pseudoHeaderError(hf.Name) + } + // Check for duplicates. + // This would be a bad algorithm, but N is 4. + // And this doesn't allocate. + for _, hf2 := range pf[:i] { + if hf.Name == hf2.Name { + return duplicatePseudoHeaderError(hf.Name) + } + } + } + if isRequest && isResponse { + return errMixPseudoHeaderTypes + } + return nil +} + +func (fr *Framer) maxHeaderStringLen() int { + v := fr.maxHeaderListSize() + if uint32(int(v)) == v { + return int(v) + } + // They had a crazy big number for MaxHeaderBytes anyway, + // so give them unlimited header lengths: + return 0 +} + +// readMetaFrame returns 0 or more CONTINUATION frames from fr and +// merge them into into the provided hf and returns a MetaHeadersFrame +// with the decoded hpack values. 
+func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { + if fr.AllowIllegalReads { + return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") + } + mh := &MetaHeadersFrame{ + HeadersFrame: hf, + } + var remainSize = fr.maxHeaderListSize() + var sawRegular bool + + var invalid error // pseudo header field errors + hdec := fr.ReadMetaHeaders + hdec.SetEmitEnabled(true) + hdec.SetMaxStringLength(fr.maxHeaderStringLen()) + hdec.SetEmitFunc(func(hf hpack.HeaderField) { + if VerboseLogs && fr.logReads { + fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) + } + if !httplex.ValidHeaderFieldValue(hf.Value) { + invalid = headerFieldValueError(hf.Value) + } + isPseudo := strings.HasPrefix(hf.Name, ":") + if isPseudo { + if sawRegular { + invalid = errPseudoAfterRegular + } + } else { + sawRegular = true + if !validWireHeaderFieldName(hf.Name) { + invalid = headerFieldNameError(hf.Name) + } + } + + if invalid != nil { + hdec.SetEmitEnabled(false) + return + } + + size := hf.Size() + if size > remainSize { + hdec.SetEmitEnabled(false) + mh.Truncated = true + return + } + remainSize -= size + + mh.Fields = append(mh.Fields, hf) + }) + // Lose reference to MetaHeadersFrame: + defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {}) + + var hc headersOrContinuation = hf + for { + frag := hc.HeaderBlockFragment() + if _, err := hdec.Write(frag); err != nil { + return nil, ConnectionError(ErrCodeCompression) + } + + if hc.HeadersEnded() { + break + } + if f, err := fr.ReadFrame(); err != nil { + return nil, err + } else { + hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder + } + } + + mh.HeadersFrame.headerFragBuf = nil + mh.HeadersFrame.invalidate() + + if err := hdec.Close(); err != nil { + return nil, ConnectionError(ErrCodeCompression) + } + if invalid != nil { + fr.errDetail = invalid + if VerboseLogs { + log.Printf("http2: invalid header: %v", invalid) + } + return nil, StreamError{mh.StreamID, ErrCodeProtocol, 
invalid} + } + if err := mh.checkPseudos(); err != nil { + fr.errDetail = err + if VerboseLogs { + log.Printf("http2: invalid pseudo headers: %v", err) + } + return nil, StreamError{mh.StreamID, ErrCodeProtocol, err} + } + return mh, nil +} + func summarizeFrame(f Frame) string { var buf bytes.Buffer f.Header().writeDebug(&buf) diff --git a/fn/vendor/golang.org/x/net/http2/frame_test.go b/fn/vendor/golang.org/x/net/http2/frame_test.go index 281a2d14a..37266bc58 100644 --- a/fn/vendor/golang.org/x/net/http2/frame_test.go +++ b/fn/vendor/golang.org/x/net/http2/frame_test.go @@ -12,6 +12,8 @@ import ( "strings" "testing" "unsafe" + + "golang.org/x/net/http2/hpack" ) func testFramer() (*Framer, *bytes.Buffer) { @@ -98,6 +100,77 @@ func TestWriteData(t *testing.T) { } } +func TestWriteDataPadded(t *testing.T) { + tests := [...]struct { + streamID uint32 + endStream bool + data []byte + pad []byte + wantHeader FrameHeader + }{ + // Unpadded: + 0: { + streamID: 1, + endStream: true, + data: []byte("foo"), + pad: nil, + wantHeader: FrameHeader{ + Type: FrameData, + Flags: FlagDataEndStream, + Length: 3, + StreamID: 1, + }, + }, + + // Padded bit set, but no padding: + 1: { + streamID: 1, + endStream: true, + data: []byte("foo"), + pad: []byte{}, + wantHeader: FrameHeader{ + Type: FrameData, + Flags: FlagDataEndStream | FlagDataPadded, + Length: 4, + StreamID: 1, + }, + }, + + // Padded bit set, with padding: + 2: { + streamID: 1, + endStream: false, + data: []byte("foo"), + pad: []byte{0, 0, 0}, + wantHeader: FrameHeader{ + Type: FrameData, + Flags: FlagDataPadded, + Length: 7, + StreamID: 1, + }, + }, + } + for i, tt := range tests { + fr, _ := testFramer() + fr.WriteDataPadded(tt.streamID, tt.endStream, tt.data, tt.pad) + f, err := fr.ReadFrame() + if err != nil { + t.Errorf("%d. ReadFrame: %v", i, err) + continue + } + got := f.Header() + tt.wantHeader.valid = true + if got != tt.wantHeader { + t.Errorf("%d. 
read %+v; want %+v", i, got, tt.wantHeader) + continue + } + df := f.(*DataFrame) + if !bytes.Equal(df.Data(), tt.data) { + t.Errorf("%d. got %q; want %q", i, df.Data(), tt.data) + } + } +} + func TestWriteHeaders(t *testing.T) { tests := []struct { name string @@ -200,6 +273,37 @@ func TestWriteHeaders(t *testing.T) { headerFragBuf: []byte("abc"), }, }, + { + "with priority stream dep zero", // golang.org/issue/15444 + HeadersFrameParam{ + StreamID: 42, + BlockFragment: []byte("abc"), + EndStream: true, + EndHeaders: true, + PadLength: 2, + Priority: PriorityParam{ + StreamDep: 0, + Exclusive: true, + Weight: 127, + }, + }, + "\x00\x00\v\x01-\x00\x00\x00*\x02\x80\x00\x00\x00\u007fabc\x00\x00", + &HeadersFrame{ + FrameHeader: FrameHeader{ + valid: true, + StreamID: 42, + Type: FrameHeaders, + Flags: FlagHeadersEndStream | FlagHeadersEndHeaders | FlagHeadersPadded | FlagHeadersPriority, + Length: uint32(1 + 5 + len("abc") + 2), // pad length + priority + contents + padding + }, + Priority: PriorityParam{ + StreamDep: 0, + Exclusive: true, + Weight: 127, + }, + headerFragBuf: []byte("abc"), + }, + }, } for _, tt := range tests { fr, buf := testFramer() @@ -221,6 +325,24 @@ func TestWriteHeaders(t *testing.T) { } } +func TestWriteInvalidStreamDep(t *testing.T) { + fr, _ := testFramer() + err := fr.WriteHeaders(HeadersFrameParam{ + StreamID: 42, + Priority: PriorityParam{ + StreamDep: 1 << 31, + }, + }) + if err != errDepStreamID { + t.Errorf("header error = %v; want %q", err, errDepStreamID) + } + + err = fr.WritePriority(2, PriorityParam{StreamDep: 1 << 31}) + if err != errDepStreamID { + t.Errorf("priority error = %v; want %q", err, errDepStreamID) + } +} + func TestWriteContinuation(t *testing.T) { const streamID = 42 tests := []struct { @@ -725,11 +847,345 @@ func TestReadFrameOrder(t *testing.T) { t.Errorf("%d. 
after %d good frames, ReadFrame = %v; want ConnectionError(ErrCodeProtocol)\n%s", i, n, err, log.Bytes()) continue } - if f.errReason != tt.wantErr { - t.Errorf("%d. framer eror = %q; want %q\n%s", i, f.errReason, tt.wantErr, log.Bytes()) + if !((f.errDetail == nil && tt.wantErr == "") || (fmt.Sprint(f.errDetail) == tt.wantErr)) { + t.Errorf("%d. framer eror = %q; want %q\n%s", i, f.errDetail, tt.wantErr, log.Bytes()) } if n < tt.atLeast { t.Errorf("%d. framer only read %d frames; want at least %d\n%s", i, n, tt.atLeast, log.Bytes()) } } } + +func TestMetaFrameHeader(t *testing.T) { + write := func(f *Framer, frags ...[]byte) { + for i, frag := range frags { + end := (i == len(frags)-1) + if i == 0 { + f.WriteHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: frag, + EndHeaders: end, + }) + } else { + f.WriteContinuation(1, end, frag) + } + } + } + + want := func(flags Flags, length uint32, pairs ...string) *MetaHeadersFrame { + mh := &MetaHeadersFrame{ + HeadersFrame: &HeadersFrame{ + FrameHeader: FrameHeader{ + Type: FrameHeaders, + Flags: flags, + Length: length, + StreamID: 1, + }, + }, + Fields: []hpack.HeaderField(nil), + } + for len(pairs) > 0 { + mh.Fields = append(mh.Fields, hpack.HeaderField{ + Name: pairs[0], + Value: pairs[1], + }) + pairs = pairs[2:] + } + return mh + } + truncated := func(mh *MetaHeadersFrame) *MetaHeadersFrame { + mh.Truncated = true + return mh + } + + const noFlags Flags = 0 + + oneKBString := strings.Repeat("a", 1<<10) + + tests := [...]struct { + name string + w func(*Framer) + want interface{} // *MetaHeaderFrame or error + wantErrReason string + maxHeaderListSize uint32 + }{ + 0: { + name: "single_headers", + w: func(f *Framer) { + var he hpackEncoder + all := he.encodeHeaderRaw(t, ":method", "GET", ":path", "/") + write(f, all) + }, + want: want(FlagHeadersEndHeaders, 2, ":method", "GET", ":path", "/"), + }, + 1: { + name: "with_continuation", + w: func(f *Framer) { + var he hpackEncoder + all := he.encodeHeaderRaw(t, 
":method", "GET", ":path", "/", "foo", "bar") + write(f, all[:1], all[1:]) + }, + want: want(noFlags, 1, ":method", "GET", ":path", "/", "foo", "bar"), + }, + 2: { + name: "with_two_continuation", + w: func(f *Framer) { + var he hpackEncoder + all := he.encodeHeaderRaw(t, ":method", "GET", ":path", "/", "foo", "bar") + write(f, all[:2], all[2:4], all[4:]) + }, + want: want(noFlags, 2, ":method", "GET", ":path", "/", "foo", "bar"), + }, + 3: { + name: "big_string_okay", + w: func(f *Framer) { + var he hpackEncoder + all := he.encodeHeaderRaw(t, ":method", "GET", ":path", "/", "foo", oneKBString) + write(f, all[:2], all[2:]) + }, + want: want(noFlags, 2, ":method", "GET", ":path", "/", "foo", oneKBString), + }, + 4: { + name: "big_string_error", + w: func(f *Framer) { + var he hpackEncoder + all := he.encodeHeaderRaw(t, ":method", "GET", ":path", "/", "foo", oneKBString) + write(f, all[:2], all[2:]) + }, + maxHeaderListSize: (1 << 10) / 2, + want: ConnectionError(ErrCodeCompression), + }, + 5: { + name: "max_header_list_truncated", + w: func(f *Framer) { + var he hpackEncoder + var pairs = []string{":method", "GET", ":path", "/"} + for i := 0; i < 100; i++ { + pairs = append(pairs, "foo", "bar") + } + all := he.encodeHeaderRaw(t, pairs...) 
+ write(f, all[:2], all[2:]) + }, + maxHeaderListSize: (1 << 10) / 2, + want: truncated(want(noFlags, 2, + ":method", "GET", + ":path", "/", + "foo", "bar", + "foo", "bar", + "foo", "bar", + "foo", "bar", + "foo", "bar", + "foo", "bar", + "foo", "bar", + "foo", "bar", + "foo", "bar", + "foo", "bar", + "foo", "bar", // 11 + )), + }, + 6: { + name: "pseudo_order", + w: func(f *Framer) { + write(f, encodeHeaderRaw(t, + ":method", "GET", + "foo", "bar", + ":path", "/", // bogus + )) + }, + want: streamError(1, ErrCodeProtocol), + wantErrReason: "pseudo header field after regular", + }, + 7: { + name: "pseudo_unknown", + w: func(f *Framer) { + write(f, encodeHeaderRaw(t, + ":unknown", "foo", // bogus + "foo", "bar", + )) + }, + want: streamError(1, ErrCodeProtocol), + wantErrReason: "invalid pseudo-header \":unknown\"", + }, + 8: { + name: "pseudo_mix_request_response", + w: func(f *Framer) { + write(f, encodeHeaderRaw(t, + ":method", "GET", + ":status", "100", + )) + }, + want: streamError(1, ErrCodeProtocol), + wantErrReason: "mix of request and response pseudo headers", + }, + 9: { + name: "pseudo_dup", + w: func(f *Framer) { + write(f, encodeHeaderRaw(t, + ":method", "GET", + ":method", "POST", + )) + }, + want: streamError(1, ErrCodeProtocol), + wantErrReason: "duplicate pseudo-header \":method\"", + }, + 10: { + name: "trailer_okay_no_pseudo", + w: func(f *Framer) { write(f, encodeHeaderRaw(t, "foo", "bar")) }, + want: want(FlagHeadersEndHeaders, 8, "foo", "bar"), + }, + 11: { + name: "invalid_field_name", + w: func(f *Framer) { write(f, encodeHeaderRaw(t, "CapitalBad", "x")) }, + want: streamError(1, ErrCodeProtocol), + wantErrReason: "invalid header field name \"CapitalBad\"", + }, + 12: { + name: "invalid_field_value", + w: func(f *Framer) { write(f, encodeHeaderRaw(t, "key", "bad_null\x00")) }, + want: streamError(1, ErrCodeProtocol), + wantErrReason: "invalid header field value \"bad_null\\x00\"", + }, + } + for i, tt := range tests { + buf := 
new(bytes.Buffer) + f := NewFramer(buf, buf) + f.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + f.MaxHeaderListSize = tt.maxHeaderListSize + tt.w(f) + + name := tt.name + if name == "" { + name = fmt.Sprintf("test index %d", i) + } + + var got interface{} + var err error + got, err = f.ReadFrame() + if err != nil { + got = err + + // Ignore the StreamError.Cause field, if it matches the wantErrReason. + // The test table above predates the Cause field. + if se, ok := err.(StreamError); ok && se.Cause != nil && se.Cause.Error() == tt.wantErrReason { + se.Cause = nil + got = se + } + } + if !reflect.DeepEqual(got, tt.want) { + if mhg, ok := got.(*MetaHeadersFrame); ok { + if mhw, ok := tt.want.(*MetaHeadersFrame); ok { + hg := mhg.HeadersFrame + hw := mhw.HeadersFrame + if hg != nil && hw != nil && !reflect.DeepEqual(*hg, *hw) { + t.Errorf("%s: headers differ:\n got: %+v\nwant: %+v\n", name, *hg, *hw) + } + } + } + str := func(v interface{}) string { + if _, ok := v.(error); ok { + return fmt.Sprintf("error %v", v) + } else { + return fmt.Sprintf("value %#v", v) + } + } + t.Errorf("%s:\n got: %v\nwant: %s", name, str(got), str(tt.want)) + } + if tt.wantErrReason != "" && tt.wantErrReason != fmt.Sprint(f.errDetail) { + t.Errorf("%s: got error reason %q; want %q", name, f.errDetail, tt.wantErrReason) + } + } +} + +func TestSetReuseFrames(t *testing.T) { + fr, buf := testFramer() + fr.SetReuseFrames() + + // Check that DataFrames are reused. Note that + // SetReuseFrames only currently implements reuse of DataFrames. + firstDf := readAndVerifyDataFrame("ABC", 3, fr, buf, t) + + for i := 0; i < 10; i++ { + df := readAndVerifyDataFrame("XYZ", 3, fr, buf, t) + if df != firstDf { + t.Errorf("Expected Framer to return references to the same DataFrame. 
Have %v and %v", &df, &firstDf) + } + } + + for i := 0; i < 10; i++ { + df := readAndVerifyDataFrame("", 0, fr, buf, t) + if df != firstDf { + t.Errorf("Expected Framer to return references to the same DataFrame. Have %v and %v", &df, &firstDf) + } + } + + for i := 0; i < 10; i++ { + df := readAndVerifyDataFrame("HHH", 3, fr, buf, t) + if df != firstDf { + t.Errorf("Expected Framer to return references to the same DataFrame. Have %v and %v", &df, &firstDf) + } + } +} + +func TestSetReuseFramesMoreThanOnce(t *testing.T) { + fr, buf := testFramer() + fr.SetReuseFrames() + + firstDf := readAndVerifyDataFrame("ABC", 3, fr, buf, t) + fr.SetReuseFrames() + + for i := 0; i < 10; i++ { + df := readAndVerifyDataFrame("XYZ", 3, fr, buf, t) + // SetReuseFrames should be idempotent + fr.SetReuseFrames() + if df != firstDf { + t.Errorf("Expected Framer to return references to the same DataFrame. Have %v and %v", &df, &firstDf) + } + } +} + +func TestNoSetReuseFrames(t *testing.T) { + fr, buf := testFramer() + const numNewDataFrames = 10 + dfSoFar := make([]interface{}, numNewDataFrames) + + // Check that DataFrames are not reused if SetReuseFrames wasn't called. + // SetReuseFrames only currently implements reuse of DataFrames. 
+ for i := 0; i < numNewDataFrames; i++ { + df := readAndVerifyDataFrame("XYZ", 3, fr, buf, t) + for _, item := range dfSoFar { + if df == item { + t.Errorf("Expected Framer to return new DataFrames since SetNoReuseFrames not set.") + } + } + dfSoFar[i] = df + } +} + +func readAndVerifyDataFrame(data string, length byte, fr *Framer, buf *bytes.Buffer, t *testing.T) *DataFrame { + var streamID uint32 = 1<<24 + 2<<16 + 3<<8 + 4 + fr.WriteData(streamID, true, []byte(data)) + wantEnc := "\x00\x00" + string(length) + "\x00\x01\x01\x02\x03\x04" + data + if buf.String() != wantEnc { + t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc) + } + f, err := fr.ReadFrame() + if err != nil { + t.Fatal(err) + } + df, ok := f.(*DataFrame) + if !ok { + t.Fatalf("got %T; want *DataFrame", f) + } + if !bytes.Equal(df.Data(), []byte(data)) { + t.Errorf("got %q; want %q", df.Data(), []byte(data)) + } + if f.Header().Flags&1 == 0 { + t.Errorf("didn't see END_STREAM flag") + } + return df +} + +func encodeHeaderRaw(t *testing.T, pairs ...string) []byte { + var he hpackEncoder + return he.encodeHeaderRaw(t, pairs...) +} diff --git a/fn/vendor/golang.org/x/net/http2/go15.go b/fn/vendor/golang.org/x/net/http2/go15.go deleted file mode 100644 index f0a562414..000000000 --- a/fn/vendor/golang.org/x/net/http2/go15.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.5 - -package http2 - -import "net/http" - -func requestCancel(req *http.Request) <-chan struct{} { return req.Cancel } diff --git a/fn/vendor/golang.org/x/net/http2/go16.go b/fn/vendor/golang.org/x/net/http2/go16.go new file mode 100644 index 000000000..00b2e9e3c --- /dev/null +++ b/fn/vendor/golang.org/x/net/http2/go16.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.6 + +package http2 + +import ( + "net/http" + "time" +) + +func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { + return t1.ExpectContinueTimeout +} diff --git a/fn/vendor/golang.org/x/net/http2/go17.go b/fn/vendor/golang.org/x/net/http2/go17.go new file mode 100644 index 000000000..47b7fae08 --- /dev/null +++ b/fn/vendor/golang.org/x/net/http2/go17.go @@ -0,0 +1,106 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package http2 + +import ( + "context" + "net" + "net/http" + "net/http/httptrace" + "time" +) + +type contextContext interface { + context.Context +} + +func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { + ctx, cancel = context.WithCancel(context.Background()) + ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) + if hs := opts.baseConfig(); hs != nil { + ctx = context.WithValue(ctx, http.ServerContextKey, hs) + } + return +} + +func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { + return context.WithCancel(ctx) +} + +func requestWithContext(req *http.Request, ctx contextContext) *http.Request { + return req.WithContext(ctx) +} + +type clientTrace httptrace.ClientTrace + +func reqContext(r *http.Request) context.Context { return r.Context() } + +func (t *Transport) idleConnTimeout() time.Duration { + if t.t1 != nil { + return t.t1.IdleConnTimeout + } + return 0 +} + +func setResponseUncompressed(res *http.Response) { res.Uncompressed = true } + +func traceGotConn(req *http.Request, cc *ClientConn) { + trace := httptrace.ContextClientTrace(req.Context()) + if trace == nil || trace.GotConn == nil { + return + } + ci := httptrace.GotConnInfo{Conn: cc.tconn} + cc.mu.Lock() + ci.Reused = 
cc.nextStreamID > 1 + ci.WasIdle = len(cc.streams) == 0 && ci.Reused + if ci.WasIdle && !cc.lastActive.IsZero() { + ci.IdleTime = time.Now().Sub(cc.lastActive) + } + cc.mu.Unlock() + + trace.GotConn(ci) +} + +func traceWroteHeaders(trace *clientTrace) { + if trace != nil && trace.WroteHeaders != nil { + trace.WroteHeaders() + } +} + +func traceGot100Continue(trace *clientTrace) { + if trace != nil && trace.Got100Continue != nil { + trace.Got100Continue() + } +} + +func traceWait100Continue(trace *clientTrace) { + if trace != nil && trace.Wait100Continue != nil { + trace.Wait100Continue() + } +} + +func traceWroteRequest(trace *clientTrace, err error) { + if trace != nil && trace.WroteRequest != nil { + trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) + } +} + +func traceFirstResponseByte(trace *clientTrace) { + if trace != nil && trace.GotFirstResponseByte != nil { + trace.GotFirstResponseByte() + } +} + +func requestTrace(req *http.Request) *clientTrace { + trace := httptrace.ContextClientTrace(req.Context()) + return (*clientTrace)(trace) +} + +// Ping sends a PING frame to the server and waits for the ack. +func (cc *ClientConn) Ping(ctx context.Context) error { + return cc.ping(ctx) +} diff --git a/fn/vendor/golang.org/x/net/http2/go17_not18.go b/fn/vendor/golang.org/x/net/http2/go17_not18.go new file mode 100644 index 000000000..b4c52ecec --- /dev/null +++ b/fn/vendor/golang.org/x/net/http2/go17_not18.go @@ -0,0 +1,36 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.7,!go1.8 + +package http2 + +import "crypto/tls" + +// temporary copy of Go 1.7's private tls.Config.clone: +func cloneTLSConfig(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, + Renegotiation: c.Renegotiation, + } +} diff --git a/fn/vendor/golang.org/x/net/http2/go18.go b/fn/vendor/golang.org/x/net/http2/go18.go new file mode 100644 index 000000000..4f30d228a --- /dev/null +++ b/fn/vendor/golang.org/x/net/http2/go18.go @@ -0,0 +1,56 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package http2 + +import ( + "crypto/tls" + "io" + "net/http" +) + +func cloneTLSConfig(c *tls.Config) *tls.Config { + c2 := c.Clone() + c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264 + return c2 +} + +var _ http.Pusher = (*responseWriter)(nil) + +// Push implements http.Pusher. 
+func (w *responseWriter) Push(target string, opts *http.PushOptions) error { + internalOpts := pushOptions{} + if opts != nil { + internalOpts.Method = opts.Method + internalOpts.Header = opts.Header + } + return w.push(target, internalOpts) +} + +func configureServer18(h1 *http.Server, h2 *Server) error { + if h2.IdleTimeout == 0 { + if h1.IdleTimeout != 0 { + h2.IdleTimeout = h1.IdleTimeout + } else { + h2.IdleTimeout = h1.ReadTimeout + } + } + return nil +} + +func shouldLogPanic(panicValue interface{}) bool { + return panicValue != nil && panicValue != http.ErrAbortHandler +} + +func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { + return req.GetBody +} + +func reqBodyIsNoBody(body io.ReadCloser) bool { + return body == http.NoBody +} + +func go18httpNoBody() io.ReadCloser { return http.NoBody } // for tests only diff --git a/fn/vendor/golang.org/x/net/http2/go18_test.go b/fn/vendor/golang.org/x/net/http2/go18_test.go new file mode 100644 index 000000000..30e3b038b --- /dev/null +++ b/fn/vendor/golang.org/x/net/http2/go18_test.go @@ -0,0 +1,79 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package http2 + +import ( + "crypto/tls" + "net/http" + "testing" + "time" +) + +// Tests that http2.Server.IdleTimeout is initialized from +// http.Server.{Idle,Read}Timeout. http.Server.IdleTimeout was +// added in Go 1.8. 
+func TestConfigureServerIdleTimeout_Go18(t *testing.T) { + const timeout = 5 * time.Second + const notThisOne = 1 * time.Second + + // With a zero http2.Server, verify that it copies IdleTimeout: + { + s1 := &http.Server{ + IdleTimeout: timeout, + ReadTimeout: notThisOne, + } + s2 := &Server{} + if err := ConfigureServer(s1, s2); err != nil { + t.Fatal(err) + } + if s2.IdleTimeout != timeout { + t.Errorf("s2.IdleTimeout = %v; want %v", s2.IdleTimeout, timeout) + } + } + + // And that it falls back to ReadTimeout: + { + s1 := &http.Server{ + ReadTimeout: timeout, + } + s2 := &Server{} + if err := ConfigureServer(s1, s2); err != nil { + t.Fatal(err) + } + if s2.IdleTimeout != timeout { + t.Errorf("s2.IdleTimeout = %v; want %v", s2.IdleTimeout, timeout) + } + } + + // Verify that s1's IdleTimeout doesn't overwrite an existing setting: + { + s1 := &http.Server{ + IdleTimeout: notThisOne, + } + s2 := &Server{ + IdleTimeout: timeout, + } + if err := ConfigureServer(s1, s2); err != nil { + t.Fatal(err) + } + if s2.IdleTimeout != timeout { + t.Errorf("s2.IdleTimeout = %v; want %v", s2.IdleTimeout, timeout) + } + } +} + +func TestCertClone(t *testing.T) { + c := &tls.Config{ + GetClientCertificate: func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { + panic("shouldn't be called") + }, + } + c2 := cloneTLSConfig(c) + if c2.GetClientCertificate == nil { + t.Error("GetClientCertificate is nil") + } +} diff --git a/fn/vendor/golang.org/x/net/http2/not_go15.go b/fn/vendor/golang.org/x/net/http2/go19.go similarity index 51% rename from fn/vendor/golang.org/x/net/http2/not_go15.go rename to fn/vendor/golang.org/x/net/http2/go19.go index d0fa5c890..38124ba56 100644 --- a/fn/vendor/golang.org/x/net/http2/not_go15.go +++ b/fn/vendor/golang.org/x/net/http2/go19.go @@ -2,10 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !go1.5 +// +build go1.9 package http2 -import "net/http" +import ( + "net/http" +) -func requestCancel(req *http.Request) <-chan struct{} { return nil } +func configureServer19(s *http.Server, conf *Server) error { + s.RegisterOnShutdown(conf.state.startGracefulShutdown) + return nil +} diff --git a/fn/vendor/golang.org/x/net/http2/go19_test.go b/fn/vendor/golang.org/x/net/http2/go19_test.go new file mode 100644 index 000000000..1675d248f --- /dev/null +++ b/fn/vendor/golang.org/x/net/http2/go19_test.go @@ -0,0 +1,60 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package http2 + +import ( + "context" + "net/http" + "reflect" + "testing" + "time" +) + +func TestServerGracefulShutdown(t *testing.T) { + var st *serverTester + handlerDone := make(chan struct{}) + st = newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + defer close(handlerDone) + go st.ts.Config.Shutdown(context.Background()) + + ga := st.wantGoAway() + if ga.ErrCode != ErrCodeNo { + t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode) + } + if ga.LastStreamID != 1 { + t.Errorf("GOAWAY LastStreamID = %v; want 1", ga.LastStreamID) + } + + w.Header().Set("x-foo", "bar") + }) + defer st.Close() + + st.greet() + st.bodylessReq1() + + select { + case <-handlerDone: + case <-time.After(5 * time.Second): + t.Fatalf("server did not shutdown?") + } + hf := st.wantHeaders() + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"x-foo", "bar"}, + {"content-type", "text/plain; charset=utf-8"}, + {"content-length", "0"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + + n, err := st.cc.Read([]byte{0}) + if n != 0 || err == nil { + t.Errorf("Read = %v, %v; want 0, non-nil", n, err) + } +} diff --git a/fn/vendor/golang.org/x/net/http2/h2demo/h2demo.go 
b/fn/vendor/golang.org/x/net/http2/h2demo/h2demo.go index 15ef52f9b..9853107b9 100644 --- a/fn/vendor/golang.org/x/net/http2/h2demo/h2demo.go +++ b/fn/vendor/golang.org/x/net/http2/h2demo/h2demo.go @@ -19,6 +19,7 @@ import ( "log" "net" "net/http" + "os" "path" "regexp" "runtime" @@ -27,8 +28,8 @@ import ( "sync" "time" - "camlistore.org/pkg/googlestorage" - "camlistore.org/pkg/singleflight" + "go4.org/syncutil/singleflight" + "golang.org/x/crypto/acme/autocert" "golang.org/x/net/http2" ) @@ -79,13 +80,14 @@ is used transparently by the Go standard library from Go 1.6 and later.

Contact info: bradfitz@golang.org, or file a bug.

+href="https://golang.org/s/http2bug">file a bug.

Handlers for testing

  • GET /reqinfo to dump the request + headers received
  • GET /clockstream streams the current time every second
  • GET /gophertiles to see a page with a bunch of images
  • +
  • GET /serverpush to see a page with server push
  • GET /file/gopher.png for a small file (does If-Modified-Since, Content-Range, etc)
  • GET /file/go.src.tar.gz for a larger file (~10 MB)
  • GET /redirect to redirect back to / (this page)
  • @@ -167,8 +169,11 @@ var ( // fileServer returns a file-serving handler that proxies URL. // It lazily fetches URL on the first access and caches its contents forever. -func fileServer(url string) http.Handler { +func fileServer(url string, latency time.Duration) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if latency > 0 { + time.Sleep(latency) + } hi, err := fsGrp.Do(url, func() (interface{}, error) { fsMu.Lock() if h, ok := fsCache[url]; ok { @@ -226,14 +231,18 @@ func clockStreamHandler(w http.ResponseWriter, r *http.Request) { func registerHandlers() { tiles := newGopherTilesHandler() + push := newPushHandler() mux2 := http.NewServeMux() http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - if r.TLS == nil { - if r.URL.Path == "/gophertiles" { - tiles.ServeHTTP(w, r) - return - } + switch { + case r.URL.Path == "/gophertiles": + tiles.ServeHTTP(w, r) // allow HTTP/2 + HTTP/1.x + return + case strings.HasPrefix(r.URL.Path, "/serverpush"): + push.ServeHTTP(w, r) // allow HTTP/2 + HTTP/1.x + return + case r.TLS == nil: // do not allow HTTP/1.x for anything else http.Redirect(w, r, "https://"+httpsHost()+"/", http.StatusFound) return } @@ -248,8 +257,8 @@ func registerHandlers() { mux2.ServeHTTP(w, r) }) mux2.HandleFunc("/", home) - mux2.Handle("/file/gopher.png", fileServer("https://golang.org/doc/gopher/frontpage.png")) - mux2.Handle("/file/go.src.tar.gz", fileServer("https://storage.googleapis.com/golang/go1.4.1.src.tar.gz")) + mux2.Handle("/file/gopher.png", fileServer("https://golang.org/doc/gopher/frontpage.png", 0)) + mux2.Handle("/file/go.src.tar.gz", fileServer("https://storage.googleapis.com/golang/go1.4.1.src.tar.gz", 0)) mux2.HandleFunc("/reqinfo", reqInfoHandler) mux2.HandleFunc("/crc32", crcHandler) mux2.HandleFunc("/ECHO", echoCapitalHandler) @@ -266,6 +275,46 @@ func registerHandlers() { }) } +var pushResources = map[string]http.Handler{ + "/serverpush/static/jquery.min.js": 
fileServer("https://ajax.googleapis.com/ajax/libs/jquery/1.8.2/jquery.min.js", 100*time.Millisecond), + "/serverpush/static/godocs.js": fileServer("https://golang.org/lib/godoc/godocs.js", 100*time.Millisecond), + "/serverpush/static/playground.js": fileServer("https://golang.org/lib/godoc/playground.js", 100*time.Millisecond), + "/serverpush/static/style.css": fileServer("https://golang.org/lib/godoc/style.css", 100*time.Millisecond), +} + +func newPushHandler() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + for path, handler := range pushResources { + if r.URL.Path == path { + handler.ServeHTTP(w, r) + return + } + } + + cacheBust := time.Now().UnixNano() + if pusher, ok := w.(http.Pusher); ok { + for path := range pushResources { + url := fmt.Sprintf("%s?%d", path, cacheBust) + if err := pusher.Push(url, nil); err != nil { + log.Printf("Failed to push %v: %v", path, err) + } + } + } + time.Sleep(100 * time.Millisecond) // fake network latency + parsing time + if err := pushTmpl.Execute(w, struct { + CacheBust int64 + HTTPSHost string + HTTPHost string + }{ + CacheBust: cacheBust, + HTTPSHost: httpsHost(), + HTTPHost: httpHost(), + }); err != nil { + log.Printf("Executing server push template: %v", err) + } + }) +} + func newGopherTilesHandler() http.Handler { const gopherURL = "https://blog.golang.org/go-programming-language-turns-two_gophers.jpg" res, err := http.Get(gopherURL) @@ -378,40 +427,25 @@ func httpHost() string { } func serveProdTLS() error { - c, err := googlestorage.NewServiceClient() - if err != nil { + const cacheDir = "/var/cache/autocert" + if err := os.MkdirAll(cacheDir, 0700); err != nil { return err } - slurp := func(key string) ([]byte, error) { - const bucket = "http2-demo-server-tls" - rc, _, err := c.GetObject(&googlestorage.Object{ - Bucket: bucket, - Key: key, - }) - if err != nil { - return nil, fmt.Errorf("Error fetching GCS object %q in bucket %q: %v", key, bucket, err) - } - defer 
rc.Close() - return ioutil.ReadAll(rc) - } - certPem, err := slurp("http2.golang.org.chained.pem") - if err != nil { - return err - } - keyPem, err := slurp("http2.golang.org.key") - if err != nil { - return err - } - cert, err := tls.X509KeyPair(certPem, keyPem) - if err != nil { - return err + m := autocert.Manager{ + Cache: autocert.DirCache(cacheDir), + Prompt: autocert.AcceptTOS, + HostPolicy: autocert.HostWhitelist("http2.golang.org"), } srv := &http.Server{ TLSConfig: &tls.Config{ - Certificates: []tls.Certificate{cert}, + GetCertificate: m.GetCertificate, }, } - http2.ConfigureServer(srv, &http2.Server{}) + http2.ConfigureServer(srv, &http2.Server{ + NewWriteScheduler: func() http2.WriteScheduler { + return http2.NewPriorityWriteScheduler(nil) + }, + }) ln, err := net.Listen("tcp", ":443") if err != nil { return err @@ -440,11 +474,43 @@ func serveProd() error { return <-errc } +const idleTimeout = 5 * time.Minute +const activeTimeout = 10 * time.Minute + +// TODO: put this into the standard library and actually send +// PING frames and GOAWAY, etc: golang.org/issue/14204 +func idleTimeoutHook() func(net.Conn, http.ConnState) { + var mu sync.Mutex + m := map[net.Conn]*time.Timer{} + return func(c net.Conn, cs http.ConnState) { + mu.Lock() + defer mu.Unlock() + if t, ok := m[c]; ok { + delete(m, c) + t.Stop() + } + var d time.Duration + switch cs { + case http.StateNew, http.StateIdle: + d = idleTimeout + case http.StateActive: + d = activeTimeout + default: + return + } + m[c] = time.AfterFunc(d, func() { + log.Printf("closing idle conn %v after %v", c.RemoteAddr(), d) + go c.Close() + }) + } +} + func main() { var srv http.Server flag.BoolVar(&http2.VerboseLogs, "verbose", false, "Verbose HTTP/2 debugging.") flag.Parse() srv.Addr = *httpsAddr + srv.ConnState = idleTimeoutHook() registerHandlers() diff --git a/fn/vendor/golang.org/x/net/http2/h2demo/launch.go b/fn/vendor/golang.org/x/net/http2/h2demo/launch.go index 13b1cfd73..df0866a30 100644 --- 
a/fn/vendor/golang.org/x/net/http2/h2demo/launch.go +++ b/fn/vendor/golang.org/x/net/http2/h2demo/launch.go @@ -170,9 +170,9 @@ func main() { }, }, NetworkInterfaces: []*compute.NetworkInterface{ - &compute.NetworkInterface{ + { AccessConfigs: []*compute.AccessConfig{ - &compute.AccessConfig{ + { Type: "ONE_TO_ONE_NAT", Name: "External NAT", NatIP: natIP, diff --git a/fn/vendor/golang.org/x/net/http2/h2demo/tmpl.go b/fn/vendor/golang.org/x/net/http2/h2demo/tmpl.go new file mode 100644 index 000000000..504d6a78a --- /dev/null +++ b/fn/vendor/golang.org/x/net/http2/h2demo/tmpl.go @@ -0,0 +1,1991 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build h2demo + +package main + +import "html/template" + +var pushTmpl = template.Must(template.New("serverpush").Parse(` + + + + + + + + + HTTP/2 Server Push Demo + + + + + + + + + +
    +Note: This page exists for demonstration purposes. For the actual cmd/go docs, go to golang.org/cmd/go. +
    + + + +
    +... +
    + + + + +
    +
    +
    +
    + Run + Format + + + +
    +
    + + +
    +
    + + +

    Command go

    + + + + + + + + + + + + + + +

    +Go is a tool for managing Go source code. +

    +

    +Usage: +

    +
    go command [arguments]
    +
    +

    +The commands are: +

    +
    build       compile packages and dependencies
    +clean       remove object files
    +doc         show documentation for package or symbol
    +env         print Go environment information
    +bug         start a bug report
    +fix         run go tool fix on packages
    +fmt         run gofmt on package sources
    +generate    generate Go files by processing source
    +get         download and install packages and dependencies
    +install     compile and install packages and dependencies
    +list        list packages
    +run         compile and run Go program
    +test        test packages
    +tool        run specified go tool
    +version     print Go version
    +vet         run go tool vet on packages
    +
    +

    +Use "go help [command]" for more information about a command. +

    +

    +Additional help topics: +

    +
    c           calling between Go and C
    +buildmode   description of build modes
    +filetype    file types
    +gopath      GOPATH environment variable
    +environment environment variables
    +importpath  import path syntax
    +packages    description of package lists
    +testflag    description of testing flags
    +testfunc    description of testing functions
    +
    +

    +Use "go help [topic]" for more information about that topic. +

    +

    Compile packages and dependencies

    +

    +Usage: +

    +
    go build [-o output] [-i] [build flags] [packages]
    +
    +

    +Build compiles the packages named by the import paths, +along with their dependencies, but it does not install the results. +

    +

    +If the arguments to build are a list of .go files, build treats +them as a list of source files specifying a single package. +

    +

    +When compiling a single main package, build writes +the resulting executable to an output file named after +the first source file ('go build ed.go rx.go' writes 'ed' or 'ed.exe') +or the source code directory ('go build unix/sam' writes 'sam' or 'sam.exe'). +The '.exe' suffix is added when writing a Windows executable. +

    +

    +When compiling multiple packages or a single non-main package, +build compiles the packages but discards the resulting object, +serving only as a check that the packages can be built. +

    +

    +When compiling packages, build ignores files that end in '_test.go'. +

    +

    +The -o flag, only allowed when compiling a single package, +forces build to write the resulting executable or object +to the named output file, instead of the default behavior described +in the last two paragraphs. +

    +

    +The -i flag installs the packages that are dependencies of the target. +

    +

    +The build flags are shared by the build, clean, get, install, list, run, +and test commands: +

    +
    -a
    +	force rebuilding of packages that are already up-to-date.
    +-n
    +	print the commands but do not run them.
    +-p n
    +	the number of programs, such as build commands or
    +	test binaries, that can be run in parallel.
    +	The default is the number of CPUs available.
    +-race
    +	enable data race detection.
    +	Supported only on linux/amd64, freebsd/amd64, darwin/amd64 and windows/amd64.
    +-msan
    +	enable interoperation with memory sanitizer.
    +	Supported only on linux/amd64,
    +	and only with Clang/LLVM as the host C compiler.
    +-v
    +	print the names of packages as they are compiled.
    +-work
    +	print the name of the temporary work directory and
    +	do not delete it when exiting.
    +-x
    +	print the commands.
    +
    +-asmflags 'flag list'
    +	arguments to pass on each go tool asm invocation.
    +-buildmode mode
    +	build mode to use. See 'go help buildmode' for more.
    +-compiler name
    +	name of compiler to use, as in runtime.Compiler (gccgo or gc).
    +-gccgoflags 'arg list'
    +	arguments to pass on each gccgo compiler/linker invocation.
    +-gcflags 'arg list'
    +	arguments to pass on each go tool compile invocation.
    +-installsuffix suffix
    +	a suffix to use in the name of the package installation directory,
    +	in order to keep output separate from default builds.
    +	If using the -race flag, the install suffix is automatically set to race
    +	or, if set explicitly, has _race appended to it.  Likewise for the -msan
    +	flag.  Using a -buildmode option that requires non-default compile flags
    +	has a similar effect.
    +-ldflags 'flag list'
    +	arguments to pass on each go tool link invocation.
    +-linkshared
    +	link against shared libraries previously created with
    +	-buildmode=shared.
    +-pkgdir dir
    +	install and load all packages from dir instead of the usual locations.
    +	For example, when building with a non-standard configuration,
    +	use -pkgdir to keep generated packages in a separate location.
    +-tags 'tag list'
    +	a list of build tags to consider satisfied during the build.
    +	For more information about build tags, see the description of
    +	build constraints in the documentation for the go/build package.
    +-toolexec 'cmd args'
    +	a program to use to invoke toolchain programs like vet and asm.
    +	For example, instead of running asm, the go command will run
    +	'cmd args /path/to/asm <arguments for asm>'.
    +
    +

    +The list flags accept a space-separated list of strings. To embed spaces +in an element in the list, surround it with either single or double quotes. +

    +

    +For more about specifying packages, see 'go help packages'. +For more about where packages and binaries are installed, +run 'go help gopath'. +For more about calling between Go and C/C++, run 'go help c'. +

    +

    +Note: Build adheres to certain conventions such as those described +by 'go help gopath'. Not all projects can follow these conventions, +however. Installations that have their own conventions or that use +a separate software build system may choose to use lower-level +invocations such as 'go tool compile' and 'go tool link' to avoid +some of the overheads and design decisions of the build tool. +

    +

    +See also: go install, go get, go clean. +

    +

    Remove object files

    +

    +Usage: +

    +
    go clean [-i] [-r] [-n] [-x] [build flags] [packages]
    +
    +

    +Clean removes object files from package source directories. +The go command builds most objects in a temporary directory, +so go clean is mainly concerned with object files left by other +tools or by manual invocations of go build. +

    +

    +Specifically, clean removes the following files from each of the +source directories corresponding to the import paths: +

    +
    _obj/            old object directory, left from Makefiles
    +_test/           old test directory, left from Makefiles
    +_testmain.go     old gotest file, left from Makefiles
    +test.out         old test log, left from Makefiles
    +build.out        old test log, left from Makefiles
    +*.[568ao]        object files, left from Makefiles
    +
    +DIR(.exe)        from go build
    +DIR.test(.exe)   from go test -c
    +MAINFILE(.exe)   from go build MAINFILE.go
    +*.so             from SWIG
    +
    +

    +In the list, DIR represents the final path element of the +directory, and MAINFILE is the base name of any Go source +file in the directory that is not included when building +the package. +

    +

    +The -i flag causes clean to remove the corresponding installed +archive or binary (what 'go install' would create). +

    +

    +The -n flag causes clean to print the remove commands it would execute, +but not run them. +

    +

    +The -r flag causes clean to be applied recursively to all the +dependencies of the packages named by the import paths. +

    +

    +The -x flag causes clean to print remove commands as it executes them. +

    +

    +For more about build flags, see 'go help build'. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    Show documentation for package or symbol

    +

    +Usage: +

    +
    go doc [-u] [-c] [package|[package.]symbol[.method]]
    +
    +

    +Doc prints the documentation comments associated with the item identified by its +arguments (a package, const, func, type, var, or method) followed by a one-line +summary of each of the first-level items "under" that item (package-level +declarations for a package, methods for a type, etc.). +

    +

    +Doc accepts zero, one, or two arguments. +

    +

    +Given no arguments, that is, when run as +

    +
    go doc
    +
    +

    +it prints the package documentation for the package in the current directory. +If the package is a command (package main), the exported symbols of the package +are elided from the presentation unless the -cmd flag is provided. +

    +

    +When run with one argument, the argument is treated as a Go-syntax-like +representation of the item to be documented. What the argument selects depends +on what is installed in GOROOT and GOPATH, as well as the form of the argument, +which is schematically one of these: +

    +
    go doc <pkg>
    +go doc <sym>[.<method>]
    +go doc [<pkg>.]<sym>[.<method>]
    +go doc [<pkg>.][<sym>.]<method>
    +
    +

    +The first item in this list matched by the argument is the one whose documentation +is printed. (See the examples below.) However, if the argument starts with a capital +letter it is assumed to identify a symbol or method in the current directory. +

    +

    +For packages, the order of scanning is determined lexically in breadth-first order. +That is, the package presented is the one that matches the search and is nearest +the root and lexically first at its level of the hierarchy. The GOROOT tree is +always scanned in its entirety before GOPATH. +

    +

    +If there is no package specified or matched, the package in the current +directory is selected, so "go doc Foo" shows the documentation for symbol Foo in +the current package. +

    +

    +The package path must be either a qualified path or a proper suffix of a +path. The go tool's usual package mechanism does not apply: package path +elements like . and ... are not implemented by go doc. +

    +

    +When run with two arguments, the first must be a full package path (not just a +suffix), and the second is a symbol or symbol and method; this is similar to the +syntax accepted by godoc: +

    +
    go doc <pkg> <sym>[.<method>]
    +
    +

    +In all forms, when matching symbols, lower-case letters in the argument match +either case but upper-case letters match exactly. This means that there may be +multiple matches of a lower-case argument in a package if different symbols have +different cases. If this occurs, documentation for all matches is printed. +

    +

    +Examples: +

    +
    go doc
    +	Show documentation for current package.
    +go doc Foo
    +	Show documentation for Foo in the current package.
    +	(Foo starts with a capital letter so it cannot match
    +	a package path.)
    +go doc encoding/json
    +	Show documentation for the encoding/json package.
    +go doc json
    +	Shorthand for encoding/json.
    +go doc json.Number (or go doc json.number)
    +	Show documentation and method summary for json.Number.
    +go doc json.Number.Int64 (or go doc json.number.int64)
    +	Show documentation for json.Number's Int64 method.
    +go doc cmd/doc
    +	Show package docs for the doc command.
    +go doc -cmd cmd/doc
    +	Show package docs and exported symbols within the doc command.
    +go doc template.new
    +	Show documentation for html/template's New function.
    +	(html/template is lexically before text/template)
    +go doc text/template.new # One argument
    +	Show documentation for text/template's New function.
    +go doc text/template new # Two arguments
    +	Show documentation for text/template's New function.
    +
    +At least in the current tree, these invocations all print the
    +documentation for json.Decoder's Decode method:
    +
    +go doc json.Decoder.Decode
    +go doc json.decoder.decode
    +go doc json.decode
    +cd go/src/encoding/json; go doc decode
    +
    +

    +Flags: +

    +
    -c
    +	Respect case when matching symbols.
    +-cmd
    +	Treat a command (package main) like a regular package.
    +	Otherwise package main's exported symbols are hidden
    +	when showing the package's top-level documentation.
    +-u
    +	Show documentation for unexported as well as exported
    +	symbols and methods.
    +
    +

    Print Go environment information

    +

    +Usage: +

    +
    go env [var ...]
    +
    +

    +Env prints Go environment information. +

    +

    +By default env prints information as a shell script +(on Windows, a batch file). If one or more variable +names is given as arguments, env prints the value of +each named variable on its own line. +

    +

    Start a bug report

    +

    +Usage: +

    +
    go bug
    +
    +

    +Bug opens the default browser and starts a new bug report. +The report includes useful system information. +

    +

    Run go tool fix on packages

    +

    +Usage: +

    +
    go fix [packages]
    +
    +

    +Fix runs the Go fix command on the packages named by the import paths. +

    +

    +For more about fix, see 'go doc cmd/fix'. +For more about specifying packages, see 'go help packages'. +

    +

    +To run fix with specific options, run 'go tool fix'. +

    +

    +See also: go fmt, go vet. +

    +

    Run gofmt on package sources

    +

    +Usage: +

    +
    go fmt [-n] [-x] [packages]
    +
    +

    +Fmt runs the command 'gofmt -l -w' on the packages named +by the import paths. It prints the names of the files that are modified. +

    +

    +For more about gofmt, see 'go doc cmd/gofmt'. +For more about specifying packages, see 'go help packages'. +

    +

    +The -n flag prints commands that would be executed. +The -x flag prints commands as they are executed. +

    +

    +To run gofmt with specific options, run gofmt itself. +

    +

    +See also: go fix, go vet. +

    +

    Generate Go files by processing source

    +

    +Usage: +

    +
    go generate [-run regexp] [-n] [-v] [-x] [build flags] [file.go... | packages]
    +
    +

    +Generate runs commands described by directives within existing +files. Those commands can run any process but the intent is to +create or update Go source files. +

    +

    +Go generate is never run automatically by go build, go get, go test, +and so on. It must be run explicitly. +

    +

    +Go generate scans the file for directives, which are lines of +the form, +

    +
    //go:generate command argument...
    +
    +

    +(note: no leading spaces and no space in "//go") where command +is the generator to be run, corresponding to an executable file +that can be run locally. It must either be in the shell path +(gofmt), a fully qualified path (/usr/you/bin/mytool), or a +command alias, described below. +

    +

    +Note that go generate does not parse the file, so lines that look +like directives in comments or multiline strings will be treated +as directives. +

    +

    +The arguments to the directive are space-separated tokens or +double-quoted strings passed to the generator as individual +arguments when it is run. +

    +

    +Quoted strings use Go syntax and are evaluated before execution; a +quoted string appears as a single argument to the generator. +

    +

    +Go generate sets several variables when it runs the generator: +

    +
    $GOARCH
    +	The execution architecture (arm, amd64, etc.)
    +$GOOS
    +	The execution operating system (linux, windows, etc.)
    +$GOFILE
    +	The base name of the file.
    +$GOLINE
    +	The line number of the directive in the source file.
    +$GOPACKAGE
    +	The name of the package of the file containing the directive.
    +$DOLLAR
    +	A dollar sign.
    +
    +

    +Other than variable substitution and quoted-string evaluation, no +special processing such as "globbing" is performed on the command +line. +

    +

    +As a last step before running the command, any invocations of any +environment variables with alphanumeric names, such as $GOFILE or +$HOME, are expanded throughout the command line. The syntax for +variable expansion is $NAME on all operating systems. Due to the +order of evaluation, variables are expanded even inside quoted +strings. If the variable NAME is not set, $NAME expands to the +empty string. +

    +

    +A directive of the form, +

    +
    //go:generate -command xxx args...
    +
    +

    +specifies, for the remainder of this source file only, that the +string xxx represents the command identified by the arguments. This +can be used to create aliases or to handle multiword generators. +For example, +

    +
    //go:generate -command foo go tool foo
    +
    +

    +specifies that the command "foo" represents the generator +"go tool foo". +

    +

    +Generate processes packages in the order given on the command line, +one at a time. If the command line lists .go files, they are treated +as a single package. Within a package, generate processes the +source files in a package in file name order, one at a time. Within +a source file, generate runs generators in the order they appear +in the file, one at a time. +

    +

    +If any generator returns an error exit status, "go generate" skips +all further processing for that package. +

    +

    +The generator is run in the package's source directory. +

    +

    +Go generate accepts one specific flag: +

    +
    -run=""
    +	if non-empty, specifies a regular expression to select
    +	directives whose full original source text (excluding
    +	any trailing spaces and final newline) matches the
    +	expression.
    +
    +

    +It also accepts the standard build flags including -v, -n, and -x. +The -v flag prints the names of packages and files as they are +processed. +The -n flag prints commands that would be executed. +The -x flag prints commands as they are executed. +

    +

    +For more about build flags, see 'go help build'. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    Download and install packages and dependencies

    +

    +Usage: +

    +
    go get [-d] [-f] [-fix] [-insecure] [-t] [-u] [build flags] [packages]
    +
    +

    +Get downloads the packages named by the import paths, along with their +dependencies. It then installs the named packages, like 'go install'. +

    +

    +The -d flag instructs get to stop after downloading the packages; that is, +it instructs get not to install the packages. +

    +

    +The -f flag, valid only when -u is set, forces get -u not to verify that +each package has been checked out from the source control repository +implied by its import path. This can be useful if the source is a local fork +of the original. +

    +

    +The -fix flag instructs get to run the fix tool on the downloaded packages +before resolving dependencies or building the code. +

    +

    +The -insecure flag permits fetching from repositories and resolving +custom domains using insecure schemes such as HTTP. Use with caution. +

    +

    +The -t flag instructs get to also download the packages required to build +the tests for the specified packages. +

    +

    +The -u flag instructs get to use the network to update the named packages +and their dependencies. By default, get uses the network to check out +missing packages but does not use it to look for updates to existing packages. +

    +

    +The -v flag enables verbose progress and debug output. +

    +

    +Get also accepts build flags to control the installation. See 'go help build'. +

    +

    +When checking out a new package, get creates the target directory +GOPATH/src/<import-path>. If the GOPATH contains multiple entries, +get uses the first one. For more details see: 'go help gopath'. +

    +

    +When checking out or updating a package, get looks for a branch or tag +that matches the locally installed version of Go. The most important +rule is that if the local installation is running version "go1", get +searches for a branch or tag named "go1". If no such version exists it +retrieves the most recent version of the package. +

    +

    +When go get checks out or updates a Git repository, +it also updates any git submodules referenced by the repository. +

    +

    +Get never checks out or updates code stored in vendor directories. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    +For more about how 'go get' finds source code to +download, see 'go help importpath'. +

    +

    +See also: go build, go install, go clean. +

    +

    Compile and install packages and dependencies

    +

    +Usage: +

    +
    go install [build flags] [packages]
    +
    +

    +Install compiles and installs the packages named by the import paths, +along with their dependencies. +

    +

    +For more about the build flags, see 'go help build'. +For more about specifying packages, see 'go help packages'. +

    +

    +See also: go build, go get, go clean. +

    +

    List packages

    +

    +Usage: +

    +
    go list [-e] [-f format] [-json] [build flags] [packages]
    +
    +

    +List lists the packages named by the import paths, one per line. +

    +

    +The default output shows the package import path: +

    +
    bytes
    +encoding/json
    +github.com/gorilla/mux
    +golang.org/x/net/html
    +
    +

    +The -f flag specifies an alternate format for the list, using the +syntax of package template. The default output is equivalent to -f +''. The struct being passed to the template is: +

    +
    type Package struct {
    +    Dir           string // directory containing package sources
    +    ImportPath    string // import path of package in dir
    +    ImportComment string // path in import comment on package statement
    +    Name          string // package name
    +    Doc           string // package documentation string
    +    Target        string // install path
    +    Shlib         string // the shared library that contains this package (only set when -linkshared)
    +    Goroot        bool   // is this package in the Go root?
    +    Standard      bool   // is this package part of the standard Go library?
    +    Stale         bool   // would 'go install' do anything for this package?
    +    StaleReason   string // explanation for Stale==true
    +    Root          string // Go root or Go path dir containing this package
    +    ConflictDir   string // this directory shadows Dir in $GOPATH
    +    BinaryOnly    bool   // binary-only package: cannot be recompiled from sources
    +
    +    // Source files
    +    GoFiles        []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
    +    CgoFiles       []string // .go sources files that import "C"
    +    IgnoredGoFiles []string // .go sources ignored due to build constraints
    +    CFiles         []string // .c source files
    +    CXXFiles       []string // .cc, .cxx and .cpp source files
    +    MFiles         []string // .m source files
    +    HFiles         []string // .h, .hh, .hpp and .hxx source files
    +    FFiles         []string // .f, .F, .for and .f90 Fortran source files
    +    SFiles         []string // .s source files
    +    SwigFiles      []string // .swig files
    +    SwigCXXFiles   []string // .swigcxx files
    +    SysoFiles      []string // .syso object files to add to archive
    +    TestGoFiles    []string // _test.go files in package
    +    XTestGoFiles   []string // _test.go files outside package
    +
    +    // Cgo directives
    +    CgoCFLAGS    []string // cgo: flags for C compiler
    +    CgoCPPFLAGS  []string // cgo: flags for C preprocessor
    +    CgoCXXFLAGS  []string // cgo: flags for C++ compiler
    +    CgoFFLAGS    []string // cgo: flags for Fortran compiler
    +    CgoLDFLAGS   []string // cgo: flags for linker
    +    CgoPkgConfig []string // cgo: pkg-config names
    +
    +    // Dependency information
    +    Imports      []string // import paths used by this package
    +    Deps         []string // all (recursively) imported dependencies
    +    TestImports  []string // imports from TestGoFiles
    +    XTestImports []string // imports from XTestGoFiles
    +
    +    // Error information
    +    Incomplete bool            // this package or a dependency has an error
    +    Error      *PackageError   // error loading package
    +    DepsErrors []*PackageError // errors loading dependencies
    +}
    +
    +

    +Packages stored in vendor directories report an ImportPath that includes the +path to the vendor directory (for example, "d/vendor/p" instead of "p"), +so that the ImportPath uniquely identifies a given copy of a package. +The Imports, Deps, TestImports, and XTestImports lists also contain these +expanded imports paths. See golang.org/s/go15vendor for more about vendoring. +

    +

    +The error information, if any, is +

    +
    type PackageError struct {
    +    ImportStack   []string // shortest path from package named on command line to this one
    +    Pos           string   // position of error (if present, file:line:col)
    +    Err           string   // the error itself
    +}
    +
    +

    +The template function "join" calls strings.Join. +

    +

    +The template function "context" returns the build context, defined as: +

    +
    type Context struct {
    +	GOARCH        string   // target architecture
    +	GOOS          string   // target operating system
    +	GOROOT        string   // Go root
    +	GOPATH        string   // Go path
    +	CgoEnabled    bool     // whether cgo can be used
    +	UseAllFiles   bool     // use files regardless of +build lines, file names
    +	Compiler      string   // compiler to assume when computing target paths
    +	BuildTags     []string // build constraints to match in +build lines
    +	ReleaseTags   []string // releases the current release is compatible with
    +	InstallSuffix string   // suffix to use in the name of the install dir
    +}
    +
    +

    +For more information about the meaning of these fields see the documentation +for the go/build package's Context type. +

    +

    +The -json flag causes the package data to be printed in JSON format +instead of using the template format. +

    +

    +The -e flag changes the handling of erroneous packages, those that +cannot be found or are malformed. By default, the list command +prints an error to standard error for each erroneous package and +omits the packages from consideration during the usual printing. +With the -e flag, the list command never prints errors to standard +error and instead processes the erroneous packages with the usual +printing. Erroneous packages will have a non-empty ImportPath and +a non-nil Error field; other information may or may not be missing +(zeroed). +

    +

    +For more about build flags, see 'go help build'. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    Compile and run Go program

    +

    +Usage: +

    +
    go run [build flags] [-exec xprog] gofiles... [arguments...]
    +
    +

    +Run compiles and runs the main package comprising the named Go source files. +A Go source file is defined to be a file ending in a literal ".go" suffix. +

    +

    +By default, 'go run' runs the compiled binary directly: 'a.out arguments...'. +If the -exec flag is given, 'go run' invokes the binary using xprog: +

    +
    'xprog a.out arguments...'.
    +
    +

    +If the -exec flag is not given, GOOS or GOARCH is different from the system +default, and a program named go_$GOOS_$GOARCH_exec can be found +on the current search path, 'go run' invokes the binary using that program, +for example 'go_nacl_386_exec a.out arguments...'. This allows execution of +cross-compiled programs when a simulator or other execution method is +available. +

    +

    +For more about build flags, see 'go help build'. +

    +

    +See also: go build. +

    +

    Test packages

    +

    +Usage: +

    +
    go test [build/test flags] [packages] [build/test flags & test binary flags]
    +
    +

    +'Go test' automates testing the packages named by the import paths. +It prints a summary of the test results in the format: +

    +
    ok   archive/tar   0.011s
    +FAIL archive/zip   0.022s
    +ok   compress/gzip 0.033s
    +...
    +
    +

    +followed by detailed output for each failed package. +

    +

    +'Go test' recompiles each package along with any files with names matching +the file pattern "*_test.go". +Files whose names begin with "_" (including "_test.go") or "." are ignored. +These additional files can contain test functions, benchmark functions, and +example functions. See 'go help testfunc' for more. +Each listed package causes the execution of a separate test binary. +

    +

    +Test files that declare a package with the suffix "_test" will be compiled as a +separate package, and then linked and run with the main test binary. +

    +

    +The go tool will ignore a directory named "testdata", making it available +to hold ancillary data needed by the tests. +

    +

    +By default, go test needs no arguments. It compiles and tests the package +with source in the current directory, including tests, and runs the tests. +

    +

    +The package is built in a temporary directory so it does not interfere with the +non-test installation. +

    +

    +In addition to the build flags, the flags handled by 'go test' itself are: +

    +
    -args
    +    Pass the remainder of the command line (everything after -args)
    +    to the test binary, uninterpreted and unchanged.
    +    Because this flag consumes the remainder of the command line,
    +    the package list (if present) must appear before this flag.
    +
    +-c
    +    Compile the test binary to pkg.test but do not run it
    +    (where pkg is the last element of the package's import path).
    +    The file name can be changed with the -o flag.
    +
    +-exec xprog
    +    Run the test binary using xprog. The behavior is the same as
    +    in 'go run'. See 'go help run' for details.
    +
    +-i
    +    Install packages that are dependencies of the test.
    +    Do not run the test.
    +
    +-o file
    +    Compile the test binary to the named file.
    +    The test still runs (unless -c or -i is specified).
    +
    +

    +The test binary also accepts flags that control execution of the test; these +flags are also accessible by 'go test'. See 'go help testflag' for details. +

    +

    +For more about build flags, see 'go help build'. +For more about specifying packages, see 'go help packages'. +

    +

    +See also: go build, go vet. +

    +

    Run specified go tool

    +

    +Usage: +

    +
    go tool [-n] command [args...]
    +
    +

    +Tool runs the go tool command identified by the arguments. +With no arguments it prints the list of known tools. +

    +

    +The -n flag causes tool to print the command that would be +executed but not execute it. +

    +

    +For more about each tool command, see 'go tool command -h'. +

    +

    Print Go version

    +

    +Usage: +

    +
    go version
    +
    +

    +Version prints the Go version, as reported by runtime.Version. +

    +

    Run go tool vet on packages

    +

    +Usage: +

    +
    go vet [-n] [-x] [build flags] [packages]
    +
    +

    +Vet runs the Go vet command on the packages named by the import paths. +

    +

    +For more about vet, see 'go doc cmd/vet'. +For more about specifying packages, see 'go help packages'. +

    +

    +To run the vet tool with specific options, run 'go tool vet'. +

    +

    +The -n flag prints commands that would be executed. +The -x flag prints commands as they are executed. +

    +

    +For more about build flags, see 'go help build'. +

    +

    +See also: go fmt, go fix. +

    +

    Calling between Go and C

    +

    +There are two different ways to call between Go and C/C++ code. +

    +

    +The first is the cgo tool, which is part of the Go distribution. For +information on how to use it see the cgo documentation (go doc cmd/cgo). +

    +

    +The second is the SWIG program, which is a general tool for +interfacing between languages. For information on SWIG see +http://swig.org/. When running go build, any file with a .swig +extension will be passed to SWIG. Any file with a .swigcxx extension +will be passed to SWIG with the -c++ option. +

    +

    +When either cgo or SWIG is used, go build will pass any .c, .m, .s, +or .S files to the C compiler, and any .cc, .cpp, .cxx files to the C++ +compiler. The CC or CXX environment variables may be set to determine +the C or C++ compiler, respectively, to use. +

    +

    Description of build modes

    +

    +The 'go build' and 'go install' commands take a -buildmode argument which +indicates which kind of object file is to be built. Currently supported values +are: +

    +
    -buildmode=archive
    +	Build the listed non-main packages into .a files. Packages named
    +	main are ignored.
    +
    +-buildmode=c-archive
    +	Build the listed main package, plus all packages it imports,
    +	into a C archive file. The only callable symbols will be those
    +	functions exported using a cgo //export comment. Requires
    +	exactly one main package to be listed.
    +
    +-buildmode=c-shared
    +	Build the listed main packages, plus all packages that they
    +	import, into C shared libraries. The only callable symbols will
    +	be those functions exported using a cgo //export comment.
    +	Non-main packages are ignored.
    +
    +-buildmode=default
    +	Listed main packages are built into executables and listed
    +	non-main packages are built into .a files (the default
    +	behavior).
    +
    +-buildmode=shared
    +	Combine all the listed non-main packages into a single shared
    +	library that will be used when building with the -linkshared
    +	option. Packages named main are ignored.
    +
    +-buildmode=exe
    +	Build the listed main packages and everything they import into
    +	executables. Packages not named main are ignored.
    +
    +-buildmode=pie
    +	Build the listed main packages and everything they import into
    +	position independent executables (PIE). Packages not named
    +	main are ignored.
    +
    +-buildmode=plugin
    +	Build the listed main packages, plus all packages that they
    +	import, into a Go plugin. Packages not named main are ignored.
    +
    +

    File types

    +

    +The go command examines the contents of a restricted set of files +in each directory. It identifies which files to examine based on +the extension of the file name. These extensions are: +

    +
    .go
    +	Go source files.
    +.c, .h
    +	C source files.
    +	If the package uses cgo or SWIG, these will be compiled with the
    +	OS-native compiler (typically gcc); otherwise they will
    +	trigger an error.
    +.cc, .cpp, .cxx, .hh, .hpp, .hxx
    +	C++ source files. Only useful with cgo or SWIG, and always
    +	compiled with the OS-native compiler.
    +.m
    +	Objective-C source files. Only useful with cgo, and always
    +	compiled with the OS-native compiler.
    +.s, .S
    +	Assembler source files.
    +	If the package uses cgo or SWIG, these will be assembled with the
    +	OS-native assembler (typically gcc (sic)); otherwise they
    +	will be assembled with the Go assembler.
    +.swig, .swigcxx
    +	SWIG definition files.
    +.syso
    +	System object files.
    +
    +

    +Files of each of these types except .syso may contain build +constraints, but the go command stops scanning for build constraints +at the first item in the file that is not a blank line or //-style +line comment. See the go/build package documentation for +more details. +

    +

    +Non-test Go source files can also include a //go:binary-only-package +comment, indicating that the package sources are included +for documentation only and must not be used to build the +package binary. This enables distribution of Go packages in +their compiled form alone. See the go/build package documentation +for more details. +

    +

    GOPATH environment variable

    +

    +The Go path is used to resolve import statements. +It is implemented by and documented in the go/build package. +

    +

    +The GOPATH environment variable lists places to look for Go code. +On Unix, the value is a colon-separated string. +On Windows, the value is a semicolon-separated string. +On Plan 9, the value is a list. +

    +

    +If the environment variable is unset, GOPATH defaults +to a subdirectory named "go" in the user's home directory +($HOME/go on Unix, %USERPROFILE%\go on Windows), +unless that directory holds a Go distribution. +Run "go env GOPATH" to see the current GOPATH. +

    +

    +See https://golang.org/wiki/SettingGOPATH to set a custom GOPATH. +

    +

    +Each directory listed in GOPATH must have a prescribed structure: +

    +

    +The src directory holds source code. The path below src +determines the import path or executable name. +

    +

    +The pkg directory holds installed package objects. +As in the Go tree, each target operating system and +architecture pair has its own subdirectory of pkg +(pkg/GOOS_GOARCH). +

    +

    +If DIR is a directory listed in the GOPATH, a package with +source in DIR/src/foo/bar can be imported as "foo/bar" and +has its compiled form installed to "DIR/pkg/GOOS_GOARCH/foo/bar.a". +

    +

    +The bin directory holds compiled commands. +Each command is named for its source directory, but only +the final element, not the entire path. That is, the +command with source in DIR/src/foo/quux is installed into +DIR/bin/quux, not DIR/bin/foo/quux. The "foo/" prefix is stripped +so that you can add DIR/bin to your PATH to get at the +installed commands. If the GOBIN environment variable is +set, commands are installed to the directory it names instead +of DIR/bin. GOBIN must be an absolute path. +

    +

    +Here's an example directory layout: +

    +
    GOPATH=/home/user/go
    +
    +/home/user/go/
    +    src/
    +        foo/
    +            bar/               (go code in package bar)
    +                x.go
    +            quux/              (go code in package main)
    +                y.go
    +    bin/
    +        quux                   (installed command)
    +    pkg/
    +        linux_amd64/
    +            foo/
    +                bar.a          (installed package object)
    +
    +

    +Go searches each directory listed in GOPATH to find source code, +but new packages are always downloaded into the first directory +in the list. +

    +

    +See https://golang.org/doc/code.html for an example. +

    +

    Internal Directories

    +

    +Code in or below a directory named "internal" is importable only +by code in the directory tree rooted at the parent of "internal". +Here's an extended version of the directory layout above: +

    +
    /home/user/go/
    +    src/
    +        crash/
    +            bang/              (go code in package bang)
    +                b.go
    +        foo/                   (go code in package foo)
    +            f.go
    +            bar/               (go code in package bar)
    +                x.go
    +            internal/
    +                baz/           (go code in package baz)
    +                    z.go
    +            quux/              (go code in package main)
    +                y.go
    +
    +

    +The code in z.go is imported as "foo/internal/baz", but that +import statement can only appear in source files in the subtree +rooted at foo. The source files foo/f.go, foo/bar/x.go, and +foo/quux/y.go can all import "foo/internal/baz", but the source file +crash/bang/b.go cannot. +

    +

    +See https://golang.org/s/go14internal for details. +

    +

    Vendor Directories

    +

    +Go 1.6 includes support for using local copies of external dependencies +to satisfy imports of those dependencies, often referred to as vendoring. +

    +

    +Code below a directory named "vendor" is importable only +by code in the directory tree rooted at the parent of "vendor", +and only using an import path that omits the prefix up to and +including the vendor element. +

    +

    +Here's the example from the previous section, +but with the "internal" directory renamed to "vendor" +and a new foo/vendor/crash/bang directory added: +

    +
    /home/user/go/
    +    src/
    +        crash/
    +            bang/              (go code in package bang)
    +                b.go
    +        foo/                   (go code in package foo)
    +            f.go
    +            bar/               (go code in package bar)
    +                x.go
    +            vendor/
    +                crash/
    +                    bang/      (go code in package bang)
    +                        b.go
    +                baz/           (go code in package baz)
    +                    z.go
    +            quux/              (go code in package main)
    +                y.go
    +
    +

    +The same visibility rules apply as for internal, but the code +in z.go is imported as "baz", not as "foo/vendor/baz". +

    +

    +Code in vendor directories deeper in the source tree shadows +code in higher directories. Within the subtree rooted at foo, an import +of "crash/bang" resolves to "foo/vendor/crash/bang", not the +top-level "crash/bang". +

    +

    +Code in vendor directories is not subject to import path +checking (see 'go help importpath'). +

    +

    +When 'go get' checks out or updates a git repository, it now also +updates submodules. +

    +

    +Vendor directories do not affect the placement of new repositories +being checked out for the first time by 'go get': those are always +placed in the main GOPATH, never in a vendor subtree. +

    +

    +See https://golang.org/s/go15vendor for details. +

    +

    Environment variables

    +

    +The go command, and the tools it invokes, examine a few different +environment variables. For many of these, you can see the default +value of on your system by running 'go env NAME', where NAME is the +name of the variable. +

    +

    +General-purpose environment variables: +

    +
    GCCGO
    +	The gccgo command to run for 'go build -compiler=gccgo'.
    +GOARCH
    +	The architecture, or processor, for which to compile code.
    +	Examples are amd64, 386, arm, ppc64.
    +GOBIN
    +	The directory where 'go install' will install a command.
    +GOOS
    +	The operating system for which to compile code.
    +	Examples are linux, darwin, windows, netbsd.
    +GOPATH
    +	For more details see: 'go help gopath'.
    +GORACE
    +	Options for the race detector.
    +	See https://golang.org/doc/articles/race_detector.html.
    +GOROOT
    +	The root of the go tree.
    +
    +

    +Environment variables for use with cgo: +

    +
    CC
    +	The command to use to compile C code.
    +CGO_ENABLED
    +	Whether the cgo command is supported.  Either 0 or 1.
    +CGO_CFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	C code.
    +CGO_CPPFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	C or C++ code.
    +CGO_CXXFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	C++ code.
    +CGO_FFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	Fortran code.
    +CGO_LDFLAGS
    +	Flags that cgo will pass to the compiler when linking.
    +CXX
    +	The command to use to compile C++ code.
    +PKG_CONFIG
    +	Path to pkg-config tool.
    +
    +

    +Architecture-specific environment variables: +

    +
    GOARM
    +	For GOARCH=arm, the ARM architecture for which to compile.
    +	Valid values are 5, 6, 7.
    +GO386
    +	For GOARCH=386, the floating point instruction set.
    +	Valid values are 387, sse2.
    +
    +

    +Special-purpose environment variables: +

    +
    GOROOT_FINAL
    +	The root of the installed Go tree, when it is
    +	installed in a location other than where it is built.
    +	File names in stack traces are rewritten from GOROOT to
    +	GOROOT_FINAL.
    +GO_EXTLINK_ENABLED
    +	Whether the linker should use external linking mode
    +	when using -linkmode=auto with code that uses cgo.
    +	Set to 0 to disable external linking mode, 1 to enable it.
    +GIT_ALLOW_PROTOCOL
    +	Defined by Git. A colon-separated list of schemes that are allowed to be used
    +	with git fetch/clone. If set, any scheme not explicitly mentioned will be
    +	considered insecure by 'go get'.
    +
    +

    Import path syntax

    +

    +An import path (see 'go help packages') denotes a package stored in the local +file system. In general, an import path denotes either a standard package (such +as "unicode/utf8") or a package found in one of the work spaces (For more +details see: 'go help gopath'). +

    +

    Relative import paths

    +

    +An import path beginning with ./ or ../ is called a relative path. +The toolchain supports relative import paths as a shortcut in two ways. +

    +

    +First, a relative path can be used as a shorthand on the command line. +If you are working in the directory containing the code imported as +"unicode" and want to run the tests for "unicode/utf8", you can type +"go test ./utf8" instead of needing to specify the full path. +Similarly, in the reverse situation, "go test .." will test "unicode" from +the "unicode/utf8" directory. Relative patterns are also allowed, like +"go test ./..." to test all subdirectories. See 'go help packages' for details +on the pattern syntax. +

    +

    +Second, if you are compiling a Go program not in a work space, +you can use a relative path in an import statement in that program +to refer to nearby code also not in a work space. +This makes it easy to experiment with small multipackage programs +outside of the usual work spaces, but such programs cannot be +installed with "go install" (there is no work space in which to install them), +so they are rebuilt from scratch each time they are built. +To avoid ambiguity, Go programs cannot use relative import paths +within a work space. +

    +

    Remote import paths

    +

    +Certain import paths also +describe how to obtain the source code for the package using +a revision control system. +

    +

    +A few common code hosting sites have special syntax: +

    +
    Bitbucket (Git, Mercurial)
    +
    +	import "bitbucket.org/user/project"
    +	import "bitbucket.org/user/project/sub/directory"
    +
    +GitHub (Git)
    +
    +	import "github.com/user/project"
    +	import "github.com/user/project/sub/directory"
    +
    +Launchpad (Bazaar)
    +
    +	import "launchpad.net/project"
    +	import "launchpad.net/project/series"
    +	import "launchpad.net/project/series/sub/directory"
    +
    +	import "launchpad.net/~user/project/branch"
    +	import "launchpad.net/~user/project/branch/sub/directory"
    +
    +IBM DevOps Services (Git)
    +
    +	import "hub.jazz.net/git/user/project"
    +	import "hub.jazz.net/git/user/project/sub/directory"
    +
    +

    +For code hosted on other servers, import paths may either be qualified +with the version control type, or the go tool can dynamically fetch +the import path over https/http and discover where the code resides +from a <meta> tag in the HTML. +

    +

    +To declare the code location, an import path of the form +

    +
    repository.vcs/path
    +
    +

    +specifies the given repository, with or without the .vcs suffix, +using the named version control system, and then the path inside +that repository. The supported version control systems are: +

    +
    Bazaar      .bzr
    +Git         .git
    +Mercurial   .hg
    +Subversion  .svn
    +
    +

    +For example, +

    +
    import "example.org/user/foo.hg"
    +
    +

    +denotes the root directory of the Mercurial repository at +example.org/user/foo or foo.hg, and +

    +
    import "example.org/repo.git/foo/bar"
    +
    +

    +denotes the foo/bar directory of the Git repository at +example.org/repo or repo.git. +

    +

    +When a version control system supports multiple protocols, +each is tried in turn when downloading. For example, a Git +download tries https://, then git+ssh://. +

    +

    +By default, downloads are restricted to known secure protocols +(e.g. https, ssh). To override this setting for Git downloads, the +GIT_ALLOW_PROTOCOL environment variable can be set (For more details see: +'go help environment'). +

    +

    +If the import path is not a known code hosting site and also lacks a +version control qualifier, the go tool attempts to fetch the import +over https/http and looks for a <meta> tag in the document's HTML +<head>. +

    +

    +The meta tag has the form: +

    +
    <meta name="go-import" content="import-prefix vcs repo-root">
    +
    +

    +The import-prefix is the import path corresponding to the repository +root. It must be a prefix or an exact match of the package being +fetched with "go get". If it's not an exact match, another http +request is made at the prefix to verify the <meta> tags match. +

    +

    +The meta tag should appear as early in the file as possible. +In particular, it should appear before any raw JavaScript or CSS, +to avoid confusing the go command's restricted parser. +

    +

    +The vcs is one of "git", "hg", "svn", etc, +

    +

    +The repo-root is the root of the version control system +containing a scheme and not containing a .vcs qualifier. +

    +

    +For example, +

    +
    import "example.org/pkg/foo"
    +
    +

    +will result in the following requests: +

    +
    https://example.org/pkg/foo?go-get=1 (preferred)
    +http://example.org/pkg/foo?go-get=1  (fallback, only with -insecure)
    +
    +

    +If that page contains the meta tag +

    +
    <meta name="go-import" content="example.org git https://code.org/r/p/exproj">
    +
    +

    +the go tool will verify that https://example.org/?go-get=1 contains the +same meta tag and then git clone https://code.org/r/p/exproj into +GOPATH/src/example.org. +

    +

    +New downloaded packages are written to the first directory listed in the GOPATH +environment variable (For more details see: 'go help gopath'). +

    +

    +The go command attempts to download the version of the +package appropriate for the Go release being used. +Run 'go help get' for more. +

    +

    Import path checking

    +

    +When the custom import path feature described above redirects to a +known code hosting site, each of the resulting packages has two possible +import paths, using the custom domain or the known hosting site. +

    +

    +A package statement is said to have an "import comment" if it is immediately +followed (before the next newline) by a comment of one of these two forms: +

    +
    package math // import "path"
    +package math /* import "path" */
    +
    +

    +The go command will refuse to install a package with an import comment +unless it is being referred to by that import path. In this way, import comments +let package authors make sure the custom import path is used and not a +direct path to the underlying code hosting site. +

    +

    +Import path checking is disabled for code found within vendor trees. +This makes it possible to copy code into alternate locations in vendor trees +without needing to update import comments. +

    +

    +See https://golang.org/s/go14customimport for details. +

    +

    Description of package lists

    +

    +Many commands apply to a set of packages: +

    +
    go action [packages]
    +
    +

    +Usually, [packages] is a list of import paths. +

    +

    +An import path that is a rooted path or that begins with +a . or .. element is interpreted as a file system path and +denotes the package in that directory. +

    +

    +Otherwise, the import path P denotes the package found in +the directory DIR/src/P for some DIR listed in the GOPATH +environment variable (For more details see: 'go help gopath'). +

    +

    +If no import paths are given, the action applies to the +package in the current directory. +

    +

    +There are four reserved names for paths that should not be used +for packages to be built with the go tool: +

    +

    +- "main" denotes the top-level package in a stand-alone executable. +

    +

    +- "all" expands to all package directories found in all the GOPATH +trees. For example, 'go list all' lists all the packages on the local +system. +

    +

    +- "std" is like all but expands to just the packages in the standard +Go library. +

    +

    +- "cmd" expands to the Go repository's commands and their +internal libraries. +

    +

    +Import paths beginning with "cmd/" only match source code in +the Go repository. +

    +

    +An import path is a pattern if it includes one or more "..." wildcards, +each of which can match any string, including the empty string and +strings containing slashes. Such a pattern expands to all package +directories found in the GOPATH trees with names matching the +patterns. As a special case, x/... matches x as well as x's subdirectories. +For example, net/... expands to net and packages in its subdirectories. +

    +

    +An import path can also name a package to be downloaded from +a remote repository. Run 'go help importpath' for details. +

    +

    +Every package in a program must have a unique import path. +By convention, this is arranged by starting each path with a +unique prefix that belongs to you. For example, paths used +internally at Google all begin with 'google', and paths +denoting remote repositories begin with the path to the code, +such as 'github.com/user/repo'. +

    +

    +Packages in a program need not have unique package names, +but there are two reserved package names with special meaning. +The name main indicates a command, not a library. +Commands are built into binaries and cannot be imported. +The name documentation indicates documentation for +a non-Go program in the directory. Files in package documentation +are ignored by the go command. +

    +

    +As a special case, if the package list is a list of .go files from a +single directory, the command is applied to a single synthesized +package made up of exactly those files, ignoring any build constraints +in those files and ignoring any other files in the directory. +

    +

    +Directory and file names that begin with "." or "_" are ignored +by the go tool, as are directories named "testdata". +

    +

    Description of testing flags

    +

    +The 'go test' command takes both flags that apply to 'go test' itself +and flags that apply to the resulting test binary. +

    +

    +Several of the flags control profiling and write an execution profile +suitable for "go tool pprof"; run "go tool pprof -h" for more +information. The --alloc_space, --alloc_objects, and --show_bytes +options of pprof control how the information is presented. +

    +

    +The following flags are recognized by the 'go test' command and +control the execution of any test: +

    +
    -bench regexp
    +    Run (sub)benchmarks matching a regular expression.
    +    The given regular expression is split into smaller ones by
    +    top-level '/', where each must match the corresponding part of a
    +    benchmark's identifier.
    +    By default, no benchmarks run. To run all benchmarks,
    +    use '-bench .' or '-bench=.'.
    +
    +-benchtime t
    +    Run enough iterations of each benchmark to take t, specified
    +    as a time.Duration (for example, -benchtime 1h30s).
    +    The default is 1 second (1s).
    +
    +-count n
    +    Run each test and benchmark n times (default 1).
    +    If -cpu is set, run n times for each GOMAXPROCS value.
    +    Examples are always run once.
    +
    +-cover
    +    Enable coverage analysis.
    +
    +-covermode set,count,atomic
    +    Set the mode for coverage analysis for the package[s]
    +    being tested. The default is "set" unless -race is enabled,
    +    in which case it is "atomic".
    +    The values:
    +	set: bool: does this statement run?
    +	count: int: how many times does this statement run?
    +	atomic: int: count, but correct in multithreaded tests;
    +		significantly more expensive.
    +    Sets -cover.
    +
    +-coverpkg pkg1,pkg2,pkg3
    +    Apply coverage analysis in each test to the given list of packages.
    +    The default is for each test to analyze only the package being tested.
    +    Packages are specified as import paths.
    +    Sets -cover.
    +
    +-cpu 1,2,4
    +    Specify a list of GOMAXPROCS values for which the tests or
    +    benchmarks should be executed.  The default is the current value
    +    of GOMAXPROCS.
    +
    +-parallel n
    +    Allow parallel execution of test functions that call t.Parallel.
    +    The value of this flag is the maximum number of tests to run
    +    simultaneously; by default, it is set to the value of GOMAXPROCS.
    +    Note that -parallel only applies within a single test binary.
    +    The 'go test' command may run tests for different packages
    +    in parallel as well, according to the setting of the -p flag
    +    (see 'go help build').
    +
    +-run regexp
    +    Run only those tests and examples matching the regular expression.
    +    For tests the regular expression is split into smaller ones by
    +    top-level '/', where each must match the corresponding part of a
    +    test's identifier.
    +
    +-short
    +    Tell long-running tests to shorten their run time.
    +    It is off by default but set during all.bash so that installing
    +    the Go tree can run a sanity check but not spend time running
    +    exhaustive tests.
    +
    +-timeout t
    +    If a test runs longer than t, panic.
    +    The default is 10 minutes (10m).
    +
    +-v
    +    Verbose output: log all tests as they are run. Also print all
    +    text from Log and Logf calls even if the test succeeds.
    +
    +

    +The following flags are also recognized by 'go test' and can be used to +profile the tests during execution: +

    +
    -benchmem
    +    Print memory allocation statistics for benchmarks.
    +
    +-blockprofile block.out
    +    Write a goroutine blocking profile to the specified file
    +    when all tests are complete.
    +    Writes test binary as -c would.
    +
    +-blockprofilerate n
    +    Control the detail provided in goroutine blocking profiles by
    +    calling runtime.SetBlockProfileRate with n.
    +    See 'go doc runtime.SetBlockProfileRate'.
    +    The profiler aims to sample, on average, one blocking event every
    +    n nanoseconds the program spends blocked.  By default,
    +    if -test.blockprofile is set without this flag, all blocking events
    +    are recorded, equivalent to -test.blockprofilerate=1.
    +
    +-coverprofile cover.out
    +    Write a coverage profile to the file after all tests have passed.
    +    Sets -cover.
    +
    +-cpuprofile cpu.out
    +    Write a CPU profile to the specified file before exiting.
    +    Writes test binary as -c would.
    +
    +-memprofile mem.out
    +    Write a memory profile to the file after all tests have passed.
    +    Writes test binary as -c would.
    +
    +-memprofilerate n
    +    Enable more precise (and expensive) memory profiles by setting
    +    runtime.MemProfileRate.  See 'go doc runtime.MemProfileRate'.
    +    To profile all memory allocations, use -test.memprofilerate=1
    +    and pass --alloc_space flag to the pprof tool.
    +
    +-mutexprofile mutex.out
    +    Write a mutex contention profile to the specified file
    +    when all tests are complete.
    +    Writes test binary as -c would.
    +
    +-mutexprofilefraction n
    +    Sample 1 in n stack traces of goroutines holding a
    +    contended mutex.
    +
    +-outputdir directory
    +    Place output files from profiling in the specified directory,
    +    by default the directory in which "go test" is running.
    +
    +-trace trace.out
    +    Write an execution trace to the specified file before exiting.
    +
    +

    +Each of these flags is also recognized with an optional 'test.' prefix, +as in -test.v. When invoking the generated test binary (the result of +'go test -c') directly, however, the prefix is mandatory. +

    +

    +The 'go test' command rewrites or removes recognized flags, +as appropriate, both before and after the optional package list, +before invoking the test binary. +

    +

    +For instance, the command +

    +
    go test -v -myflag testdata -cpuprofile=prof.out -x
    +
    +

    +will compile the test binary and then run it as +

    +
    pkg.test -test.v -myflag testdata -test.cpuprofile=prof.out
    +
    +

    +(The -x flag is removed because it applies only to the go command's +execution, not to the test itself.) +

    +

    +The test flags that generate profiles (other than for coverage) also +leave the test binary in pkg.test for use when analyzing the profiles. +

    +

    +When 'go test' runs a test binary, it does so from within the +corresponding package's source code directory. Depending on the test, +it may be necessary to do the same when invoking a generated test +binary directly. +

    +

    +The command-line package list, if present, must appear before any +flag not known to the go test command. Continuing the example above, +the package list would have to appear before -myflag, but could appear +on either side of -v. +

    +

    +To keep an argument for a test binary from being interpreted as a +known flag or a package name, use -args (see 'go help test') which +passes the remainder of the command line through to the test binary +uninterpreted and unaltered. +

    +

    +For instance, the command +

    +
    go test -v -args -x -v
    +
    +

    +will compile the test binary and then run it as +

    +
    pkg.test -test.v -x -v
    +
    +

    +Similarly, +

    +
    go test -args math
    +
    +

    +will compile the test binary and then run it as +

    +
    pkg.test math
    +
    +

    +In the first example, the -x and the second -v are passed through to the +test binary unchanged and with no effect on the go command itself. +In the second example, the argument math is passed through to the test +binary, instead of being interpreted as the package list. +

    +

    Description of testing functions

    +

    +The 'go test' command expects to find test, benchmark, and example functions +in the "*_test.go" files corresponding to the package under test. +

    +

    +A test function is one named TestXXX (where XXX is any alphanumeric string +not starting with a lower case letter) and should have the signature, +

    +
    func TestXXX(t *testing.T) { ... }
    +
    +

    +A benchmark function is one named BenchmarkXXX and should have the signature, +

    +
    func BenchmarkXXX(b *testing.B) { ... }
    +
    +

    +An example function is similar to a test function but, instead of using +*testing.T to report success or failure, prints output to os.Stdout. +If the last comment in the function starts with "Output:" then the output +is compared exactly against the comment (see examples below). If the last +comment begins with "Unordered output:" then the output is compared to the +comment, however the order of the lines is ignored. An example with no such +comment is compiled but not executed. An example with no text after +"Output:" is compiled, executed, and expected to produce no output. +

    +

    +Godoc displays the body of ExampleXXX to demonstrate the use +of the function, constant, or variable XXX. An example of a method M with +receiver type T or *T is named ExampleT_M. There may be multiple examples +for a given function, constant, or variable, distinguished by a trailing _xxx, +where xxx is a suffix not beginning with an upper case letter. +

    +

    +Here is an example of an example: +

    +
    func ExamplePrintln() {
    +	Println("The output of\nthis example.")
    +	// Output: The output of
    +	// this example.
    +}
    +
    +

    +Here is another example where the ordering of the output is ignored: +

    +
    func ExamplePerm() {
    +	for _, value := range Perm(4) {
    +		fmt.Println(value)
    +	}
    +
    +	// Unordered output: 4
    +	// 2
    +	// 1
    +	// 3
    +	// 0
    +}
    +
    +

    +The entire test file is presented as the example when it contains a single +example function, at least one other function, type, variable, or constant +declaration, and no test or benchmark functions. +

    +

    +See the documentation of the testing package for more information. +

    + + + +
    +
    + + + + + + + + +`)) diff --git a/fn/vendor/golang.org/x/net/http2/h2i/h2i.go b/fn/vendor/golang.org/x/net/http2/h2i/h2i.go index 5e9a86787..76c778711 100644 --- a/fn/vendor/golang.org/x/net/http2/h2i/h2i.go +++ b/fn/vendor/golang.org/x/net/http2/h2i/h2i.go @@ -56,8 +56,8 @@ type command struct { } var commands = map[string]command{ - "ping": command{run: (*h2i).cmdPing}, - "settings": command{ + "ping": {run: (*h2i).cmdPing}, + "settings": { run: (*h2i).cmdSettings, complete: func() []string { return []string{ @@ -71,14 +71,13 @@ var commands = map[string]command{ } }, }, - "quit": command{run: (*h2i).cmdQuit}, - "headers": command{run: (*h2i).cmdHeaders}, + "quit": {run: (*h2i).cmdQuit}, + "headers": {run: (*h2i).cmdHeaders}, } func usage() { fmt.Fprintf(os.Stderr, "Usage: h2i \n\n") flag.PrintDefaults() - os.Exit(1) } // withPort adds ":443" if another port isn't already present. @@ -89,6 +88,14 @@ func withPort(host string) string { return host } +// withoutPort strips the port from addr if present. +func withoutPort(addr string) string { + if h, _, err := net.SplitHostPort(addr); err == nil { + return h + } + return addr +} + // h2i is the app's state. type h2i struct { host string @@ -111,6 +118,7 @@ func main() { flag.Parse() if flag.NArg() != 1 { usage() + os.Exit(2) } log.SetFlags(0) @@ -134,7 +142,7 @@ func main() { func (app *h2i) Main() error { cfg := &tls.Config{ - ServerName: app.host, + ServerName: withoutPort(app.host), NextProtos: strings.Split(*flagNextProto, ","), InsecureSkipVerify: *flagInsecure, } @@ -168,7 +176,7 @@ func (app *h2i) Main() error { app.framer = http2.NewFramer(tc, tc) - oldState, err := terminal.MakeRaw(0) + oldState, err := terminal.MakeRaw(int(os.Stdin.Fd())) if err != nil { return err } @@ -238,7 +246,7 @@ func (app *h2i) Main() error { } func (app *h2i) logf(format string, args ...interface{}) { - fmt.Fprintf(app.term, format+"\n", args...) + fmt.Fprintf(app.term, format+"\r\n", args...) 
} func (app *h2i) readConsole() error { @@ -435,9 +443,9 @@ func (app *h2i) readFrames() error { return nil }) case *http2.WindowUpdateFrame: - app.logf(" Window-Increment = %v\n", f.Increment) + app.logf(" Window-Increment = %v", f.Increment) case *http2.GoAwayFrame: - app.logf(" Last-Stream-ID = %d; Error-Code = %v (%d)\n", f.LastStreamID, f.ErrCode, f.ErrCode) + app.logf(" Last-Stream-ID = %d; Error-Code = %v (%d)", f.LastStreamID, f.ErrCode, f.ErrCode) case *http2.DataFrame: app.logf(" %q", f.Data()) case *http2.HeadersFrame: @@ -473,7 +481,7 @@ func (app *h2i) encodeHeaders(req *http.Request) []byte { host = req.URL.Host } - path := req.URL.Path + path := req.RequestURI if path == "" { path = "/" } diff --git a/fn/vendor/golang.org/x/net/http2/hpack/encode.go b/fn/vendor/golang.org/x/net/http2/hpack/encode.go index 80d621cf3..54726c2a3 100644 --- a/fn/vendor/golang.org/x/net/http2/hpack/encode.go +++ b/fn/vendor/golang.org/x/net/http2/hpack/encode.go @@ -39,13 +39,14 @@ func NewEncoder(w io.Writer) *Encoder { tableSizeUpdate: false, w: w, } + e.dynTab.table.init() e.dynTab.setMaxSize(initialHeaderTableSize) return e } // WriteField encodes f into a single Write to e's underlying Writer. // This function may also produce bytes for "Header Table Size Update" -// if necessary. If produced, it is done before encoding f. +// if necessary. If produced, it is done before encoding f. func (e *Encoder) WriteField(f HeaderField) error { e.buf = e.buf[:0] @@ -88,29 +89,17 @@ func (e *Encoder) WriteField(f HeaderField) error { // only name matches, i points to that index and nameValueMatch // becomes false. 
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { - for idx, hf := range staticTable { - if !constantTimeStringCompare(hf.Name, f.Name) { - continue - } - if i == 0 { - i = uint64(idx + 1) - } - if f.Sensitive { - continue - } - if !constantTimeStringCompare(hf.Value, f.Value) { - continue - } - i = uint64(idx + 1) - nameValueMatch = true - return + i, nameValueMatch = staticTable.search(f) + if nameValueMatch { + return i, true } - j, nameValueMatch := e.dynTab.search(f) + j, nameValueMatch := e.dynTab.table.search(f) if nameValueMatch || (i == 0 && j != 0) { - i = j + uint64(len(staticTable)) + return j + uint64(staticTable.len()), nameValueMatch } - return + + return i, false } // SetMaxDynamicTableSize changes the dynamic header table size to v. @@ -144,7 +133,7 @@ func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) { // shouldIndex reports whether f should be indexed. func (e *Encoder) shouldIndex(f HeaderField) bool { - return !f.Sensitive && f.size() <= e.dynTab.maxSize + return !f.Sensitive && f.Size() <= e.dynTab.maxSize } // appendIndexed appends index i, as encoded in "Indexed Header Field" diff --git a/fn/vendor/golang.org/x/net/http2/hpack/encode_test.go b/fn/vendor/golang.org/x/net/http2/hpack/encode_test.go index 92286f3ba..05f12db9c 100644 --- a/fn/vendor/golang.org/x/net/http2/hpack/encode_test.go +++ b/fn/vendor/golang.org/x/net/http2/hpack/encode_test.go @@ -7,6 +7,8 @@ package hpack import ( "bytes" "encoding/hex" + "fmt" + "math/rand" "reflect" "strings" "testing" @@ -101,17 +103,20 @@ func TestEncoderSearchTable(t *testing.T) { wantMatch bool }{ // Name and Value match - {pair("foo", "bar"), uint64(len(staticTable) + 3), true}, - {pair("blake", "miz"), uint64(len(staticTable) + 2), true}, + {pair("foo", "bar"), uint64(staticTable.len()) + 3, true}, + {pair("blake", "miz"), uint64(staticTable.len()) + 2, true}, {pair(":method", "GET"), 2, true}, - // Only name match because Sensitive == true - 
{HeaderField{":method", "GET", true}, 2, false}, + // Only name match because Sensitive == true. This is allowed to match + // any ":method" entry. The current implementation uses the last entry + // added in newStaticTable. + {HeaderField{":method", "GET", true}, 3, false}, // Only Name matches - {pair("foo", "..."), uint64(len(staticTable) + 3), false}, - {pair("blake", "..."), uint64(len(staticTable) + 2), false}, - {pair(":method", "..."), 2, false}, + {pair("foo", "..."), uint64(staticTable.len()) + 3, false}, + {pair("blake", "..."), uint64(staticTable.len()) + 2, false}, + // As before, this is allowed to match any ":method" entry. + {pair(":method", "..."), 3, false}, // None match {pair("foo-", "bar"), 0, false}, @@ -328,3 +333,54 @@ func TestEncoderSetMaxDynamicTableSizeLimit(t *testing.T) { func removeSpace(s string) string { return strings.Replace(s, " ", "", -1) } + +func BenchmarkEncoderSearchTable(b *testing.B) { + e := NewEncoder(nil) + + // A sample of possible header fields. + // This is not based on any actual data from HTTP/2 traces. + var possible []HeaderField + for _, f := range staticTable.ents { + if f.Value == "" { + possible = append(possible, f) + continue + } + // Generate 5 random values, except for cookie and set-cookie, + // which we know can have many values in practice. + num := 5 + if f.Name == "cookie" || f.Name == "set-cookie" { + num = 25 + } + for i := 0; i < num; i++ { + f.Value = fmt.Sprintf("%s-%d", f.Name, i) + possible = append(possible, f) + } + } + for k := 0; k < 10; k++ { + f := HeaderField{ + Name: fmt.Sprintf("x-header-%d", k), + Sensitive: rand.Int()%2 == 0, + } + for i := 0; i < 5; i++ { + f.Value = fmt.Sprintf("%s-%d", f.Name, i) + possible = append(possible, f) + } + } + + // Add a random sample to the dynamic table. This very loosely simulates + // a history of 100 requests with 20 header fields per request. 
+ for r := 0; r < 100*20; r++ { + f := possible[rand.Int31n(int32(len(possible)))] + // Skip if this is in the staticTable verbatim. + if _, has := staticTable.search(f); !has { + e.dynTab.add(f) + } + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + for _, f := range possible { + e.searchTable(f) + } + } +} diff --git a/fn/vendor/golang.org/x/net/http2/hpack/hpack.go b/fn/vendor/golang.org/x/net/http2/hpack/hpack.go index 2ea4949ab..176644acd 100644 --- a/fn/vendor/golang.org/x/net/http2/hpack/hpack.go +++ b/fn/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -41,6 +41,14 @@ type HeaderField struct { Sensitive bool } +// IsPseudo reports whether the header field is an http2 pseudo header. +// That is, it reports whether it starts with a colon. +// It is not otherwise guaranteed to be a valid pseudo header field, +// though. +func (hf HeaderField) IsPseudo() bool { + return len(hf.Name) != 0 && hf.Name[0] == ':' +} + func (hf HeaderField) String() string { var suffix string if hf.Sensitive { @@ -49,10 +57,11 @@ func (hf HeaderField) String() string { return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) } -func (hf *HeaderField) size() uint32 { +// Size returns the size of an entry per RFC 7541 section 4.1. +func (hf HeaderField) Size() uint32 { // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 // "The size of the dynamic table is the sum of the size of - // its entries. The size of an entry is the sum of its name's + // its entries. The size of an entry is the sum of its name's // length in octets (as defined in Section 5.2), its value's // length in octets (see Section 5.2), plus 32. 
The size of // an entry is calculated using the length of the name and @@ -93,6 +102,7 @@ func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decod emit: emitFunc, emitEnabled: true, } + d.dynTab.table.init() d.dynTab.allowedMaxSize = maxDynamicTableSize d.dynTab.setMaxSize(maxDynamicTableSize) return d @@ -145,12 +155,9 @@ func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { } type dynamicTable struct { - // ents is the FIFO described at // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 - // The newest (low index) is append at the end, and items are - // evicted from the front. - ents []HeaderField - size uint32 + table headerFieldTable + size uint32 // in bytes maxSize uint32 // current maxSize allowedMaxSize uint32 // maxSize may go up to this, inclusive } @@ -160,95 +167,45 @@ func (dt *dynamicTable) setMaxSize(v uint32) { dt.evict() } -// TODO: change dynamicTable to be a struct with a slice and a size int field, -// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1: -// -// -// Then make add increment the size. maybe the max size should move from Decoder to -// dynamicTable and add should return an ok bool if there was enough space. -// -// Later we'll need a remove operation on dynamicTable. - func (dt *dynamicTable) add(f HeaderField) { - dt.ents = append(dt.ents, f) - dt.size += f.size() + dt.table.addEntry(f) + dt.size += f.Size() dt.evict() } -// If we're too big, evict old stuff (front of the slice) +// If we're too big, evict old stuff. func (dt *dynamicTable) evict() { - base := dt.ents // keep base pointer of slice - for dt.size > dt.maxSize { - dt.size -= dt.ents[0].size() - dt.ents = dt.ents[1:] + var n int + for dt.size > dt.maxSize && n < dt.table.len() { + dt.size -= dt.table.ents[n].Size() + n++ } - - // Shift slice contents down if we evicted things. 
- if len(dt.ents) != len(base) { - copy(base, dt.ents) - dt.ents = base[:len(dt.ents)] - } -} - -// constantTimeStringCompare compares string a and b in a constant -// time manner. -func constantTimeStringCompare(a, b string) bool { - if len(a) != len(b) { - return false - } - - c := byte(0) - - for i := 0; i < len(a); i++ { - c |= a[i] ^ b[i] - } - - return c == 0 -} - -// Search searches f in the table. The return value i is 0 if there is -// no name match. If there is name match or name/value match, i is the -// index of that entry (1-based). If both name and value match, -// nameValueMatch becomes true. -func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) { - l := len(dt.ents) - for j := l - 1; j >= 0; j-- { - ent := dt.ents[j] - if !constantTimeStringCompare(ent.Name, f.Name) { - continue - } - if i == 0 { - i = uint64(l - j) - } - if f.Sensitive { - continue - } - if !constantTimeStringCompare(ent.Value, f.Value) { - continue - } - i = uint64(l - j) - nameValueMatch = true - return - } - return + dt.table.evictOldest(n) } func (d *Decoder) maxTableIndex() int { - return len(d.dynTab.ents) + len(staticTable) + // This should never overflow. RFC 7540 Section 6.5.2 limits the size of + // the dynamic table to 2^32 bytes, where each entry will occupy more than + // one byte. Further, the staticTable has a fixed, small length. + return d.dynTab.table.len() + staticTable.len() } func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { - if i < 1 { + // See Section 2.3.3. + if i == 0 { return } + if i <= uint64(staticTable.len()) { + return staticTable.ents[i-1], true + } if i > uint64(d.maxTableIndex()) { return } - if i <= uint64(len(staticTable)) { - return staticTable[i-1], true - } - dents := d.dynTab.ents - return dents[len(dents)-(int(i)-len(staticTable))], true + // In the dynamic table, newer entries have lower indices. + // However, dt.ents[0] is the oldest entry. Hence, dt.ents is + // the reversed dynamic table. 
+ dt := d.dynTab.table + return dt.ents[dt.len()-(int(i)-staticTable.len())], true } // Decode decodes an entire block. @@ -298,7 +255,7 @@ func (d *Decoder) Write(p []byte) (n int, err error) { err = d.parseHeaderFieldRepr() if err == errNeedMore { // Extra paranoia, making sure saveBuf won't - // get too large. All the varint and string + // get too large. All the varint and string // reading code earlier should already catch // overlong things and return ErrStringLength, // but keep this as a last resort. diff --git a/fn/vendor/golang.org/x/net/http2/hpack/hpack_test.go b/fn/vendor/golang.org/x/net/http2/hpack/hpack_test.go index 6dc69f957..bc7f47678 100644 --- a/fn/vendor/golang.org/x/net/http2/hpack/hpack_test.go +++ b/fn/vendor/golang.org/x/net/http2/hpack/hpack_test.go @@ -5,117 +5,16 @@ package hpack import ( - "bufio" "bytes" "encoding/hex" "fmt" "math/rand" "reflect" - "regexp" - "strconv" "strings" "testing" "time" ) -func TestStaticTable(t *testing.T) { - fromSpec := ` - +-------+-----------------------------+---------------+ - | 1 | :authority | | - | 2 | :method | GET | - | 3 | :method | POST | - | 4 | :path | / | - | 5 | :path | /index.html | - | 6 | :scheme | http | - | 7 | :scheme | https | - | 8 | :status | 200 | - | 9 | :status | 204 | - | 10 | :status | 206 | - | 11 | :status | 304 | - | 12 | :status | 400 | - | 13 | :status | 404 | - | 14 | :status | 500 | - | 15 | accept-charset | | - | 16 | accept-encoding | gzip, deflate | - | 17 | accept-language | | - | 18 | accept-ranges | | - | 19 | accept | | - | 20 | access-control-allow-origin | | - | 21 | age | | - | 22 | allow | | - | 23 | authorization | | - | 24 | cache-control | | - | 25 | content-disposition | | - | 26 | content-encoding | | - | 27 | content-language | | - | 28 | content-length | | - | 29 | content-location | | - | 30 | content-range | | - | 31 | content-type | | - | 32 | cookie | | - | 33 | date | | - | 34 | etag | | - | 35 | expect | | - | 36 | expires | | - | 37 | from | | - 
| 38 | host | | - | 39 | if-match | | - | 40 | if-modified-since | | - | 41 | if-none-match | | - | 42 | if-range | | - | 43 | if-unmodified-since | | - | 44 | last-modified | | - | 45 | link | | - | 46 | location | | - | 47 | max-forwards | | - | 48 | proxy-authenticate | | - | 49 | proxy-authorization | | - | 50 | range | | - | 51 | referer | | - | 52 | refresh | | - | 53 | retry-after | | - | 54 | server | | - | 55 | set-cookie | | - | 56 | strict-transport-security | | - | 57 | transfer-encoding | | - | 58 | user-agent | | - | 59 | vary | | - | 60 | via | | - | 61 | www-authenticate | | - +-------+-----------------------------+---------------+ -` - bs := bufio.NewScanner(strings.NewReader(fromSpec)) - re := regexp.MustCompile(`\| (\d+)\s+\| (\S+)\s*\| (\S(.*\S)?)?\s+\|`) - for bs.Scan() { - l := bs.Text() - if !strings.Contains(l, "|") { - continue - } - m := re.FindStringSubmatch(l) - if m == nil { - continue - } - i, err := strconv.Atoi(m[1]) - if err != nil { - t.Errorf("Bogus integer on line %q", l) - continue - } - if i < 1 || i > len(staticTable) { - t.Errorf("Bogus index %d on line %q", i, l) - continue - } - if got, want := staticTable[i-1].Name, m[2]; got != want { - t.Errorf("header index %d name = %q; want %q", i, got, want) - } - if got, want := staticTable[i-1].Value, m[3]; got != want { - t.Errorf("header index %d value = %q; want %q", i, got, want) - } - } - if err := bs.Err(); err != nil { - t.Error(err) - } -} - func (d *Decoder) mustAt(idx int) HeaderField { if hf, ok := d.at(uint64(idx)); !ok { panic(fmt.Sprintf("bogus index %d", idx)) @@ -132,10 +31,10 @@ func TestDynamicTableAt(t *testing.T) { } d.dynTab.add(pair("foo", "bar")) d.dynTab.add(pair("blake", "miz")) - if got, want := at(len(staticTable)+1), (pair("blake", "miz")); got != want { + if got, want := at(staticTable.len()+1), (pair("blake", "miz")); got != want { t.Errorf("at(dyn 1) = %v; want %v", got, want) } - if got, want := at(len(staticTable)+2), (pair("foo", "bar")); got != 
want { + if got, want := at(staticTable.len()+2), (pair("foo", "bar")); got != want { t.Errorf("at(dyn 2) = %v; want %v", got, want) } if got, want := at(3), (pair(":method", "POST")); got != want { @@ -143,41 +42,6 @@ func TestDynamicTableAt(t *testing.T) { } } -func TestDynamicTableSearch(t *testing.T) { - dt := dynamicTable{} - dt.setMaxSize(4096) - - dt.add(pair("foo", "bar")) - dt.add(pair("blake", "miz")) - dt.add(pair(":method", "GET")) - - tests := []struct { - hf HeaderField - wantI uint64 - wantMatch bool - }{ - // Name and Value match - {pair("foo", "bar"), 3, true}, - {pair(":method", "GET"), 1, true}, - - // Only name match because of Sensitive == true - {HeaderField{"blake", "miz", true}, 2, false}, - - // Only Name matches - {pair("foo", "..."), 3, false}, - {pair("blake", "..."), 2, false}, - {pair(":method", "..."), 1, false}, - - // None match - {pair("foo-", "bar"), 0, false}, - } - for _, tt := range tests { - if gotI, gotMatch := dt.search(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch { - t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch) - } - } -} - func TestDynamicTableSizeEvict(t *testing.T) { d := NewDecoder(4096, nil) if want := uint32(0); d.dynTab.size != want { @@ -196,7 +60,7 @@ func TestDynamicTableSizeEvict(t *testing.T) { if want := uint32(6 + 32); d.dynTab.size != want { t.Fatalf("after setMaxSize, size = %d; want %d", d.dynTab.size, want) } - if got, want := d.mustAt(len(staticTable)+1), (pair("foo", "bar")); got != want { + if got, want := d.mustAt(staticTable.len()+1), (pair("foo", "bar")); got != want { t.Errorf("at(dyn 1) = %v; want %v", got, want) } add(pair("long", strings.Repeat("x", 500))) @@ -255,9 +119,9 @@ func TestDecoderDecode(t *testing.T) { } func (dt *dynamicTable) reverseCopy() (hf []HeaderField) { - hf = make([]HeaderField, len(dt.ents)) + hf = make([]HeaderField, len(dt.table.ents)) for i := range hf { - hf[i] = dt.ents[len(dt.ents)-1-i] + hf[i] = 
dt.table.ents[len(dt.table.ents)-1-i] } return } @@ -524,6 +388,47 @@ func testDecodeSeries(t *testing.T, size uint32, steps []encAndWant) { } } +func TestHuffmanDecodeExcessPadding(t *testing.T) { + tests := [][]byte{ + {0xff}, // Padding Exceeds 7 bits + {0x1f, 0xff}, // {"a", 1 byte excess padding} + {0x1f, 0xff, 0xff}, // {"a", 2 byte excess padding} + {0x1f, 0xff, 0xff, 0xff}, // {"a", 3 byte excess padding} + {0xff, 0x9f, 0xff, 0xff, 0xff}, // {"a", 29 bit excess padding} + {'R', 0xbc, '0', 0xff, 0xff, 0xff, 0xff}, // Padding ends on partial symbol. + } + for i, in := range tests { + var buf bytes.Buffer + if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman { + t.Errorf("test-%d: decode(%q) = %v; want ErrInvalidHuffman", i, in, err) + } + } +} + +func TestHuffmanDecodeEOS(t *testing.T) { + in := []byte{0xff, 0xff, 0xff, 0xff, 0xfc} // {EOS, "?"} + var buf bytes.Buffer + if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman { + t.Errorf("error = %v; want ErrInvalidHuffman", err) + } +} + +func TestHuffmanDecodeMaxLengthOnTrailingByte(t *testing.T) { + in := []byte{0x00, 0x01} // {"0", "0", "0"} + var buf bytes.Buffer + if err := huffmanDecode(&buf, 2, in); err != ErrStringLength { + t.Errorf("error = %v; want ErrStringLength", err) + } +} + +func TestHuffmanDecodeCorruptPadding(t *testing.T) { + in := []byte{0x00} + var buf bytes.Buffer + if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman { + t.Errorf("error = %v; want ErrInvalidHuffman", err) + } +} + func TestHuffmanDecode(t *testing.T) { tests := []struct { inHex, want string @@ -743,6 +648,10 @@ func TestHuffmanFuzzCrash(t *testing.T) { } } +func pair(name, value string) HeaderField { + return HeaderField{Name: name, Value: value} +} + func dehex(s string) []byte { s = strings.Replace(s, " ", "", -1) s = strings.Replace(s, "\n", "", -1) diff --git a/fn/vendor/golang.org/x/net/http2/hpack/huffman.go b/fn/vendor/golang.org/x/net/http2/hpack/huffman.go index 
eb4b1f05c..8850e3946 100644 --- a/fn/vendor/golang.org/x/net/http2/hpack/huffman.go +++ b/fn/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -48,12 +48,16 @@ var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data") // maxLen bytes will return ErrStringLength. func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { n := rootHuffmanNode - cur, nbits := uint(0), uint8(0) + // cur is the bit buffer that has not been fed into n. + // cbits is the number of low order bits in cur that are valid. + // sbits is the number of bits of the symbol prefix being decoded. + cur, cbits, sbits := uint(0), uint8(0), uint8(0) for _, b := range v { cur = cur<<8 | uint(b) - nbits += 8 - for nbits >= 8 { - idx := byte(cur >> (nbits - 8)) + cbits += 8 + sbits += 8 + for cbits >= 8 { + idx := byte(cur >> (cbits - 8)) n = n.children[idx] if n == nil { return ErrInvalidHuffman @@ -63,22 +67,40 @@ func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { return ErrStringLength } buf.WriteByte(n.sym) - nbits -= n.codeLen + cbits -= n.codeLen n = rootHuffmanNode + sbits = cbits } else { - nbits -= 8 + cbits -= 8 } } } - for nbits > 0 { - n = n.children[byte(cur<<(8-nbits))] - if n.children != nil || n.codeLen > nbits { + for cbits > 0 { + n = n.children[byte(cur<<(8-cbits))] + if n == nil { + return ErrInvalidHuffman + } + if n.children != nil || n.codeLen > cbits { break } + if maxLen != 0 && buf.Len() == maxLen { + return ErrStringLength + } buf.WriteByte(n.sym) - nbits -= n.codeLen + cbits -= n.codeLen n = rootHuffmanNode + sbits = cbits } + if sbits > 7 { + // Either there was an incomplete symbol, or overlong padding. + // Both are decoding errors per RFC 7541 section 5.2. 
+ return ErrInvalidHuffman + } + if mask := uint(1< t.len() { + panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len())) + } + for k := 0; k < n; k++ { + f := t.ents[k] + id := t.evictCount + uint64(k) + 1 + if t.byName[f.Name] == id { + delete(t.byName, f.Name) + } + if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id { + delete(t.byNameValue, p) + } + } + copy(t.ents, t.ents[n:]) + for k := t.len() - n; k < t.len(); k++ { + t.ents[k] = HeaderField{} // so strings can be garbage collected + } + t.ents = t.ents[:t.len()-n] + if t.evictCount+uint64(n) < t.evictCount { + panic("evictCount overflow") + } + t.evictCount += uint64(n) +} + +// search finds f in the table. If there is no match, i is 0. +// If both name and value match, i is the matched index and nameValueMatch +// becomes true. If only name matches, i points to that index and +// nameValueMatch becomes false. +// +// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says +// that index 1 should be the newest entry, but t.ents[0] is the oldest entry, +// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic +// table, the return value i actually refers to the entry t.ents[t.len()-i]. +// +// All tables are assumed to be a dynamic tables except for the global +// staticTable pointer. +// +// See Section 2.3.3. +func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) { + if !f.Sensitive { + if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 { + return t.idToIndex(id), true + } + } + if id := t.byName[f.Name]; id != 0 { + return t.idToIndex(id), false + } + return 0, false +} + +// idToIndex converts a unique id to an HPACK index. +// See Section 2.3.3. 
+func (t *headerFieldTable) idToIndex(id uint64) uint64 { + if id <= t.evictCount { + panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount)) + } + k := id - t.evictCount - 1 // convert id to an index t.ents[k] + if t != staticTable { + return uint64(t.len()) - k // dynamic table + } + return k + 1 } // http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B -var staticTable = [...]HeaderField{ - pair(":authority", ""), // index 1 (1-based) - pair(":method", "GET"), - pair(":method", "POST"), - pair(":path", "/"), - pair(":path", "/index.html"), - pair(":scheme", "http"), - pair(":scheme", "https"), - pair(":status", "200"), - pair(":status", "204"), - pair(":status", "206"), - pair(":status", "304"), - pair(":status", "400"), - pair(":status", "404"), - pair(":status", "500"), - pair("accept-charset", ""), - pair("accept-encoding", "gzip, deflate"), - pair("accept-language", ""), - pair("accept-ranges", ""), - pair("accept", ""), - pair("access-control-allow-origin", ""), - pair("age", ""), - pair("allow", ""), - pair("authorization", ""), - pair("cache-control", ""), - pair("content-disposition", ""), - pair("content-encoding", ""), - pair("content-language", ""), - pair("content-length", ""), - pair("content-location", ""), - pair("content-range", ""), - pair("content-type", ""), - pair("cookie", ""), - pair("date", ""), - pair("etag", ""), - pair("expect", ""), - pair("expires", ""), - pair("from", ""), - pair("host", ""), - pair("if-match", ""), - pair("if-modified-since", ""), - pair("if-none-match", ""), - pair("if-range", ""), - pair("if-unmodified-since", ""), - pair("last-modified", ""), - pair("link", ""), - pair("location", ""), - pair("max-forwards", ""), - pair("proxy-authenticate", ""), - pair("proxy-authorization", ""), - pair("range", ""), - pair("referer", ""), - pair("refresh", ""), - pair("retry-after", ""), - pair("server", ""), - pair("set-cookie", ""), - pair("strict-transport-security", ""), - 
pair("transfer-encoding", ""), - pair("user-agent", ""), - pair("vary", ""), - pair("via", ""), - pair("www-authenticate", ""), +var staticTable = newStaticTable() +var staticTableEntries = [...]HeaderField{ + {Name: ":authority"}, + {Name: ":method", Value: "GET"}, + {Name: ":method", Value: "POST"}, + {Name: ":path", Value: "/"}, + {Name: ":path", Value: "/index.html"}, + {Name: ":scheme", Value: "http"}, + {Name: ":scheme", Value: "https"}, + {Name: ":status", Value: "200"}, + {Name: ":status", Value: "204"}, + {Name: ":status", Value: "206"}, + {Name: ":status", Value: "304"}, + {Name: ":status", Value: "400"}, + {Name: ":status", Value: "404"}, + {Name: ":status", Value: "500"}, + {Name: "accept-charset"}, + {Name: "accept-encoding", Value: "gzip, deflate"}, + {Name: "accept-language"}, + {Name: "accept-ranges"}, + {Name: "accept"}, + {Name: "access-control-allow-origin"}, + {Name: "age"}, + {Name: "allow"}, + {Name: "authorization"}, + {Name: "cache-control"}, + {Name: "content-disposition"}, + {Name: "content-encoding"}, + {Name: "content-language"}, + {Name: "content-length"}, + {Name: "content-location"}, + {Name: "content-range"}, + {Name: "content-type"}, + {Name: "cookie"}, + {Name: "date"}, + {Name: "etag"}, + {Name: "expect"}, + {Name: "expires"}, + {Name: "from"}, + {Name: "host"}, + {Name: "if-match"}, + {Name: "if-modified-since"}, + {Name: "if-none-match"}, + {Name: "if-range"}, + {Name: "if-unmodified-since"}, + {Name: "last-modified"}, + {Name: "link"}, + {Name: "location"}, + {Name: "max-forwards"}, + {Name: "proxy-authenticate"}, + {Name: "proxy-authorization"}, + {Name: "range"}, + {Name: "referer"}, + {Name: "refresh"}, + {Name: "retry-after"}, + {Name: "server"}, + {Name: "set-cookie"}, + {Name: "strict-transport-security"}, + {Name: "transfer-encoding"}, + {Name: "user-agent"}, + {Name: "vary"}, + {Name: "via"}, + {Name: "www-authenticate"}, +} + +func newStaticTable() *headerFieldTable { + t := &headerFieldTable{} + t.init() + for _, e := 
range staticTableEntries[:] { + t.addEntry(e) + } + return t } var huffmanCodes = [256]uint32{ diff --git a/fn/vendor/golang.org/x/net/http2/hpack/tables_test.go b/fn/vendor/golang.org/x/net/http2/hpack/tables_test.go new file mode 100644 index 000000000..d963f3635 --- /dev/null +++ b/fn/vendor/golang.org/x/net/http2/hpack/tables_test.go @@ -0,0 +1,214 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "bufio" + "regexp" + "strconv" + "strings" + "testing" +) + +func TestHeaderFieldTable(t *testing.T) { + table := &headerFieldTable{} + table.init() + table.addEntry(pair("key1", "value1-1")) + table.addEntry(pair("key2", "value2-1")) + table.addEntry(pair("key1", "value1-2")) + table.addEntry(pair("key3", "value3-1")) + table.addEntry(pair("key4", "value4-1")) + table.addEntry(pair("key2", "value2-2")) + + // Tests will be run twice: once before evicting anything, and + // again after evicting the three oldest entries. + tests := []struct { + f HeaderField + beforeWantStaticI uint64 + beforeWantMatch bool + afterWantStaticI uint64 + afterWantMatch bool + }{ + {HeaderField{"key1", "value1-1", false}, 1, true, 0, false}, + {HeaderField{"key1", "value1-2", false}, 3, true, 0, false}, + {HeaderField{"key1", "value1-3", false}, 3, false, 0, false}, + {HeaderField{"key2", "value2-1", false}, 2, true, 3, false}, + {HeaderField{"key2", "value2-2", false}, 6, true, 3, true}, + {HeaderField{"key2", "value2-3", false}, 6, false, 3, false}, + {HeaderField{"key4", "value4-1", false}, 5, true, 2, true}, + // Name match only, because sensitive. + {HeaderField{"key4", "value4-1", true}, 5, false, 2, false}, + // Key not found. 
+ {HeaderField{"key5", "value5-x", false}, 0, false, 0, false}, + } + + staticToDynamic := func(i uint64) uint64 { + if i == 0 { + return 0 + } + return uint64(table.len()) - i + 1 // dynamic is the reversed table + } + + searchStatic := func(f HeaderField) (uint64, bool) { + old := staticTable + staticTable = table + defer func() { staticTable = old }() + return staticTable.search(f) + } + + searchDynamic := func(f HeaderField) (uint64, bool) { + return table.search(f) + } + + for _, test := range tests { + gotI, gotMatch := searchStatic(test.f) + if wantI, wantMatch := test.beforeWantStaticI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("before evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + gotI, gotMatch = searchDynamic(test.f) + wantDynamicI := staticToDynamic(test.beforeWantStaticI) + if wantI, wantMatch := wantDynamicI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("before evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + } + + table.evictOldest(3) + + for _, test := range tests { + gotI, gotMatch := searchStatic(test.f) + if wantI, wantMatch := test.afterWantStaticI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("after evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + gotI, gotMatch = searchDynamic(test.f) + wantDynamicI := staticToDynamic(test.afterWantStaticI) + if wantI, wantMatch := wantDynamicI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("after evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + } +} + +func TestHeaderFieldTable_LookupMapEviction(t *testing.T) { + table := &headerFieldTable{} + table.init() + table.addEntry(pair("key1", "value1-1")) + table.addEntry(pair("key2", "value2-1")) + table.addEntry(pair("key1", "value1-2")) + 
table.addEntry(pair("key3", "value3-1")) + table.addEntry(pair("key4", "value4-1")) + table.addEntry(pair("key2", "value2-2")) + + // evict all pairs + table.evictOldest(table.len()) + + if l := table.len(); l > 0 { + t.Errorf("table.len() = %d, want 0", l) + } + + if l := len(table.byName); l > 0 { + t.Errorf("len(table.byName) = %d, want 0", l) + } + + if l := len(table.byNameValue); l > 0 { + t.Errorf("len(table.byNameValue) = %d, want 0", l) + } +} + +func TestStaticTable(t *testing.T) { + fromSpec := ` + +-------+-----------------------------+---------------+ + | 1 | :authority | | + | 2 | :method | GET | + | 3 | :method | POST | + | 4 | :path | / | + | 5 | :path | /index.html | + | 6 | :scheme | http | + | 7 | :scheme | https | + | 8 | :status | 200 | + | 9 | :status | 204 | + | 10 | :status | 206 | + | 11 | :status | 304 | + | 12 | :status | 400 | + | 13 | :status | 404 | + | 14 | :status | 500 | + | 15 | accept-charset | | + | 16 | accept-encoding | gzip, deflate | + | 17 | accept-language | | + | 18 | accept-ranges | | + | 19 | accept | | + | 20 | access-control-allow-origin | | + | 21 | age | | + | 22 | allow | | + | 23 | authorization | | + | 24 | cache-control | | + | 25 | content-disposition | | + | 26 | content-encoding | | + | 27 | content-language | | + | 28 | content-length | | + | 29 | content-location | | + | 30 | content-range | | + | 31 | content-type | | + | 32 | cookie | | + | 33 | date | | + | 34 | etag | | + | 35 | expect | | + | 36 | expires | | + | 37 | from | | + | 38 | host | | + | 39 | if-match | | + | 40 | if-modified-since | | + | 41 | if-none-match | | + | 42 | if-range | | + | 43 | if-unmodified-since | | + | 44 | last-modified | | + | 45 | link | | + | 46 | location | | + | 47 | max-forwards | | + | 48 | proxy-authenticate | | + | 49 | proxy-authorization | | + | 50 | range | | + | 51 | referer | | + | 52 | refresh | | + | 53 | retry-after | | + | 54 | server | | + | 55 | set-cookie | | + | 56 | strict-transport-security | | + | 
57 | transfer-encoding | | + | 58 | user-agent | | + | 59 | vary | | + | 60 | via | | + | 61 | www-authenticate | | + +-------+-----------------------------+---------------+ +` + bs := bufio.NewScanner(strings.NewReader(fromSpec)) + re := regexp.MustCompile(`\| (\d+)\s+\| (\S+)\s*\| (\S(.*\S)?)?\s+\|`) + for bs.Scan() { + l := bs.Text() + if !strings.Contains(l, "|") { + continue + } + m := re.FindStringSubmatch(l) + if m == nil { + continue + } + i, err := strconv.Atoi(m[1]) + if err != nil { + t.Errorf("Bogus integer on line %q", l) + continue + } + if i < 1 || i > staticTable.len() { + t.Errorf("Bogus index %d on line %q", i, l) + continue + } + if got, want := staticTable.ents[i-1].Name, m[2]; got != want { + t.Errorf("header index %d name = %q; want %q", i, got, want) + } + if got, want := staticTable.ents[i-1].Value, m[3]; got != want { + t.Errorf("header index %d value = %q; want %q", i, got, want) + } + } + if err := bs.Err(); err != nil { + t.Error(err) + } +} diff --git a/fn/vendor/golang.org/x/net/http2/http2.go b/fn/vendor/golang.org/x/net/http2/http2.go index 3ed51a44e..d565f40e0 100644 --- a/fn/vendor/golang.org/x/net/http2/http2.go +++ b/fn/vendor/golang.org/x/net/http2/http2.go @@ -13,24 +13,30 @@ // See https://http2.github.io/ for more information on HTTP/2. // // See https://http2.golang.org/ for a test server running this code. -package http2 +// +package http2 // import "golang.org/x/net/http2" import ( "bufio" + "crypto/tls" "errors" "fmt" "io" "net/http" "os" + "sort" "strconv" "strings" "sync" + + "golang.org/x/net/lex/httplex" ) var ( VerboseLogs bool logFrameWrites bool logFrameReads bool + inTests bool ) func init() { @@ -72,13 +78,23 @@ var ( type streamState int +// HTTP/2 stream states. +// +// See http://tools.ietf.org/html/rfc7540#section-5.1. +// +// For simplicity, the server code merges "reserved (local)" into +// "half-closed (remote)". This is one less state transition to track. 
+// The only downside is that we send PUSH_PROMISEs slightly less +// liberally than allowable. More discussion here: +// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html +// +// "reserved (remote)" is omitted since the client code does not +// support server push. const ( stateIdle streamState = iota stateOpen stateHalfClosedLocal stateHalfClosedRemote - stateResvLocal - stateResvRemote stateClosed ) @@ -87,8 +103,6 @@ var stateName = [...]string{ stateOpen: "Open", stateHalfClosedLocal: "HalfClosedLocal", stateHalfClosedRemote: "HalfClosedRemote", - stateResvLocal: "ResvLocal", - stateResvRemote: "ResvRemote", stateClosed: "Closed", } @@ -164,57 +178,23 @@ var ( errInvalidHeaderFieldValue = errors.New("http2: invalid header field value") ) -// validHeaderFieldName reports whether v is a valid header field name (key). -// RFC 7230 says: -// header-field = field-name ":" OWS field-value OWS -// field-name = token -// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / -// "^" / "_" / " +// validWireHeaderFieldName reports whether v is a valid header field +// name (key). See httplex.ValidHeaderName for the base rules. +// // Further, http2 says: // "Just as in HTTP/1.x, header field names are strings of ASCII // characters that are compared in a case-insensitive // fashion. However, header field names MUST be converted to // lowercase prior to their encoding in HTTP/2. " -func validHeaderFieldName(v string) bool { +func validWireHeaderFieldName(v string) bool { if len(v) == 0 { return false } for _, r := range v { - if int(r) >= len(isTokenTable) || ('A' <= r && r <= 'Z') { + if !httplex.IsTokenRune(r) { return false } - if !isTokenTable[byte(r)] { - return false - } - } - return true -} - -// validHeaderFieldValue reports whether v is a valid header field value. 
-// -// RFC 7230 says: -// field-value = *( field-content / obs-fold ) -// obj-fold = N/A to http2, and deprecated -// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] -// field-vchar = VCHAR / obs-text -// obs-text = %x80-FF -// VCHAR = "any visible [USASCII] character" -// -// http2 further says: "Similarly, HTTP/2 allows header field values -// that are not valid. While most of the values that can be encoded -// will not alter header field parsing, carriage return (CR, ASCII -// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII -// 0x0) might be exploited by an attacker if they are translated -// verbatim. Any request or response that contains a character not -// permitted in a header field value MUST be treated as malformed -// (Section 8.1.2.6). Valid characters are defined by the -// field-content ABNF rule in Section 3.2 of [RFC7230]." -// -// This function does not (yet?) properly handle the rejection of -// strings that begin or end with SP or HTAB. -func validHeaderFieldValue(v string) bool { - for i := 0; i < len(v); i++ { - if b := v[i]; b < ' ' && b != '\t' || b == 0x7f { + if 'A' <= r && r <= 'Z' { return false } } @@ -282,14 +262,27 @@ func newBufferedWriter(w io.Writer) *bufferedWriter { return &bufferedWriter{w: w} } +// bufWriterPoolBufferSize is the size of bufio.Writer's +// buffers created using bufWriterPool. +// +// TODO: pick a less arbitrary value? this is a bit under +// (3 x typical 1500 byte MTU) at least. Other than that, +// not much thought went into it. +const bufWriterPoolBufferSize = 4 << 10 + var bufWriterPool = sync.Pool{ New: func() interface{} { - // TODO: pick something better? this is a bit under - // (3 x typical 1500 byte MTU) at least. 
- return bufio.NewWriterSize(nil, 4<<10) + return bufio.NewWriterSize(nil, bufWriterPoolBufferSize) }, } +func (w *bufferedWriter) Available() int { + if w.bw == nil { + return bufWriterPoolBufferSize + } + return w.bw.Available() +} + func (w *bufferedWriter) Write(p []byte) (n int, err error) { if w.bw == nil { bw := bufWriterPool.Get().(*bufio.Writer) @@ -319,7 +312,7 @@ func mustUint31(v int32) uint32 { } // bodyAllowedForStatus reports whether a given response status code -// permits a body. See RFC2616, section 4.4. +// permits a body. See RFC 2616, section 4.4. func bodyAllowedForStatus(status int) bool { switch { case status >= 100 && status <= 199: @@ -343,82 +336,56 @@ func (e *httpError) Temporary() bool { return true } var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true} -var isTokenTable = [127]bool{ - '!': true, - '#': true, - '$': true, - '%': true, - '&': true, - '\'': true, - '*': true, - '+': true, - '-': true, - '.': true, - '0': true, - '1': true, - '2': true, - '3': true, - '4': true, - '5': true, - '6': true, - '7': true, - '8': true, - '9': true, - 'A': true, - 'B': true, - 'C': true, - 'D': true, - 'E': true, - 'F': true, - 'G': true, - 'H': true, - 'I': true, - 'J': true, - 'K': true, - 'L': true, - 'M': true, - 'N': true, - 'O': true, - 'P': true, - 'Q': true, - 'R': true, - 'S': true, - 'T': true, - 'U': true, - 'W': true, - 'V': true, - 'X': true, - 'Y': true, - 'Z': true, - '^': true, - '_': true, - '`': true, - 'a': true, - 'b': true, - 'c': true, - 'd': true, - 'e': true, - 'f': true, - 'g': true, - 'h': true, - 'i': true, - 'j': true, - 'k': true, - 'l': true, - 'm': true, - 'n': true, - 'o': true, - 'p': true, - 'q': true, - 'r': true, - 's': true, - 't': true, - 'u': true, - 'v': true, - 'w': true, - 'x': true, - 'y': true, - 'z': true, - '|': true, - '~': true, +type connectionStater interface { + ConnectionState() tls.ConnectionState +} + +var sorterPool = sync.Pool{New: func() 
interface{} { return new(sorter) }} + +type sorter struct { + v []string // owned by sorter +} + +func (s *sorter) Len() int { return len(s.v) } +func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } +func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } + +// Keys returns the sorted keys of h. +// +// The returned slice is only valid until s used again or returned to +// its pool. +func (s *sorter) Keys(h http.Header) []string { + keys := s.v[:0] + for k := range h { + keys = append(keys, k) + } + s.v = keys + sort.Sort(s) + return keys +} + +func (s *sorter) SortStrings(ss []string) { + // Our sorter works on s.v, which sorter owns, so + // stash it away while we sort the user's buffer. + save := s.v + s.v = ss + sort.Sort(s) + s.v = save +} + +// validPseudoPath reports whether v is a valid :path pseudo-header +// value. It must be either: +// +// *) a non-empty string starting with '/' +// *) the string '*', for OPTIONS requests. +// +// For now this is only used a quick check for deciding when to clean +// up Opaque URLs before sending requests from the Transport. +// See golang.org/issue/16847 +// +// We used to enforce that the path also didn't start with "//", but +// Google's GFE accepts such paths and Chrome sends them, so ignore +// that part of the spec. See golang.org/issue/19103. 
+func validPseudoPath(v string) bool { + return (len(v) > 0 && v[0] == '/') || v == "*" } diff --git a/fn/vendor/golang.org/x/net/http2/http2_test.go b/fn/vendor/golang.org/x/net/http2/http2_test.go index 0a4da46a0..524877647 100644 --- a/fn/vendor/golang.org/x/net/http2/http2_test.go +++ b/fn/vendor/golang.org/x/net/http2/http2_test.go @@ -27,8 +27,9 @@ func condSkipFailingTest(t *testing.T) { } func init() { + inTests = true DebugGoroutines = true - flag.BoolVar(&VerboseLogs, "verboseh2", false, "Verbose HTTP/2 debug logging") + flag.BoolVar(&VerboseLogs, "verboseh2", VerboseLogs, "Verbose HTTP/2 debug logging") } func TestSettingString(t *testing.T) { @@ -65,7 +66,7 @@ func (w twriter) Write(p []byte) (n int, err error) { return len(p), nil } -// like encodeHeader, but don't add implicit psuedo headers. +// like encodeHeader, but don't add implicit pseudo headers. func encodeHeaderNoImplicit(t *testing.T, headers ...string) []byte { var buf bytes.Buffer enc := hpack.NewEncoder(&buf) @@ -172,3 +173,27 @@ func cleanDate(res *http.Response) { d[0] = "XXX" } } + +func TestSorterPoolAllocs(t *testing.T) { + ss := []string{"a", "b", "c"} + h := http.Header{ + "a": nil, + "b": nil, + "c": nil, + } + sorter := new(sorter) + + if allocs := testing.AllocsPerRun(100, func() { + sorter.SortStrings(ss) + }); allocs >= 1 { + t.Logf("SortStrings allocs = %v; want <1", allocs) + } + + if allocs := testing.AllocsPerRun(5, func() { + if len(sorter.Keys(h)) != 3 { + t.Fatal("wrong result") + } + }); allocs > 0 { + t.Logf("Keys allocs = %v; want <1", allocs) + } +} diff --git a/fn/vendor/golang.org/x/net/http2/not_go16.go b/fn/vendor/golang.org/x/net/http2/not_go16.go index db53c5b8c..508cebcc4 100644 --- a/fn/vendor/golang.org/x/net/http2/not_go16.go +++ b/fn/vendor/golang.org/x/net/http2/not_go16.go @@ -6,8 +6,16 @@ package http2 -import "net/http" +import ( + "net/http" + "time" +) func configureTransport(t1 *http.Transport) (*Transport, error) { return nil, errTransportVersion 
} + +func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { + return 0 + +} diff --git a/fn/vendor/golang.org/x/net/http2/not_go17.go b/fn/vendor/golang.org/x/net/http2/not_go17.go new file mode 100644 index 000000000..140434a79 --- /dev/null +++ b/fn/vendor/golang.org/x/net/http2/not_go17.go @@ -0,0 +1,87 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package http2 + +import ( + "crypto/tls" + "net" + "net/http" + "time" +) + +type contextContext interface { + Done() <-chan struct{} + Err() error +} + +type fakeContext struct{} + +func (fakeContext) Done() <-chan struct{} { return nil } +func (fakeContext) Err() error { panic("should not be called") } + +func reqContext(r *http.Request) fakeContext { + return fakeContext{} +} + +func setResponseUncompressed(res *http.Response) { + // Nothing. +} + +type clientTrace struct{} + +func requestTrace(*http.Request) *clientTrace { return nil } +func traceGotConn(*http.Request, *ClientConn) {} +func traceFirstResponseByte(*clientTrace) {} +func traceWroteHeaders(*clientTrace) {} +func traceWroteRequest(*clientTrace, error) {} +func traceGot100Continue(trace *clientTrace) {} +func traceWait100Continue(trace *clientTrace) {} + +func nop() {} + +func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { + return nil, nop +} + +func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { + return ctx, nop +} + +func requestWithContext(req *http.Request, ctx contextContext) *http.Request { + return req +} + +// temporary copy of Go 1.6's private tls.Config.clone: +func cloneTLSConfig(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: 
c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + } +} + +func (cc *ClientConn) Ping(ctx contextContext) error { + return cc.ping(ctx) +} + +func (t *Transport) idleConnTimeout() time.Duration { return 0 } diff --git a/fn/vendor/golang.org/x/net/http2/not_go18.go b/fn/vendor/golang.org/x/net/http2/not_go18.go new file mode 100644 index 000000000..6f8d3f86f --- /dev/null +++ b/fn/vendor/golang.org/x/net/http2/not_go18.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8 + +package http2 + +import ( + "io" + "net/http" +) + +func configureServer18(h1 *http.Server, h2 *Server) error { + // No IdleTimeout to sync prior to Go 1.8. + return nil +} + +func shouldLogPanic(panicValue interface{}) bool { + return panicValue != nil +} + +func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { + return nil +} + +func reqBodyIsNoBody(io.ReadCloser) bool { return false } + +func go18httpNoBody() io.ReadCloser { return nil } // for tests only diff --git a/fn/vendor/golang.org/x/net/http2/not_go19.go b/fn/vendor/golang.org/x/net/http2/not_go19.go new file mode 100644 index 000000000..5ae07726b --- /dev/null +++ b/fn/vendor/golang.org/x/net/http2/not_go19.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.9 + +package http2 + +import ( + "net/http" +) + +func configureServer19(s *http.Server, conf *Server) error { + // not supported prior to go1.9 + return nil +} diff --git a/fn/vendor/golang.org/x/net/http2/pipe.go b/fn/vendor/golang.org/x/net/http2/pipe.go index 69446e7a3..a6140099c 100644 --- a/fn/vendor/golang.org/x/net/http2/pipe.go +++ b/fn/vendor/golang.org/x/net/http2/pipe.go @@ -10,13 +10,13 @@ import ( "sync" ) -// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like +// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like // io.Pipe except there are no PipeReader/PipeWriter halves, and the // underlying buffer is an interface. (io.Pipe is always unbuffered) type pipe struct { mu sync.Mutex - c sync.Cond // c.L lazily initialized to &p.mu - b pipeBuffer + c sync.Cond // c.L lazily initialized to &p.mu + b pipeBuffer // nil when done reading err error // read error once empty. non-nil means closed. breakErr error // immediate read error (caller doesn't see rest of b) donec chan struct{} // closed on error @@ -29,6 +29,15 @@ type pipeBuffer interface { io.Reader } +func (p *pipe) Len() int { + p.mu.Lock() + defer p.mu.Unlock() + if p.b == nil { + return 0 + } + return p.b.Len() +} + // Read waits until data is available and copies bytes // from the buffer into p. func (p *pipe) Read(d []byte) (n int, err error) { @@ -41,7 +50,7 @@ func (p *pipe) Read(d []byte) (n int, err error) { if p.breakErr != nil { return 0, p.breakErr } - if p.b.Len() > 0 { + if p.b != nil && p.b.Len() > 0 { return p.b.Read(d) } if p.err != nil { @@ -49,6 +58,7 @@ func (p *pipe) Read(d []byte) (n int, err error) { p.readFn() // e.g. 
copy trailers p.readFn = nil // not sticky like p.err } + p.b = nil return 0, p.err } p.c.Wait() @@ -69,6 +79,9 @@ func (p *pipe) Write(d []byte) (n int, err error) { if p.err != nil { return 0, errClosedPipeWrite } + if p.breakErr != nil { + return len(d), nil // discard when there is no reader + } return p.b.Write(d) } @@ -103,6 +116,9 @@ func (p *pipe) closeWithError(dst *error, err error, fn func()) { return } p.readFn = fn + if dst == &p.breakErr { + p.b = nil + } *dst = err p.closeDoneLocked() } diff --git a/fn/vendor/golang.org/x/net/http2/pipe_test.go b/fn/vendor/golang.org/x/net/http2/pipe_test.go index 763229999..1bf351ff6 100644 --- a/fn/vendor/golang.org/x/net/http2/pipe_test.go +++ b/fn/vendor/golang.org/x/net/http2/pipe_test.go @@ -92,6 +92,13 @@ func TestPipeCloseWithError(t *testing.T) { if err != a { t.Logf("read error = %v, %v", err, a) } + // Read and Write should fail. + if n, err := p.Write([]byte("abc")); err != errClosedPipeWrite || n != 0 { + t.Errorf("Write(abc) after close\ngot %v, %v\nwant 0, %v", n, err, errClosedPipeWrite) + } + if n, err := p.Read(make([]byte, 1)); err == nil || n != 0 { + t.Errorf("Read() after close\ngot %v, nil\nwant 0, %v", n, errClosedPipeWrite) + } } func TestPipeBreakWithError(t *testing.T) { @@ -106,4 +113,18 @@ func TestPipeBreakWithError(t *testing.T) { if err != a { t.Logf("read error = %v, %v", err, a) } + if p.b != nil { + t.Errorf("buffer should be nil after BreakWithError") + } + // Write should succeed silently. + if n, err := p.Write([]byte("abc")); err != nil || n != 3 { + t.Errorf("Write(abc) after break\ngot %v, %v\nwant 0, nil", n, err) + } + if p.b != nil { + t.Errorf("buffer should be nil after Write") + } + // Read should fail. 
+ if n, err := p.Read(make([]byte, 1)); err == nil || n != 0 { + t.Errorf("Read() after close\ngot %v, nil\nwant 0, not nil", n) + } } diff --git a/fn/vendor/golang.org/x/net/http2/priority_test.go b/fn/vendor/golang.org/x/net/http2/priority_test.go deleted file mode 100644 index a3fe2bb49..000000000 --- a/fn/vendor/golang.org/x/net/http2/priority_test.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "testing" -) - -func TestPriority(t *testing.T) { - // A -> B - // move A's parent to B - streams := make(map[uint32]*stream) - a := &stream{ - parent: nil, - weight: 16, - } - streams[1] = a - b := &stream{ - parent: a, - weight: 16, - } - streams[2] = b - adjustStreamPriority(streams, 1, PriorityParam{ - Weight: 20, - StreamDep: 2, - }) - if a.parent != b { - t.Errorf("Expected A's parent to be B") - } - if a.weight != 20 { - t.Errorf("Expected A's weight to be 20; got %d", a.weight) - } - if b.parent != nil { - t.Errorf("Expected B to have no parent") - } - if b.weight != 16 { - t.Errorf("Expected B's weight to be 16; got %d", b.weight) - } -} - -func TestPriorityExclusiveZero(t *testing.T) { - // A B and C are all children of the 0 stream. 
- // Exclusive reprioritization to any of the streams - // should bring the rest of the streams under the - // reprioritized stream - streams := make(map[uint32]*stream) - a := &stream{ - parent: nil, - weight: 16, - } - streams[1] = a - b := &stream{ - parent: nil, - weight: 16, - } - streams[2] = b - c := &stream{ - parent: nil, - weight: 16, - } - streams[3] = c - adjustStreamPriority(streams, 3, PriorityParam{ - Weight: 20, - StreamDep: 0, - Exclusive: true, - }) - if a.parent != c { - t.Errorf("Expected A's parent to be C") - } - if a.weight != 16 { - t.Errorf("Expected A's weight to be 16; got %d", a.weight) - } - if b.parent != c { - t.Errorf("Expected B's parent to be C") - } - if b.weight != 16 { - t.Errorf("Expected B's weight to be 16; got %d", b.weight) - } - if c.parent != nil { - t.Errorf("Expected C to have no parent") - } - if c.weight != 20 { - t.Errorf("Expected C's weight to be 20; got %d", b.weight) - } -} - -func TestPriorityOwnParent(t *testing.T) { - streams := make(map[uint32]*stream) - a := &stream{ - parent: nil, - weight: 16, - } - streams[1] = a - b := &stream{ - parent: a, - weight: 16, - } - streams[2] = b - adjustStreamPriority(streams, 1, PriorityParam{ - Weight: 20, - StreamDep: 1, - }) - if a.parent != nil { - t.Errorf("Expected A's parent to be nil") - } - if a.weight != 20 { - t.Errorf("Expected A's weight to be 20; got %d", a.weight) - } - if b.parent != a { - t.Errorf("Expected B's parent to be A") - } - if b.weight != 16 { - t.Errorf("Expected B's weight to be 16; got %d", b.weight) - } - -} diff --git a/fn/vendor/golang.org/x/net/http2/server.go b/fn/vendor/golang.org/x/net/http2/server.go index ff01dc97f..eae143ddf 100644 --- a/fn/vendor/golang.org/x/net/http2/server.go +++ b/fn/vendor/golang.org/x/net/http2/server.go @@ -2,17 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// TODO: replace all <-sc.doneServing with reads from the stream's cw -// instead, and make sure that on close we close all open -// streams. then remove doneServing? - -// TODO: re-audit GOAWAY support. Consider each incoming frame type and -// whether it should be ignored during graceful shutdown. - -// TODO: disconnect idle clients. GFE seems to do 4 minutes. make -// configurable? or maximum number of idle clients and remove the -// oldest? - // TODO: turn off the serve goroutine when idle, so // an idle conn only has the readFrames goroutine active. (which could // also be optimized probably to pin less memory in crypto/tls). This @@ -44,10 +33,13 @@ import ( "fmt" "io" "log" + "math" "net" "net/http" "net/textproto" "net/url" + "os" + "reflect" "runtime" "strconv" "strings" @@ -112,6 +104,47 @@ type Server struct { // PermitProhibitedCipherSuites, if true, permits the use of // cipher suites prohibited by the HTTP/2 spec. PermitProhibitedCipherSuites bool + + // IdleTimeout specifies how long until idle clients should be + // closed with a GOAWAY frame. PING frames are not considered + // activity for the purposes of IdleTimeout. + IdleTimeout time.Duration + + // MaxUploadBufferPerConnection is the size of the initial flow + // control window for each connections. The HTTP/2 spec does not + // allow this to be smaller than 65535 or larger than 2^32-1. + // If the value is outside this range, a default value will be + // used instead. + MaxUploadBufferPerConnection int32 + + // MaxUploadBufferPerStream is the size of the initial flow control + // window for each stream. The HTTP/2 spec does not allow this to + // be larger than 2^32-1. If the value is zero or larger than the + // maximum, a default value will be used instead. + MaxUploadBufferPerStream int32 + + // NewWriteScheduler constructs a write scheduler for a connection. + // If nil, a default scheduler is chosen. + NewWriteScheduler func() WriteScheduler + + // Internal state. 
This is a pointer (rather than embedded directly) + // so that we don't embed a Mutex in this struct, which will make the + // struct non-copyable, which might break some callers. + state *serverInternalState +} + +func (s *Server) initialConnRecvWindowSize() int32 { + if s.MaxUploadBufferPerConnection > initialWindowSize { + return s.MaxUploadBufferPerConnection + } + return 1 << 20 +} + +func (s *Server) initialStreamRecvWindowSize() int32 { + if s.MaxUploadBufferPerStream > 0 { + return s.MaxUploadBufferPerStream + } + return 1 << 20 } func (s *Server) maxReadFrameSize() uint32 { @@ -128,15 +161,59 @@ func (s *Server) maxConcurrentStreams() uint32 { return defaultMaxStreams } +type serverInternalState struct { + mu sync.Mutex + activeConns map[*serverConn]struct{} +} + +func (s *serverInternalState) registerConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + s.activeConns[sc] = struct{}{} + s.mu.Unlock() +} + +func (s *serverInternalState) unregisterConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + delete(s.activeConns, sc) + s.mu.Unlock() +} + +func (s *serverInternalState) startGracefulShutdown() { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + for sc := range s.activeConns { + sc.startGracefulShutdown() + } + s.mu.Unlock() +} + // ConfigureServer adds HTTP/2 support to a net/http Server. // // The configuration conf may be nil. // // ConfigureServer must be called before s begins serving. 
func ConfigureServer(s *http.Server, conf *Server) error { + if s == nil { + panic("nil *http.Server") + } if conf == nil { conf = new(Server) } + conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} + if err := configureServer18(s, conf); err != nil { + return err + } + if err := configureServer19(s, conf); err != nil { + return err + } if s.TLSConfig == nil { s.TLSConfig = new(tls.Config) @@ -181,9 +258,6 @@ func ConfigureServer(s *http.Server, conf *Server) error { if !haveNPN { s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) } - // h2-14 is temporary (as of 2015-03-05) while we wait for all browsers - // to switch to "h2". - s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "h2-14") if s.TLSNextProto == nil { s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} @@ -192,47 +266,120 @@ func ConfigureServer(s *http.Server, conf *Server) error { if testHookOnConn != nil { testHookOnConn() } - conf.handleConn(hs, c, h) + conf.ServeConn(c, &ServeConnOpts{ + Handler: h, + BaseConfig: hs, + }) } s.TLSNextProto[NextProtoTLS] = protoHandler - s.TLSNextProto["h2-14"] = protoHandler // temporary; see above. 
return nil } -func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) { - sc := &serverConn{ - srv: srv, - hs: hs, - conn: c, - remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), - handler: h, - streams: make(map[uint32]*stream), - readFrameCh: make(chan readFrameResult), - wantWriteFrameCh: make(chan frameWriteMsg, 8), - wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync - bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way - doneServing: make(chan struct{}), - advMaxStreams: srv.maxConcurrentStreams(), - writeSched: writeScheduler{ - maxFrameSize: initialMaxFrameSize, - }, - initialWindowSize: initialWindowSize, - headerTableSize: initialHeaderTableSize, - serveG: newGoroutineLock(), - pushEnabled: true, +// ServeConnOpts are options for the Server.ServeConn method. +type ServeConnOpts struct { + // BaseConfig optionally sets the base configuration + // for values. If nil, defaults are used. + BaseConfig *http.Server + + // Handler specifies which handler to use for processing + // requests. If nil, BaseConfig.Handler is used. If BaseConfig + // or BaseConfig.Handler is nil, http.DefaultServeMux is used. + Handler http.Handler +} + +func (o *ServeConnOpts) baseConfig() *http.Server { + if o != nil && o.BaseConfig != nil { + return o.BaseConfig } + return new(http.Server) +} + +func (o *ServeConnOpts) handler() http.Handler { + if o != nil { + if o.Handler != nil { + return o.Handler + } + if o.BaseConfig != nil && o.BaseConfig.Handler != nil { + return o.BaseConfig.Handler + } + } + return http.DefaultServeMux +} + +// ServeConn serves HTTP/2 requests on the provided connection and +// blocks until the connection is no longer readable. +// +// ServeConn starts speaking HTTP/2 assuming that c has not had any +// reads or writes. It writes its initial settings frame and expects +// to be able to read the preface and settings frame from the +// client. 
If c has a ConnectionState method like a *tls.Conn, the +// ConnectionState is used to verify the TLS ciphersuite and to set +// the Request.TLS field in Handlers. +// +// ServeConn does not support h2c by itself. Any h2c support must be +// implemented in terms of providing a suitably-behaving net.Conn. +// +// The opts parameter is optional. If nil, default values are used. +func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + baseCtx, cancel := serverConnBaseContext(c, opts) + defer cancel() + + sc := &serverConn{ + srv: s, + hs: opts.baseConfig(), + conn: c, + baseCtx: baseCtx, + remoteAddrStr: c.RemoteAddr().String(), + bw: newBufferedWriter(c), + handler: opts.handler(), + streams: make(map[uint32]*stream), + readFrameCh: make(chan readFrameResult), + wantWriteFrameCh: make(chan FrameWriteRequest, 8), + serveMsgCh: make(chan interface{}, 8), + wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync + bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way + doneServing: make(chan struct{}), + clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" + advMaxStreams: s.maxConcurrentStreams(), + initialStreamSendWindowSize: initialWindowSize, + maxFrameSize: initialMaxFrameSize, + headerTableSize: initialHeaderTableSize, + serveG: newGoroutineLock(), + pushEnabled: true, + } + + s.state.registerConn(sc) + defer s.state.unregisterConn(sc) + + // The net/http package sets the write deadline from the + // http.Server.WriteTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already set. + // Write deadlines are set per stream in serverConn.newStream. + // Disarm the net.Conn write deadline here. 
+ if sc.hs.WriteTimeout != 0 { + sc.conn.SetWriteDeadline(time.Time{}) + } + + if s.NewWriteScheduler != nil { + sc.writeSched = s.NewWriteScheduler() + } else { + sc.writeSched = NewRandomWriteScheduler() + } + + // These start at the RFC-specified defaults. If there is a higher + // configured value for inflow, that will be updated when we send a + // WINDOW_UPDATE shortly after sending SETTINGS. sc.flow.add(initialWindowSize) sc.inflow.add(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - sc.hpackDecoder = hpack.NewDecoder(initialHeaderTableSize, nil) - sc.hpackDecoder.SetMaxStringLength(sc.maxHeaderStringLen()) fr := NewFramer(sc.bw, c) - fr.SetMaxReadFrameSize(srv.maxReadFrameSize()) + fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + fr.MaxHeaderListSize = sc.maxHeaderListSize() + fr.SetMaxReadFrameSize(s.maxReadFrameSize()) sc.framer = fr - if tc, ok := c.(*tls.Conn); ok { + if tc, ok := c.(connectionStater); ok { sc.tlsState = new(tls.ConnectionState) *sc.tlsState = tc.ConnectionState() // 9.2 Use of TLS Features @@ -262,7 +409,7 @@ func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) { // So for now, do nothing here again. } - if !srv.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { // "Endpoints MAY choose to generate a connection error // (Section 5.4.1) of type INADEQUATE_SECURITY if one of // the prohibited cipher suites are negotiated." @@ -284,30 +431,6 @@ func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) { sc.serve() } -// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. 
-func isBadCipher(cipher uint16) bool { - switch cipher { - case tls.TLS_RSA_WITH_RC4_128_SHA, - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: - // Reject cipher suites from Appendix A. - // "This list includes those cipher suites that do not - // offer an ephemeral key exchange and those that are - // based on the TLS null, stream or block cipher type" - return true - default: - return false - } -} - func (sc *serverConn) rejectConn(err ErrCode, debug string) { sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) // ignoring errors. hanging up anyway. @@ -323,57 +446,54 @@ type serverConn struct { conn net.Conn bw *bufferedWriter // writing to conn handler http.Handler + baseCtx contextContext framer *Framer - hpackDecoder *hpack.Decoder - doneServing chan struct{} // closed when serverConn.serve ends - readFrameCh chan readFrameResult // written by serverConn.readFrames - wantWriteFrameCh chan frameWriteMsg // from handlers -> serve - wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes - bodyReadCh chan bodyReadMsg // from handlers -> serve - testHookCh chan func(int) // code to run on the serve loop - flow flow // conn-wide (not stream-specific) outbound flow control - inflow flow // conn-wide inbound flow control - tlsState *tls.ConnectionState // shared by all handlers, like net/http + doneServing chan struct{} // closed when serverConn.serve ends + readFrameCh chan readFrameResult // written by serverConn.readFrames + wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve + wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles 
more frame writes + bodyReadCh chan bodyReadMsg // from handlers -> serve + serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop + flow flow // conn-wide (not stream-specific) outbound flow control + inflow flow // conn-wide inbound flow control + tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string + writeSched WriteScheduler // Everything following is owned by the serve loop; use serveG.check(): - serveG goroutineLock // used to verify funcs are on serve() - pushEnabled bool - sawFirstSettings bool // got the initial SETTINGS frame after the preface - needToSendSettingsAck bool - unackedSettings int // how many SETTINGS have we sent without ACKs? - clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) - advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client - curOpenStreams uint32 // client's number of open streams - maxStreamID uint32 // max ever seen - streams map[uint32]*stream - initialWindowSize int32 - headerTableSize uint32 - peerMaxHeaderListSize uint32 // zero means unknown (default) - canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case - req requestParam // non-zero while reading request headers - writingFrame bool // started write goroutine but haven't heard back on wroteFrameCh - needsFrameFlush bool // last frame write wasn't a flush - writeSched writeScheduler - inGoAway bool // we've started to or sent GOAWAY - needToSendGoAway bool // we need to schedule a GOAWAY frame write - goAwayCode ErrCode - shutdownTimerCh <-chan time.Time // nil until used - shutdownTimer *time.Timer // nil until used + serveG goroutineLock // used to verify funcs are on serve() + pushEnabled bool + sawFirstSettings bool // got the initial SETTINGS frame after the preface + needToSendSettingsAck bool + unackedSettings int // how many SETTINGS have we sent without ACKs? 
+ clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) + advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client + curClientStreams uint32 // number of open streams initiated by the client + curPushedStreams uint32 // number of open streams initiated by server push + maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests + maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes + streams map[uint32]*stream + initialStreamSendWindowSize int32 + maxFrameSize int32 + headerTableSize uint32 + peerMaxHeaderListSize uint32 // zero means unknown (default) + canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case + writingFrame bool // started writing a frame (on serve goroutine or separate) + writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh + needsFrameFlush bool // last frame write wasn't a flush + inGoAway bool // we've started to or sent GOAWAY + inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop + needToSendGoAway bool // we need to schedule a GOAWAY frame write + goAwayCode ErrCode + shutdownTimer *time.Timer // nil until used + idleTimer *time.Timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer hpackEncoder *hpack.Encoder -} -func (sc *serverConn) maxHeaderStringLen() int { - v := sc.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) - } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 + // Used by startGracefulShutdown. 
+ shutdownOnce sync.Once } func (sc *serverConn) maxHeaderListSize() uint32 { @@ -388,19 +508,9 @@ func (sc *serverConn) maxHeaderListSize() uint32 { return uint32(n + typicalHeaders*perFieldOverhead) } -// requestParam is the state of the next request, initialized over -// potentially several frames HEADERS + zero or more CONTINUATION -// frames. -type requestParam struct { - // stream is non-nil if we're reading (HEADER or CONTINUATION) - // frames for a request (but not DATA). - stream *stream - header http.Header - method, path string - scheme, authority string - sawRegularHeader bool // saw a non-pseudo header already - invalidHeader bool // an invalid header was seen - headerListSize int64 // actually uint32, but easier math this way +func (sc *serverConn) curOpenStreams() uint32 { + sc.serveG.check() + return sc.curClientStreams + sc.curPushedStreams } // stream represents a stream. This is the minimal metadata needed by @@ -412,10 +522,12 @@ type requestParam struct { // responseWriter's state field. 
type stream struct { // immutable: - sc *serverConn - id uint32 - body *pipe // non-nil if expecting DATA frames - cw closeWaiter // closed wait stream transitions to closed state + sc *serverConn + id uint32 + body *pipe // non-nil if expecting DATA frames + cw closeWaiter // closed wait stream transitions to closed state + ctx contextContext + cancelCtx func() // owned by serverConn's serve loop: bodyBytes int64 // body bytes seen so far @@ -426,9 +538,10 @@ type stream struct { numTrailerValues int64 weight uint8 state streamState - sentReset bool // only true once detached from streams map - gotReset bool // only true once detacted from streams map - gotTrailerHeader bool // HEADER frame for trailers was seen + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + writeDeadline *time.Timer // nil if unused trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -443,7 +556,7 @@ func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { func (sc *serverConn) state(streamID uint32) (streamState, *stream) { sc.serveG.check() - // http://http2.github.io/http2-spec/#rfc.section.5.1 + // http://tools.ietf.org/html/rfc7540#section-5.1 if st, ok := sc.streams[streamID]; ok { return st.state, st } @@ -453,8 +566,14 @@ func (sc *serverConn) state(streamID uint32) (streamState, *stream) { // a client sends a HEADERS frame on stream 7 without ever sending a // frame on stream 5, then stream 5 transitions to the "closed" // state when the first frame for stream 7 is sent or received." 
- if streamID <= sc.maxStreamID { - return stateClosed, nil + if streamID%2 == 1 { + if streamID <= sc.maxClientStreamID { + return stateClosed, nil + } + } else { + if streamID <= sc.maxPushPromiseID { + return stateClosed, nil + } } return stateIdle, nil } @@ -482,12 +601,55 @@ func (sc *serverConn) logf(format string, args ...interface{}) { } } +// errno returns v's underlying uintptr, else 0. +// +// TODO: remove this helper function once http2 can use build +// tags. See comment in isClosedConnError. +func errno(v error) uintptr { + if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { + return uintptr(rv.Uint()) + } + return 0 +} + +// isClosedConnError reports whether err is an error from use of a closed +// network connection. +func isClosedConnError(err error) bool { + if err == nil { + return false + } + + // TODO: remove this string search and be more like the Windows + // case below. That might involve modifying the standard library + // to return better error types. + str := err.Error() + if strings.Contains(str, "use of closed network connection") { + return true + } + + // TODO(bradfitz): x/tools/cmd/bundle doesn't really support + // build tags, so I can't make an http2_windows.go file with + // Windows-specific stuff. Fix that and move this, once we + // have a way to bundle this into std's net/http somehow. 
+ if runtime.GOOS == "windows" { + if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { + if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { + const WSAECONNABORTED = 10053 + const WSAECONNRESET = 10054 + if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { + return true + } + } + } + } + return false +} + func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { if err == nil { return } - str := err.Error() - if err == io.EOF || strings.Contains(str, "use of closed network connection") { + if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) { // Boring, expected errors. sc.vlogf(format, args...) } else { @@ -495,87 +657,6 @@ func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { } } -func (sc *serverConn) onNewHeaderField(f hpack.HeaderField) { - sc.serveG.check() - if VerboseLogs { - sc.vlogf("http2: server decoded %v", f) - } - switch { - case !validHeaderFieldValue(f.Value): // f.Name checked _after_ pseudo check, since ':' is invalid - sc.req.invalidHeader = true - case strings.HasPrefix(f.Name, ":"): - if sc.req.sawRegularHeader { - sc.logf("pseudo-header after regular header") - sc.req.invalidHeader = true - return - } - var dst *string - switch f.Name { - case ":method": - dst = &sc.req.method - case ":path": - dst = &sc.req.path - case ":scheme": - dst = &sc.req.scheme - case ":authority": - dst = &sc.req.authority - default: - // 8.1.2.1 Pseudo-Header Fields - // "Endpoints MUST treat a request or response - // that contains undefined or invalid - // pseudo-header fields as malformed (Section - // 8.1.2.6)." 
- sc.logf("invalid pseudo-header %q", f.Name) - sc.req.invalidHeader = true - return - } - if *dst != "" { - sc.logf("duplicate pseudo-header %q sent", f.Name) - sc.req.invalidHeader = true - return - } - *dst = f.Value - case !validHeaderFieldName(f.Name): - sc.req.invalidHeader = true - default: - sc.req.sawRegularHeader = true - sc.req.header.Add(sc.canonicalHeader(f.Name), f.Value) - const headerFieldOverhead = 32 // per spec - sc.req.headerListSize += int64(len(f.Name)) + int64(len(f.Value)) + headerFieldOverhead - if sc.req.headerListSize > int64(sc.maxHeaderListSize()) { - sc.hpackDecoder.SetEmitEnabled(false) - } - } -} - -func (st *stream) onNewTrailerField(f hpack.HeaderField) { - sc := st.sc - sc.serveG.check() - if VerboseLogs { - sc.vlogf("http2: server decoded trailer %v", f) - } - switch { - case strings.HasPrefix(f.Name, ":"): - sc.req.invalidHeader = true - return - case !validHeaderFieldName(f.Name) || !validHeaderFieldValue(f.Value): - sc.req.invalidHeader = true - return - default: - key := sc.canonicalHeader(f.Name) - if st.trailer != nil { - vv := append(st.trailer[key], f.Value) - st.trailer[key] = vv - - // arbitrary; TODO: read spec about header list size limits wrt trailers - const tooBig = 1000 - if len(vv) >= tooBig { - sc.hpackDecoder.SetEmitEnabled(false) - } - } - } -} - func (sc *serverConn) canonicalHeader(v string) string { sc.serveG.check() cv, ok := commonCanonHeader[v] @@ -610,10 +691,11 @@ type readFrameResult struct { // It's run on its own goroutine. func (sc *serverConn) readFrames() { gate := make(gate) + gateDone := gate.Done for { f, err := sc.framer.ReadFrame() select { - case sc.readFrameCh <- readFrameResult{f, err, gate.Done}: + case sc.readFrameCh <- readFrameResult{f, err, gateDone}: case <-sc.doneServing: return } @@ -630,17 +712,17 @@ func (sc *serverConn) readFrames() { // frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. 
type frameWriteResult struct { - wm frameWriteMsg // what was written (or attempted) - err error // result of the writeFrame call + wr FrameWriteRequest // what was written (or attempted) + err error // result of the writeFrame call } // writeFrameAsync runs in its own goroutine and writes a single frame // and then reports when it's done. // At most one goroutine can be running writeFrameAsync at a time per // serverConn. -func (sc *serverConn) writeFrameAsync(wm frameWriteMsg) { - err := wm.write.writeFrame(sc) - sc.wroteFrameCh <- frameWriteResult{wm, err} +func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { + err := wr.write.writeFrame(sc) + sc.wroteFrameCh <- frameWriteResult{wr, err} } func (sc *serverConn) closeAllStreamsOnConnClose() { @@ -684,40 +766,53 @@ func (sc *serverConn) serve() { sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) } - sc.writeFrame(frameWriteMsg{ + sc.writeFrame(FrameWriteRequest{ write: writeSettings{ {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, {SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - - // TODO: more actual settings, notably - // SettingInitialWindowSize, but then we also - // want to bump up the conn window size the - // same amount here right after the settings + {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, }, }) sc.unackedSettings++ + // Each connection starts with intialWindowSize inflow tokens. + // If a higher value is configured, we add more tokens. + if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + sc.sendWindowUpdate(nil, int(diff)) + } + if err := sc.readPreface(); err != nil { sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) return } // Now that we've got the preface, get us out of the - // "StateNew" state. We can't go directly to idle, though. + // "StateNew" state. 
We can't go directly to idle, though. // Active means we read some data and anticipate a request. We'll // do another Active when we get a HEADERS frame. sc.setConnState(http.StateActive) sc.setConnState(http.StateIdle) + if sc.srv.IdleTimeout != 0 { + sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + defer sc.idleTimer.Stop() + } + go sc.readFrames() // closed by defer sc.conn.Close above - settingsTimer := time.NewTimer(firstSettingsTimeout) + settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) + defer settingsTimer.Stop() + loopNum := 0 for { loopNum++ select { - case wm := <-sc.wantWriteFrameCh: - sc.writeFrame(wm) + case wr := <-sc.wantWriteFrameCh: + if se, ok := wr.write.(StreamError); ok { + sc.resetStream(se) + break + } + sc.writeFrame(wr) case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: @@ -725,21 +820,72 @@ func (sc *serverConn) serve() { return } res.readMore() - if settingsTimer.C != nil { + if settingsTimer != nil { settingsTimer.Stop() - settingsTimer.C = nil + settingsTimer = nil } case m := <-sc.bodyReadCh: sc.noteBodyRead(m.st, m.n) - case <-settingsTimer.C: - sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) - return - case <-sc.shutdownTimerCh: - sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) - return - case fn := <-sc.testHookCh: - fn(loopNum) + case msg := <-sc.serveMsgCh: + switch v := msg.(type) { + case func(int): + v(loopNum) // for testing + case *serverMessage: + switch v { + case settingsTimerMsg: + sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) + return + case idleTimerMsg: + sc.vlogf("connection is idle") + sc.goAway(ErrCodeNo) + case shutdownTimerMsg: + sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) + return + case gracefulShutdownMsg: + sc.startGracefulShutdownInternal() + default: + panic("unknown timer") + } + case *startPushRequest: + 
sc.startPush(v) + default: + panic(fmt.Sprintf("unexpected type %T", v)) + } } + + if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame { + return + } + } +} + +func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) { + select { + case <-sc.doneServing: + case <-sharedCh: + close(privateCh) + } +} + +type serverMessage int + +// Message values sent to serveMsgCh. +var ( + settingsTimerMsg = new(serverMessage) + idleTimerMsg = new(serverMessage) + shutdownTimerMsg = new(serverMessage) + gracefulShutdownMsg = new(serverMessage) +) + +func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } +func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } + +func (sc *serverConn) sendServeMsg(msg interface{}) { + sc.serveG.checkNotOn() // NOT + select { + case sc.serveMsgCh <- msg: + case <-sc.doneServing: } } @@ -787,7 +933,7 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea ch := errChanPool.Get().(chan error) writeArg := writeDataPool.Get().(*writeData) *writeArg = writeData{stream.id, data, endStream} - err := sc.writeFrameFromHandler(frameWriteMsg{ + err := sc.writeFrameFromHandler(FrameWriteRequest{ write: writeArg, stream: stream, done: ch, @@ -823,17 +969,17 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea return err } -// writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts +// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts // if the connection has gone away. // // This must not be run from the serve goroutine itself, else it might // deadlock writing to sc.wantWriteFrameCh (which is only mildly // buffered and is read by serve itself). If you're on the serve // goroutine, call writeFrame instead. 
-func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error { +func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error { sc.serveG.checkNotOn() // NOT select { - case sc.wantWriteFrameCh <- wm: + case sc.wantWriteFrameCh <- wr: return nil case <-sc.doneServing: // Serve loop is gone. @@ -850,39 +996,103 @@ func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error { // make it onto the wire // // If you're not on the serve goroutine, use writeFrameFromHandler instead. -func (sc *serverConn) writeFrame(wm frameWriteMsg) { +func (sc *serverConn) writeFrame(wr FrameWriteRequest) { sc.serveG.check() - sc.writeSched.add(wm) + + // If true, wr will not be written and wr.done will not be signaled. + var ignoreWrite bool + + // We are not allowed to write frames on closed streams. RFC 7540 Section + // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on + // a closed stream." Our server never sends PRIORITY, so that exception + // does not apply. + // + // The serverConn might close an open stream while the stream's handler + // is still running. For example, the server might close a stream when it + // receives bad data from the client. If this happens, the handler might + // attempt to write a frame after the stream has been closed (since the + // handler hasn't yet been notified of the close). In this case, we simply + // ignore the frame. The handler will notice that the stream is closed when + // it waits for the frame to be written. + // + // As an exception to this rule, we allow sending RST_STREAM after close. + // This allows us to immediately reject new streams without tracking any + // state for those streams (except for the queued RST_STREAM frame). This + // may result in duplicate RST_STREAMs in some cases, but the client should + // ignore those. 
+ if wr.StreamID() != 0 { + _, isReset := wr.write.(StreamError) + if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset { + ignoreWrite = true + } + } + + // Don't send a 100-continue response if we've already sent headers. + // See golang.org/issue/14030. + switch wr.write.(type) { + case *writeResHeaders: + wr.stream.wroteHeaders = true + case write100ContinueHeadersFrame: + if wr.stream.wroteHeaders { + // We do not need to notify wr.done because this frame is + // never written with wr.done != nil. + if wr.done != nil { + panic("wr.done != nil for write100ContinueHeadersFrame") + } + ignoreWrite = true + } + } + + if !ignoreWrite { + sc.writeSched.Push(wr) + } sc.scheduleFrameWrite() } -// startFrameWrite starts a goroutine to write wm (in a separate +// startFrameWrite starts a goroutine to write wr (in a separate // goroutine since that might block on the network), and updates the -// serve goroutine's state about the world, updated from info in wm. -func (sc *serverConn) startFrameWrite(wm frameWriteMsg) { +// serve goroutine's state about the world, updated from info in wr. +func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { sc.serveG.check() if sc.writingFrame { panic("internal error: can only be writing one frame at a time") } - st := wm.stream + st := wr.stream if st != nil { switch st.state { case stateHalfClosedLocal: - panic("internal error: attempt to send frame on half-closed-local stream") - case stateClosed: - if st.sentReset || st.gotReset { - // Skip this frame. - sc.scheduleFrameWrite() - return + switch wr.write.(type) { + case StreamError, handlerPanicRST, writeWindowUpdate: + // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE + // in this state. (We never send PRIORITY from the server, so that is not checked.) 
+ default: + panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr)) } - panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wm)) + case stateClosed: + panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr)) + } + } + if wpp, ok := wr.write.(*writePushPromise); ok { + var err error + wpp.promisedID, err = wpp.allocatePromisedID() + if err != nil { + sc.writingFrameAsync = false + wr.replyToWriter(err) + return } } sc.writingFrame = true sc.needsFrameFlush = true - go sc.writeFrameAsync(wm) + if wr.write.staysWithinBuffer(sc.bw.Available()) { + sc.writingFrameAsync = false + err := wr.write.writeFrame(sc) + sc.wroteFrame(frameWriteResult{wr, err}) + } else { + sc.writingFrameAsync = true + go sc.writeFrameAsync(wr) + } } // errHandlerPanicked is the error given to any callers blocked in a read from @@ -898,27 +1108,12 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { panic("internal error: expected to be already writing a frame") } sc.writingFrame = false + sc.writingFrameAsync = false - wm := res.wm - st := wm.stream + wr := res.wr - closeStream := endsStream(wm.write) - - if _, ok := wm.write.(handlerPanicRST); ok { - sc.closeStream(st, errHandlerPanicked) - } - - // Reply (if requested) to the blocked ServeHTTP goroutine. 
- if ch := wm.done; ch != nil { - select { - case ch <- res.err: - default: - panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write)) - } - } - wm.write = nil // prevent use (assume it's tainted after wm.done send) - - if closeStream { + if writeEndsStream(wr.write) { + st := wr.stream if st == nil { panic("internal error: expecting non-nil stream") } @@ -927,19 +1122,37 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { // Here we would go to stateHalfClosedLocal in // theory, but since our handler is done and // the net/http package provides no mechanism - // for finishing writing to a ResponseWriter - // while still reading data (see possible TODO - // at top of this file), we go into closed - // state here anyway, after telling the peer - // we're hanging up on them. - st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream - errCancel := StreamError{st.id, ErrCodeCancel} - sc.resetStream(errCancel) + // for closing a ResponseWriter while still + // reading data (see possible TODO at top of + // this file), we go into closed state here + // anyway, after telling the peer we're + // hanging up on them. We'll transition to + // stateClosed after the RST_STREAM frame is + // written. + st.state = stateHalfClosedLocal + // Section 8.1: a server MAY request that the client abort + // transmission of a request without error by sending a + // RST_STREAM with an error code of NO_ERROR after sending + // a complete response. + sc.resetStream(streamError(st.id, ErrCodeNo)) case stateHalfClosedRemote: sc.closeStream(st, errHandlerComplete) } + } else { + switch v := wr.write.(type) { + case StreamError: + // st may be unknown if the RST_STREAM was generated to reject bad input. + if st, ok := sc.streams[v.StreamID]; ok { + sc.closeStream(st, v) + } + case handlerPanicRST: + sc.closeStream(wr.stream, errHandlerPanicked) + } } + // Reply (if requested) to unblock the ServeHTTP goroutine. 
+ wr.replyToWriter(res.err) + sc.scheduleFrameWrite() } @@ -957,47 +1170,77 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { // flush the write buffer. func (sc *serverConn) scheduleFrameWrite() { sc.serveG.check() - if sc.writingFrame { + if sc.writingFrame || sc.inFrameScheduleLoop { return } - if sc.needToSendGoAway { - sc.needToSendGoAway = false - sc.startFrameWrite(frameWriteMsg{ - write: &writeGoAway{ - maxStreamID: sc.maxStreamID, - code: sc.goAwayCode, - }, - }) - return - } - if sc.needToSendSettingsAck { - sc.needToSendSettingsAck = false - sc.startFrameWrite(frameWriteMsg{write: writeSettingsAck{}}) - return - } - if !sc.inGoAway { - if wm, ok := sc.writeSched.take(); ok { - sc.startFrameWrite(wm) - return + sc.inFrameScheduleLoop = true + for !sc.writingFrameAsync { + if sc.needToSendGoAway { + sc.needToSendGoAway = false + sc.startFrameWrite(FrameWriteRequest{ + write: &writeGoAway{ + maxStreamID: sc.maxClientStreamID, + code: sc.goAwayCode, + }, + }) + continue } + if sc.needToSendSettingsAck { + sc.needToSendSettingsAck = false + sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}}) + continue + } + if !sc.inGoAway || sc.goAwayCode == ErrCodeNo { + if wr, ok := sc.writeSched.Pop(); ok { + sc.startFrameWrite(wr) + continue + } + } + if sc.needsFrameFlush { + sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}}) + sc.needsFrameFlush = false // after startFrameWrite, since it sets this true + continue + } + break } - if sc.needsFrameFlush { - sc.startFrameWrite(frameWriteMsg{write: flushFrameWriter{}}) - sc.needsFrameFlush = false // after startFrameWrite, since it sets this true - return - } + sc.inFrameScheduleLoop = false +} + +// startGracefulShutdown gracefully shuts down a connection. This +// sends GOAWAY with ErrCodeNo to tell the client we're gracefully +// shutting down. The connection isn't closed until all current +// streams are done. 
+// +// startGracefulShutdown returns immediately; it does not wait until +// the connection has shut down. +func (sc *serverConn) startGracefulShutdown() { + sc.serveG.checkNotOn() // NOT + sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) }) +} + +func (sc *serverConn) startGracefulShutdownInternal() { + sc.goAwayIn(ErrCodeNo, 0) } func (sc *serverConn) goAway(code ErrCode) { + sc.serveG.check() + var forceCloseIn time.Duration + if code != ErrCodeNo { + forceCloseIn = 250 * time.Millisecond + } else { + // TODO: configurable + forceCloseIn = 1 * time.Second + } + sc.goAwayIn(code, forceCloseIn) +} + +func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) { sc.serveG.check() if sc.inGoAway { return } - if code != ErrCodeNo { - sc.shutDownIn(250 * time.Millisecond) - } else { - // TODO: configurable - sc.shutDownIn(1 * time.Second) + if forceCloseIn != 0 { + sc.shutDownIn(forceCloseIn) } sc.inGoAway = true sc.needToSendGoAway = true @@ -1007,16 +1250,14 @@ func (sc *serverConn) goAway(code ErrCode) { func (sc *serverConn) shutDownIn(d time.Duration) { sc.serveG.check() - sc.shutdownTimer = time.NewTimer(d) - sc.shutdownTimerCh = sc.shutdownTimer.C + sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) } func (sc *serverConn) resetStream(se StreamError) { sc.serveG.check() - sc.writeFrame(frameWriteMsg{write: se}) + sc.writeFrame(FrameWriteRequest{write: se}) if st, ok := sc.streams[se.StreamID]; ok { - st.sentReset = true - sc.closeStream(st, se) + st.resetQueued = true } } @@ -1031,7 +1272,7 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { sc.goAway(ErrCodeFrameSize) return true // goAway will close the loop } - clientGone := err == io.EOF || strings.Contains(err.Error(), "use of closed network connection") + clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) if clientGone { // TODO: could we also get into this state if // the peer does a half close @@ -1067,7 +1308,7 
@@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { return true // goAway will handle shutdown default: if res.err != nil { - sc.logf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) + sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) } else { sc.logf("http2: server closing client connection: %v", err) } @@ -1089,10 +1330,8 @@ func (sc *serverConn) processFrame(f Frame) error { switch f := f.(type) { case *SettingsFrame: return sc.processSettings(f) - case *HeadersFrame: + case *MetaHeadersFrame: return sc.processHeaders(f) - case *ContinuationFrame: - return sc.processContinuation(f) case *WindowUpdateFrame: return sc.processWindowUpdate(f) case *PingFrame: @@ -1103,6 +1342,8 @@ func (sc *serverConn) processFrame(f Frame) error { return sc.processResetStream(f) case *PriorityFrame: return sc.processPriority(f) + case *GoAwayFrame: + return sc.processGoAway(f) case *PushPromiseFrame: // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. @@ -1128,7 +1369,10 @@ func (sc *serverConn) processPing(f *PingFrame) error { // PROTOCOL_ERROR." return ConnectionError(ErrCodeProtocol) } - sc.writeFrame(frameWriteMsg{write: writePingAck{f}}) + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + sc.writeFrame(FrameWriteRequest{write: writePingAck{f}}) return nil } @@ -1136,7 +1380,14 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { sc.serveG.check() switch { case f.StreamID != 0: // stream-level flow control - st := sc.streams[f.StreamID] + state, st := sc.state(f.StreamID) + if state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." 
+ return ConnectionError(ErrCodeProtocol) + } if st == nil { // "WINDOW_UPDATE can be sent by a peer that has sent a // frame bearing the END_STREAM flag. This means that a @@ -1146,7 +1397,7 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { return nil } if !st.flow.add(int32(f.Increment)) { - return StreamError{f.StreamID, ErrCodeFlowControl} + return streamError(f.StreamID, ErrCodeFlowControl) } default: // connection-level flow control if !sc.flow.add(int32(f.Increment)) { @@ -1170,8 +1421,8 @@ func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { return ConnectionError(ErrCodeProtocol) } if st != nil { - st.gotReset = true - sc.closeStream(st, StreamError{f.StreamID, f.ErrCode}) + st.cancelCtx() + sc.closeStream(st, streamError(f.StreamID, f.ErrCode)) } return nil } @@ -1182,16 +1433,33 @@ func (sc *serverConn) closeStream(st *stream, err error) { panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) } st.state = stateClosed - sc.curOpenStreams-- - if sc.curOpenStreams == 0 { - sc.setConnState(http.StateIdle) + if st.writeDeadline != nil { + st.writeDeadline.Stop() + } + if st.isPushed() { + sc.curPushedStreams-- + } else { + sc.curClientStreams-- } delete(sc.streams, st.id) + if len(sc.streams) == 0 { + sc.setConnState(http.StateIdle) + if sc.srv.IdleTimeout != 0 { + sc.idleTimer.Reset(sc.srv.IdleTimeout) + } + if h1ServerKeepAlivesDisabled(sc.hs) { + sc.startGracefulShutdownInternal() + } + } if p := st.body; p != nil { + // Return any buffered unread bytes worth of conn-level flow control. 
+ // See golang.org/issue/16481 + sc.sendWindowUpdate(nil, p.Len()) + p.CloseWithError(err) } st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc - sc.writeSched.forgetStream(st.id) + sc.writeSched.CloseStream(st.id) } func (sc *serverConn) processSettings(f *SettingsFrame) error { @@ -1233,7 +1501,7 @@ func (sc *serverConn) processSetting(s Setting) error { case SettingInitialWindowSize: return sc.processSettingInitialWindowSize(s.Val) case SettingMaxFrameSize: - sc.writeSched.maxFrameSize = s.Val + sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 case SettingMaxHeaderListSize: sc.peerMaxHeaderListSize = s.Val default: @@ -1258,9 +1526,9 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { // adjust the size of all stream flow control windows that it // maintains by the difference between the new value and the // old value." - old := sc.initialWindowSize - sc.initialWindowSize = int32(val) - growth := sc.initialWindowSize - old // may be negative + old := sc.initialStreamSendWindowSize + sc.initialStreamSendWindowSize = int32(val) + growth := int32(val) - old // may be negative for _, st := range sc.streams { if !st.flow.add(growth) { // 6.9.2 Initial Flow Control Window Size @@ -1277,43 +1545,82 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { func (sc *serverConn) processData(f *DataFrame) error { sc.serveG.check() + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + data := f.Data() + // "If a DATA frame is received whose stream is not in "open" // or "half closed (local)" state, the recipient MUST respond // with a stream error (Section 5.4.2) of type STREAM_CLOSED." 
id := f.Header().StreamID - st, ok := sc.streams[id] - if !ok || st.state != stateOpen || st.gotTrailerHeader { + state, st := sc.state(id) + if id == 0 || state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { // This includes sending a RST_STREAM if the stream is // in stateHalfClosedLocal (which currently means that // the http.Handler returned, so it's done reading & // done writing). Try to stop the client from sending // more DATA. - return StreamError{id, ErrCodeStreamClosed} + + // But still enforce their connection-level flow control, + // and return any flow control bytes since we're not going + // to consume them. + if sc.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + // Deduct the flow control from inflow, since we're + // going to immediately add it back in + // sendWindowUpdate, which also schedules sending the + // frames. + sc.inflow.take(int32(f.Length)) + sc.sendWindowUpdate(nil, int(f.Length)) // conn-level + + if st != nil && st.resetQueued { + // Already have a stream error in flight. Don't send another. + return nil + } + return streamError(id, ErrCodeStreamClosed) } if st.body == nil { panic("internal error: should have a body in this state") } - data := f.Data() // Sender sending more than they'd declared? if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) - return StreamError{id, ErrCodeStreamClosed} + return streamError(id, ErrCodeStreamClosed) } - if len(data) > 0 { + if f.Length > 0 { // Check whether the client has flow control quota. 
- if int(st.inflow.available()) < len(data) { - return StreamError{id, ErrCodeFlowControl} + if st.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) } - st.inflow.take(int32(len(data))) - wrote, err := st.body.Write(data) - if err != nil { - return StreamError{id, ErrCodeStreamClosed} + st.inflow.take(int32(f.Length)) + + if len(data) > 0 { + wrote, err := st.body.Write(data) + if err != nil { + return streamError(id, ErrCodeStreamClosed) + } + if wrote != len(data) { + panic("internal error: bad Writer") + } + st.bodyBytes += int64(len(data)) } - if wrote != len(data) { - panic("internal error: bad Writer") + + // Return any padded flow control now, since we won't + // refund it later on body reads. + if pad := int32(f.Length) - int32(len(data)); pad > 0 { + sc.sendWindowUpdate32(nil, pad) + sc.sendWindowUpdate32(st, pad) } - st.bodyBytes += int64(len(data)) } if f.StreamEnded() { st.endStream() @@ -1321,6 +1628,25 @@ func (sc *serverConn) processData(f *DataFrame) error { return nil } +func (sc *serverConn) processGoAway(f *GoAwayFrame) error { + sc.serveG.check() + if f.ErrCode != ErrCodeNo { + sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } else { + sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } + sc.startGracefulShutdownInternal() + // http://tools.ietf.org/html/rfc7540#section-6.8 + // We should not create any new streams, which means we should disable push. + sc.pushEnabled = false + return nil +} + +// isPushed reports whether the stream is server-initiated. +func (st *stream) isPushed() bool { + return st.id%2 == 0 +} + // endStream closes a Request.Body's pipe. It is called when a DATA // frame says a request body is over (or after trailers). 
func (st *stream) endStream() { @@ -1348,14 +1674,20 @@ func (st *stream) copyTrailersToHandlerRequest() { } } -func (sc *serverConn) processHeaders(f *HeadersFrame) error { +// onWriteTimeout is run on its own goroutine (from time.AfterFunc) +// when the stream's WriteTimeout has fired. +func (st *stream) onWriteTimeout() { + st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)}) +} + +func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { sc.serveG.check() - id := f.Header().StreamID + id := f.StreamID if sc.inGoAway { // Ignore. return nil } - // http://http2.github.io/http2-spec/#rfc.section.5.1.1 + // http://tools.ietf.org/html/rfc7540#section-5.1.1 // Streams initiated by a client MUST use odd-numbered stream // identifiers. [...] An endpoint that receives an unexpected // stream identifier MUST respond with a connection error @@ -1367,8 +1699,12 @@ func (sc *serverConn) processHeaders(f *HeadersFrame) error { // send a trailer for an open one. If we already have a stream // open, let it process its own HEADERS frame (trailers at this // point, if it's valid). - st := sc.streams[f.Header().StreamID] - if st != nil { + if st := sc.streams[f.StreamID]; st != nil { + if st.resetQueued { + // We're sending RST_STREAM to close the stream, so don't bother + // processing this frame. + return nil + } return st.processTrailerHeaders(f) } @@ -1377,100 +1713,48 @@ func (sc *serverConn) processHeaders(f *HeadersFrame) error { // endpoint has opened or reserved. [...] An endpoint that // receives an unexpected stream identifier MUST respond with // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. 
- if id <= sc.maxStreamID || sc.req.stream != nil { + if id <= sc.maxClientStreamID { return ConnectionError(ErrCodeProtocol) } + sc.maxClientStreamID = id - if id > sc.maxStreamID { - sc.maxStreamID = id + if sc.idleTimer != nil { + sc.idleTimer.Stop() } - st = &stream{ - sc: sc, - id: id, - state: stateOpen, - } - if f.StreamEnded() { - st.state = stateHalfClosedRemote - } - st.cw.Init() - st.flow.conn = &sc.flow // link to conn-level counter - st.flow.add(sc.initialWindowSize) - st.inflow.conn = &sc.inflow // link to conn-level counter - st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings - - sc.streams[id] = st - if f.HasPriority() { - adjustStreamPriority(sc.streams, st.id, f.Priority) - } - sc.curOpenStreams++ - if sc.curOpenStreams == 1 { - sc.setConnState(http.StateActive) - } - sc.req = requestParam{ - stream: st, - header: make(http.Header), - } - sc.hpackDecoder.SetEmitFunc(sc.onNewHeaderField) - sc.hpackDecoder.SetEmitEnabled(true) - return sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded()) -} - -func (st *stream) processTrailerHeaders(f *HeadersFrame) error { - sc := st.sc - sc.serveG.check() - if st.gotTrailerHeader { - return ConnectionError(ErrCodeProtocol) - } - st.gotTrailerHeader = true - if !f.StreamEnded() { - return StreamError{st.id, ErrCodeProtocol} - } - sc.resetPendingRequest() // we use invalidHeader from it for trailers - return st.processTrailerHeaderBlockFragment(f.HeaderBlockFragment(), f.HeadersEnded()) -} - -func (sc *serverConn) processContinuation(f *ContinuationFrame) error { - sc.serveG.check() - st := sc.streams[f.Header().StreamID] - if st.gotTrailerHeader { - return st.processTrailerHeaderBlockFragment(f.HeaderBlockFragment(), f.HeadersEnded()) - } - return sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded()) -} - -func (sc *serverConn) processHeaderBlockFragment(st *stream, frag []byte, end bool) error { - 
sc.serveG.check() - if _, err := sc.hpackDecoder.Write(frag); err != nil { - return ConnectionError(ErrCodeCompression) - } - if !end { - return nil - } - if err := sc.hpackDecoder.Close(); err != nil { - return ConnectionError(ErrCodeCompression) - } - defer sc.resetPendingRequest() - if sc.curOpenStreams > sc.advMaxStreams { - // "Endpoints MUST NOT exceed the limit set by their - // peer. An endpoint that receives a HEADERS frame - // that causes their advertised concurrent stream - // limit to be exceeded MUST treat this as a stream - // error (Section 5.4.2) of type PROTOCOL_ERROR or - // REFUSED_STREAM." + // http://tools.ietf.org/html/rfc7540#section-5.1.2 + // [...] Endpoints MUST NOT exceed the limit set by their peer. An + // endpoint that receives a HEADERS frame that causes their + // advertised concurrent stream limit to be exceeded MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR + // or REFUSED_STREAM. + if sc.curClientStreams+1 > sc.advMaxStreams { if sc.unackedSettings == 0 { // They should know better. - return StreamError{st.id, ErrCodeProtocol} + return streamError(id, ErrCodeProtocol) } // Assume it's a network race, where they just haven't // received our last SETTINGS update. But actually // this can't happen yet, because we don't yet provide // a way for users to adjust server parameters at // runtime. 
- return StreamError{st.id, ErrCodeRefusedStream} + return streamError(id, ErrCodeRefusedStream) } - rw, req, err := sc.newWriterAndRequest() + initialState := stateOpen + if f.StreamEnded() { + initialState = stateHalfClosedRemote + } + st := sc.newStream(id, 0, initialState) + + if f.HasPriority() { + if err := checkPriority(f.StreamID, f.Priority); err != nil { + return err + } + sc.writeSched.AdjustStream(st.id, f.Priority) + } + + rw, req, err := sc.newWriterAndRequest(st, f) if err != nil { return err } @@ -1482,103 +1766,133 @@ func (sc *serverConn) processHeaderBlockFragment(st *stream, frag []byte, end bo st.declBodyBytes = req.ContentLength handler := sc.handler.ServeHTTP - if !sc.hpackDecoder.EmitEnabled() { + if f.Truncated { // Their header list was too long. Send a 431 error. handler = handleHeaderListTooLong + } else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil { + handler = new400Handler(err) + } + + // The net/http package sets the read deadline from the + // http.Server.ReadTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already + // set. Disarm it here after the request headers are read, + // similar to how the http1 server works. Here it's + // technically more like the http1 Server's ReadHeaderTimeout + // (in Go 1.8), though. That's a more sane option anyway. 
+ if sc.hs.ReadTimeout != 0 { + sc.conn.SetReadDeadline(time.Time{}) } go sc.runHandler(rw, req, handler) return nil } -func (st *stream) processTrailerHeaderBlockFragment(frag []byte, end bool) error { +func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { sc := st.sc sc.serveG.check() - sc.hpackDecoder.SetEmitFunc(st.onNewTrailerField) - if _, err := sc.hpackDecoder.Write(frag); err != nil { - return ConnectionError(ErrCodeCompression) + if st.gotTrailerHeader { + return ConnectionError(ErrCodeProtocol) } - if !end { - return nil + st.gotTrailerHeader = true + if !f.StreamEnded() { + return streamError(st.id, ErrCodeProtocol) } - rp := &sc.req - if rp.invalidHeader { - return StreamError{rp.stream.id, ErrCodeProtocol} + if len(f.PseudoFields()) > 0 { + return streamError(st.id, ErrCodeProtocol) + } + if st.trailer != nil { + for _, hf := range f.RegularFields() { + key := sc.canonicalHeader(hf.Name) + if !ValidTrailerHeader(key) { + // TODO: send more details to the peer somehow. But http2 has + // no way to send debug data at a stream level. Discuss with + // HTTP folk. + return streamError(st.id, ErrCodeProtocol) + } + st.trailer[key] = append(st.trailer[key], hf.Value) + } } - - err := sc.hpackDecoder.Close() st.endStream() - if err != nil { - return ConnectionError(ErrCodeCompression) + return nil +} + +func checkPriority(streamID uint32, p PriorityParam) error { + if streamID == p.StreamDep { + // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR." + // Section 5.3.3 says that a stream can depend on one of its dependencies, + // so it's only self-dependencies that are forbidden. 
+ return streamError(streamID, ErrCodeProtocol) } return nil } func (sc *serverConn) processPriority(f *PriorityFrame) error { - adjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam) + if sc.inGoAway { + return nil + } + if err := checkPriority(f.StreamID, f.PriorityParam); err != nil { + return err + } + sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam) return nil } -func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority PriorityParam) { - st, ok := streams[streamID] - if !ok { - // TODO: not quite correct (this streamID might - // already exist in the dep tree, but be closed), but - // close enough for now. - return - } - st.weight = priority.Weight - parent := streams[priority.StreamDep] // might be nil - if parent == st { - // if client tries to set this stream to be the parent of itself - // ignore and keep going - return +func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream { + sc.serveG.check() + if id == 0 { + panic("internal error: cannot create stream with id 0") } - // section 5.3.3: If a stream is made dependent on one of its - // own dependencies, the formerly dependent stream is first - // moved to be dependent on the reprioritized stream's previous - // parent. The moved dependency retains its weight. 
- for piter := parent; piter != nil; piter = piter.parent { - if piter == st { - parent.parent = st.parent - break - } + ctx, cancelCtx := contextWithCancel(sc.baseCtx) + st := &stream{ + sc: sc, + id: id, + state: state, + ctx: ctx, + cancelCtx: cancelCtx, } - st.parent = parent - if priority.Exclusive && (st.parent != nil || priority.StreamDep == 0) { - for _, openStream := range streams { - if openStream != st && openStream.parent == st.parent { - openStream.parent = st - } - } + st.cw.Init() + st.flow.conn = &sc.flow // link to conn-level counter + st.flow.add(sc.initialStreamSendWindowSize) + st.inflow.conn = &sc.inflow // link to conn-level counter + st.inflow.add(sc.srv.initialStreamRecvWindowSize()) + if sc.hs.WriteTimeout != 0 { + st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } + + sc.streams[id] = st + sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) + if st.isPushed() { + sc.curPushedStreams++ + } else { + sc.curClientStreams++ + } + if sc.curOpenStreams() == 1 { + sc.setConnState(http.StateActive) + } + + return st } -// resetPendingRequest zeros out all state related to a HEADERS frame -// and its zero or more CONTINUATION frames sent to start a new -// request. 
-func (sc *serverConn) resetPendingRequest() { +func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { sc.serveG.check() - sc.req = requestParam{} -} -func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, error) { - sc.serveG.check() - rp := &sc.req - - if rp.invalidHeader { - return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol} + rp := requestParam{ + method: f.PseudoValue("method"), + scheme: f.PseudoValue("scheme"), + authority: f.PseudoValue("authority"), + path: f.PseudoValue("path"), } isConnect := rp.method == "CONNECT" if isConnect { if rp.path != "" || rp.scheme != "" || rp.authority == "" { - return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol} + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) } - } else if rp.method == "" || rp.path == "" || - (rp.scheme != "https" && rp.scheme != "http") { + } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { // See 8.1.2.6 Malformed Requests and Responses: // // Malformed requests or responses that are detected @@ -1589,23 +1903,54 @@ func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, err // "All HTTP/2 requests MUST include exactly one valid // value for the :method, :scheme, and :path // pseudo-header fields" - return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol} + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) } - bodyOpen := rp.stream.state == stateOpen + bodyOpen := !f.StreamEnded() if rp.method == "HEAD" && bodyOpen { // HEAD requests can't have bodies - return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol} + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) } - var tlsState *tls.ConnectionState // nil if not scheme https + rp.header = make(http.Header) + for _, hf := range f.RegularFields() { + rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + } + if rp.authority == "" { + rp.authority = 
rp.header.Get("Host") + } + + rw, req, err := sc.newWriterAndRequestNoBody(st, rp) + if err != nil { + return nil, nil, err + } + if bodyOpen { + if vv, ok := rp.header["Content-Length"]; ok { + req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) + } else { + req.ContentLength = -1 + } + req.Body.(*requestBody).pipe = &pipe{ + b: &dataBuffer{expected: req.ContentLength}, + } + } + return rw, req, nil +} + +type requestParam struct { + method string + scheme, authority, path string + header http.Header +} + +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + var tlsState *tls.ConnectionState // nil if not scheme https if rp.scheme == "https" { tlsState = sc.tlsState } - authority := rp.authority - if authority == "" { - authority = rp.header.Get("Host") - } + needsContinue := rp.header.Get("Expect") == "100-continue" if needsContinue { rp.header.Del("Expect") @@ -1634,24 +1979,25 @@ func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, err } delete(rp.header, "Trailer") - body := &requestBody{ - conn: sc, - stream: rp.stream, - needsContinue: needsContinue, - } var url_ *url.URL var requestURI string - if isConnect { + if rp.method == "CONNECT" { url_ = &url.URL{Host: rp.authority} requestURI = rp.authority // mimic HTTP/1 server behavior } else { var err error url_, err = url.ParseRequestURI(rp.path) if err != nil { - return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol} + return nil, nil, streamError(st.id, ErrCodeProtocol) } requestURI = rp.path } + + body := &requestBody{ + conn: sc, + stream: st, + needsContinue: needsContinue, + } req := &http.Request{ Method: rp.method, URL: url_, @@ -1662,21 +2008,11 @@ func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, err ProtoMajor: 2, ProtoMinor: 0, TLS: tlsState, - Host: authority, + Host: rp.authority, Body: body, Trailer: trailer, } - if bodyOpen { - body.pipe = &pipe{ - 
b: &fixedBuffer{buf: make([]byte, initialWindowSize)}, // TODO: garbage - } - - if vv, ok := rp.header["Content-Length"]; ok { - req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) - } else { - req.ContentLength = -1 - } - } + req = requestWithContext(req, st.ctx) rws := responseWriterStatePool.Get().(*responseWriterState) bwSave := rws.bw @@ -1684,7 +2020,7 @@ func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, err rws.conn = sc rws.bw = bwSave rws.bw.Reset(chunkWriter{rws}) - rws.stream = rp.stream + rws.stream = st rws.req = req rws.body = body @@ -1696,17 +2032,20 @@ func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, err func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { didPanic := true defer func() { + rw.rws.stream.cancelCtx() if didPanic { e := recover() - // Same as net/http: - const size = 64 << 10 - buf := make([]byte, size) - buf = buf[:runtime.Stack(buf, false)] - sc.writeFrameFromHandler(frameWriteMsg{ + sc.writeFrameFromHandler(FrameWriteRequest{ write: handlerPanicRST{rw.rws.stream.id}, stream: rw.rws.stream, }) - sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) + // Same as net/http: + if shouldLogPanic(e) { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) + } return } rw.handlerDone() @@ -1737,7 +2076,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro // mutates it. errc = errChanPool.Get().(chan error) } - if err := sc.writeFrameFromHandler(frameWriteMsg{ + if err := sc.writeFrameFromHandler(FrameWriteRequest{ write: headerData, stream: st, done: errc, @@ -1760,7 +2099,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro // called from handler goroutines. 
func (sc *serverConn) write100ContinueHeaders(st *stream) { - sc.writeFrameFromHandler(frameWriteMsg{ + sc.writeFrameFromHandler(FrameWriteRequest{ write: write100ContinueHeadersFrame{st.id}, stream: st, }) @@ -1776,11 +2115,13 @@ type bodyReadMsg struct { // called from handler goroutines. // Notes that the handler for the given stream ID read n bytes of its body // and schedules flow control tokens to be sent. -func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) { +func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { sc.serveG.checkNotOn() // NOT on - select { - case sc.bodyReadCh <- bodyReadMsg{st, n}: - case <-sc.doneServing: + if n > 0 { + select { + case sc.bodyReadCh <- bodyReadMsg{st, n}: + case <-sc.doneServing: + } } } @@ -1823,7 +2164,7 @@ func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { if st != nil { streamID = st.id } - sc.writeFrame(frameWriteMsg{ + sc.writeFrame(FrameWriteRequest{ write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, stream: st, }) @@ -1838,17 +2179,20 @@ func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { } } +// requestBody is the Handler's Request.Body type. +// Read and Close may be called concurrently. 
type requestBody struct { stream *stream conn *serverConn - closed bool + closed bool // for use by Close only + sawEOF bool // for use by Read only pipe *pipe // non-nil if we have a HTTP entity message body needsContinue bool // need to send a 100-continue } func (b *requestBody) Close() error { - if b.pipe != nil { - b.pipe.CloseWithError(errClosedBody) + if b.pipe != nil && !b.closed { + b.pipe.BreakWithError(errClosedBody) } b.closed = true return nil @@ -1859,18 +2203,22 @@ func (b *requestBody) Read(p []byte) (n int, err error) { b.needsContinue = false b.conn.write100ContinueHeaders(b.stream) } - if b.pipe == nil { + if b.pipe == nil || b.sawEOF { return 0, io.EOF } n, err = b.pipe.Read(p) - if n > 0 { - b.conn.noteBodyReadFromHandler(b.stream, n) + if err == io.EOF { + b.sawEOF = true } + if b.conn == nil && inTests { + return + } + b.conn.noteBodyReadFromHandler(b.stream, n, err) return } -// responseWriter is the http.ResponseWriter implementation. It's -// intentionally small (1 pointer wide) to minimize garbage. The +// responseWriter is the http.ResponseWriter implementation. It's +// intentionally small (1 pointer wide) to minimize garbage. The // responseWriterState pointer inside is zeroed at the end of a // request (in handlerDone) and calls on the responseWriter thereafter // simply crash (caller's mistake), but the much larger responseWriterState @@ -1904,6 +2252,7 @@ type responseWriterState struct { wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. sentHeader bool // have we sent the header frame? handlerDone bool // handler has finished + dirty bool // a Write failed; don't reuse this responseWriterState sentContentLen int64 // non-zero if handler set a Content-Length header wroteBytes int64 @@ -1923,12 +2272,14 @@ func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != // written in the trailers at the end of the response. 
func (rws *responseWriterState) declareTrailer(k string) { k = http.CanonicalHeaderKey(k) - switch k { - case "Transfer-Encoding", "Content-Length", "Trailer": + if !ValidTrailerHeader(k) { // Forbidden by RFC 2616 14.40. + rws.conn.logf("ignoring invalid trailer %q", k) return } - rws.trailers = append(rws.trailers, k) + if !strSliceContains(rws.trailers, k) { + rws.trailers = append(rws.trailers, k) + } } // writeChunk writes chunks from the bufio.Writer. But because @@ -1983,6 +2334,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { date: date, }) if err != nil { + rws.dirty = true return 0, err } if endStream { @@ -1996,10 +2348,15 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { return 0, nil } + if rws.handlerDone { + rws.promoteUndeclaredTrailers() + } + endStream := rws.handlerDone && !rws.hasTrailers() if len(p) > 0 || endStream { // only send a 0 byte DATA frame if we're ending the stream. if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { + rws.dirty = true return 0, err } } @@ -2011,11 +2368,66 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { trailers: rws.trailers, endStream: true, }) + if err != nil { + rws.dirty = true + } return len(p), err } return len(p), nil } +// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys +// that, if present, signals that the map entry is actually for +// the response trailers, and not the response headers. The prefix +// is stripped after the ServeHTTP call finishes and the values are +// sent in the trailers. +// +// This mechanism is intended only for trailers that are not known +// prior to the headers being written. 
If the set of trailers is fixed +// or known before the header is written, the normal Go trailers mechanism +// is preferred: +// https://golang.org/pkg/net/http/#ResponseWriter +// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +const TrailerPrefix = "Trailer:" + +// promoteUndeclaredTrailers permits http.Handlers to set trailers +// after the header has already been flushed. Because the Go +// ResponseWriter interface has no way to set Trailers (only the +// Header), and because we didn't want to expand the ResponseWriter +// interface, and because nobody used trailers, and because RFC 2616 +// says you SHOULD (but not must) predeclare any trailers in the +// header, the official ResponseWriter rules said trailers in Go must +// be predeclared, and then we reuse the same ResponseWriter.Header() +// map to mean both Headers and Trailers. When it's time to write the +// Trailers, we pick out the fields of Headers that were declared as +// trailers. That worked for a while, until we found the first major +// user of Trailers in the wild: gRPC (using them only over http2), +// and gRPC libraries permit setting trailers mid-stream without +// predeclarnig them. So: change of plans. We still permit the old +// way, but we also permit this hack: if a Header() key begins with +// "Trailer:", the suffix of that key is a Trailer. Because ':' is an +// invalid token byte anyway, there is no ambiguity. (And it's already +// filtered out) It's mildly hacky, but not terrible. +// +// This method runs after the Handler is done and promotes any Header +// fields to be trailers. 
+func (rws *responseWriterState) promoteUndeclaredTrailers() { + for k, vv := range rws.handlerHeader { + if !strings.HasPrefix(k, TrailerPrefix) { + continue + } + trailerKey := strings.TrimPrefix(k, TrailerPrefix) + rws.declareTrailer(trailerKey) + rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv + } + + if len(rws.trailers) > 1 { + sorter := sorterPool.Get().(*sorter) + sorter.SortStrings(rws.trailers) + sorterPool.Put(sorter) + } +} + func (w *responseWriter) Flush() { rws := w.rws if rws == nil { @@ -2045,8 +2457,9 @@ func (w *responseWriter) CloseNotify() <-chan bool { if ch == nil { ch = make(chan bool, 1) rws.closeNotifierCh = ch + cw := rws.stream.cw go func() { - rws.stream.cw.Wait() // wait for close + cw.Wait() // wait for close ch <- true }() } @@ -2097,7 +2510,7 @@ func cloneHeader(h http.Header) http.Header { // // * Handler calls w.Write or w.WriteString -> // * -> rws.bw (*bufio.Writer) -> -// * (Handler migth call Flush) +// * (Handler might call Flush) // * -> chunkWriter{rws} // * -> responseWriterState.writeChunk(p []byte) // * -> responseWriterState.writeChunk (most of the magic; see comment there) @@ -2136,10 +2549,213 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, func (w *responseWriter) handlerDone() { rws := w.rws + dirty := rws.dirty rws.handlerDone = true w.Flush() w.rws = nil - responseWriterStatePool.Put(rws) + if !dirty { + // Only recycle the pool if all prior Write calls to + // the serverConn goroutine completed successfully. If + // they returned earlier due to resets from the peer + // there might still be write goroutines outstanding + // from the serverConn referencing the rws memory. See + // issue 20704. + responseWriterStatePool.Put(rws) + } +} + +// Push errors. 
+var ( + ErrRecursivePush = errors.New("http2: recursive push not allowed") + ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS") +) + +// pushOptions is the internal version of http.PushOptions, which we +// cannot include here because it's only defined in Go 1.8 and later. +type pushOptions struct { + Method string + Header http.Header +} + +func (w *responseWriter) push(target string, opts pushOptions) error { + st := w.rws.stream + sc := st.sc + sc.serveG.checkNotOn() + + // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream." + // http://tools.ietf.org/html/rfc7540#section-6.6 + if st.isPushed() { + return ErrRecursivePush + } + + // Default options. + if opts.Method == "" { + opts.Method = "GET" + } + if opts.Header == nil { + opts.Header = http.Header{} + } + wantScheme := "http" + if w.rws.req.TLS != nil { + wantScheme = "https" + } + + // Validate the request. + u, err := url.Parse(target) + if err != nil { + return err + } + if u.Scheme == "" { + if !strings.HasPrefix(target, "/") { + return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target) + } + u.Scheme = wantScheme + u.Host = w.rws.req.Host + } else { + if u.Scheme != wantScheme { + return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme) + } + if u.Host == "" { + return errors.New("URL must have a host") + } + } + for k := range opts.Header { + if strings.HasPrefix(k, ":") { + return fmt.Errorf("promised request headers cannot include pseudo header %q", k) + } + // These headers are meaningful only if the request has a body, + // but PUSH_PROMISE requests cannot have a body. + // http://tools.ietf.org/html/rfc7540#section-8.2 + // Also disallow Host, since the promised URL must be absolute. 
+ switch strings.ToLower(k) { + case "content-length", "content-encoding", "trailer", "te", "expect", "host": + return fmt.Errorf("promised request headers cannot include %q", k) + } + } + if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil { + return err + } + + // The RFC effectively limits promised requests to GET and HEAD: + // "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]" + // http://tools.ietf.org/html/rfc7540#section-8.2 + if opts.Method != "GET" && opts.Method != "HEAD" { + return fmt.Errorf("method %q must be GET or HEAD", opts.Method) + } + + msg := &startPushRequest{ + parent: st, + method: opts.Method, + url: u, + header: cloneHeader(opts.Header), + done: errChanPool.Get().(chan error), + } + + select { + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + case sc.serveMsgCh <- msg: + } + + select { + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + case err := <-msg.done: + errChanPool.Put(msg.done) + return err + } +} + +type startPushRequest struct { + parent *stream + method string + url *url.URL + header http.Header + done chan error +} + +func (sc *serverConn) startPush(msg *startPushRequest) { + sc.serveG.check() + + // http://tools.ietf.org/html/rfc7540#section-6.6. + // PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that + // is in either the "open" or "half-closed (remote)" state. + if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote { + // responseWriter.Push checks that the stream is peer-initiaed. + msg.done <- errStreamClosed + return + } + + // http://tools.ietf.org/html/rfc7540#section-6.6. + if !sc.pushEnabled { + msg.done <- http.ErrNotSupported + return + } + + // PUSH_PROMISE frames must be sent in increasing order by stream ID, so + // we allocate an ID for the promised stream lazily, when the PUSH_PROMISE + // is written. 
Once the ID is allocated, we start the request handler. + allocatePromisedID := func() (uint32, error) { + sc.serveG.check() + + // Check this again, just in case. Technically, we might have received + // an updated SETTINGS by the time we got around to writing this frame. + if !sc.pushEnabled { + return 0, http.ErrNotSupported + } + // http://tools.ietf.org/html/rfc7540#section-6.5.2. + if sc.curPushedStreams+1 > sc.clientMaxStreams { + return 0, ErrPushLimitReached + } + + // http://tools.ietf.org/html/rfc7540#section-5.1.1. + // Streams initiated by the server MUST use even-numbered identifiers. + // A server that is unable to establish a new stream identifier can send a GOAWAY + // frame so that the client is forced to open a new connection for new streams. + if sc.maxPushPromiseID+2 >= 1<<31 { + sc.startGracefulShutdownInternal() + return 0, ErrPushLimitReached + } + sc.maxPushPromiseID += 2 + promisedID := sc.maxPushPromiseID + + // http://tools.ietf.org/html/rfc7540#section-8.2. + // Strictly speaking, the new stream should start in "reserved (local)", then + // transition to "half closed (remote)" after sending the initial HEADERS, but + // we start in "half closed (remote)" for simplicity. + // See further comments at the definition of stateHalfClosedRemote. + promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) + rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ + method: msg.method, + scheme: msg.url.Scheme, + authority: msg.url.Host, + path: msg.url.RequestURI(), + header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + }) + if err != nil { + // Should not happen, since we've already validated msg.url. 
+ panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) + } + + go sc.runHandler(rw, req, sc.handler.ServeHTTP) + return promisedID, nil + } + + sc.writeFrame(FrameWriteRequest{ + write: &writePushPromise{ + streamID: msg.parent.id, + method: msg.method, + url: msg.url, + h: msg.header, + allocatePromisedID: allocatePromisedID, + }, + stream: msg.parent, + done: msg.done, + }) } // foreachHeaderElement splits v according to the "#rule" construction @@ -2159,3 +2775,83 @@ func foreachHeaderElement(v string, fn func(string)) { } } } + +// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 +var connHeaders = []string{ + "Connection", + "Keep-Alive", + "Proxy-Connection", + "Transfer-Encoding", + "Upgrade", +} + +// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request, +// per RFC 7540 Section 8.1.2.2. +// The returned error is reported to users. +func checkValidHTTP2RequestHeaders(h http.Header) error { + for _, k := range connHeaders { + if _, ok := h[k]; ok { + return fmt.Errorf("request header %q is not valid in HTTP/2", k) + } + } + te := h["Te"] + if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { + return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) + } + return nil +} + +func new400Handler(err error) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + http.Error(w, err.Error(), http.StatusBadRequest) + } +} + +// ValidTrailerHeader reports whether name is a valid header field name to appear +// in trailers. 
+// See: http://tools.ietf.org/html/rfc7230#section-4.1.2 +func ValidTrailerHeader(name string) bool { + name = http.CanonicalHeaderKey(name) + if strings.HasPrefix(name, "If-") || badTrailer[name] { + return false + } + return true +} + +var badTrailer = map[string]bool{ + "Authorization": true, + "Cache-Control": true, + "Connection": true, + "Content-Encoding": true, + "Content-Length": true, + "Content-Range": true, + "Content-Type": true, + "Expect": true, + "Host": true, + "Keep-Alive": true, + "Max-Forwards": true, + "Pragma": true, + "Proxy-Authenticate": true, + "Proxy-Authorization": true, + "Proxy-Connection": true, + "Range": true, + "Realm": true, + "Te": true, + "Trailer": true, + "Transfer-Encoding": true, + "Www-Authenticate": true, +} + +// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives +// disabled. See comments on h1ServerShutdownChan above for why +// the code is written this way. +func h1ServerKeepAlivesDisabled(hs *http.Server) bool { + var x interface{} = hs + type I interface { + doKeepAlives() bool + } + if hs, ok := x.(I); ok { + return !hs.doKeepAlives() + } + return false +} diff --git a/fn/vendor/golang.org/x/net/http2/server_push_test.go b/fn/vendor/golang.org/x/net/http2/server_push_test.go new file mode 100644 index 000000000..918fd30dc --- /dev/null +++ b/fn/vendor/golang.org/x/net/http2/server_push_test.go @@ -0,0 +1,521 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.8 + +package http2 + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "sync" + "testing" + "time" +) + +func TestServer_Push_Success(t *testing.T) { + const ( + mainBody = "index page" + pushedBody = "pushed page" + userAgent = "testagent" + cookie = "testcookie" + ) + + var stURL string + checkPromisedReq := func(r *http.Request, wantMethod string, wantH http.Header) error { + if got, want := r.Method, wantMethod; got != want { + return fmt.Errorf("promised Req.Method=%q, want %q", got, want) + } + if got, want := r.Header, wantH; !reflect.DeepEqual(got, want) { + return fmt.Errorf("promised Req.Header=%q, want %q", got, want) + } + if got, want := "https://"+r.Host, stURL; got != want { + return fmt.Errorf("promised Req.Host=%q, want %q", got, want) + } + if r.Body == nil { + return fmt.Errorf("nil Body") + } + if buf, err := ioutil.ReadAll(r.Body); err != nil || len(buf) != 0 { + return fmt.Errorf("ReadAll(Body)=%q,%v, want '',nil", buf, err) + } + return nil + } + + errc := make(chan error, 3) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + switch r.URL.RequestURI() { + case "/": + // Push "/pushed?get" as a GET request, using an absolute URL. + opt := &http.PushOptions{ + Header: http.Header{ + "User-Agent": {userAgent}, + }, + } + if err := w.(http.Pusher).Push(stURL+"/pushed?get", opt); err != nil { + errc <- fmt.Errorf("error pushing /pushed?get: %v", err) + return + } + // Push "/pushed?head" as a HEAD request, using a path. 
+ opt = &http.PushOptions{ + Method: "HEAD", + Header: http.Header{ + "User-Agent": {userAgent}, + "Cookie": {cookie}, + }, + } + if err := w.(http.Pusher).Push("/pushed?head", opt); err != nil { + errc <- fmt.Errorf("error pushing /pushed?head: %v", err) + return + } + w.Header().Set("Content-Type", "text/html") + w.Header().Set("Content-Length", strconv.Itoa(len(mainBody))) + w.WriteHeader(200) + io.WriteString(w, mainBody) + errc <- nil + + case "/pushed?get": + wantH := http.Header{} + wantH.Set("User-Agent", userAgent) + if err := checkPromisedReq(r, "GET", wantH); err != nil { + errc <- fmt.Errorf("/pushed?get: %v", err) + return + } + w.Header().Set("Content-Type", "text/html") + w.Header().Set("Content-Length", strconv.Itoa(len(pushedBody))) + w.WriteHeader(200) + io.WriteString(w, pushedBody) + errc <- nil + + case "/pushed?head": + wantH := http.Header{} + wantH.Set("User-Agent", userAgent) + wantH.Set("Cookie", cookie) + if err := checkPromisedReq(r, "HEAD", wantH); err != nil { + errc <- fmt.Errorf("/pushed?head: %v", err) + return + } + w.WriteHeader(204) + errc <- nil + + default: + errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI()) + } + }) + stURL = st.ts.URL + + // Send one request, which should push two responses. 
+ st.greet() + getSlash(st) + for k := 0; k < 3; k++ { + select { + case <-time.After(2 * time.Second): + t.Errorf("timeout waiting for handler %d to finish", k) + case err := <-errc: + if err != nil { + t.Fatal(err) + } + } + } + + checkPushPromise := func(f Frame, promiseID uint32, wantH [][2]string) error { + pp, ok := f.(*PushPromiseFrame) + if !ok { + return fmt.Errorf("got a %T; want *PushPromiseFrame", f) + } + if !pp.HeadersEnded() { + return fmt.Errorf("want END_HEADERS flag in PushPromiseFrame") + } + if got, want := pp.PromiseID, promiseID; got != want { + return fmt.Errorf("got PromiseID %v; want %v", got, want) + } + gotH := st.decodeHeader(pp.HeaderBlockFragment()) + if !reflect.DeepEqual(gotH, wantH) { + return fmt.Errorf("got promised headers %v; want %v", gotH, wantH) + } + return nil + } + checkHeaders := func(f Frame, wantH [][2]string) error { + hf, ok := f.(*HeadersFrame) + if !ok { + return fmt.Errorf("got a %T; want *HeadersFrame", f) + } + gotH := st.decodeHeader(hf.HeaderBlockFragment()) + if !reflect.DeepEqual(gotH, wantH) { + return fmt.Errorf("got response headers %v; want %v", gotH, wantH) + } + return nil + } + checkData := func(f Frame, wantData string) error { + df, ok := f.(*DataFrame) + if !ok { + return fmt.Errorf("got a %T; want *DataFrame", f) + } + if gotData := string(df.Data()); gotData != wantData { + return fmt.Errorf("got response data %q; want %q", gotData, wantData) + } + return nil + } + + // Stream 1 has 2 PUSH_PROMISE + HEADERS + DATA + // Stream 2 has HEADERS + DATA + // Stream 4 has HEADERS + expected := map[uint32][]func(Frame) error{ + 1: { + func(f Frame) error { + return checkPushPromise(f, 2, [][2]string{ + {":method", "GET"}, + {":scheme", "https"}, + {":authority", st.ts.Listener.Addr().String()}, + {":path", "/pushed?get"}, + {"user-agent", userAgent}, + }) + }, + func(f Frame) error { + return checkPushPromise(f, 4, [][2]string{ + {":method", "HEAD"}, + {":scheme", "https"}, + {":authority", 
st.ts.Listener.Addr().String()}, + {":path", "/pushed?head"}, + {"cookie", cookie}, + {"user-agent", userAgent}, + }) + }, + func(f Frame) error { + return checkHeaders(f, [][2]string{ + {":status", "200"}, + {"content-type", "text/html"}, + {"content-length", strconv.Itoa(len(mainBody))}, + }) + }, + func(f Frame) error { + return checkData(f, mainBody) + }, + }, + 2: { + func(f Frame) error { + return checkHeaders(f, [][2]string{ + {":status", "200"}, + {"content-type", "text/html"}, + {"content-length", strconv.Itoa(len(pushedBody))}, + }) + }, + func(f Frame) error { + return checkData(f, pushedBody) + }, + }, + 4: { + func(f Frame) error { + return checkHeaders(f, [][2]string{ + {":status", "204"}, + }) + }, + }, + } + + consumed := map[uint32]int{} + for k := 0; len(expected) > 0; k++ { + f, err := st.readFrame() + if err != nil { + for id, left := range expected { + t.Errorf("stream %d: missing %d frames", id, len(left)) + } + t.Fatalf("readFrame %d: %v", k, err) + } + id := f.Header().StreamID + label := fmt.Sprintf("stream %d, frame %d", id, consumed[id]) + if len(expected[id]) == 0 { + t.Fatalf("%s: unexpected frame %#+v", label, f) + } + check := expected[id][0] + expected[id] = expected[id][1:] + if len(expected[id]) == 0 { + delete(expected, id) + } + if err := check(f); err != nil { + t.Fatalf("%s: %v", label, err) + } + consumed[id]++ + } +} + +func TestServer_Push_SuccessNoRace(t *testing.T) { + // Regression test for issue #18326. Ensure the request handler can mutate + // pushed request headers without racing with the PUSH_PROMISE write. 
+ errc := make(chan error, 2) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + switch r.URL.RequestURI() { + case "/": + opt := &http.PushOptions{ + Header: http.Header{"User-Agent": {"testagent"}}, + } + if err := w.(http.Pusher).Push("/pushed", opt); err != nil { + errc <- fmt.Errorf("error pushing: %v", err) + return + } + w.WriteHeader(200) + errc <- nil + + case "/pushed": + // Update request header, ensure there is no race. + r.Header.Set("User-Agent", "newagent") + r.Header.Set("Cookie", "cookie") + w.WriteHeader(200) + errc <- nil + + default: + errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI()) + } + }) + + // Send one request, which should push one response. + st.greet() + getSlash(st) + for k := 0; k < 2; k++ { + select { + case <-time.After(2 * time.Second): + t.Errorf("timeout waiting for handler %d to finish", k) + case err := <-errc: + if err != nil { + t.Fatal(err) + } + } + } +} + +func TestServer_Push_RejectRecursivePush(t *testing.T) { + // Expect two requests, but might get three if there's a bug and the second push succeeds. 
+ errc := make(chan error, 3) + handler := func(w http.ResponseWriter, r *http.Request) error { + baseURL := "https://" + r.Host + switch r.URL.Path { + case "/": + if err := w.(http.Pusher).Push(baseURL+"/push1", nil); err != nil { + return fmt.Errorf("first Push()=%v, want nil", err) + } + return nil + + case "/push1": + if got, want := w.(http.Pusher).Push(baseURL+"/push2", nil), ErrRecursivePush; got != want { + return fmt.Errorf("Push()=%v, want %v", got, want) + } + return nil + + default: + return fmt.Errorf("unexpected path: %q", r.URL.Path) + } + } + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + errc <- handler(w, r) + }) + defer st.Close() + st.greet() + getSlash(st) + if err := <-errc; err != nil { + t.Errorf("First request failed: %v", err) + } + if err := <-errc; err != nil { + t.Errorf("Second request failed: %v", err) + } +} + +func testServer_Push_RejectSingleRequest(t *testing.T, doPush func(http.Pusher, *http.Request) error, settings ...Setting) { + // Expect one request, but might get two if there's a bug and the push succeeds. + errc := make(chan error, 2) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + errc <- doPush(w.(http.Pusher), r) + }) + defer st.Close() + st.greet() + if err := st.fr.WriteSettings(settings...); err != nil { + st.t.Fatalf("WriteSettings: %v", err) + } + st.wantSettingsAck() + getSlash(st) + if err := <-errc; err != nil { + t.Error(err) + } + // Should not get a PUSH_PROMISE frame. 
+ hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Error("stream should end after headers") + } +} + +func TestServer_Push_RejectIfDisabled(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if got, want := p.Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want { + return fmt.Errorf("Push()=%v, want %v", got, want) + } + return nil + }, + Setting{SettingEnablePush, 0}) +} + +func TestServer_Push_RejectWhenNoConcurrentStreams(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if got, want := p.Push("https://"+r.Host+"/pushed", nil), ErrPushLimitReached; got != want { + return fmt.Errorf("Push()=%v, want %v", got, want) + } + return nil + }, + Setting{SettingMaxConcurrentStreams, 0}) +} + +func TestServer_Push_RejectWrongScheme(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if err := p.Push("http://"+r.Host+"/pushed", nil); err == nil { + return errors.New("Push() should have failed (push target URL is http)") + } + return nil + }) +} + +func TestServer_Push_RejectMissingHost(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if err := p.Push("https:pushed", nil); err == nil { + return errors.New("Push() should have failed (push target URL missing host)") + } + return nil + }) +} + +func TestServer_Push_RejectRelativePath(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if err := p.Push("../test", nil); err == nil { + return errors.New("Push() should have failed (push target is a relative path)") + } + return nil + }) +} + +func TestServer_Push_RejectForbiddenMethod(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if err := p.Push("https://"+r.Host+"/pushed", &http.PushOptions{Method: "POST"}); err == nil { + 
return errors.New("Push() should have failed (cannot promise a POST)") + } + return nil + }) +} + +func TestServer_Push_RejectForbiddenHeader(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + header := http.Header{ + "Content-Length": {"10"}, + "Content-Encoding": {"gzip"}, + "Trailer": {"Foo"}, + "Te": {"trailers"}, + "Host": {"test.com"}, + ":authority": {"test.com"}, + } + if err := p.Push("https://"+r.Host+"/pushed", &http.PushOptions{Header: header}); err == nil { + return errors.New("Push() should have failed (forbidden headers)") + } + return nil + }) +} + +func TestServer_Push_StateTransitions(t *testing.T) { + const body = "foo" + + gotPromise := make(chan bool) + finishedPush := make(chan bool) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + switch r.URL.RequestURI() { + case "/": + if err := w.(http.Pusher).Push("/pushed", nil); err != nil { + t.Errorf("Push error: %v", err) + } + // Don't finish this request until the push finishes so we don't + // nondeterministically interleave output frames with the push. + <-finishedPush + case "/pushed": + <-gotPromise + } + w.Header().Set("Content-Type", "text/html") + w.Header().Set("Content-Length", strconv.Itoa(len(body))) + w.WriteHeader(200) + io.WriteString(w, body) + }) + defer st.Close() + + st.greet() + if st.stream(2) != nil { + t.Fatal("stream 2 should be empty") + } + if got, want := st.streamState(2), stateIdle; got != want { + t.Fatalf("streamState(2)=%v, want %v", got, want) + } + getSlash(st) + // After the PUSH_PROMISE is sent, the stream should be stateHalfClosedRemote. + st.wantPushPromise() + if got, want := st.streamState(2), stateHalfClosedRemote; got != want { + t.Fatalf("streamState(2)=%v, want %v", got, want) + } + // We stall the HTTP handler for "/pushed" until the above check. 
If we don't + // stall the handler, then the handler might write HEADERS and DATA and finish + // the stream before we check st.streamState(2) -- should that happen, we'll + // see stateClosed and fail the above check. + close(gotPromise) + st.wantHeaders() + if df := st.wantData(); !df.StreamEnded() { + t.Fatal("expected END_STREAM flag on DATA") + } + if got, want := st.streamState(2), stateClosed; got != want { + t.Fatalf("streamState(2)=%v, want %v", got, want) + } + close(finishedPush) +} + +func TestServer_Push_RejectAfterGoAway(t *testing.T) { + var readyOnce sync.Once + ready := make(chan struct{}) + errc := make(chan error, 2) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + select { + case <-ready: + case <-time.After(5 * time.Second): + errc <- fmt.Errorf("timeout waiting for GOAWAY to be processed") + } + if got, want := w.(http.Pusher).Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want { + errc <- fmt.Errorf("Push()=%v, want %v", got, want) + } + errc <- nil + }) + defer st.Close() + st.greet() + getSlash(st) + + // Send GOAWAY and wait for it to be processed. 
+ st.fr.WriteGoAway(1, ErrCodeNo, nil) + go func() { + for { + select { + case <-ready: + return + default: + } + st.sc.serveMsgCh <- func(loopNum int) { + if !st.sc.pushEnabled { + readyOnce.Do(func() { close(ready) }) + } + } + } + }() + if err := <-errc; err != nil { + t.Error(err) + } +} diff --git a/fn/vendor/golang.org/x/net/http2/server_test.go b/fn/vendor/golang.org/x/net/http2/server_test.go index 0a7d870d6..437d1c378 100644 --- a/fn/vendor/golang.org/x/net/http2/server_test.go +++ b/fn/vendor/golang.org/x/net/http2/server_test.go @@ -45,21 +45,25 @@ type serverTester struct { t testing.TB ts *httptest.Server fr *Framer - logBuf *bytes.Buffer - logFilter []string // substrings to filter out - scMu sync.Mutex // guards sc + serverLogBuf bytes.Buffer // logger for httptest.Server + logFilter []string // substrings to filter out + scMu sync.Mutex // guards sc sc *serverConn hpackDec *hpack.Decoder decodedHeaders [][2]string + // If http2debug!=2, then we capture Frame debug logs that will be written + // to t.Log after a test fails. The read and write logs use separate locks + // and buffers so we don't accidentally introduce synchronization between + // the read and write goroutines, which may hide data races. 
+ frameReadLogMu sync.Mutex + frameReadLogBuf bytes.Buffer + frameWriteLogMu sync.Mutex + frameWriteLogBuf bytes.Buffer + // writing headers: headerBuf bytes.Buffer hpackEnc *hpack.Encoder - - // reading frames: - frc chan Frame - frErrc chan error - readTimer *time.Timer } func init() { @@ -76,47 +80,49 @@ type serverTesterOpt string var optOnlyServer = serverTesterOpt("only_server") var optQuiet = serverTesterOpt("quiet_logging") +var optFramerReuseFrames = serverTesterOpt("frame_reuse_frames") func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *serverTester { resetHooks() - logBuf := new(bytes.Buffer) ts := httptest.NewUnstartedServer(handler) tlsConfig := &tls.Config{ InsecureSkipVerify: true, - // The h2-14 is temporary, until curl is updated. (as used by unit tests - // in Docker) - NextProtos: []string{NextProtoTLS, "h2-14"}, + NextProtos: []string{NextProtoTLS}, } - var onlyServer, quiet bool + var onlyServer, quiet, framerReuseFrames bool + h2server := new(Server) for _, opt := range opts { switch v := opt.(type) { case func(*tls.Config): v(tlsConfig) case func(*httptest.Server): v(ts) + case func(*Server): + v(h2server) case serverTesterOpt: switch v { case optOnlyServer: onlyServer = true case optQuiet: quiet = true + case optFramerReuseFrames: + framerReuseFrames = true } + case func(net.Conn, http.ConnState): + ts.Config.ConnState = v default: t.Fatalf("unknown newServerTester option type %T", v) } } - ConfigureServer(ts.Config, &Server{}) + ConfigureServer(ts.Config, h2server) st := &serverTester{ - t: t, - ts: ts, - logBuf: logBuf, - frc: make(chan Frame, 1), - frErrc: make(chan error, 1), + t: t, + ts: ts, } st.hpackEnc = hpack.NewEncoder(&st.headerBuf) st.hpackDec = hpack.NewDecoder(initialHeaderTableSize, st.onHeaderField) @@ -125,7 +131,7 @@ func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{} if quiet { ts.Config.ErrorLog = log.New(ioutil.Discard, "", 0) } else { - ts.Config.ErrorLog = 
log.New(io.MultiWriter(stderrv(), twriter{t: t, st: st}, logBuf), "", log.LstdFlags) + ts.Config.ErrorLog = log.New(io.MultiWriter(stderrv(), twriter{t: t, st: st}, &st.serverLogBuf), "", log.LstdFlags) } ts.StartTLS() @@ -136,7 +142,6 @@ func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{} st.scMu.Lock() defer st.scMu.Unlock() st.sc = v - st.sc.testHookCh = make(chan func(int)) } log.SetOutput(io.MultiWriter(stderrv(), twriter{t: t, st: st})) if !onlyServer { @@ -146,6 +151,25 @@ func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{} } st.cc = cc st.fr = NewFramer(cc, cc) + if framerReuseFrames { + st.fr.SetReuseFrames() + } + if !logFrameReads && !logFrameWrites { + st.fr.debugReadLoggerf = func(m string, v ...interface{}) { + m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n" + st.frameReadLogMu.Lock() + fmt.Fprintf(&st.frameReadLogBuf, m, v...) + st.frameReadLogMu.Unlock() + } + st.fr.debugWriteLoggerf = func(m string, v ...interface{}) { + m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n" + st.frameWriteLogMu.Lock() + fmt.Fprintf(&st.frameWriteLogBuf, m, v...) 
+ st.frameWriteLogMu.Unlock() + } + st.fr.logReads = true + st.fr.logWrites = true + } } return st } @@ -162,7 +186,7 @@ func (st *serverTester) addLogFilter(phrase string) { func (st *serverTester) stream(id uint32) *stream { ch := make(chan *stream, 1) - st.sc.testHookCh <- func(int) { + st.sc.serveMsgCh <- func(int) { ch <- st.sc.streams[id] } return <-ch @@ -170,7 +194,7 @@ func (st *serverTester) stream(id uint32) *stream { func (st *serverTester) streamState(id uint32) streamState { ch := make(chan streamState, 1) - st.sc.testHookCh <- func(int) { + st.sc.serveMsgCh <- func(int) { state, _ := st.sc.state(id) ch <- state } @@ -180,7 +204,7 @@ func (st *serverTester) streamState(id uint32) streamState { // loopNum reports how many times this conn's select loop has gone around. func (st *serverTester) loopNum() int { lastc := make(chan int, 1) - st.sc.testHookCh <- func(loopNum int) { + st.sc.serveMsgCh <- func(loopNum int) { lastc <- loopNum } return <-lastc @@ -188,7 +212,7 @@ func (st *serverTester) loopNum() int { // awaitIdle heuristically awaits for the server conn's select loop to be idle. // The heuristic is that the server connection's serve loop must schedule -// 50 times in a row without any channel sends or receives occuring. +// 50 times in a row without any channel sends or receives occurring. 
func (st *serverTester) awaitIdle() { remain := 50 last := st.loopNum() @@ -204,6 +228,27 @@ func (st *serverTester) awaitIdle() { } func (st *serverTester) Close() { + if st.t.Failed() { + st.frameReadLogMu.Lock() + if st.frameReadLogBuf.Len() > 0 { + st.t.Logf("Framer read log:\n%s", st.frameReadLogBuf.String()) + } + st.frameReadLogMu.Unlock() + + st.frameWriteLogMu.Lock() + if st.frameWriteLogBuf.Len() > 0 { + st.t.Logf("Framer write log:\n%s", st.frameWriteLogBuf.String()) + } + st.frameWriteLogMu.Unlock() + + // If we failed already (and are likely in a Fatal, + // unwindowing), force close the connection, so the + // httptest.Server doesn't wait forever for the conn + // to close. + if st.cc != nil { + st.cc.Close() + } + } st.ts.Close() if st.cc != nil { st.cc.Close() @@ -214,11 +259,52 @@ func (st *serverTester) Close() { // greet initiates the client's HTTP/2 connection into a state where // frames may be sent. func (st *serverTester) greet() { + st.greetAndCheckSettings(func(Setting) error { return nil }) +} + +func (st *serverTester) greetAndCheckSettings(checkSetting func(s Setting) error) { st.writePreface() st.writeInitialSettings() - st.wantSettings() + st.wantSettings().ForeachSetting(checkSetting) st.writeSettingsAck() - st.wantSettingsAck() + + // The initial WINDOW_UPDATE and SETTINGS ACK can come in any order. 
+ var gotSettingsAck bool + var gotWindowUpdate bool + + for i := 0; i < 2; i++ { + f, err := st.readFrame() + if err != nil { + st.t.Fatal(err) + } + switch f := f.(type) { + case *SettingsFrame: + if !f.Header().Flags.Has(FlagSettingsAck) { + st.t.Fatal("Settings Frame didn't have ACK set") + } + gotSettingsAck = true + + case *WindowUpdateFrame: + if f.FrameHeader.StreamID != 0 { + st.t.Fatalf("WindowUpdate StreamID = %d; want 0", f.FrameHeader.StreamID, 0) + } + incr := uint32((&Server{}).initialConnRecvWindowSize() - initialWindowSize) + if f.Increment != incr { + st.t.Fatalf("WindowUpdate increment = %d; want %d", f.Increment, incr) + } + gotWindowUpdate = true + + default: + st.t.Fatalf("Wanting a settings ACK or window update, received a %T", f) + } + } + + if !gotSettingsAck { + st.t.Fatalf("Didn't get a settings ACK") + } + if !gotWindowUpdate { + st.t.Fatalf("Didn't get a window update") + } } func (st *serverTester) writePreface() { @@ -249,6 +335,12 @@ func (st *serverTester) writeHeaders(p HeadersFrameParam) { } } +func (st *serverTester) writePriority(id uint32, p PriorityParam) { + if err := st.fr.WritePriority(id, p); err != nil { + st.t.Fatalf("Error writing PRIORITY: %v", err) + } +} + func (st *serverTester) encodeHeaderField(k, v string) { err := st.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) if err != nil { @@ -272,39 +364,44 @@ func (st *serverTester) encodeHeaderRaw(headers ...string) []byte { } // encodeHeader encodes headers and returns their HPACK bytes. headers -// must contain an even number of key/value pairs. There may be +// must contain an even number of key/value pairs. There may be // multiple pairs for keys (e.g. "cookie"). The :method, :path, and -// :scheme headers default to GET, / and https. +// :scheme headers default to GET, / and https. The :authority header +// defaults to st.ts.Listener.Addr(). 
func (st *serverTester) encodeHeader(headers ...string) []byte { if len(headers)%2 == 1 { panic("odd number of kv args") } st.headerBuf.Reset() + defaultAuthority := st.ts.Listener.Addr().String() if len(headers) == 0 { // Fast path, mostly for benchmarks, so test code doesn't pollute // profiles when we're looking to improve server allocations. st.encodeHeaderField(":method", "GET") - st.encodeHeaderField(":path", "/") st.encodeHeaderField(":scheme", "https") + st.encodeHeaderField(":authority", defaultAuthority) + st.encodeHeaderField(":path", "/") return st.headerBuf.Bytes() } if len(headers) == 2 && headers[0] == ":method" { // Another fast path for benchmarks. st.encodeHeaderField(":method", headers[1]) - st.encodeHeaderField(":path", "/") st.encodeHeaderField(":scheme", "https") + st.encodeHeaderField(":authority", defaultAuthority) + st.encodeHeaderField(":path", "/") return st.headerBuf.Bytes() } pseudoCount := map[string]int{} - keys := []string{":method", ":path", ":scheme"} + keys := []string{":method", ":scheme", ":authority", ":path"} vals := map[string][]string{ - ":method": {"GET"}, - ":path": {"/"}, - ":scheme": {"https"}, + ":method": {"GET"}, + ":scheme": {"https"}, + ":authority": {defaultAuthority}, + ":path": {"/"}, } for len(headers) > 0 { k, v := headers[0], headers[1] @@ -348,32 +445,39 @@ func (st *serverTester) writeData(streamID uint32, endStream bool, data []byte) } } -func (st *serverTester) readFrame() (Frame, error) { +func (st *serverTester) writeDataPadded(streamID uint32, endStream bool, data, pad []byte) { + if err := st.fr.WriteDataPadded(streamID, endStream, data, pad); err != nil { + st.t.Fatalf("Error writing DATA: %v", err) + } +} + +func readFrameTimeout(fr *Framer, wait time.Duration) (Frame, error) { + ch := make(chan interface{}, 1) go func() { - fr, err := st.fr.ReadFrame() + fr, err := fr.ReadFrame() if err != nil { - st.frErrc <- err + ch <- err } else { - st.frc <- fr + ch <- fr } }() - t := st.readTimer - if t == nil 
{ - t = time.NewTimer(2 * time.Second) - st.readTimer = t - } - t.Reset(2 * time.Second) - defer t.Stop() + t := time.NewTimer(wait) select { - case f := <-st.frc: - return f, nil - case err := <-st.frErrc: - return nil, err + case v := <-ch: + t.Stop() + if fr, ok := v.(Frame); ok { + return fr, nil + } + return nil, v.(error) case <-t.C: return nil, errors.New("timeout waiting for frame") } } +func (st *serverTester) readFrame() (Frame, error) { + return readFrameTimeout(st.fr, 2*time.Second) +} + func (st *serverTester) wantHeaders() *HeadersFrame { f, err := st.readFrame() if err != nil { @@ -492,7 +596,18 @@ func (st *serverTester) wantSettingsAck() { if !sf.Header().Flags.Has(FlagSettingsAck) { st.t.Fatal("Settings Frame didn't have ACK set") } +} +func (st *serverTester) wantPushPromise() *PushPromiseFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatal(err) + } + ppf, ok := f.(*PushPromiseFrame) + if !ok { + st.t.Fatalf("Wanted PushPromise, received %T", ppf) + } + return ppf } func TestServer(t *testing.T) { @@ -509,12 +624,7 @@ func TestServer(t *testing.T) { server sends in the HTTP/2 connection. `) - st.writePreface() - st.writeInitialSettings() - st.wantSettings() - st.writeSettingsAck() - st.wantSettingsAck() - + st.greet() st.writeHeaders(HeadersFrameParam{ StreamID: 1, // clients send odd numbers BlockFragment: st.encodeHeader(), @@ -587,7 +697,7 @@ func TestServer_Request_Get_PathSlashes(t *testing.T) { } // TODO: add a test with EndStream=true on the HEADERS but setting a -// Content-Length anyway. Should we just omit it and force it to +// Content-Length anyway. Should we just omit it and force it to // zero? 
func TestServer_Request_Post_NoContentLength_EndStream(t *testing.T) { @@ -747,7 +857,7 @@ func TestServer_Request_Get_Host(t *testing.T) { testServerRequest(t, func(st *serverTester) { st.writeHeaders(HeadersFrameParam{ StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader("host", host), + BlockFragment: st.encodeHeader(":authority", "", "host", host), EndStream: true, EndHeaders: true, }) @@ -926,7 +1036,7 @@ func TestServer_Request_Reject_Pseudo_Unknown(t *testing.T) { func testRejectRequest(t *testing.T, send func(*serverTester)) { st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - t.Fatal("server request made it to handler; should've been rejected") + t.Error("server request made it to handler; should've been rejected") }) defer st.Close() @@ -935,6 +1045,39 @@ func testRejectRequest(t *testing.T, send func(*serverTester)) { st.wantRSTStream(1, ErrCodeProtocol) } +func testRejectRequestWithProtocolError(t *testing.T, send func(*serverTester)) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + t.Error("server request made it to handler; should've been rejected") + }, optQuiet) + defer st.Close() + + st.greet() + send(st) + gf := st.wantGoAway() + if gf.ErrCode != ErrCodeProtocol { + t.Errorf("err code = %v; want %v", gf.ErrCode, ErrCodeProtocol) + } +} + +// Section 5.1, on idle connections: "Receiving any frame other than +// HEADERS or PRIORITY on a stream in this state MUST be treated as a +// connection error (Section 5.4.1) of type PROTOCOL_ERROR." 
+func TestRejectFrameOnIdle_WindowUpdate(t *testing.T) { + testRejectRequestWithProtocolError(t, func(st *serverTester) { + st.fr.WriteWindowUpdate(123, 456) + }) +} +func TestRejectFrameOnIdle_Data(t *testing.T) { + testRejectRequestWithProtocolError(t, func(st *serverTester) { + st.fr.WriteData(123, true, nil) + }) +} +func TestRejectFrameOnIdle_RSTStream(t *testing.T) { + testRejectRequestWithProtocolError(t, func(st *serverTester) { + st.fr.WriteRSTStream(123, ErrCodeCancel) + }) +} + func TestServer_Request_Connect(t *testing.T) { testServerRequest(t, func(st *serverTester) { st.writeHeaders(HeadersFrameParam{ @@ -1033,10 +1176,10 @@ func TestServer_RejectsLargeFrames(t *testing.T) { if gf.ErrCode != ErrCodeFrameSize { t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFrameSize) } - if st.logBuf.Len() != 0 { + if st.serverLogBuf.Len() != 0 { // Previously we spun here for a bit until the GOAWAY disconnect // timer fired, logging while we fired. - t.Errorf("unexpected server output: %.500s\n", st.logBuf.Bytes()) + t.Errorf("unexpected server output: %.500s\n", st.serverLogBuf.Bytes()) } } @@ -1072,6 +1215,40 @@ func TestServer_Handler_Sends_WindowUpdate(t *testing.T) { st.wantWindowUpdate(0, 3) // no more stream-level, since END_STREAM } +// the version of the TestServer_Handler_Sends_WindowUpdate with padding. 
+// See golang.org/issue/16556 +func TestServer_Handler_Sends_WindowUpdate_Padding(t *testing.T) { + puppet := newHandlerPuppet() + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + puppet.act(w, r) + }) + defer st.Close() + defer puppet.done() + + st.greet() + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, + EndHeaders: true, + }) + st.writeDataPadded(1, false, []byte("abcdef"), []byte{0, 0, 0, 0}) + + // Expect to immediately get our 5 bytes of padding back for + // both the connection and stream (4 bytes of padding + 1 byte of length) + st.wantWindowUpdate(0, 5) + st.wantWindowUpdate(1, 5) + + puppet.do(readBodyHandler(t, "abc")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(1, 3) + + puppet.do(readBodyHandler(t, "def")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(1, 3) +} + func TestServer_Send_GoAway_After_Bogus_WindowUpdate(t *testing.T) { st := newServerTester(t, nil) defer st.Close() @@ -1126,6 +1303,7 @@ func testServerPostUnblock(t *testing.T, inHandler <- true errc <- handler(w, r) }) + defer st.Close() st.greet() st.writeHeaders(HeadersFrameParam{ StreamID: 1, @@ -1143,7 +1321,6 @@ func testServerPostUnblock(t *testing.T, case <-time.After(5 * time.Second): t.Fatal("timeout waiting for Handler to return") } - st.Close() } func TestServer_RSTStream_Unblocks_Read(t *testing.T) { @@ -1400,6 +1577,36 @@ func TestServer_Rejects_Continuation0(t *testing.T) { }) } +// No PRIORITY on stream 0. +func TestServer_Rejects_Priority0(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.fr.AllowIllegalWrites = true + st.writePriority(0, PriorityParam{StreamDep: 1}) + }) +} + +// No HEADERS frame with a self-dependence. 
+func TestServer_Rejects_HeadersSelfDependence(t *testing.T) { + testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) { + st.fr.AllowIllegalWrites = true + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + Priority: PriorityParam{StreamDep: 1}, + }) + }) +} + +// No PRIORTY frame with a self-dependence. +func TestServer_Rejects_PrioritySelfDependence(t *testing.T) { + testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) { + st.fr.AllowIllegalWrites = true + st.writePriority(1, PriorityParam{StreamDep: 1}) + }) +} + func TestServer_Rejects_PushPromise(t *testing.T) { testServerRejectsConn(t, func(st *serverTester) { pp := PushPromiseParam{ @@ -1785,8 +1992,14 @@ func TestServer_Response_LargeWrite(t *testing.T) { // Test that the handler can't write more than the client allows func TestServer_Response_LargeWrite_FlowControlled(t *testing.T) { - const size = 1 << 20 - const maxFrameSize = 16 << 10 + // Make these reads. Before each read, the client adds exactly enough + // flow-control to satisfy the read. Numbers chosen arbitrarily. + reads := []int{123, 1, 13, 127} + size := 0 + for _, n := range reads { + size += n + } + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { w.(http.Flusher).Flush() n, err := w.Write(bytes.Repeat([]byte("a"), size)) @@ -1800,17 +2013,12 @@ func TestServer_Response_LargeWrite_FlowControlled(t *testing.T) { }, func(st *serverTester) { // Set the window size to something explicit for this test. // It's also how much initial data we expect. 
- const initWindowSize = 123 - if err := st.fr.WriteSettings( - Setting{SettingInitialWindowSize, initWindowSize}, - Setting{SettingMaxFrameSize, maxFrameSize}, - ); err != nil { + if err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, uint32(reads[0])}); err != nil { t.Fatal(err) } st.wantSettingsAck() getSlash(st) // make the single request - defer func() { st.fr.WriteRSTStream(1, ErrCodeCancel) }() hf := st.wantHeaders() if hf.StreamEnded() { @@ -1821,11 +2029,11 @@ func TestServer_Response_LargeWrite_FlowControlled(t *testing.T) { } df := st.wantData() - if got := len(df.Data()); got != initWindowSize { - t.Fatalf("Initial window size = %d but got DATA with %d bytes", initWindowSize, got) + if got := len(df.Data()); got != reads[0] { + t.Fatalf("Initial window size = %d but got DATA with %d bytes", reads[0], got) } - for _, quota := range []int{1, 13, 127} { + for _, quota := range reads[1:] { if err := st.fr.WriteWindowUpdate(1, uint32(quota)); err != nil { t.Fatal(err) } @@ -1834,10 +2042,6 @@ func TestServer_Response_LargeWrite_FlowControlled(t *testing.T) { t.Fatalf("read %d bytes after giving %d quota", len(df.Data()), quota) } } - - if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { - t.Fatal(err) - } }) } @@ -2147,7 +2351,7 @@ func TestServer_NoCrash_HandlerClose_Then_ClientClose(t *testing.T) { // Sent when the a Handler closes while a client has // indicated it's still sending DATA: - st.wantRSTStream(1, ErrCodeCancel) + st.wantRSTStream(1, ErrCodeNo) // Now the handler has ended, so it's ended its // stream, but the client hasn't closed its side @@ -2156,6 +2360,9 @@ func TestServer_NoCrash_HandlerClose_Then_ClientClose(t *testing.T) { // it did before. st.writeData(1, true, []byte("foo")) + // Get our flow control bytes back, since the handler didn't get them. 
+ st.wantWindowUpdate(0, uint32(len("foo"))) + // Sent after a peer sends data anyway (admittedly the // previous RST_STREAM might've still been in-flight), // but they'll get the more friendly 'cancel' code @@ -2224,6 +2431,7 @@ func TestServer_Rejects_TLSBadCipher(t *testing.T) { tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256, } }) defer st.Close() @@ -2266,9 +2474,9 @@ func (st *serverTester) decodeHeader(headerBlock []byte) (pairs [][2]string) { return st.decodedHeaders } -// testServerResponse sets up an idle HTTP/2 connection and lets you -// write a single request with writeReq, and then reply to it in some way with the provided handler, -// and then verify the output with the serverTester again (assuming the handler returns nil) +// testServerResponse sets up an idle HTTP/2 connection. The client function should +// write a single request that must be handled by the handler. This waits up to 5s +// for client to return, then up to an additional 2s for the handler to return. 
func testServerResponse(t testing.TB, handler func(http.ResponseWriter, *http.Request) error, client func(*serverTester), @@ -2291,9 +2499,8 @@ func testServerResponse(t testing.TB, select { case <-donec: - return case <-time.After(5 * time.Second): - t.Fatal("timeout") + t.Fatal("timeout in client") } select { @@ -2302,7 +2509,7 @@ func testServerResponse(t testing.TB, t.Fatalf("Error in handler: %v", err) } case <-time.After(2 * time.Second): - t.Error("timeout waiting for handler to finish") + t.Fatal("timeout in handler") } } @@ -2430,11 +2637,9 @@ func TestServerDoS_MaxHeaderListSize(t *testing.T) { defer st.Close() // shake hands - st.writePreface() - st.writeInitialSettings() frameSize := defaultMaxReadFrameSize var advHeaderListSize *uint32 - st.wantSettings().ForeachSetting(func(s Setting) error { + st.greetAndCheckSettings(func(s Setting) error { switch s.ID { case SettingMaxFrameSize: if s.Val < minMaxFrameSize { @@ -2449,8 +2654,6 @@ func TestServerDoS_MaxHeaderListSize(t *testing.T) { } return nil }) - st.writeSettingsAck() - st.wantSettingsAck() if advHeaderListSize == nil { t.Errorf("server didn't advertise a max header list size") @@ -2515,7 +2718,7 @@ func TestCompressionErrorOnWrite(t *testing.T) { defer st.Close() st.greet() - maxAllowed := st.sc.maxHeaderStringLen() + maxAllowed := st.sc.framer.maxHeaderStringLen() // Crank this up, now that we have a conn connected with the // hpack.Decoder's max string length set has been initialized @@ -2524,8 +2727,12 @@ func TestCompressionErrorOnWrite(t *testing.T) { // the max string size. serverConfig.MaxHeaderBytes = 1 << 20 - // First a request with a header that's exactly the max allowed size. + // First a request with a header that's exactly the max allowed size + // for the hpack compression. It's still too long for the header list + // size, so we'll get the 431 error, but that keeps the compression + // context still valid. 
hbf := st.encodeHeader("foo", strings.Repeat("a", maxAllowed)) + st.writeHeaders(HeadersFrameParam{ StreamID: 1, BlockFragment: hbf, @@ -2533,8 +2740,24 @@ func TestCompressionErrorOnWrite(t *testing.T) { EndHeaders: true, }) h := st.wantHeaders() - if !h.HeadersEnded() || !h.StreamEnded() { - t.Errorf("Unexpected HEADER frame %v", h) + if !h.HeadersEnded() { + t.Fatalf("Got HEADERS without END_HEADERS set: %v", h) + } + headers := st.decodeHeader(h.HeaderBlockFragment()) + want := [][2]string{ + {":status", "431"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", "63"}, + } + if !reflect.DeepEqual(headers, want) { + t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) + } + df := st.wantData() + if !strings.Contains(string(df.Data()), "HTTP Error 431") { + t.Errorf("Unexpected data body: %q", df.Data()) + } + if !df.StreamEnded() { + t.Fatalf("expect data stream end") } // And now send one that's just one byte too big. @@ -2633,13 +2856,11 @@ func testServerWritesTrailers(t *testing.T, withFlush bool) { testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { w.Header().Set("Trailer", "Server-Trailer-A, Server-Trailer-B") w.Header().Add("Trailer", "Server-Trailer-C") - - // TODO: decide if the server should filter these while - // writing the Trailer header in the response. Currently it - // appears net/http doesn't do this for http/1.1 w.Header().Add("Trailer", "Transfer-Encoding, Content-Length, Trailer") // filtered + + // Regular headers: w.Header().Set("Foo", "Bar") - w.Header().Set("Content-Length", "5") + w.Header().Set("Content-Length", "5") // len("Hello") io.WriteString(w, "Hello") if withFlush { @@ -2647,7 +2868,15 @@ func testServerWritesTrailers(t *testing.T, withFlush bool) { } w.Header().Set("Server-Trailer-A", "valuea") w.Header().Set("Server-Trailer-C", "valuec") // skipping B + // After a flush, random keys like Server-Surprise shouldn't show up: w.Header().Set("Server-Surpise", "surprise! 
this isn't predeclared!") + // But we do permit promoting keys to trailers after a + // flush if they start with the magic + // otherwise-invalid "Trailer:" prefix: + w.Header().Set("Trailer:Post-Header-Trailer", "hi1") + w.Header().Set("Trailer:post-header-trailer2", "hi2") + w.Header().Set("Trailer:Range", "invalid") + w.Header().Set("Trailer:Foo\x01Bogus", "invalid") w.Header().Set("Transfer-Encoding", "should not be included; Forbidden by RFC 2616 14.40") w.Header().Set("Content-Length", "should not be included; Forbidden by RFC 2616 14.40") w.Header().Set("Trailer", "should not be included; Forbidden by RFC 2616 14.40") @@ -2689,6 +2918,8 @@ func testServerWritesTrailers(t *testing.T, withFlush bool) { t.Fatalf("trailers HEADERS lacked END_HEADERS") } wanth = [][2]string{ + {"post-header-trailer", "hi1"}, + {"post-header-trailer2", "hi2"}, {"server-trailer-a", "valuea"}, {"server-trailer-c", "valuec"}, } @@ -2699,7 +2930,39 @@ func testServerWritesTrailers(t *testing.T, withFlush bool) { }) } +// validate transmitted header field names & values +// golang.org/issue/14048 +func TestServerDoesntWriteInvalidHeaders(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Add("OK1", "x") + w.Header().Add("Bad:Colon", "x") // colon (non-token byte) in key + w.Header().Add("Bad1\x00", "x") // null in key + w.Header().Add("Bad2", "x\x00y") // null in value + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Error("response HEADERS lacked END_STREAM") + } + if !hf.HeadersEnded() { + t.Fatal("response HEADERS didn't have END_HEADERS") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"ok1", "x"}, + {"content-type", "text/plain; charset=utf-8"}, + {"content-length", "0"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth) + } + }) +} + func 
BenchmarkServerGets(b *testing.B) { + defer disableGoroutineTracking()() b.ReportAllocs() const msg = "Hello, world" @@ -2731,10 +2994,17 @@ func BenchmarkServerGets(b *testing.B) { } func BenchmarkServerPosts(b *testing.B) { + defer disableGoroutineTracking()() b.ReportAllocs() const msg = "Hello, world" st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + // Consume the (empty) body from th peer before replying, otherwise + // the server will sometimes (depending on scheduling) send the peer a + // a RST_STREAM with the CANCEL error code. + if n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil { + b.Errorf("Copy error; got %v, %v; want 0, nil", n, err) + } io.WriteString(w, msg) }) defer st.Close() @@ -2762,6 +3032,89 @@ func BenchmarkServerPosts(b *testing.B) { } } +// Send a stream of messages from server to client in separate data frames. +// Brings up performance issues seen in long streams. +// Created to show problem in go issue #18502 +func BenchmarkServerToClientStreamDefaultOptions(b *testing.B) { + benchmarkServerToClientStream(b) +} + +// Justification for Change-Id: Iad93420ef6c3918f54249d867098f1dadfa324d8 +// Expect to see memory/alloc reduction by opting in to Frame reuse with the Framer. 
+func BenchmarkServerToClientStreamReuseFrames(b *testing.B) { + benchmarkServerToClientStream(b, optFramerReuseFrames) +} + +func benchmarkServerToClientStream(b *testing.B, newServerOpts ...interface{}) { + defer disableGoroutineTracking()() + b.ReportAllocs() + const msgLen = 1 + // default window size + const windowSize = 1<<16 - 1 + + // next message to send from the server and for the client to expect + nextMsg := func(i int) []byte { + msg := make([]byte, msgLen) + msg[0] = byte(i) + if len(msg) != msgLen { + panic("invalid test setup msg length") + } + return msg + } + + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + // Consume the (empty) body from th peer before replying, otherwise + // the server will sometimes (depending on scheduling) send the peer a + // a RST_STREAM with the CANCEL error code. + if n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil { + b.Errorf("Copy error; got %v, %v; want 0, nil", n, err) + } + for i := 0; i < b.N; i += 1 { + w.Write(nextMsg(i)) + w.(http.Flusher).Flush() + } + }, newServerOpts...) 
+ defer st.Close() + st.greet() + + const id = uint32(1) + + st.writeHeaders(HeadersFrameParam{ + StreamID: id, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, + EndHeaders: true, + }) + + st.writeData(id, true, nil) + st.wantHeaders() + + var pendingWindowUpdate = uint32(0) + + for i := 0; i < b.N; i += 1 { + expected := nextMsg(i) + df := st.wantData() + if bytes.Compare(expected, df.data) != 0 { + b.Fatalf("Bad message received; want %v; got %v", expected, df.data) + } + // try to send infrequent but large window updates so they don't overwhelm the test + pendingWindowUpdate += uint32(len(df.data)) + if pendingWindowUpdate >= windowSize/2 { + if err := st.fr.WriteWindowUpdate(0, pendingWindowUpdate); err != nil { + b.Fatal(err) + } + if err := st.fr.WriteWindowUpdate(id, pendingWindowUpdate); err != nil { + b.Fatal(err) + } + pendingWindowUpdate = 0 + } + } + df := st.wantData() + if !df.StreamEnded() { + b.Fatalf("DATA didn't have END_STREAM; got %v", df) + } +} + // go-fuzz bug, originally reported at https://github.com/bradfitz/http2/issues/53 // Verify we don't hang. 
func TestIssue53(t *testing.T) { @@ -2769,12 +3122,16 @@ func TestIssue53(t *testing.T) { "\r\n\r\n\x00\x00\x00\x01\ainfinfin\ad" s := &http.Server{ ErrorLog: log.New(io.MultiWriter(stderrv(), twriter{t: t}), "", log.LstdFlags), + Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("hello")) + }), + } + s2 := &Server{ + MaxReadFrameSize: 1 << 16, + PermitProhibitedCipherSuites: true, } - s2 := &Server{MaxReadFrameSize: 1 << 16, PermitProhibitedCipherSuites: true} c := &issue53Conn{[]byte(data), false, false} - s2.handleConn(s, c, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - w.Write([]byte("hello")) - })) + s2.ServeConn(c, &ServeConnOpts{BaseConfig: s}) if !c.closed { t.Fatal("connection is not closed") } @@ -2805,8 +3162,12 @@ func (c *issue53Conn) Close() error { return nil } -func (c *issue53Conn) LocalAddr() net.Addr { return &net.TCPAddr{net.IP{127, 0, 0, 1}, 49706, ""} } -func (c *issue53Conn) RemoteAddr() net.Addr { return &net.TCPAddr{net.IP{127, 0, 0, 1}, 49706, ""} } +func (c *issue53Conn) LocalAddr() net.Addr { + return &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 49706} +} +func (c *issue53Conn) RemoteAddr() net.Addr { + return &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 49706} +} func (c *issue53Conn) SetDeadline(t time.Time) error { return nil } func (c *issue53Conn) SetReadDeadline(t time.Time) error { return nil } func (c *issue53Conn) SetWriteDeadline(t time.Time) error { return nil } @@ -2814,51 +3175,43 @@ func (c *issue53Conn) SetWriteDeadline(t time.Time) error { return nil } // golang.org/issue/12895 func TestConfigureServer(t *testing.T) { tests := []struct { - name string - in http.Server - wantErr string + name string + tlsConfig *tls.Config + wantErr string }{ { name: "empty server", - in: http.Server{}, }, { name: "just the required cipher suite", - in: http.Server{ - TLSConfig: &tls.Config{ - CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, - }, + 
tlsConfig: &tls.Config{ + CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, }, }, { name: "missing required cipher suite", - in: http.Server{ - TLSConfig: &tls.Config{ - CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384}, - }, + tlsConfig: &tls.Config{ + CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384}, }, wantErr: "is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", }, { name: "required after bad", - in: http.Server{ - TLSConfig: &tls.Config{ - CipherSuites: []uint16{tls.TLS_RSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, - }, + tlsConfig: &tls.Config{ + CipherSuites: []uint16{tls.TLS_RSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, }, wantErr: "contains an HTTP/2-approved cipher suite (0xc02f), but it comes after", }, { name: "bad after required", - in: http.Server{ - TLSConfig: &tls.Config{ - CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_RSA_WITH_RC4_128_SHA}, - }, + tlsConfig: &tls.Config{ + CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_RSA_WITH_RC4_128_SHA}, }, }, } for _, tt := range tests { - err := ConfigureServer(&tt.in, nil) + srv := &http.Server{TLSConfig: tt.tlsConfig} + err := ConfigureServer(srv, nil) if (err != nil) != (tt.wantErr != "") { if tt.wantErr != "" { t.Errorf("%s: success, but want error", tt.name) @@ -2869,7 +3222,7 @@ func TestConfigureServer(t *testing.T) { if err != nil && tt.wantErr != "" && !strings.Contains(err.Error(), tt.wantErr) { t.Errorf("%s: err = %v; want substring %q", tt.name, err, tt.wantErr) } - if err == nil && !tt.in.TLSConfig.PreferServerCipherSuites { + if err == nil && !srv.TLSConfig.PreferServerCipherSuites { t.Errorf("%s: PreferServerCipherSuite is false; want true", tt.name) } } @@ -2938,3 +3291,431 @@ func TestServerNoDuplicateContentType(t *testing.T) { t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) } } + +func disableGoroutineTracking() 
(restore func()) { + old := DebugGoroutines + DebugGoroutines = false + return func() { DebugGoroutines = old } +} + +func BenchmarkServer_GetRequest(b *testing.B) { + defer disableGoroutineTracking()() + b.ReportAllocs() + const msg = "Hello, world." + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + n, err := io.Copy(ioutil.Discard, r.Body) + if err != nil || n > 0 { + b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err) + } + io.WriteString(w, msg) + }) + defer st.Close() + + st.greet() + // Give the server quota to reply. (plus it has the the 64KB) + if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { + b.Fatal(err) + } + hbf := st.encodeHeader(":method", "GET") + for i := 0; i < b.N; i++ { + streamID := uint32(1 + 2*i) + st.writeHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: hbf, + EndStream: true, + EndHeaders: true, + }) + st.wantHeaders() + st.wantData() + } +} + +func BenchmarkServer_PostRequest(b *testing.B) { + defer disableGoroutineTracking()() + b.ReportAllocs() + const msg = "Hello, world." + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + n, err := io.Copy(ioutil.Discard, r.Body) + if err != nil || n > 0 { + b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err) + } + io.WriteString(w, msg) + }) + defer st.Close() + st.greet() + // Give the server quota to reply. 
(plus it has the the 64KB) + if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { + b.Fatal(err) + } + hbf := st.encodeHeader(":method", "POST") + for i := 0; i < b.N; i++ { + streamID := uint32(1 + 2*i) + st.writeHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: hbf, + EndStream: false, + EndHeaders: true, + }) + st.writeData(streamID, true, nil) + st.wantHeaders() + st.wantData() + } +} + +type connStateConn struct { + net.Conn + cs tls.ConnectionState +} + +func (c connStateConn) ConnectionState() tls.ConnectionState { return c.cs } + +// golang.org/issue/12737 -- handle any net.Conn, not just +// *tls.Conn. +func TestServerHandleCustomConn(t *testing.T) { + var s Server + c1, c2 := net.Pipe() + clientDone := make(chan struct{}) + handlerDone := make(chan struct{}) + var req *http.Request + go func() { + defer close(clientDone) + defer c2.Close() + fr := NewFramer(c2, c2) + io.WriteString(c2, ClientPreface) + fr.WriteSettings() + fr.WriteSettingsAck() + f, err := fr.ReadFrame() + if err != nil { + t.Error(err) + return + } + if sf, ok := f.(*SettingsFrame); !ok || sf.IsAck() { + t.Errorf("Got %v; want non-ACK SettingsFrame", summarizeFrame(f)) + return + } + f, err = fr.ReadFrame() + if err != nil { + t.Error(err) + return + } + if sf, ok := f.(*SettingsFrame); !ok || !sf.IsAck() { + t.Errorf("Got %v; want ACK SettingsFrame", summarizeFrame(f)) + return + } + var henc hpackEncoder + fr.WriteHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: henc.encodeHeaderRaw(t, ":method", "GET", ":path", "/", ":scheme", "https", ":authority", "foo.com"), + EndStream: true, + EndHeaders: true, + }) + go io.Copy(ioutil.Discard, c2) + <-handlerDone + }() + const testString = "my custom ConnectionState" + fakeConnState := tls.ConnectionState{ + ServerName: testString, + Version: tls.VersionTLS12, + CipherSuite: cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + } + go s.ServeConn(connStateConn{c1, fakeConnState}, &ServeConnOpts{ + 
BaseConfig: &http.Server{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer close(handlerDone) + req = r + }), + }}) + select { + case <-clientDone: + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for handler") + } + if req.TLS == nil { + t.Fatalf("Request.TLS is nil. Got: %#v", req) + } + if req.TLS.ServerName != testString { + t.Fatalf("Request.TLS = %+v; want ServerName of %q", req.TLS, testString) + } +} + +// golang.org/issue/14214 +func TestServer_Rejects_ConnHeaders(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + t.Error("should not get to Handler") + }) + defer st.Close() + st.greet() + st.bodylessReq1("connection", "foo") + hf := st.wantHeaders() + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "400"}, + {"content-type", "text/plain; charset=utf-8"}, + {"x-content-type-options", "nosniff"}, + {"content-length", "51"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } +} + +type hpackEncoder struct { + enc *hpack.Encoder + buf bytes.Buffer +} + +func (he *hpackEncoder) encodeHeaderRaw(t *testing.T, headers ...string) []byte { + if len(headers)%2 == 1 { + panic("odd number of kv args") + } + he.buf.Reset() + if he.enc == nil { + he.enc = hpack.NewEncoder(&he.buf) + } + for len(headers) > 0 { + k, v := headers[0], headers[1] + err := he.enc.WriteField(hpack.HeaderField{Name: k, Value: v}) + if err != nil { + t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) + } + headers = headers[2:] + } + return he.buf.Bytes() +} + +func TestCheckValidHTTP2Request(t *testing.T) { + tests := []struct { + h http.Header + want error + }{ + { + h: http.Header{"Te": {"trailers"}}, + want: nil, + }, + { + h: http.Header{"Te": {"trailers", "bogus"}}, + want: errors.New(`request header "TE" may only be "trailers" in HTTP/2`), + }, + { + h: http.Header{"Foo": {""}}, + want: nil, + }, + { + h: 
http.Header{"Connection": {""}}, + want: errors.New(`request header "Connection" is not valid in HTTP/2`), + }, + { + h: http.Header{"Proxy-Connection": {""}}, + want: errors.New(`request header "Proxy-Connection" is not valid in HTTP/2`), + }, + { + h: http.Header{"Keep-Alive": {""}}, + want: errors.New(`request header "Keep-Alive" is not valid in HTTP/2`), + }, + { + h: http.Header{"Upgrade": {""}}, + want: errors.New(`request header "Upgrade" is not valid in HTTP/2`), + }, + } + for i, tt := range tests { + got := checkValidHTTP2RequestHeaders(tt.h) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("%d. checkValidHTTP2Request = %v; want %v", i, got, tt.want) + } + } +} + +// golang.org/issue/14030 +func TestExpect100ContinueAfterHandlerWrites(t *testing.T) { + const msg = "Hello" + const msg2 = "World" + + doRead := make(chan bool, 1) + defer close(doRead) // fallback cleanup + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, msg) + w.(http.Flusher).Flush() + + // Do a read, which might force a 100-continue status to be sent. 
+ <-doRead + r.Body.Read(make([]byte, 10)) + + io.WriteString(w, msg2) + + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + req, _ := http.NewRequest("POST", st.ts.URL, io.LimitReader(neverEnding('A'), 2<<20)) + req.Header.Set("Expect", "100-continue") + + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + + buf := make([]byte, len(msg)) + if _, err := io.ReadFull(res.Body, buf); err != nil { + t.Fatal(err) + } + if string(buf) != msg { + t.Fatalf("msg = %q; want %q", buf, msg) + } + + doRead <- true + + if _, err := io.ReadFull(res.Body, buf); err != nil { + t.Fatal(err) + } + if string(buf) != msg2 { + t.Fatalf("second msg = %q; want %q", buf, msg2) + } +} + +type funcReader func([]byte) (n int, err error) + +func (f funcReader) Read(p []byte) (n int, err error) { return f(p) } + +// golang.org/issue/16481 -- return flow control when streams close with unread data. +// (The Server version of the bug. See also TestUnreadFlowControlReturned_Transport) +func TestUnreadFlowControlReturned_Server(t *testing.T) { + unblock := make(chan bool, 1) + defer close(unblock) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // Don't read the 16KB request body. Wait until the client's + // done sending it and then return. This should cause the Server + // to then return those 16KB of flow control to the client. + <-unblock + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + // This previously hung on the 4th iteration. 
+ for i := 0; i < 6; i++ { + body := io.MultiReader( + io.LimitReader(neverEnding('A'), 16<<10), + funcReader(func([]byte) (n int, err error) { + unblock <- true + return 0, io.EOF + }), + ) + req, _ := http.NewRequest("POST", st.ts.URL, body) + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + } + +} + +func TestServerIdleTimeout(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + }, func(h2s *Server) { + h2s.IdleTimeout = 500 * time.Millisecond + }) + defer st.Close() + + st.greet() + ga := st.wantGoAway() + if ga.ErrCode != ErrCodeNo { + t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode) + } +} + +func TestServerIdleTimeout_AfterRequest(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + const timeout = 250 * time.Millisecond + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + time.Sleep(timeout * 2) + }, func(h2s *Server) { + h2s.IdleTimeout = timeout + }) + defer st.Close() + + st.greet() + + // Send a request which takes twice the timeout. Verifies the + // idle timeout doesn't fire while we're in a request: + st.bodylessReq1() + st.wantHeaders() + + // But the idle timeout should be rearmed after the request + // is done: + ga := st.wantGoAway() + if ga.ErrCode != ErrCodeNo { + t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode) + } +} + +// grpc-go closes the Request.Body currently with a Read. +// Verify that it doesn't race. 
+// See https://github.com/grpc/grpc-go/pull/938 +func TestRequestBodyReadCloseRace(t *testing.T) { + for i := 0; i < 100; i++ { + body := &requestBody{ + pipe: &pipe{ + b: new(bytes.Buffer), + }, + } + body.pipe.CloseWithError(io.EOF) + + done := make(chan bool, 1) + buf := make([]byte, 10) + go func() { + time.Sleep(1 * time.Millisecond) + body.Close() + done <- true + }() + body.Read(buf) + <-done + } +} + +func TestIssue20704Race(t *testing.T) { + if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" { + t.Skip("skipping in short mode") + } + const ( + itemSize = 1 << 10 + itemCount = 100 + ) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + for i := 0; i < itemCount; i++ { + _, err := w.Write(make([]byte, itemSize)) + if err != nil { + return + } + } + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + cl := &http.Client{Transport: tr} + + for i := 0; i < 1000; i++ { + resp, err := cl.Get(st.ts.URL) + if err != nil { + t.Fatal(err) + } + // Force a RST stream to the server by closing without + // reading the body: + resp.Body.Close() + } +} diff --git a/fn/vendor/golang.org/x/net/http2/transport.go b/fn/vendor/golang.org/x/net/http2/transport.go index b3cbb9943..850d7ae09 100644 --- a/fn/vendor/golang.org/x/net/http2/transport.go +++ b/fn/vendor/golang.org/x/net/http2/transport.go @@ -10,12 +10,14 @@ import ( "bufio" "bytes" "compress/gzip" + "crypto/rand" "crypto/tls" "errors" "fmt" "io" "io/ioutil" "log" + "math" "net" "net/http" "sort" @@ -25,6 +27,8 @@ import ( "time" "golang.org/x/net/http2/hpack" + "golang.org/x/net/idna" + "golang.org/x/net/lex/httplex" ) const ( @@ -76,6 +80,10 @@ type Transport struct { // uncompressed. DisableCompression bool + // AllowHTTP, if true, permits HTTP/2 requests using the insecure, + // plain-text "http" scheme. Note that this does not enable h2c support. 
+ AllowHTTP bool + // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to // send in the initial settings frame. It is how many bytes // of response headers are allow. Unlike the http2 spec, zero here @@ -134,32 +142,41 @@ func (t *Transport) initConnPool() { // ClientConn is the state of a single HTTP/2 client connection to an // HTTP/2 server. type ClientConn struct { - t *Transport - tconn net.Conn // usually *tls.Conn, except specialized impls - tlsState *tls.ConnectionState // nil only for specialized impls + t *Transport + tconn net.Conn // usually *tls.Conn, except specialized impls + tlsState *tls.ConnectionState // nil only for specialized impls + singleUse bool // whether being used for a single http.Request // readLoop goroutine fields: readerDone chan struct{} // closed on error readerErr error // set before readerDone is closed - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow flow // our conn-level flow control quota (cs.flow is per stream) - inflow flow // peer's conn-level flow control - closed bool - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - streams map[uint32]*clientStream // client-initiated - nextStreamID uint32 - bw *bufio.Writer - br *bufio.Reader - fr *Framer - // Settings from peer: + idleTimeout time.Duration // or 0 for never + idleTimer *time.Timer + + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow flow // our conn-level flow control quota (cs.flow is per stream) + inflow flow // peer's conn-level flow control + closed bool + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + nextStreamID uint32 + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + bw 
*bufio.Writer + br *bufio.Reader + fr *Framer + lastActive time.Time + // Settings from peer: (also guarded by mu) maxFrameSize uint32 maxConcurrentStreams uint32 initialWindowSize uint32 - hbuf bytes.Buffer // HPACK encoder writes into this - henc *hpack.Encoder - freeBuf [][]byte + + hbuf bytes.Buffer // HPACK encoder writes into this + henc *hpack.Encoder + freeBuf [][]byte wmu sync.Mutex // held while writing; acquire AFTER mu if holding both werr error // first write error that has occurred @@ -170,16 +187,20 @@ type ClientConn struct { type clientStream struct { cc *ClientConn req *http.Request + trace *clientTrace // or nil ID uint32 resc chan resAndError bufPipe pipe // buffered pipe with the flow-controlled response payload + startedWrite bool // started request body write; guarded by cc.mu requestedGzip bool + on100 func() // optional code to run if get a 100 continue response flow flow // guarded by cc.mu inflow flow // guarded by cc.mu bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read readErr error // sticky read error; owned by transportResponseBody.Read stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu + didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu peerReset chan struct{} // closed on peer reset resetErr error // populated before peerReset is closed @@ -187,36 +208,54 @@ type clientStream struct { done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu // owned by clientConnReadLoop: - pastHeaders bool // got HEADERS w/ END_HEADERS - pastTrailers bool // got second HEADERS frame w/ END_HEADERS + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) trailer http.Header // accumulated trailers resTrailer *http.Header // client's Response.Trailer } // awaitRequestCancel runs in its own goroutine and waits 
for the user -// to either cancel a RoundTrip request (using the provided -// Request.Cancel channel), or for the request to be done (any way it -// might be removed from the cc.streams map: peer reset, successful -// completion, TCP connection breakage, etc) -func (cs *clientStream) awaitRequestCancel(cancel <-chan struct{}) { - if cancel == nil { +// to cancel a RoundTrip request, its context to expire, or for the +// request to be done (any way it might be removed from the cc.streams +// map: peer reset, successful completion, TCP connection breakage, +// etc) +func (cs *clientStream) awaitRequestCancel(req *http.Request) { + ctx := reqContext(req) + if req.Cancel == nil && ctx.Done() == nil { return } select { - case <-cancel: + case <-req.Cancel: + cs.cancelStream() cs.bufPipe.CloseWithError(errRequestCanceled) - cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + case <-ctx.Done(): + cs.cancelStream() + cs.bufPipe.CloseWithError(ctx.Err()) case <-cs.done: } } -// checkReset reports any error sent in a RST_STREAM frame by the -// server. -func (cs *clientStream) checkReset() error { +func (cs *clientStream) cancelStream() { + cs.cc.mu.Lock() + didReset := cs.didReset + cs.didReset = true + cs.cc.mu.Unlock() + + if !didReset { + cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } +} + +// checkResetOrDone reports any error sent in a RST_STREAM frame by the +// server, or errStreamClosed if the stream is complete. +func (cs *clientStream) checkResetOrDone() error { select { case <-cs.peerReset: return cs.resetErr + case <-cs.done: + return errStreamClosed default: return nil } @@ -264,29 +303,44 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { // authorityAddr returns a given authority (a host/IP, or host:port / ip:port) // and returns a host:port. The port 443 is added if needed. 
-func authorityAddr(authority string) (addr string) { - if _, _, err := net.SplitHostPort(authority); err == nil { - return authority +func authorityAddr(scheme string, authority string) (addr string) { + host, port, err := net.SplitHostPort(authority) + if err != nil { // authority didn't have a port + port = "443" + if scheme == "http" { + port = "80" + } + host = authority } - return net.JoinHostPort(authority, "443") + if a, err := idna.ToASCII(host); err == nil { + host = a + } + // IPv6 address literal, without a port: + if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { + return host + ":" + port + } + return net.JoinHostPort(host, port) } // RoundTripOpt is like RoundTrip, but takes options. func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { - if req.URL.Scheme != "https" { + if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { return nil, errors.New("http2: unsupported scheme") } - addr := authorityAddr(req.URL.Host) + addr := authorityAddr(req.URL.Scheme, req.URL.Host) for { cc, err := t.connPool().GetClientConn(req, addr) if err != nil { t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) return nil, err } + traceGotConn(req, cc) res, err := cc.RoundTrip(req) - if shouldRetryRequest(req, err) { - continue + if err != nil { + if req, err = shouldRetryRequest(req, err); err == nil { + continue + } } if err != nil { t.vlogf("RoundTrip failure: %v", err) @@ -300,7 +354,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res // connected from previous requests but are now sitting idle. // It does not interrupt any connections currently in use. 
func (t *Transport) CloseIdleConnections() { - if cp, ok := t.connPool().(*clientConnPool); ok { + if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok { cp.closeIdleConnections() } } @@ -308,15 +362,44 @@ func (t *Transport) CloseIdleConnections() { var ( errClientConnClosed = errors.New("http2: client conn is closed") errClientConnUnusable = errors.New("http2: client conn not usable") + + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") + errClientConnGotGoAwayAfterSomeReqBody = errors.New("http2: Transport received Server's graceful shutdown GOAWAY; some request body already written") ) -func shouldRetryRequest(req *http.Request, err error) bool { - // TODO: retry GET requests (no bodies) more aggressively, if shutdown - // before response. - return err == errClientConnUnusable +// shouldRetryRequest is called by RoundTrip when a request fails to get +// response headers. It is always called with a non-nil error. +// It returns either a request to retry (either the same request, or a +// modified clone), or an error if the request can't be replayed. +func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) { + switch err { + default: + return nil, err + case errClientConnUnusable, errClientConnGotGoAway: + return req, nil + case errClientConnGotGoAwayAfterSomeReqBody: + // If the Body is nil (or http.NoBody), it's safe to reuse + // this request and its Body. + if req.Body == nil || reqBodyIsNoBody(req.Body) { + return req, nil + } + // Otherwise we depend on the Request having its GetBody + // func defined. 
+ getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody + if getBody == nil { + return nil, errors.New("http2: Transport: peer server initiated graceful shutdown after some of Request.Body was written; define Request.GetBody to avoid this error") + } + body, err := getBody() + if err != nil { + return nil, err + } + newReq := *req + newReq.Body = body + return &newReq, nil + } } -func (t *Transport) dialClientConn(addr string) (*ClientConn, error) { +func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { host, _, err := net.SplitHostPort(addr) if err != nil { return nil, err @@ -325,16 +408,20 @@ func (t *Transport) dialClientConn(addr string) (*ClientConn, error) { if err != nil { return nil, err } - return t.NewClientConn(tconn) + return t.newClientConn(tconn, singleUse) } func (t *Transport) newTLSConfig(host string) *tls.Config { cfg := new(tls.Config) if t.TLSClientConfig != nil { - *cfg = *t.TLSClientConfig + *cfg = *cloneTLSConfig(t.TLSClientConfig) + } + if !strSliceContains(cfg.NextProtos, NextProtoTLS) { + cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) 
+ } + if cfg.ServerName == "" { + cfg.ServerName = host } - cfg.NextProtos = []string{NextProtoTLS} // TODO: don't override if already in list - cfg.ServerName = host return cfg } @@ -374,15 +461,18 @@ func (t *Transport) disableKeepAlives() bool { return t.t1 != nil && t.t1.DisableKeepAlives } -func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - if VerboseLogs { - t.vlogf("http2: Transport creating client conn to %v", c.RemoteAddr()) - } - if _, err := c.Write(clientPreface); err != nil { - t.vlogf("client preface write error: %v", err) - return nil, err +func (t *Transport) expectContinueTimeout() time.Duration { + if t.t1 == nil { + return 0 } + return transportExpectContinueTimeout(t.t1) +} +func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { + return t.newClientConn(c, false) +} + +func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { cc := &ClientConn{ t: t, tconn: c, @@ -392,7 +482,18 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { initialWindowSize: 65535, // spec default maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. 
streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + pings: make(map[[8]byte]chan struct{}), } + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + } + if VerboseLogs { + t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) + } + cc.cond = sync.NewCond(&cc.mu) cc.flow.add(int32(initialWindowSize)) @@ -401,26 +502,27 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) + cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + cc.fr.MaxHeaderListSize = t.maxHeaderListSize() // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on // henc in response to SETTINGS frames? cc.henc = hpack.NewEncoder(&cc.hbuf) - type connectionStater interface { - ConnectionState() tls.ConnectionState - } if cs, ok := c.(connectionStater); ok { state := cs.ConnectionState() cc.tlsState = &state } initialSettings := []Setting{ - Setting{ID: SettingEnablePush, Val: 0}, - Setting{ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, + {ID: SettingEnablePush, Val: 0}, + {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, } if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } + + cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) 
cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) cc.inflow.add(transportDefaultConnFlow + initialWindowSize) @@ -429,33 +531,6 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { return nil, cc.werr } - // Read the obligatory SETTINGS frame - f, err := cc.fr.ReadFrame() - if err != nil { - return nil, err - } - sf, ok := f.(*SettingsFrame) - if !ok { - return nil, fmt.Errorf("expected settings frame, got: %T", f) - } - cc.fr.WriteSettingsAck() - cc.bw.Flush() - - sf.ForeachSetting(func(s Setting) error { - switch s.ID { - case SettingMaxFrameSize: - cc.maxFrameSize = s.Val - case SettingMaxConcurrentStreams: - cc.maxConcurrentStreams = s.Val - case SettingInitialWindowSize: - cc.initialWindowSize = s.Val - default: - // TODO(bradfitz): handle more; at least SETTINGS_HEADER_TABLE_SIZE? - t.vlogf("Unhandled Setting: %v", s) - } - return nil - }) - go cc.readLoop() return cc, nil } @@ -463,7 +538,26 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { func (cc *ClientConn) setGoAway(f *GoAwayFrame) { cc.mu.Lock() defer cc.mu.Unlock() + + old := cc.goAway cc.goAway = f + + // Merge the previous and current GoAway error frames. + if cc.goAwayDebug == "" { + cc.goAwayDebug = string(f.DebugData()) + } + if old != nil && old.ErrCode != ErrCodeNo { + cc.goAway.ErrCode = old.ErrCode + } + last := f.LastStreamID + for streamID, cs := range cc.streams { + if streamID > last { + select { + case cs.resc <- resAndError{err: errClientConnGotGoAway}: + default: + } + } + } } func (cc *ClientConn) CanTakeNewRequest() bool { @@ -473,9 +567,22 @@ func (cc *ClientConn) CanTakeNewRequest() bool { } func (cc *ClientConn) canTakeNewRequestLocked() bool { + if cc.singleUse && cc.nextStreamID > 1 { + return false + } return cc.goAway == nil && !cc.closed && int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) && - cc.nextStreamID < 2147483647 + cc.nextStreamID < math.MaxInt32 +} + +// onIdleTimeout is called from a time.AfterFunc goroutine. 
It will +// only be called when we're idle, but because we're coming from a new +// goroutine, there could be a new request coming in at the same time, +// so this simply calls the synchronized closeIfIdle to shut down this +// connection. The timer could just call closeIfIdle, but this is more +// clear. +func (cc *ClientConn) onIdleTimeout() { + cc.closeIfIdle() } func (cc *ClientConn) closeIfIdle() { @@ -485,9 +592,13 @@ func (cc *ClientConn) closeIfIdle() { return } cc.closed = true + nextID := cc.nextStreamID // TODO: do clients send GOAWAY too? maybe? Just Close: cc.mu.Unlock() + if VerboseLogs { + cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) + } cc.tconn.Close() } @@ -547,8 +658,6 @@ func commaSeparatedTrailers(req *http.Request) (string, error) { } if len(keys) > 0 { sort.Strings(keys) - // TODO: could do better allocation-wise here, but trailers are rare, - // so being lazy for now. return strings.Join(keys, ","), nil } return "", nil @@ -565,46 +674,62 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration { return 0 } +// checkConnHeaders checks whether req has any invalid connection-level headers. +// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. +// Certain headers are special-cased as okay but not transmitted later. 
+func checkConnHeaders(req *http.Request) error { + if v := req.Header.Get("Upgrade"); v != "" { + return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) + } + if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) + } + if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") { + return fmt.Errorf("http2: invalid Connection request header: %q", vv) + } + return nil +} + +// actualContentLength returns a sanitized version of +// req.ContentLength, where 0 actually means zero (not unknown) and -1 +// means unknown. +func actualContentLength(req *http.Request) int64 { + if req.Body == nil || reqBodyIsNoBody(req.Body) { + return 0 + } + if req.ContentLength != 0 { + return req.ContentLength + } + return -1 +} + func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + if err := checkConnHeaders(req); err != nil { + return nil, err + } + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + trailers, err := commaSeparatedTrailers(req) if err != nil { return nil, err } hasTrailers := trailers != "" - var body io.Reader = req.Body - contentLen := req.ContentLength - if req.Body != nil && contentLen == 0 { - // Test to see if it's actually zero or just unset. - var buf [1]byte - n, rerr := io.ReadFull(body, buf[:]) - if rerr != nil && rerr != io.EOF { - contentLen = -1 - body = errorReader{rerr} - } else if n == 1 { - // Oh, guess there is data in this Body Reader after all. - // The ContentLength field just wasn't set. - // Stich the Body back together again, re-attaching our - // consumed byte. - contentLen = -1 - body = io.MultiReader(bytes.NewReader(buf[:]), body) - } else { - // Body is actually empty. 
- body = nil - } - } - cc.mu.Lock() + cc.lastActive = time.Now() if cc.closed || !cc.canTakeNewRequestLocked() { cc.mu.Unlock() return nil, errClientConnUnusable } - cs := cc.newStream() - cs.req = req - hasBody := body != nil + body := req.Body + contentLen := actualContentLength(req) + hasBody := contentLen != 0 // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + var requestedGzip bool if !cc.t.disableCompression() && req.Header.Get("Accept-Encoding") == "" && req.Header.Get("Range") == "" && @@ -621,37 +746,49 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { // We don't request gzip if the request is for a range, since // auto-decoding a portion of a gzipped document will just fail // anyway. See https://golang.org/issue/8923 - cs.requestedGzip = true + requestedGzip = true } // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is // sent by writeRequestBody below, along with any Trailers, // again in form HEADERS{1}, CONTINUATION{0,}) - hdrs := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen) + hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) + if err != nil { + cc.mu.Unlock() + return nil, err + } + + cs := cc.newStream() + cs.req = req + cs.trace = requestTrace(req) + cs.requestedGzip = requestedGzip + bodyWriter := cc.t.getBodyWriterState(cs, body) + cs.on100 = bodyWriter.on100 + cc.wmu.Lock() endStream := !hasBody && !hasTrailers werr := cc.writeHeaders(cs.ID, endStream, hdrs) cc.wmu.Unlock() + traceWroteHeaders(cs.trace) cc.mu.Unlock() if werr != nil { if hasBody { req.Body.Close() // per RoundTripper contract + bodyWriter.cancel() } cc.forgetStreamID(cs.ID) // Don't bother sending a RST_STREAM (our write already failed; // no need to keep writing) + traceWroteRequest(cs.trace, werr) return nil, werr } var respHeaderTimer <-chan time.Time - var bodyCopyErrc chan error // result of body copy if hasBody { - bodyCopyErrc = make(chan error, 1) - go func() { - 
bodyCopyErrc <- cs.writeRequestBody(body, req.Body) - }() + bodyWriter.scheduleBodyWrite() } else { + traceWroteRequest(cs.trace, nil) if d := cc.responseHeaderTimeout(); d != 0 { timer := time.NewTimer(d) defer timer.Stop() @@ -660,45 +797,68 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { } readLoopResCh := cs.resc - requestCanceledCh := requestCancel(req) bodyWritten := false + ctx := reqContext(req) + + handleReadLoopResponse := func(re resAndError) (*http.Response, error) { + res := re.res + if re.err != nil || res.StatusCode > 299 { + // On error or status code 3xx, 4xx, 5xx, etc abort any + // ongoing write, assuming that the server doesn't care + // about our request body. If the server replied with 1xx or + // 2xx, however, then assume the server DOES potentially + // want our body (e.g. full-duplex streaming: + // golang.org/issue/13444). If it turns out the server + // doesn't, they'll RST_STREAM us soon enough. This is a + // heuristic to avoid adding knobs to Transport. Hopefully + // we can keep it. + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWrite) + } + if re.err != nil { + if re.err == errClientConnGotGoAway { + cc.mu.Lock() + if cs.startedWrite { + re.err = errClientConnGotGoAwayAfterSomeReqBody + } + cc.mu.Unlock() + } + cc.forgetStreamID(cs.ID) + return nil, re.err + } + res.Request = req + res.TLS = cc.tlsState + return res, nil + } for { select { case re := <-readLoopResCh: - res := re.res - if re.err != nil || res.StatusCode > 299 { - // On error or status code 3xx, 4xx, 5xx, etc abort any - // ongoing write, assuming that the server doesn't care - // about our request body. If the server replied with 1xx or - // 2xx, however, then assume the server DOES potentially - // want our body (e.g. full-duplex streaming: - // golang.org/issue/13444). If it turns out the server - // doesn't, they'll RST_STREAM us soon enough. This is a - // heuristic to avoid adding knobs to Transport. 
Hopefully - // we can keep it. - cs.abortRequestBodyWrite(errStopReqBodyWrite) - } - if re.err != nil { - cc.forgetStreamID(cs.ID) - return nil, re.err - } - res.Request = req - res.TLS = cc.tlsState - return res, nil + return handleReadLoopResponse(re) case <-respHeaderTimer: cc.forgetStreamID(cs.ID) if !hasBody || bodyWritten { cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) } else { + bodyWriter.cancel() cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) } return nil, errTimeout - case <-requestCanceledCh: + case <-ctx.Done(): cc.forgetStreamID(cs.ID) if !hasBody || bodyWritten { cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + return nil, ctx.Err() + case <-req.Cancel: + cc.forgetStreamID(cs.ID) + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) } return nil, errRequestCanceled @@ -707,7 +867,13 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { // stream from the streams map; no need for // forgetStreamID. return nil, cs.resetErr - case err := <-bodyCopyErrc: + case err := <-bodyWriter.resc: + // Prefer the read loop's response, if available. Issue 16102. 
+ select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + default: + } if err != nil { return nil, err } @@ -768,6 +934,7 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( defer cc.putFrameScratchBuffer(buf) defer func() { + traceWroteRequest(cs.trace, err) // TODO: write h12Compare test showing whether // Request.Body is closed by the Transport, // and in multiple cases: server replies <=299 and >299 @@ -811,10 +978,11 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( err = cc.fr.WriteData(cs.ID, sentEnd, data) if err == nil { // TODO(bradfitz): this flush is for latency, not bandwidth. - // Most requests won't need this. Make this opt-in or opt-out? - // Use some heuristic on the body type? Nagel-like timers? - // Based on 'n'? Only last chunk of this for loop, unless flow control - // tokens are low? For now, always: + // Most requests won't need this. Make this opt-in or + // opt-out? Use some heuristic on the body type? Nagel-like + // timers? Based on 'n'? Only last chunk of this for loop, + // unless flow control tokens are low? For now, always. + // If we change this, see comment below. err = cc.bw.Flush() } cc.wmu.Unlock() @@ -824,28 +992,33 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( } } - cc.wmu.Lock() - if !sentEnd { - var trls []byte - if hasTrailers { - cc.mu.Lock() - trls = cc.encodeTrailers(req) - cc.mu.Unlock() - } + if sentEnd { + // Already sent END_STREAM (which implies we have no + // trailers) and flushed, because currently all + // WriteData frames above get a flush. So we're done. + return nil + } - // Avoid forgetting to send an END_STREAM if the encoded - // trailers are 0 bytes. Both results produce and END_STREAM. 
- if len(trls) > 0 { - err = cc.writeHeaders(cs.ID, true, trls) - } else { - err = cc.fr.WriteData(cs.ID, true, nil) - } + var trls []byte + if hasTrailers { + cc.mu.Lock() + defer cc.mu.Unlock() + trls = cc.encodeTrailers(req) + } + + cc.wmu.Lock() + defer cc.wmu.Unlock() + + // Two ways to send END_STREAM: either with trailers, or + // with an empty DATA frame. + if len(trls) > 0 { + err = cc.writeHeaders(cs.ID, true, trls) + } else { + err = cc.fr.WriteData(cs.ID, true, nil) } if ferr := cc.bw.Flush(); ferr != nil && err == nil { err = ferr } - cc.wmu.Unlock() - return err } @@ -864,7 +1037,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) if cs.stopReqBody != nil { return 0, cs.stopReqBody } - if err := cs.checkReset(); err != nil { + if err := cs.checkResetOrDone(); err != nil { return 0, err } if a := cs.flow.available(); a > 0 { @@ -891,13 +1064,47 @@ type badStringError struct { func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } // requires cc.mu be held. -func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) []byte { +func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { cc.hbuf.Reset() host := req.Host if host == "" { host = req.URL.Host } + host, err := httplex.PunycodeHostPort(host) + if err != nil { + return nil, err + } + + var path string + if req.Method != "CONNECT" { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return nil, fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + + // Check for any invalid headers and return an error before we + // potentially pollute our hpack state. 
(We want to be able to + // continue to reuse the hpack encoder for future requests) + for k, vv := range req.Header { + if !httplex.ValidHeaderFieldName(k) { + return nil, fmt.Errorf("invalid HTTP header name %q", k) + } + for _, v := range vv { + if !httplex.ValidHeaderFieldValue(v) { + return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) + } + } + } // 8.1.2.3 Request Pseudo-Header Fields // The :path pseudo-header field includes the path and query parts of the @@ -907,8 +1114,8 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail cc.writeHeader(":authority", host) cc.writeHeader(":method", req.Method) if req.Method != "CONNECT" { - cc.writeHeader(":path", req.URL.RequestURI()) - cc.writeHeader(":scheme", "https") + cc.writeHeader(":path", path) + cc.writeHeader(":scheme", req.URL.Scheme) } if trailers != "" { cc.writeHeader("trailer", trailers) @@ -917,13 +1124,21 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail var didUA bool for k, vv := range req.Header { lowKey := strings.ToLower(k) - if lowKey == "host" || lowKey == "content-length" { + switch lowKey { + case "host", "content-length": + // Host is :authority, already sent. + // Content-Length is automatic, set below. continue - } - if lowKey == "user-agent" { + case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive": + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + case "user-agent": // Match Go's http1 behavior: at most one - // User-Agent. If set to nil or empty string, - // then omit it. Otherwise if not mentioned, + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, // include the default (below). 
didUA = true if len(vv) < 1 { @@ -947,7 +1162,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail if !didUA { cc.writeHeader("user-agent", defaultUserAgent) } - return cc.hbuf.Bytes() + return cc.hbuf.Bytes(), nil } // shouldSendReqContentLength reports whether the http2.Transport should send @@ -1025,25 +1240,22 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { defer cc.mu.Unlock() cs := cc.streams[id] if andRemove && cs != nil && !cc.closed { + cc.lastActive = time.Now() delete(cc.streams, id) + if len(cc.streams) == 0 && cc.idleTimer != nil { + cc.idleTimer.Reset(cc.idleTimeout) + } close(cs.done) + cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl } return cs } // clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. type clientConnReadLoop struct { - cc *ClientConn - activeRes map[uint32]*clientStream // keyed by streamID - - hdec *hpack.Decoder - - // Fields reset on each HEADERS: - nextRes *http.Response - sawRegHeader bool // saw non-pseudo header - reqMalformed error // non-nil once known to be malformed - lastHeaderEndsStream bool - headerListSize int64 // actually uint32, but easier math this way + cc *ClientConn + activeRes map[uint32]*clientStream // keyed by streamID + closeWhenIdle bool } // readLoop runs in its own goroutine and reads and dispatches frames. @@ -1052,7 +1264,6 @@ func (cc *ClientConn) readLoop() { cc: cc, activeRes: make(map[uint32]*clientStream), } - rl.hdec = hpack.NewDecoder(initialHeaderTableSize, rl.onNewHeaderField) defer rl.cleanup() cc.readerErr = rl.run() @@ -1063,20 +1274,51 @@ func (cc *ClientConn) readLoop() { } } +// GoAwayError is returned by the Transport when the server closes the +// TCP connection after sending a GOAWAY frame. 
+type GoAwayError struct { + LastStreamID uint32 + ErrCode ErrCode + DebugData string +} + +func (e GoAwayError) Error() string { + return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", + e.LastStreamID, e.ErrCode, e.DebugData) +} + +func isEOFOrNetReadError(err error) bool { + if err == io.EOF { + return true + } + ne, ok := err.(*net.OpError) + return ok && ne.Op == "read" +} + func (rl *clientConnReadLoop) cleanup() { cc := rl.cc defer cc.tconn.Close() defer cc.t.connPool().MarkDead(cc) defer close(cc.readerDone) + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + // Close any response bodies if the server closes prematurely. // TODO: also do this if we've written the headers but not // gotten a response yet. err := cc.readerErr - if err == io.EOF { + cc.mu.Lock() + if cc.goAway != nil && isEOFOrNetReadError(err) { + err = GoAwayError{ + LastStreamID: cc.goAway.LastStreamID, + ErrCode: cc.goAway.ErrCode, + DebugData: cc.goAwayDebug, + } + } else if err == io.EOF { err = io.ErrUnexpectedEOF } - cc.mu.Lock() for _, cs := range rl.activeRes { cs.bufPipe.CloseWithError(err) } @@ -1094,32 +1336,43 @@ func (rl *clientConnReadLoop) cleanup() { func (rl *clientConnReadLoop) run() error { cc := rl.cc - closeWhenIdle := cc.t.disableKeepAlives() - gotReply := false // ever saw a reply + rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse + gotReply := false // ever saw a HEADERS reply + gotSettings := false for { f, err := cc.fr.ReadFrame() if err != nil { - cc.vlogf("Transport readFrame error: (%T) %v", err, err) + cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } if se, ok := err.(StreamError); ok { - // TODO: deal with stream errors from the framer. 
- return se + if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil { + cs.cc.writeStreamReset(cs.ID, se.Code, err) + if se.Cause == nil { + se.Cause = cc.fr.errDetail + } + rl.endStreamError(cs, se) + } + continue } else if err != nil { return err } if VerboseLogs { cc.vlogf("http2: Transport received %s", summarizeFrame(f)) } + if !gotSettings { + if _, ok := f.(*SettingsFrame); !ok { + cc.logf("protocol error: received %T before a SETTINGS frame", f) + return ConnectionError(ErrCodeProtocol) + } + gotSettings = true + } maybeIdle := false // whether frame might transition us to idle switch f := f.(type) { - case *HeadersFrame: + case *MetaHeadersFrame: err = rl.processHeaders(f) maybeIdle = true gotReply = true - case *ContinuationFrame: - err = rl.processContinuation(f) - maybeIdle = true case *DataFrame: err = rl.processData(f) maybeIdle = true @@ -1141,100 +1394,121 @@ func (rl *clientConnReadLoop) run() error { cc.logf("Transport: unhandled response frame type %T", f) } if err != nil { + if VerboseLogs { + cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) + } return err } - if closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { + if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { cc.closeIfIdle() } } } -func (rl *clientConnReadLoop) processHeaders(f *HeadersFrame) error { - rl.sawRegHeader = false - rl.reqMalformed = nil - rl.lastHeaderEndsStream = f.StreamEnded() - rl.headerListSize = 0 - rl.nextRes = &http.Response{ - Proto: "HTTP/2.0", - ProtoMajor: 2, - Header: make(http.Header), - } - rl.hdec.SetEmitEnabled(true) - return rl.processHeaderBlockFragment(f.HeaderBlockFragment(), f.StreamID, f.HeadersEnded()) -} - -func (rl *clientConnReadLoop) processContinuation(f *ContinuationFrame) error { - return rl.processHeaderBlockFragment(f.HeaderBlockFragment(), f.StreamID, f.HeadersEnded()) -} - -func (rl *clientConnReadLoop) 
processHeaderBlockFragment(frag []byte, streamID uint32, finalFrag bool) error { +func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { cc := rl.cc - streamEnded := rl.lastHeaderEndsStream - cs := cc.streamByID(streamID, streamEnded && finalFrag) + cs := cc.streamByID(f.StreamID, f.StreamEnded()) if cs == nil { // We'd get here if we canceled a request while the - // server was mid-way through replying with its - // headers. (The case of a CONTINUATION arriving - // without HEADERS would be rejected earlier by the - // Framer). So if this was just something we canceled, - // ignore it. + // server had its response still in flight. So if this + // was just something we canceled, ignore it. return nil } - if cs.pastHeaders { - rl.hdec.SetEmitFunc(func(f hpack.HeaderField) { rl.onNewTrailerField(cs, f) }) - } else { - rl.hdec.SetEmitFunc(rl.onNewHeaderField) - } - _, err := rl.hdec.Write(frag) - if err != nil { - return ConnectionError(ErrCodeCompression) - } - if finalFrag { - if err := rl.hdec.Close(); err != nil { - return ConnectionError(ErrCodeCompression) + if !cs.firstByte { + if cs.trace != nil { + // TODO(bradfitz): move first response byte earlier, + // when we first read the 9 byte header, not waiting + // until all the HEADERS+CONTINUATION frames have been + // merged. This works for now. + traceFirstResponseByte(cs.trace) } + cs.firstByte = true } - - if !finalFrag { - return nil - } - if !cs.pastHeaders { cs.pastHeaders = true } else { - // We're dealing with trailers. (and specifically the - // final frame of headers) - if cs.pastTrailers { - // Too many HEADERS frames for this stream. - return ConnectionError(ErrCodeProtocol) - } - cs.pastTrailers = true - if !streamEnded { - // We expect that any header block fragment - // frame for trailers with END_HEADERS also - // has END_STREAM. 
- return ConnectionError(ErrCodeProtocol) - } - rl.endStream(cs) - return nil + return rl.processTrailers(cs, f) } - if rl.reqMalformed != nil { - cs.resc <- resAndError{err: rl.reqMalformed} - rl.cc.writeStreamReset(cs.ID, ErrCodeProtocol, rl.reqMalformed) + res, err := rl.handleResponse(cs, f) + if err != nil { + if _, ok := err.(ConnectionError); ok { + return err + } + // Any other error type is a stream error. + cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) + cs.resc <- resAndError{err: err} + return nil // return nil from process* funcs to keep conn alive + } + if res == nil { + // (nil, nil) special case. See handleResponse docs. return nil } + if res.Body != noBody { + rl.activeRes[cs.ID] = cs + } + cs.resTrailer = &res.Trailer + cs.resc <- resAndError{res: res} + return nil +} - res := rl.nextRes +// may return error types nil, or ConnectionError. Any other error value +// is a StreamError of type ErrCodeProtocol. The returned error in that case +// is the detail. +// +// As a special case, handleResponse may return (nil, nil) to skip the +// frame (currently only used for 100 expect continue). This special +// case is going away after Issue 13851 is fixed. +func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { + if f.Truncated { + return nil, errResponseHeaderListSize + } - if res.StatusCode == 100 { - // Just skip 100-continue response headers for now. - // TODO: golang.org/issue/13851 for doing it properly. 
+ status := f.PseudoValue("status") + if status == "" { + return nil, errors.New("missing status pseudo header") + } + statusCode, err := strconv.Atoi(status) + if err != nil { + return nil, errors.New("malformed non-numeric status pseudo header") + } + + if statusCode == 100 { + traceGot100Continue(cs.trace) + if cs.on100 != nil { + cs.on100() // forces any write delay timer to fire + } cs.pastHeaders = false // do it all again - return nil + return nil, nil } - if !streamEnded || cs.req.Method == "HEAD" { + header := make(http.Header) + res := &http.Response{ + Proto: "HTTP/2.0", + ProtoMajor: 2, + Header: header, + StatusCode: statusCode, + Status: status + " " + http.StatusText(statusCode), + } + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + if key == "Trailer" { + t := res.Trailer + if t == nil { + t = make(http.Header) + res.Trailer = t + } + foreachHeaderElement(hf.Value, func(v string) { + t[http.CanonicalHeaderKey(v)] = nil + }) + } else { + header[key] = append(header[key], hf.Value) + } + } + + streamEnded := f.StreamEnded() + isHead := cs.req.Method == "HEAD" + if !streamEnded || isHead { res.ContentLength = -1 if clens := res.Header["Content-Length"]; len(clens) == 1 { if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { @@ -1249,27 +1523,51 @@ func (rl *clientConnReadLoop) processHeaderBlockFragment(frag []byte, streamID u } } - if streamEnded { + if streamEnded || isHead { res.Body = noBody - } else { - buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage - cs.bufPipe = pipe{b: buf} - cs.bytesRemain = res.ContentLength - res.Body = transportResponseBody{cs} - go cs.awaitRequestCancel(requestCancel(cs.req)) - - if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { - res.Header.Del("Content-Encoding") - res.Header.Del("Content-Length") - res.ContentLength = -1 - res.Body = &gzipReader{body: res.Body} - } + return res, nil } - cs.resTrailer = &res.Trailer - 
rl.activeRes[cs.ID] = cs - cs.resc <- resAndError{res: res} - rl.nextRes = nil // unused now; will be reset next HEADERS frame + cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}} + cs.bytesRemain = res.ContentLength + res.Body = transportResponseBody{cs} + go cs.awaitRequestCancel(cs.req) + + if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { + res.Header.Del("Content-Encoding") + res.Header.Del("Content-Length") + res.ContentLength = -1 + res.Body = &gzipReader{body: res.Body} + setResponseUncompressed(res) + } + return res, nil +} + +func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { + if cs.pastTrailers { + // Too many HEADERS frames for this stream. + return ConnectionError(ErrCodeProtocol) + } + cs.pastTrailers = true + if !f.StreamEnded() { + // We expect that any headers for trailers also + // has END_STREAM. + return ConnectionError(ErrCodeProtocol) + } + if len(f.PseudoFields()) > 0 { + // No pseudo header fields are defined for trailers. + // TODO: ConnectionError might be overly harsh? Check. + return ConnectionError(ErrCodeProtocol) + } + + trailer := make(http.Header) + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + trailer[key] = append(trailer[key], hf.Value) + } + cs.trailer = trailer + + rl.endStream(cs) return nil } @@ -1320,8 +1618,12 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) { cc.inflow.add(connAdd) } if err == nil { // No need to refresh if the stream is over or failed. - if v := cs.inflow.available(); v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { - streamAdd = transportDefaultStreamFlow - v + // Consider any buffered body data (read from the conn but not + // consumed by the client) when computing flow control for this + // stream. 
+ v := int(cs.inflow.available()) + cs.bufPipe.Len() + if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { + streamAdd = int32(transportDefaultStreamFlow - v) cs.inflow.add(streamAdd) } } @@ -1343,10 +1645,28 @@ var errClosedResponseBody = errors.New("http2: response body closed") func (b transportResponseBody) Close() error { cs := b.cs - if cs.bufPipe.Err() != io.EOF { - // TODO: write test for this - cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + cc := cs.cc + + serverSentStreamEnd := cs.bufPipe.Err() == io.EOF + unread := cs.bufPipe.Len() + + if unread > 0 || !serverSentStreamEnd { + cc.mu.Lock() + cc.wmu.Lock() + if !serverSentStreamEnd { + cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) + cs.didReset = true + } + // Return connection-level flow control. + if unread > 0 { + cc.inflow.add(int32(unread)) + cc.fr.WriteWindowUpdate(0, uint32(unread)) + } + cc.bw.Flush() + cc.wmu.Unlock() + cc.mu.Unlock() } + cs.bufPipe.BreakWithError(errClosedResponseBody) return nil } @@ -1354,6 +1674,7 @@ func (b transportResponseBody) Close() error { func (rl *clientConnReadLoop) processData(f *DataFrame) error { cc := rl.cc cs := cc.streamByID(f.StreamID, f.StreamEnded()) + data := f.Data() if cs == nil { cc.mu.Lock() neverSent := cc.nextStreamID @@ -1367,27 +1688,59 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { // TODO: be stricter here? only silently ignore things which // we canceled, but not things which were closed normally // by the peer? Tough without accumulating too much state. + + // But at least return their flow control: + if f.Length > 0 { + cc.mu.Lock() + cc.inflow.add(int32(f.Length)) + cc.mu.Unlock() + + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(f.Length)) + cc.bw.Flush() + cc.wmu.Unlock() + } return nil } - if data := f.Data(); len(data) > 0 { - if cs.bufPipe.b == nil { - // Data frame after it's already closed? 
- cc.logf("http2: Transport received DATA frame for closed stream; closing connection") - return ConnectionError(ErrCodeProtocol) - } - + if f.Length > 0 { // Check connection-level flow control. cc.mu.Lock() - if cs.inflow.available() >= int32(len(data)) { - cs.inflow.take(int32(len(data))) + if cs.inflow.available() >= int32(f.Length) { + cs.inflow.take(int32(f.Length)) } else { cc.mu.Unlock() return ConnectionError(ErrCodeFlowControl) } + // Return any padded flow control now, since we won't + // refund it later on body reads. + var refund int + if pad := int(f.Length) - len(data); pad > 0 { + refund += pad + } + // Return len(data) now if the stream is already closed, + // since data will never be read. + didReset := cs.didReset + if didReset { + refund += len(data) + } + if refund > 0 { + cc.inflow.add(int32(refund)) + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(refund)) + if !didReset { + cs.inflow.add(int32(refund)) + cc.fr.WriteWindowUpdate(cs.ID, uint32(refund)) + } + cc.bw.Flush() + cc.wmu.Unlock() + } cc.mu.Unlock() - if _, err := cs.bufPipe.Write(data); err != nil { - return err + if len(data) > 0 && !didReset { + if _, err := cs.bufPipe.Write(data); err != nil { + rl.endStreamError(cs, err) + return err + } } } @@ -1402,14 +1755,25 @@ var errInvalidTrailers = errors.New("http2: invalid trailers") func (rl *clientConnReadLoop) endStream(cs *clientStream) { // TODO: check that any declared content-length matches, like // server.go's (*stream).endStream method. 
- err := io.EOF - code := cs.copyTrailers - if rl.reqMalformed != nil { - err = rl.reqMalformed - code = nil + rl.endStreamError(cs, nil) +} + +func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { + var code func() + if err == nil { + err = io.EOF + code = cs.copyTrailers } cs.bufPipe.closeWithErrorAndCode(err, code) delete(rl.activeRes, cs.ID) + if isConnectionCloseRequest(cs.req) { + rl.closeWhenIdle = true + } + + select { + case cs.resc <- resAndError{err: err}: + default: + } } func (cs *clientStream) copyTrailers() { @@ -1437,18 +1801,39 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { cc := rl.cc cc.mu.Lock() defer cc.mu.Unlock() - return f.ForeachSetting(func(s Setting) error { + + if f.IsAck() { + if cc.wantSettingsAck { + cc.wantSettingsAck = false + return nil + } + return ConnectionError(ErrCodeProtocol) + } + + err := f.ForeachSetting(func(s Setting) error { switch s.ID { case SettingMaxFrameSize: cc.maxFrameSize = s.Val case SettingMaxConcurrentStreams: cc.maxConcurrentStreams = s.Val case SettingInitialWindowSize: - // TODO: error if this is too large. + // Values above the maximum flow-control + // window size of 2^31-1 MUST be treated as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. + if s.Val > math.MaxInt32 { + return ConnectionError(ErrCodeFlowControl) + } - // TODO: adjust flow control of still-open + // Adjust flow control of currently-open // frames by the difference of the old initial // window size and this one. + delta := int32(s.Val) - int32(cc.initialWindowSize) + for _, cs := range cc.streams { + cs.flow.add(delta) + } + cc.cond.Broadcast() + cc.initialWindowSize = s.Val default: // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. 
@@ -1456,6 +1841,16 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { } return nil }) + if err != nil { + return err + } + + cc.wmu.Lock() + defer cc.wmu.Unlock() + + cc.fr.WriteSettingsAck() + cc.bw.Flush() + return cc.werr } func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { @@ -1492,20 +1887,66 @@ func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { // which closes this, so there // isn't a race. default: - err := StreamError{cs.ID, f.ErrCode} + err := streamError(cs.ID, f.ErrCode) cs.resetErr = err close(cs.peerReset) cs.bufPipe.CloseWithError(err) - cs.cc.cond.Broadcast() // wake up checkReset via clientStream.awaitFlowControl + cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl } delete(rl.activeRes, cs.ID) return nil } +// Ping sends a PING frame to the server and waits for the ack. +// Public implementation is in go17.go and not_go17.go +func (cc *ClientConn) ping(ctx contextContext) error { + c := make(chan struct{}) + // Generate a random payload + var p [8]byte + for { + if _, err := rand.Read(p[:]); err != nil { + return err + } + cc.mu.Lock() + // check for dup before insert + if _, found := cc.pings[p]; !found { + cc.pings[p] = c + cc.mu.Unlock() + break + } + cc.mu.Unlock() + } + cc.wmu.Lock() + if err := cc.fr.WritePing(false, p); err != nil { + cc.wmu.Unlock() + return err + } + if err := cc.bw.Flush(); err != nil { + cc.wmu.Unlock() + return err + } + cc.wmu.Unlock() + select { + case <-c: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-cc.readerDone: + // connection closed + return cc.readerErr + } +} + func (rl *clientConnReadLoop) processPing(f *PingFrame) error { if f.IsAck() { - // 6.7 PING: " An endpoint MUST NOT respond to PING frames - // containing this flag." 
+ cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + // If ack, notify listener if any + if c, ok := cc.pings[f.Data]; ok { + close(c) + delete(cc.pings, f.Data) + } return nil } cc := rl.cc @@ -1529,8 +1970,10 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { } func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { - // TODO: do something with err? send it as a debug frame to the peer? - // But that's only in GOAWAY. Invent a new frame type? Is there one already? + // TODO: map err to more interesting error codes, once the + // HTTP community comes up with some. But currently for + // RST_STREAM there's no equivalent to GOAWAY frame's debug + // data, and the error codes are all pretty vague ("cancel"). cc.wmu.Lock() cc.fr.WriteRSTStream(streamID, code) cc.bw.Flush() @@ -1542,118 +1985,6 @@ var ( errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") ) -func (rl *clientConnReadLoop) checkHeaderField(f hpack.HeaderField) bool { - if rl.reqMalformed != nil { - return false - } - - const headerFieldOverhead = 32 // per spec - rl.headerListSize += int64(len(f.Name)) + int64(len(f.Value)) + headerFieldOverhead - if max := rl.cc.t.maxHeaderListSize(); max != 0 && rl.headerListSize > int64(max) { - rl.hdec.SetEmitEnabled(false) - rl.reqMalformed = errResponseHeaderListSize - return false - } - - if !validHeaderFieldValue(f.Value) { - rl.reqMalformed = errInvalidHeaderFieldValue - return false - } - - isPseudo := strings.HasPrefix(f.Name, ":") - if isPseudo { - if rl.sawRegHeader { - rl.reqMalformed = errors.New("http2: invalid pseudo header after regular header") - return false - } - } else { - if !validHeaderFieldName(f.Name) { - rl.reqMalformed = errInvalidHeaderFieldName - return false - } - rl.sawRegHeader = true - } - - return true -} - -// onNewHeaderField runs on the readLoop goroutine whenever a new -// hpack header field is decoded. 
-func (rl *clientConnReadLoop) onNewHeaderField(f hpack.HeaderField) { - cc := rl.cc - if VerboseLogs { - cc.logf("http2: Transport decoded %v", f) - } - - if !rl.checkHeaderField(f) { - return - } - - isPseudo := strings.HasPrefix(f.Name, ":") - if isPseudo { - switch f.Name { - case ":status": - code, err := strconv.Atoi(f.Value) - if err != nil { - rl.reqMalformed = errors.New("http2: invalid :status") - return - } - rl.nextRes.Status = f.Value + " " + http.StatusText(code) - rl.nextRes.StatusCode = code - default: - // "Endpoints MUST NOT generate pseudo-header - // fields other than those defined in this - // document." - rl.reqMalformed = fmt.Errorf("http2: unknown response pseudo header %q", f.Name) - } - return - } - - key := http.CanonicalHeaderKey(f.Name) - if key == "Trailer" { - t := rl.nextRes.Trailer - if t == nil { - t = make(http.Header) - rl.nextRes.Trailer = t - } - foreachHeaderElement(f.Value, func(v string) { - t[http.CanonicalHeaderKey(v)] = nil - }) - } else { - rl.nextRes.Header.Add(key, f.Value) - } -} - -func (rl *clientConnReadLoop) onNewTrailerField(cs *clientStream, f hpack.HeaderField) { - if VerboseLogs { - rl.cc.logf("http2: Transport decoded trailer %v", f) - } - if !rl.checkHeaderField(f) { - return - } - if strings.HasPrefix(f.Name, ":") { - // Pseudo-header fields MUST NOT appear in - // trailers. Endpoints MUST treat a request or - // response that contains undefined or invalid - // pseudo-header fields as malformed. - rl.reqMalformed = errPseudoTrailers - return - } - - key := http.CanonicalHeaderKey(f.Name) - - // The spec says one must predeclare their trailers but in practice - // popular users (which is to say the only user we found) do not so we - // violate the spec and accept all of them. 
- const acceptAllTrailers = true - if _, ok := (*cs.resTrailer)[key]; ok || acceptAllTrailers { - if cs.trailer == nil { - cs.trailer = make(http.Header) - } - cs.trailer[key] = append(cs.trailer[key], f.Value) - } -} - func (cc *ClientConn) logf(format string, args ...interface{}) { cc.t.logf(format, args...) } @@ -1691,13 +2022,18 @@ func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { // call gzip.NewReader on the first call to Read type gzipReader struct { body io.ReadCloser // underlying Response.Body - zr io.Reader // lazily-initialized gzip reader + zr *gzip.Reader // lazily-initialized gzip reader + zerr error // sticky error } func (gz *gzipReader) Read(p []byte) (n int, err error) { + if gz.zerr != nil { + return 0, gz.zerr + } if gz.zr == nil { gz.zr, err = gzip.NewReader(gz.body) if err != nil { + gz.zerr = err return 0, err } } @@ -1711,3 +2047,88 @@ func (gz *gzipReader) Close() error { type errorReader struct{ err error } func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } + +// bodyWriterState encapsulates various state around the Transport's writing +// of the request body, particularly regarding doing delayed writes of the body +// when the request contains "Expect: 100-continue". 
+type bodyWriterState struct { + cs *clientStream + timer *time.Timer // if non-nil, we're doing a delayed write + fnonce *sync.Once // to call fn with + fn func() // the code to run in the goroutine, writing the body + resc chan error // result of fn's execution + delay time.Duration // how long we should delay a delayed write for +} + +func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { + s.cs = cs + if body == nil { + return + } + resc := make(chan error, 1) + s.resc = resc + s.fn = func() { + cs.cc.mu.Lock() + cs.startedWrite = true + cs.cc.mu.Unlock() + resc <- cs.writeRequestBody(body, cs.req.Body) + } + s.delay = t.expectContinueTimeout() + if s.delay == 0 || + !httplex.HeaderValuesContainsToken( + cs.req.Header["Expect"], + "100-continue") { + return + } + s.fnonce = new(sync.Once) + + // Arm the timer with a very large duration, which we'll + // intentionally lower later. It has to be large now because + // we need a handle to it before writing the headers, but the + // s.delay value is defined to not start until after the + // request headers were written. + const hugeDuration = 365 * 24 * time.Hour + s.timer = time.AfterFunc(hugeDuration, func() { + s.fnonce.Do(s.fn) + }) + return +} + +func (s bodyWriterState) cancel() { + if s.timer != nil { + s.timer.Stop() + } +} + +func (s bodyWriterState) on100() { + if s.timer == nil { + // If we didn't do a delayed write, ignore the server's + // bogus 100 continue response. + return + } + s.timer.Stop() + go func() { s.fnonce.Do(s.fn) }() +} + +// scheduleBodyWrite starts writing the body, either immediately (in +// the common case) or after the delay timeout. It should not be +// called until after the headers have been written. +func (s bodyWriterState) scheduleBodyWrite() { + if s.timer == nil { + // We're not doing a delayed write (see + // getBodyWriterState), so just start the writing + // goroutine immediately. 
+ go s.fn() + return + } + traceWait100Continue(s.cs.trace) + if s.timer.Stop() { + s.timer.Reset(s.delay) + } +} + +// isConnectionCloseRequest reports whether req should use its own +// connection for a single request and then close the connection. +func isConnectionCloseRequest(req *http.Request) bool { + return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close") +} diff --git a/fn/vendor/golang.org/x/net/http2/transport_test.go b/fn/vendor/golang.org/x/net/http2/transport_test.go index 868fd1fc7..15dfa0739 100644 --- a/fn/vendor/golang.org/x/net/http2/transport_test.go +++ b/fn/vendor/golang.org/x/net/http2/transport_test.go @@ -13,13 +13,14 @@ import ( "fmt" "io" "io/ioutil" - "log" "math/rand" "net" "net/http" "net/url" "os" "reflect" + "runtime" + "sort" "strconv" "strings" "sync" @@ -38,6 +39,13 @@ var ( var tlsConfigInsecure = &tls.Config{InsecureSkipVerify: true} +type testContext struct{} + +func (testContext) Done() <-chan struct{} { return make(chan struct{}) } +func (testContext) Err() error { panic("should not be called") } +func (testContext) Deadline() (deadline time.Time, ok bool) { return time.Time{}, false } +func (testContext) Value(key interface{}) interface{} { return nil } + func TestTransportExternal(t *testing.T) { if !*extNet { t.Skip("skipping external network test") @@ -51,6 +59,62 @@ func TestTransportExternal(t *testing.T) { res.Write(os.Stdout) } +type fakeTLSConn struct { + net.Conn +} + +func (c *fakeTLSConn) ConnectionState() tls.ConnectionState { + return tls.ConnectionState{ + Version: tls.VersionTLS12, + CipherSuite: cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + } +} + +func startH2cServer(t *testing.T) net.Listener { + h2Server := &Server{} + l := newLocalListener(t) + go func() { + conn, err := l.Accept() + if err != nil { + t.Error(err) + return + } + h2Server.ServeConn(&fakeTLSConn{conn}, &ServeConnOpts{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, 
"Hello, %v, http: %v", r.URL.Path, r.TLS == nil) + })}) + }() + return l +} + +func TestTransportH2c(t *testing.T) { + l := startH2cServer(t) + defer l.Close() + req, err := http.NewRequest("GET", "http://"+l.Addr().String()+"/foobar", nil) + if err != nil { + t.Fatal(err) + } + tr := &Transport{ + AllowHTTP: true, + DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { + return net.Dial(network, addr) + }, + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + if res.ProtoMajor != 2 { + t.Fatal("proto not h2c") + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if got, want := string(body), "Hello, /foobar, http: true"; got != want { + t.Fatalf("response got %v, want %v", got, want) + } +} + func TestTransport(t *testing.T) { const body = "sup" st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { @@ -101,10 +165,12 @@ func TestTransport(t *testing.T) { } } -func TestTransportReusesConns(t *testing.T) { +func onSameConn(t *testing.T, modReq func(*http.Request)) bool { st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { io.WriteString(w, r.RemoteAddr) - }, optOnlyServer) + }, optOnlyServer, func(c net.Conn, st http.ConnState) { + t.Logf("conn %v is now state %v", c.RemoteAddr(), st) + }) defer st.Close() tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() @@ -113,6 +179,7 @@ func TestTransportReusesConns(t *testing.T) { if err != nil { t.Fatal(err) } + modReq(req) res, err := tr.RoundTrip(req) if err != nil { t.Fatal(err) @@ -130,8 +197,24 @@ func TestTransportReusesConns(t *testing.T) { } first := get() second := get() - if first != second { - t.Errorf("first and second responses were on different connections: %q vs %q", first, second) + return first == second +} + +func TestTransportReusesConns(t *testing.T) { + if !onSameConn(t, func(*http.Request) {}) { + t.Errorf("first and second responses were on different 
connections") + } +} + +func TestTransportReusesConn_RequestClose(t *testing.T) { + if onSameConn(t, func(r *http.Request) { r.Close = true }) { + t.Errorf("first and second responses were not on different connections") + } +} + +func TestTransportReusesConn_ConnClose(t *testing.T) { + if onSameConn(t, func(r *http.Request) { r.Header.Set("Connection", "close") }) { + t.Errorf("first and second responses were not on different connections") } } @@ -309,28 +392,65 @@ func randString(n int) string { return string(b) } -var bodyTests = []struct { - body string - noContentLen bool -}{ - {body: "some message"}, - {body: "some message", noContentLen: true}, - {body: ""}, - {body: "", noContentLen: true}, - {body: strings.Repeat("a", 1<<20), noContentLen: true}, - {body: strings.Repeat("a", 1<<20)}, - {body: randString(16<<10 - 1)}, - {body: randString(16 << 10)}, - {body: randString(16<<10 + 1)}, - {body: randString(512<<10 - 1)}, - {body: randString(512 << 10)}, - {body: randString(512<<10 + 1)}, - {body: randString(1<<20 - 1)}, - {body: randString(1 << 20)}, - {body: randString(1<<20 + 2)}, +type panicReader struct{} + +func (panicReader) Read([]byte) (int, error) { panic("unexpected Read") } +func (panicReader) Close() error { panic("unexpected Close") } + +func TestActualContentLength(t *testing.T) { + tests := []struct { + req *http.Request + want int64 + }{ + // Verify we don't read from Body: + 0: { + req: &http.Request{Body: panicReader{}}, + want: -1, + }, + // nil Body means 0, regardless of ContentLength: + 1: { + req: &http.Request{Body: nil, ContentLength: 5}, + want: 0, + }, + // ContentLength is used if set. + 2: { + req: &http.Request{Body: panicReader{}, ContentLength: 5}, + want: 5, + }, + // http.NoBody means 0, not -1. 
+ 3: { + req: &http.Request{Body: go18httpNoBody()}, + want: 0, + }, + } + for i, tt := range tests { + got := actualContentLength(tt.req) + if got != tt.want { + t.Errorf("test[%d]: got %d; want %d", i, got, tt.want) + } + } } func TestTransportBody(t *testing.T) { + bodyTests := []struct { + body string + noContentLen bool + }{ + {body: "some message"}, + {body: "some message", noContentLen: true}, + {body: strings.Repeat("a", 1<<20), noContentLen: true}, + {body: strings.Repeat("a", 1<<20)}, + {body: randString(16<<10 - 1)}, + {body: randString(16 << 10)}, + {body: randString(16<<10 + 1)}, + {body: randString(512<<10 - 1)}, + {body: randString(512 << 10)}, + {body: randString(512<<10 + 1)}, + {body: randString(1<<20 - 1)}, + {body: randString(1 << 20)}, + {body: randString(1<<20 + 2)}, + } + type reqInfo struct { req *http.Request slurp []byte @@ -370,7 +490,7 @@ func TestTransportBody(t *testing.T) { defer res.Body.Close() ri := <-gotc if ri.err != nil { - t.Errorf("%#d: read error: %v", i, ri.err) + t.Errorf("#%d: read error: %v", i, ri.err) continue } if got := string(ri.slurp); got != tt.body { @@ -445,7 +565,7 @@ func TestConfigureTransport(t *testing.T) { if err != nil { t.Fatal(err) } - if got := fmt.Sprintf("%#v", *t1); !strings.Contains(got, `"h2"`) { + if got := fmt.Sprintf("%#v", t1); !strings.Contains(got, `"h2"`) { // Laziness, to avoid buildtags. 
t.Errorf("stringification of HTTP/1 transport didn't contain \"h2\": %v", got) } @@ -587,6 +707,19 @@ func (ct *clientTester) greet() { } } +func (ct *clientTester) readNonSettingsFrame() (Frame, error) { + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return nil, err + } + if _, ok := f.(*SettingsFrame); ok { + continue + } + return f, nil + } +} + func (ct *clientTester) cleanup() { ct.tr.CloseIdleConnections() } @@ -621,6 +754,28 @@ func (ct *clientTester) start(which string, errc chan<- error, fn func() error) }() } +func (ct *clientTester) readFrame() (Frame, error) { + return readFrameTimeout(ct.fr, 2*time.Second) +} + +func (ct *clientTester) firstHeaders() (*HeadersFrame, error) { + for { + f, err := ct.readFrame() + if err != nil { + return nil, fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + switch f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + continue + } + hf, ok := f.(*HeadersFrame) + if !ok { + return nil, fmt.Errorf("Got %T; want HeadersFrame", f) + } + return hf, nil + } +} + type countingReader struct { n *int64 } @@ -638,8 +793,12 @@ func TestTransportReqBodyAfterResponse_403(t *testing.T) { testTransportReqBodyA func testTransportReqBodyAfterResponse(t *testing.T, status int) { const bodySize = 10 << 20 + clientDone := make(chan struct{}) ct := newClientTester(t) ct.client = func() error { + defer ct.cc.(*net.TCPConn).CloseWrite() + defer close(clientDone) + var n int64 // atomic req, err := http.NewRequest("PUT", "https://dummy.tld/", io.LimitReader(countingReader{&n}, bodySize)) if err != nil { @@ -680,7 +839,15 @@ func testTransportReqBodyAfterResponse(t *testing.T, status int) { for { f, err := ct.fr.ReadFrame() if err != nil { - return err + select { + case <-clientDone: + // If the client's done, it + // will have reported any + // errors on its side. 
+ return nil + default: + return err + } } //println(fmt.Sprintf("server got frame: %v", f)) switch f := f.(type) { @@ -692,18 +859,18 @@ func testTransportReqBodyAfterResponse(t *testing.T, status int) { if f.StreamEnded() { return fmt.Errorf("headers contains END_STREAM unexpectedly: %v", f) } - time.Sleep(50 * time.Millisecond) // let client send body - enc.WriteField(hpack.HeaderField{Name: ":status", Value: strconv.Itoa(status)}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: true, - EndStream: false, - BlockFragment: buf.Bytes(), - }) case *DataFrame: dataLen := len(f.Data()) - dataRecv += int64(dataLen) if dataLen > 0 { + if dataRecv == 0 { + enc.WriteField(hpack.HeaderField{Name: ":status", Value: strconv.Itoa(status)}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + } if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil { return err } @@ -711,19 +878,19 @@ func testTransportReqBodyAfterResponse(t *testing.T, status int) { return err } } + dataRecv += int64(dataLen) + if !closed && ((status != 200 && dataRecv > 0) || (status == 200 && dataRecv == bodySize)) { closed = true if err := ct.fr.WriteData(f.StreamID, true, nil); err != nil { return err } - return nil } default: return fmt.Errorf("Unexpected client frame %v", f) } } - return nil } ct.run() } @@ -745,12 +912,12 @@ func TestTransportFullDuplex(t *testing.T) { pr, pw := io.Pipe() req, err := http.NewRequest("PUT", st.ts.URL, ioutil.NopCloser(pr)) if err != nil { - log.Fatal(err) + t.Fatal(err) } req.ContentLength = -1 res, err := c.Do(req) if err != nil { - log.Fatal(err) + t.Fatal(err) } defer res.Body.Close() if res.StatusCode != 200 { @@ -951,41 +1118,38 @@ func testTransportResPattern(t *testing.T, expect100Continue, resHeader headerTy if err != nil { return err } + endStream := false + send := func(mode headerType) { + hbf := buf.Bytes() + switch mode { + case 
oneHeader: + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.Header().StreamID, + EndHeaders: true, + EndStream: endStream, + BlockFragment: hbf, + }) + case splitHeader: + if len(hbf) < 2 { + panic("too small") + } + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.Header().StreamID, + EndHeaders: false, + EndStream: endStream, + BlockFragment: hbf[:1], + }) + ct.fr.WriteContinuation(f.Header().StreamID, true, hbf[1:]) + default: + panic("bogus mode") + } + } switch f := f.(type) { case *WindowUpdateFrame, *SettingsFrame: case *DataFrame: - // ignore for now. - case *HeadersFrame: - endStream := false - send := func(mode headerType) { - hbf := buf.Bytes() - switch mode { - case oneHeader: - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: true, - EndStream: endStream, - BlockFragment: hbf, - }) - case splitHeader: - if len(hbf) < 2 { - panic("too small") - } - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: false, - EndStream: endStream, - BlockFragment: hbf[:1], - }) - ct.fr.WriteContinuation(f.StreamID, true, hbf[1:]) - default: - panic("bogus mode") - } - } - if expect100Continue != noHeader { - buf.Reset() - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"}) - send(expect100Continue) + if !f.StreamEnded() { + // No need to send flow control tokens. The test request body is tiny. 
+ continue } // Response headers (1+ frames; 1 or 2 in this test, but never 0) { @@ -1009,7 +1173,15 @@ func testTransportResPattern(t *testing.T, expect100Continue, resHeader headerTy enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "some-value"}) send(trailers) } - return nil + if endStream { + return nil + } + case *HeadersFrame: + if expect100Continue != noHeader { + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"}) + send(expect100Continue) + } } } } @@ -1087,7 +1259,7 @@ func TestTransportInvalidTrailer_Pseudo2(t *testing.T) { testTransportInvalidTrailer_Pseudo(t, splitHeader) } func testTransportInvalidTrailer_Pseudo(t *testing.T, trailers headerType) { - testInvalidTrailer(t, trailers, errPseudoTrailers, func(enc *hpack.Encoder) { + testInvalidTrailer(t, trailers, pseudoHeaderError(":colon"), func(enc *hpack.Encoder) { enc.WriteField(hpack.HeaderField{Name: ":colon", Value: "foo"}) enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) }) @@ -1100,19 +1272,19 @@ func TestTransportInvalidTrailer_Capital2(t *testing.T) { testTransportInvalidTrailer_Capital(t, splitHeader) } func testTransportInvalidTrailer_Capital(t *testing.T, trailers headerType) { - testInvalidTrailer(t, trailers, errInvalidHeaderFieldName, func(enc *hpack.Encoder) { + testInvalidTrailer(t, trailers, headerFieldNameError("Capital"), func(enc *hpack.Encoder) { enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) enc.WriteField(hpack.HeaderField{Name: "Capital", Value: "bad"}) }) } func TestTransportInvalidTrailer_EmptyFieldName(t *testing.T) { - testInvalidTrailer(t, oneHeader, errInvalidHeaderFieldName, func(enc *hpack.Encoder) { + testInvalidTrailer(t, oneHeader, headerFieldNameError(""), func(enc *hpack.Encoder) { enc.WriteField(hpack.HeaderField{Name: "", Value: "bad"}) }) } func TestTransportInvalidTrailer_BinaryFieldValue(t *testing.T) { - testInvalidTrailer(t, oneHeader, errInvalidHeaderFieldValue, func(enc *hpack.Encoder) { - 
enc.WriteField(hpack.HeaderField{Name: "", Value: "has\nnewline"}) + testInvalidTrailer(t, oneHeader, headerFieldValueError("has\nnewline"), func(enc *hpack.Encoder) { + enc.WriteField(hpack.HeaderField{Name: "x", Value: "has\nnewline"}) }) } @@ -1129,8 +1301,9 @@ func testInvalidTrailer(t *testing.T, trailers headerType, wantErr error, writeT return fmt.Errorf("status code = %v; want 200", res.StatusCode) } slurp, err := ioutil.ReadAll(res.Body) - if err != wantErr { - return fmt.Errorf("res.Body ReadAll error = %q, %v; want %v", slurp, err, wantErr) + se, ok := err.(StreamError) + if !ok || se.Cause != wantErr { + return fmt.Errorf("res.Body ReadAll error = %q, %#v; want StreamError with cause %T, %#v", slurp, err, wantErr, wantErr) } if len(slurp) > 0 { return fmt.Errorf("body = %q; want nothing", slurp) @@ -1514,7 +1687,6 @@ func testTransportResponseHeaderTimeout(t *testing.T, body bool) { } } } - return nil } ct.run() } @@ -1549,3 +1721,1321 @@ func TestTransportDisableCompression(t *testing.T) { } defer res.Body.Close() } + +// RFC 7540 section 8.1.2.2 +func TestTransportRejectsConnHeaders(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + var got []string + for k := range r.Header { + got = append(got, k) + } + sort.Strings(got) + w.Header().Set("Got-Header", strings.Join(got, ",")) + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + tests := []struct { + key string + value []string + want string + }{ + { + key: "Upgrade", + value: []string{"anything"}, + want: "ERROR: http2: invalid Upgrade request header: [\"anything\"]", + }, + { + key: "Connection", + value: []string{"foo"}, + want: "ERROR: http2: invalid Connection request header: [\"foo\"]", + }, + { + key: "Connection", + value: []string{"close"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Connection", + value: []string{"close", "something-else"}, + want: "ERROR: 
http2: invalid Connection request header: [\"close\" \"something-else\"]", + }, + { + key: "Connection", + value: []string{"keep-alive"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Proxy-Connection", // just deleted and ignored + value: []string{"keep-alive"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Transfer-Encoding", + value: []string{""}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Transfer-Encoding", + value: []string{"foo"}, + want: "ERROR: http2: invalid Transfer-Encoding request header: [\"foo\"]", + }, + { + key: "Transfer-Encoding", + value: []string{"chunked"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Transfer-Encoding", + value: []string{"chunked", "other"}, + want: "ERROR: http2: invalid Transfer-Encoding request header: [\"chunked\" \"other\"]", + }, + { + key: "Content-Length", + value: []string{"123"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Keep-Alive", + value: []string{"doop"}, + want: "Accept-Encoding,User-Agent", + }, + } + + for _, tt := range tests { + req, _ := http.NewRequest("GET", st.ts.URL, nil) + req.Header[tt.key] = tt.value + res, err := tr.RoundTrip(req) + var got string + if err != nil { + got = fmt.Sprintf("ERROR: %v", err) + } else { + got = res.Header.Get("Got-Header") + res.Body.Close() + } + if got != tt.want { + t.Errorf("For key %q, value %q, got = %q; want %q", tt.key, tt.value, got, tt.want) + } + } +} + +// golang.org/issue/14048 +func TestTransportFailsOnInvalidHeaders(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + var got []string + for k := range r.Header { + got = append(got, k) + } + sort.Strings(got) + w.Header().Set("Got-Header", strings.Join(got, ",")) + }, optOnlyServer) + defer st.Close() + + tests := [...]struct { + h http.Header + wantErr string + }{ + 0: { + h: http.Header{"with space": {"foo"}}, + wantErr: `invalid HTTP header name "with space"`, + }, + 1: { + h: http.Header{"name": {"Брэд"}}, + 
wantErr: "", // okay + }, + 2: { + h: http.Header{"имя": {"Brad"}}, + wantErr: `invalid HTTP header name "имя"`, + }, + 3: { + h: http.Header{"foo": {"foo\x01bar"}}, + wantErr: `invalid HTTP header value "foo\x01bar" for header "foo"`, + }, + } + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + for i, tt := range tests { + req, _ := http.NewRequest("GET", st.ts.URL, nil) + req.Header = tt.h + res, err := tr.RoundTrip(req) + var bad bool + if tt.wantErr == "" { + if err != nil { + bad = true + t.Errorf("case %d: error = %v; want no error", i, err) + } + } else { + if !strings.Contains(fmt.Sprint(err), tt.wantErr) { + bad = true + t.Errorf("case %d: error = %v; want error %q", i, err, tt.wantErr) + } + } + if err == nil { + if bad { + t.Logf("case %d: server got headers %q", i, res.Header.Get("Got-Header")) + } + res.Body.Close() + } + } +} + +// Tests that gzipReader doesn't crash on a second Read call following +// the first Read call's gzip.NewReader returning an error. +func TestGzipReader_DoubleReadCrash(t *testing.T) { + gz := &gzipReader{ + body: ioutil.NopCloser(strings.NewReader("0123456789")), + } + var buf [1]byte + n, err1 := gz.Read(buf[:]) + if n != 0 || !strings.Contains(fmt.Sprint(err1), "invalid header") { + t.Fatalf("Read = %v, %v; want 0, invalid header", n, err1) + } + n, err2 := gz.Read(buf[:]) + if n != 0 || err2 != err1 { + t.Fatalf("second Read = %v, %v; want 0, %v", n, err2, err1) + } +} + +func TestTransportNewTLSConfig(t *testing.T) { + tests := [...]struct { + conf *tls.Config + host string + want *tls.Config + }{ + // Normal case. 
+ 0: { + conf: nil, + host: "foo.com", + want: &tls.Config{ + ServerName: "foo.com", + NextProtos: []string{NextProtoTLS}, + }, + }, + + // User-provided name (bar.com) takes precedence: + 1: { + conf: &tls.Config{ + ServerName: "bar.com", + }, + host: "foo.com", + want: &tls.Config{ + ServerName: "bar.com", + NextProtos: []string{NextProtoTLS}, + }, + }, + + // NextProto is prepended: + 2: { + conf: &tls.Config{ + NextProtos: []string{"foo", "bar"}, + }, + host: "example.com", + want: &tls.Config{ + ServerName: "example.com", + NextProtos: []string{NextProtoTLS, "foo", "bar"}, + }, + }, + + // NextProto is not duplicated: + 3: { + conf: &tls.Config{ + NextProtos: []string{"foo", "bar", NextProtoTLS}, + }, + host: "example.com", + want: &tls.Config{ + ServerName: "example.com", + NextProtos: []string{"foo", "bar", NextProtoTLS}, + }, + }, + } + for i, tt := range tests { + // Ignore the session ticket keys part, which ends up populating + // unexported fields in the Config: + if tt.conf != nil { + tt.conf.SessionTicketsDisabled = true + } + + tr := &Transport{TLSClientConfig: tt.conf} + got := tr.newTLSConfig(tt.host) + + got.SessionTicketsDisabled = false + + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("%d. got %#v; want %#v", i, got, tt.want) + } + } +} + +// The Google GFE responds to HEAD requests with a HEADERS frame +// without END_STREAM, followed by a 0-length DATA frame with +// END_STREAM. Make sure we don't get confused by that. (We did.) 
+func TestTransportReadHeadResponse(t *testing.T) { + ct := newClientTester(t) + clientDone := make(chan struct{}) + ct.client = func() error { + defer close(clientDone) + req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return err + } + if res.ContentLength != 123 { + return fmt.Errorf("Content-Length = %d; want 123", res.ContentLength) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("ReadAll: %v", err) + } + if len(slurp) > 0 { + return fmt.Errorf("Unexpected non-empty ReadAll body: %q", slurp) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + t.Logf("ReadFrame: %v", err) + return nil + } + hf, ok := f.(*HeadersFrame) + if !ok { + continue + } + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, // as the GFE does + BlockFragment: buf.Bytes(), + }) + ct.fr.WriteData(hf.StreamID, true, nil) + + <-clientDone + return nil + } + } + ct.run() +} + +type neverEnding byte + +func (b neverEnding) Read(p []byte) (int, error) { + for i := range p { + p[i] = byte(b) + } + return len(p), nil +} + +// golang.org/issue/15425: test that a handler closing the request +// body doesn't terminate the stream to the peer. 
(It just stops +// readability from the handler's side, and eventually the client +// runs out of flow control tokens) +func TestTransportHandlerBodyClose(t *testing.T) { + const bodySize = 10 << 20 + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + r.Body.Close() + io.Copy(w, io.LimitReader(neverEnding('A'), bodySize)) + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + g0 := runtime.NumGoroutine() + + const numReq = 10 + for i := 0; i < numReq; i++ { + req, err := http.NewRequest("POST", st.ts.URL, struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)}) + if err != nil { + t.Fatal(err) + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + if n != bodySize || err != nil { + t.Fatalf("req#%d: Copy = %d, %v; want %d, nil", i, n, err, bodySize) + } + } + tr.CloseIdleConnections() + + gd := runtime.NumGoroutine() - g0 + if gd > numReq/2 { + t.Errorf("appeared to leak goroutines") + } + +} + +// https://golang.org/issue/15930 +func TestTransportFlowControl(t *testing.T) { + const bufLen = 64 << 10 + var total int64 = 100 << 20 // 100MB + if testing.Short() { + total = 10 << 20 + } + + var wrote int64 // updated atomically + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + b := make([]byte, bufLen) + for wrote < total { + n, err := w.Write(b) + atomic.AddInt64(&wrote, int64(n)) + if err != nil { + t.Errorf("ResponseWriter.Write error: %v", err) + break + } + w.(http.Flusher).Flush() + } + }, optOnlyServer) + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal("NewRequest error:", err) + } + resp, err := tr.RoundTrip(req) + if err != nil { + t.Fatal("RoundTrip error:", err) + } + defer resp.Body.Close() + + var read int64 + b := 
make([]byte, bufLen) + for { + n, err := resp.Body.Read(b) + if err == io.EOF { + break + } + if err != nil { + t.Fatal("Read error:", err) + } + read += int64(n) + + const max = transportDefaultStreamFlow + if w := atomic.LoadInt64(&wrote); -max > read-w || read-w > max { + t.Fatalf("Too much data inflight: server wrote %v bytes but client only received %v", w, read) + } + + // Let the server get ahead of the client. + time.Sleep(1 * time.Millisecond) + } +} + +// golang.org/issue/14627 -- if the server sends a GOAWAY frame, make +// the Transport remember it and return it back to users (via +// RoundTrip or request body reads) if needed (e.g. if the server +// proceeds to close the TCP connection before the client gets its +// response) +func TestTransportUsesGoAwayDebugError_RoundTrip(t *testing.T) { + testTransportUsesGoAwayDebugError(t, false) +} + +func TestTransportUsesGoAwayDebugError_Body(t *testing.T) { + testTransportUsesGoAwayDebugError(t, true) +} + +func testTransportUsesGoAwayDebugError(t *testing.T, failMidBody bool) { + ct := newClientTester(t) + clientDone := make(chan struct{}) + + const goAwayErrCode = ErrCodeHTTP11Required // arbitrary + const goAwayDebugData = "some debug data" + + ct.client = func() error { + defer close(clientDone) + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if failMidBody { + if err != nil { + return fmt.Errorf("unexpected client RoundTrip error: %v", err) + } + _, err = io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + } + want := GoAwayError{ + LastStreamID: 5, + ErrCode: goAwayErrCode, + DebugData: goAwayDebugData, + } + if !reflect.DeepEqual(err, want) { + t.Errorf("RoundTrip error = %T: %#v, want %T (%#v)", err, err, want, want) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + t.Logf("ReadFrame: %v", err) + return nil + } + hf, ok := f.(*HeadersFrame) + if !ok { + continue + } + if 
failMidBody { + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + } + // Write two GOAWAY frames, to test that the Transport takes + // the interesting parts of both. + ct.fr.WriteGoAway(5, ErrCodeNo, []byte(goAwayDebugData)) + ct.fr.WriteGoAway(5, goAwayErrCode, nil) + ct.sc.(*net.TCPConn).CloseWrite() + <-clientDone + return nil + } + } + ct.run() +} + +func testTransportReturnsUnusedFlowControl(t *testing.T, oneDataFrame bool) { + ct := newClientTester(t) + + clientClosed := make(chan struct{}) + serverWroteFirstByte := make(chan struct{}) + + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return err + } + <-serverWroteFirstByte + + if n, err := res.Body.Read(make([]byte, 1)); err != nil || n != 1 { + return fmt.Errorf("body read = %v, %v; want 1, nil", n, err) + } + res.Body.Close() // leaving 4999 bytes unread + close(clientClosed) + + return nil + } + ct.server = func() error { + ct.greet() + + var hf *HeadersFrame + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + switch f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + continue + } + var ok bool + hf, ok = f.(*HeadersFrame) + if !ok { + return fmt.Errorf("Got %T; want HeadersFrame", f) + } + break + } + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "5000"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + + // Two 
cases: + // - Send one DATA frame with 5000 bytes. + // - Send two DATA frames with 1 and 4999 bytes each. + // + // In both cases, the client should consume one byte of data, + // refund that byte, then refund the following 4999 bytes. + // + // In the second case, the server waits for the client connection to + // close before seconding the second DATA frame. This tests the case + // where the client receives a DATA frame after it has reset the stream. + if oneDataFrame { + ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 5000)) + close(serverWroteFirstByte) + <-clientClosed + } else { + ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 1)) + close(serverWroteFirstByte) + <-clientClosed + ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 4999)) + } + + waitingFor := "RSTStreamFrame" + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for %s: %v", waitingFor, err) + } + if _, ok := f.(*SettingsFrame); ok { + continue + } + switch waitingFor { + case "RSTStreamFrame": + if rf, ok := f.(*RSTStreamFrame); !ok || rf.ErrCode != ErrCodeCancel { + return fmt.Errorf("Expected a RSTStreamFrame with code cancel; got %v", summarizeFrame(f)) + } + waitingFor = "WindowUpdateFrame" + case "WindowUpdateFrame": + if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != 4999 { + return fmt.Errorf("Expected WindowUpdateFrame for 4999 bytes; got %v", summarizeFrame(f)) + } + return nil + } + } + } + ct.run() +} + +// See golang.org/issue/16481 +func TestTransportReturnsUnusedFlowControlSingleWrite(t *testing.T) { + testTransportReturnsUnusedFlowControl(t, true) +} + +// See golang.org/issue/20469 +func TestTransportReturnsUnusedFlowControlMultipleWrites(t *testing.T) { + testTransportReturnsUnusedFlowControl(t, false) +} + +// Issue 16612: adjust flow control on open streams when transport +// receives SETTINGS with INITIAL_WINDOW_SIZE from server. 
+func TestTransportAdjustsFlowControl(t *testing.T) { + ct := newClientTester(t) + clientDone := make(chan struct{}) + + const bodySize = 1 << 20 + + ct.client = func() error { + defer ct.cc.(*net.TCPConn).CloseWrite() + defer close(clientDone) + + req, _ := http.NewRequest("POST", "https://dummy.tld/", struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)}) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return err + } + res.Body.Close() + return nil + } + ct.server = func() error { + _, err := io.ReadFull(ct.sc, make([]byte, len(ClientPreface))) + if err != nil { + return fmt.Errorf("reading client preface: %v", err) + } + + var gotBytes int64 + var sentSettings bool + for { + f, err := ct.fr.ReadFrame() + if err != nil { + select { + case <-clientDone: + return nil + default: + return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + } + switch f := f.(type) { + case *DataFrame: + gotBytes += int64(len(f.Data())) + // After we've got half the client's + // initial flow control window's worth + // of request body data, give it just + // enough flow control to finish. 
+ if gotBytes >= initialWindowSize/2 && !sentSettings { + sentSettings = true + + ct.fr.WriteSettings(Setting{ID: SettingInitialWindowSize, Val: bodySize}) + ct.fr.WriteWindowUpdate(0, bodySize) + ct.fr.WriteSettingsAck() + } + + if f.StreamEnded() { + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + } + } + } + } + ct.run() +} + +// See golang.org/issue/16556 +func TestTransportReturnsDataPaddingFlowControl(t *testing.T) { + ct := newClientTester(t) + + unblockClient := make(chan bool, 1) + + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return err + } + defer res.Body.Close() + <-unblockClient + return nil + } + ct.server = func() error { + ct.greet() + + var hf *HeadersFrame + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + switch f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + continue + } + var ok bool + hf, ok = f.(*HeadersFrame) + if !ok { + return fmt.Errorf("Got %T; want HeadersFrame", f) + } + break + } + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "5000"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + pad := make([]byte, 5) + ct.fr.WriteDataPadded(hf.StreamID, false, make([]byte, 5000), pad) // without ending stream + + f, err := ct.readNonSettingsFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for first WindowUpdateFrame: %v", err) + } + wantBack := uint32(len(pad)) + 1 // one byte for the length of the 
padding + if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != wantBack || wuf.StreamID != 0 { + return fmt.Errorf("Expected conn WindowUpdateFrame for %d bytes; got %v", wantBack, summarizeFrame(f)) + } + + f, err = ct.readNonSettingsFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for second WindowUpdateFrame: %v", err) + } + if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != wantBack || wuf.StreamID == 0 { + return fmt.Errorf("Expected stream WindowUpdateFrame for %d bytes; got %v", wantBack, summarizeFrame(f)) + } + unblockClient <- true + return nil + } + ct.run() +} + +// golang.org/issue/16572 -- RoundTrip shouldn't hang when it gets a +// StreamError as a result of the response HEADERS +func TestTransportReturnsErrorOnBadResponseHeaders(t *testing.T) { + ct := newClientTester(t) + + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err == nil { + res.Body.Close() + return errors.New("unexpected successful GET") + } + want := StreamError{1, ErrCodeProtocol, headerFieldNameError(" content-type")} + if !reflect.DeepEqual(want, err) { + t.Errorf("RoundTrip error = %#v; want %#v", err, want) + } + return nil + } + ct.server = func() error { + ct.greet() + + hf, err := ct.firstHeaders() + if err != nil { + return err + } + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: " content-type", Value: "bogus"}) // bogus spaces + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + + for { + fr, err := ct.readFrame() + if err != nil { + return fmt.Errorf("error waiting for RST_STREAM from client: %v", err) + } + if _, ok := fr.(*SettingsFrame); ok { + continue + } + if rst, ok := fr.(*RSTStreamFrame); !ok || rst.StreamID != 1 || rst.ErrCode != 
ErrCodeProtocol { + t.Errorf("Frame = %v; want RST_STREAM for stream 1 with ErrCodeProtocol", summarizeFrame(fr)) + } + break + } + + return nil + } + ct.run() +} + +// byteAndEOFReader returns is in an io.Reader which reads one byte +// (the underlying byte) and io.EOF at once in its Read call. +type byteAndEOFReader byte + +func (b byteAndEOFReader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + panic("unexpected useless call") + } + p[0] = byte(b) + return 1, io.EOF +} + +// Issue 16788: the Transport had a regression where it started +// sending a spurious DATA frame with a duplicate END_STREAM bit after +// the request body writer goroutine had already read an EOF from the +// Request.Body and included the END_STREAM on a data-carrying DATA +// frame. +// +// Notably, to trigger this, the requests need to use a Request.Body +// which returns (non-0, io.EOF) and also needs to set the ContentLength +// explicitly. +func TestTransportBodyDoubleEndStream(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // Nothing. + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + for i := 0; i < 2; i++ { + req, _ := http.NewRequest("POST", st.ts.URL, byteAndEOFReader('a')) + req.ContentLength = 1 + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatalf("failure on req %d: %v", i+1, err) + } + defer res.Body.Close() + } +} + +// golang.org/issue/16847, golang.org/issue/19103 +func TestTransportRequestPathPseudo(t *testing.T) { + type result struct { + path string + err string + } + tests := []struct { + req *http.Request + want result + }{ + 0: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Host: "foo.com", + Path: "/foo", + }, + }, + want: result{path: "/foo"}, + }, + // In Go 1.7, we accepted paths of "//foo". + // In Go 1.8, we rejected it (issue 16847). + // In Go 1.9, we accepted it again (issue 19103). 
+ 1: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Host: "foo.com", + Path: "//foo", + }, + }, + want: result{path: "//foo"}, + }, + + // Opaque with //$Matching_Hostname/path + 2: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "https", + Opaque: "//foo.com/path", + Host: "foo.com", + Path: "/ignored", + }, + }, + want: result{path: "/path"}, + }, + + // Opaque with some other Request.Host instead: + 3: { + req: &http.Request{ + Method: "GET", + Host: "bar.com", + URL: &url.URL{ + Scheme: "https", + Opaque: "//bar.com/path", + Host: "foo.com", + Path: "/ignored", + }, + }, + want: result{path: "/path"}, + }, + + // Opaque without the leading "//": + 4: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Opaque: "/path", + Host: "foo.com", + Path: "/ignored", + }, + }, + want: result{path: "/path"}, + }, + + // Opaque we can't handle: + 5: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "https", + Opaque: "//unknown_host/path", + Host: "foo.com", + Path: "/ignored", + }, + }, + want: result{err: `invalid request :path "https://unknown_host/path" from URL.Opaque = "//unknown_host/path"`}, + }, + + // A CONNECT request: + 6: { + req: &http.Request{ + Method: "CONNECT", + URL: &url.URL{ + Host: "foo.com", + }, + }, + want: result{}, + }, + } + for i, tt := range tests { + cc := &ClientConn{} + cc.henc = hpack.NewEncoder(&cc.hbuf) + cc.mu.Lock() + hdrs, err := cc.encodeHeaders(tt.req, false, "", -1) + cc.mu.Unlock() + var got result + hpackDec := hpack.NewDecoder(initialHeaderTableSize, func(f hpack.HeaderField) { + if f.Name == ":path" { + got.path = f.Value + } + }) + if err != nil { + got.err = err.Error() + } else if len(hdrs) > 0 { + if _, err := hpackDec.Write(hdrs); err != nil { + t.Errorf("%d. bogus hpack: %v", i, err) + continue + } + } + if got != tt.want { + t.Errorf("%d. 
got %+v; want %+v", i, got, tt.want) + } + + } + +} + +// golang.org/issue/17071 -- don't sniff the first byte of the request body +// before we've determined that the ClientConn is usable. +func TestRoundTripDoesntConsumeRequestBodyEarly(t *testing.T) { + const body = "foo" + req, _ := http.NewRequest("POST", "http://foo.com/", ioutil.NopCloser(strings.NewReader(body))) + cc := &ClientConn{ + closed: true, + } + _, err := cc.RoundTrip(req) + if err != errClientConnUnusable { + t.Fatalf("RoundTrip = %v; want errClientConnUnusable", err) + } + slurp, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("ReadAll = %v", err) + } + if string(slurp) != body { + t.Errorf("Body = %q; want %q", slurp, body) + } +} + +func TestClientConnPing(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}, optOnlyServer) + defer st.Close() + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + cc, err := tr.dialClientConn(st.ts.Listener.Addr().String(), false) + if err != nil { + t.Fatal(err) + } + if err = cc.Ping(testContext{}); err != nil { + t.Fatal(err) + } +} + +// Issue 16974: if the server sent a DATA frame after the user +// canceled the Transport's Request, the Transport previously wrote to a +// closed pipe, got an error, and ended up closing the whole TCP +// connection. +func TestTransportCancelDataResponseRace(t *testing.T) { + cancel := make(chan struct{}) + clientGotError := make(chan bool, 1) + + const msg = "Hello." 
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + if strings.Contains(r.URL.Path, "/hello") { + time.Sleep(50 * time.Millisecond) + io.WriteString(w, msg) + return + } + for i := 0; i < 50; i++ { + io.WriteString(w, "Some data.") + w.(http.Flusher).Flush() + if i == 2 { + close(cancel) + <-clientGotError + } + time.Sleep(10 * time.Millisecond) + } + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + c := &http.Client{Transport: tr} + req, _ := http.NewRequest("GET", st.ts.URL, nil) + req.Cancel = cancel + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + if _, err = io.Copy(ioutil.Discard, res.Body); err == nil { + t.Fatal("unexpected success") + } + clientGotError <- true + + res, err = c.Get(st.ts.URL + "/hello") + if err != nil { + t.Fatal(err) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if string(slurp) != msg { + t.Errorf("Got = %q; want %q", slurp, msg) + } +} + +func TestTransportRetryAfterGOAWAY(t *testing.T) { + var dialer struct { + sync.Mutex + count int + } + ct1 := make(chan *clientTester) + ct2 := make(chan *clientTester) + + ln := newLocalListener(t) + defer ln.Close() + + tr := &Transport{ + TLSClientConfig: tlsConfigInsecure, + } + tr.DialTLS = func(network, addr string, cfg *tls.Config) (net.Conn, error) { + dialer.Lock() + defer dialer.Unlock() + dialer.count++ + if dialer.count == 3 { + return nil, errors.New("unexpected number of dials") + } + cc, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + return nil, fmt.Errorf("dial error: %v", err) + } + sc, err := ln.Accept() + if err != nil { + return nil, fmt.Errorf("accept error: %v", err) + } + ct := &clientTester{ + t: t, + tr: tr, + cc: cc, + sc: sc, + fr: NewFramer(sc, sc), + } + switch dialer.count { + case 1: + ct1 <- ct + case 2: + ct2 <- ct + } + return cc, nil + } + + errs := make(chan error, 3) + done := make(chan 
struct{}) + defer close(done) + + // Client. + go func() { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := tr.RoundTrip(req) + if res != nil { + res.Body.Close() + if got := res.Header.Get("Foo"); got != "bar" { + err = fmt.Errorf("foo header = %q; want bar", got) + } + } + if err != nil { + err = fmt.Errorf("RoundTrip: %v", err) + } + errs <- err + }() + + connToClose := make(chan io.Closer, 2) + + // Server for the first request. + go func() { + var ct *clientTester + select { + case ct = <-ct1: + case <-done: + return + } + + connToClose <- ct.cc + ct.greet() + hf, err := ct.firstHeaders() + if err != nil { + errs <- fmt.Errorf("server1 failed reading HEADERS: %v", err) + return + } + t.Logf("server1 got %v", hf) + if err := ct.fr.WriteGoAway(0 /*max id*/, ErrCodeNo, nil); err != nil { + errs <- fmt.Errorf("server1 failed writing GOAWAY: %v", err) + return + } + errs <- nil + }() + + // Server for the second request. + go func() { + var ct *clientTester + select { + case ct = <-ct2: + case <-done: + return + } + + connToClose <- ct.cc + ct.greet() + hf, err := ct.firstHeaders() + if err != nil { + errs <- fmt.Errorf("server2 failed reading HEADERS: %v", err) + return + } + t.Logf("server2 got %v", hf) + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) + err = ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + if err != nil { + errs <- fmt.Errorf("server2 failed writing response HEADERS: %v", err) + } else { + errs <- nil + } + }() + + for k := 0; k < 3; k++ { + select { + case err := <-errs: + if err != nil { + t.Error(err) + } + case <-time.After(1 * time.Second): + t.Errorf("timed out") + } + } + + for { + select { + case c := <-connToClose: + c.Close() + default: + return + } + } +} + +func TestAuthorityAddr(t 
*testing.T) { + tests := []struct { + scheme, authority string + want string + }{ + {"http", "foo.com", "foo.com:80"}, + {"https", "foo.com", "foo.com:443"}, + {"https", "foo.com:1234", "foo.com:1234"}, + {"https", "1.2.3.4:1234", "1.2.3.4:1234"}, + {"https", "1.2.3.4", "1.2.3.4:443"}, + {"https", "[::1]:1234", "[::1]:1234"}, + {"https", "[::1]", "[::1]:443"}, + } + for _, tt := range tests { + got := authorityAddr(tt.scheme, tt.authority) + if got != tt.want { + t.Errorf("authorityAddr(%q, %q) = %q; want %q", tt.scheme, tt.authority, got, tt.want) + } + } +} + +// Issue 20448: stop allocating for DATA frames' payload after +// Response.Body.Close is called. +func TestTransportAllocationsAfterResponseBodyClose(t *testing.T) { + megabyteZero := make([]byte, 1<<20) + + writeErr := make(chan error, 1) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.(http.Flusher).Flush() + var sum int64 + for i := 0; i < 100; i++ { + n, err := w.Write(megabyteZero) + sum += int64(n) + if err != nil { + writeErr <- err + return + } + } + t.Logf("wrote all %d bytes", sum) + writeErr <- nil + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + res, err := c.Get(st.ts.URL) + if err != nil { + t.Fatal(err) + } + var buf [1]byte + if _, err := res.Body.Read(buf[:]); err != nil { + t.Error(err) + } + if err := res.Body.Close(); err != nil { + t.Error(err) + } + + trb, ok := res.Body.(transportResponseBody) + if !ok { + t.Fatalf("res.Body = %T; want transportResponseBody", res.Body) + } + if trb.cs.bufPipe.b != nil { + t.Errorf("response body pipe is still open") + } + + gotErr := <-writeErr + if gotErr == nil { + t.Errorf("Handler unexpectedly managed to write its entire response without getting an error") + } else if gotErr != errStreamClosed { + t.Errorf("Handler Write err = %v; want errStreamClosed", gotErr) + } +} + +// Issue 18891: make sure 
Request.Body == NoBody means no DATA frame +// is ever sent, even if empty. +func TestTransportNoBodyMeansNoDATA(t *testing.T) { + ct := newClientTester(t) + + unblockClient := make(chan bool) + + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", go18httpNoBody()) + ct.tr.RoundTrip(req) + <-unblockClient + return nil + } + ct.server = func() error { + defer close(unblockClient) + defer ct.cc.(*net.TCPConn).Close() + ct.greet() + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + switch f := f.(type) { + default: + return fmt.Errorf("Got %T; want HeadersFrame", f) + case *WindowUpdateFrame, *SettingsFrame: + continue + case *HeadersFrame: + if !f.StreamEnded() { + return fmt.Errorf("got headers frame without END_STREAM") + } + return nil + } + } + } + ct.run() +} diff --git a/fn/vendor/golang.org/x/net/http2/write.go b/fn/vendor/golang.org/x/net/http2/write.go index 020d0fce9..6b0dfae31 100644 --- a/fn/vendor/golang.org/x/net/http2/write.go +++ b/fn/vendor/golang.org/x/net/http2/write.go @@ -9,15 +9,21 @@ import ( "fmt" "log" "net/http" - "sort" + "net/url" "time" "golang.org/x/net/http2/hpack" + "golang.org/x/net/lex/httplex" ) // writeFramer is implemented by any type that is used to write frames. type writeFramer interface { writeFrame(writeContext) error + + // staysWithinBuffer reports whether this writer promises that + // it will only write less than or equal to size bytes, and it + // won't Flush the write context. + staysWithinBuffer(size int) bool } // writeContext is the interface needed by the various frame writer @@ -39,9 +45,10 @@ type writeContext interface { HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) } -// endsStream reports whether the given frame writer w will locally -// close the stream. 
-func endsStream(w writeFramer) bool { +// writeEndsStream reports whether w writes a frame that will transition +// the stream to a half-closed local state. This returns false for RST_STREAM, +// which closes the entire stream (not just the local half). +func writeEndsStream(w writeFramer) bool { switch v := w.(type) { case *writeData: return v.endStream @@ -51,7 +58,7 @@ func endsStream(w writeFramer) bool { // This can only happen if the caller reuses w after it's // been intentionally nil'ed out to prevent use. Keep this // here to catch future refactoring breaking it. - panic("endsStream called on nil writeFramer") + panic("writeEndsStream called on nil writeFramer") } return false } @@ -62,8 +69,16 @@ func (flushFrameWriter) writeFrame(ctx writeContext) error { return ctx.Flush() } +func (flushFrameWriter) staysWithinBuffer(max int) bool { return false } + type writeSettings []Setting +func (s writeSettings) staysWithinBuffer(max int) bool { + const settingSize = 6 // uint16 + uint32 + return frameHeaderLen+settingSize*len(s) <= max + +} + func (s writeSettings) writeFrame(ctx writeContext) error { return ctx.Framer().WriteSettings([]Setting(s)...) } @@ -83,6 +98,8 @@ func (p *writeGoAway) writeFrame(ctx writeContext) error { return err } +func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes + type writeData struct { streamID uint32 p []byte @@ -97,6 +114,10 @@ func (w *writeData) writeFrame(ctx writeContext) error { return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) } +func (w *writeData) staysWithinBuffer(max int) bool { + return frameHeaderLen+len(w.p) <= max +} + // handlerPanicRST is the message sent from handler goroutines when // the handler panics. 
type handlerPanicRST struct { @@ -107,22 +128,57 @@ func (hp handlerPanicRST) writeFrame(ctx writeContext) error { return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) } +func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + func (se StreamError) writeFrame(ctx writeContext) error { return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) } +func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + type writePingAck struct{ pf *PingFrame } func (w writePingAck) writeFrame(ctx writeContext) error { return ctx.Framer().WritePing(true, w.pf.Data) } +func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max } + type writeSettingsAck struct{} func (writeSettingsAck) writeFrame(ctx writeContext) error { return ctx.Framer().WriteSettingsAck() } +func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max } + +// splitHeaderBlock splits headerBlock into fragments so that each fragment fits +// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true +// for the first/last fragment, respectively. +func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error { + // For now we're lazy and just pick the minimum MAX_FRAME_SIZE + // that all peers must support (16KB). Later we could care + // more and send larger frames if the peer advertised it, but + // there's little point. Most headers are small anyway (so we + // generally won't have CONTINUATION frames), and extra frames + // only waste 9 bytes anyway. 
+ const maxFrameSize = 16384 + + first := true + for len(headerBlock) > 0 { + frag := headerBlock + if len(frag) > maxFrameSize { + frag = frag[:maxFrameSize] + } + headerBlock = headerBlock[len(frag):] + if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil { + return err + } + first = false + } + return nil +} + // writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames // for HTTP response headers or trailers from a server handler. type writeResHeaders struct { @@ -144,6 +200,17 @@ func encKV(enc *hpack.Encoder, k, v string) { enc.WriteField(hpack.HeaderField{Name: k, Value: v}) } +func (w *writeResHeaders) staysWithinBuffer(max int) bool { + // TODO: this is a common one. It'd be nice to return true + // here and get into the fast path if we could be clever and + // calculate the size fast enough, or at least a conservative + // uppper bound that usually fires. (Maybe if w.h and + // w.trailers are nil, so we don't need to enumerate it.) + // Otherwise I'm afraid that just calculating the length to + // answer this question would be slower than the ~2µs benefit. + return false +} + func (w *writeResHeaders) writeFrame(ctx writeContext) error { enc, buf := ctx.HeaderEncoder() buf.Reset() @@ -169,39 +236,69 @@ func (w *writeResHeaders) writeFrame(ctx writeContext) error { panic("unexpected empty hpack") } - // For now we're lazy and just pick the minimum MAX_FRAME_SIZE - // that all peers must support (16KB). Later we could care - // more and send larger frames if the peer advertised it, but - // there's little point. Most headers are small anyway (so we - // generally won't have CONTINUATION frames), and extra frames - // only waste 9 bytes anyway. 
- const maxFrameSize = 16384 + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} - first := true - for len(headerBlock) > 0 { - frag := headerBlock - if len(frag) > maxFrameSize { - frag = frag[:maxFrameSize] - } - headerBlock = headerBlock[len(frag):] - endHeaders := len(headerBlock) == 0 - var err error - if first { - first = false - err = ctx.Framer().WriteHeaders(HeadersFrameParam{ - StreamID: w.streamID, - BlockFragment: frag, - EndStream: w.endStream, - EndHeaders: endHeaders, - }) - } else { - err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag) - } - if err != nil { - return err - } +func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: frag, + EndStream: w.endStream, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) + } +} + +// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames. +type writePushPromise struct { + streamID uint32 // pusher stream + method string // for :method + url *url.URL // for :scheme, :authority, :path + h http.Header + + // Creates an ID for a pushed stream. This runs on serveG just before + // the frame is written. The returned ID is copied to promisedID. 
+ allocatePromisedID func() (uint32, error) + promisedID uint32 +} + +func (w *writePushPromise) staysWithinBuffer(max int) bool { + // TODO: see writeResHeaders.staysWithinBuffer + return false +} + +func (w *writePushPromise) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + encKV(enc, ":method", w.method) + encKV(enc, ":scheme", w.url.Scheme) + encKV(enc, ":authority", w.url.Host) + encKV(enc, ":path", w.url.RequestURI()) + encodeHeaders(enc, w.h, nil) + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 { + panic("unexpected empty hpack") + } + + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} + +func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WritePushPromise(PushPromiseParam{ + StreamID: w.streamID, + PromiseID: w.promisedID, + BlockFragment: frag, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) } - return nil } type write100ContinueHeadersFrame struct { @@ -220,29 +317,49 @@ func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { }) } +func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool { + // Sloppy but conservative: + return 9+2*(len(":status")+len("100")) <= max +} + type writeWindowUpdate struct { streamID uint32 // or 0 for conn-level n uint32 } +func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) } +// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k]) +// is encoded only only if k is in keys. func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { - // TODO: garbage. pool sorters like http1? hot path for 1 key? 
if keys == nil { - keys = make([]string, 0, len(h)) - for k := range h { - keys = append(keys, k) - } - sort.Strings(keys) + sorter := sorterPool.Get().(*sorter) + // Using defer here, since the returned keys from the + // sorter.Keys method is only valid until the sorter + // is returned: + defer sorterPool.Put(sorter) + keys = sorter.Keys(h) } for _, k := range keys { vv := h[k] k = lowerHeader(k) + if !validWireHeaderFieldName(k) { + // Skip it as backup paranoia. Per + // golang.org/issue/14048, these should + // already be rejected at a higher level. + continue + } isTE := k == "transfer-encoding" for _, v := range vv { + if !httplex.ValidHeaderFieldValue(v) { + // TODO: return an error? golang.org/issue/14048 + // For now just omit it. + continue + } // TODO: more of "8.1.2.2 Connection-Specific Header Fields" if isTE && v != "trailers" { continue diff --git a/fn/vendor/golang.org/x/net/http2/writesched.go b/fn/vendor/golang.org/x/net/http2/writesched.go index c24316ce7..4fe307307 100644 --- a/fn/vendor/golang.org/x/net/http2/writesched.go +++ b/fn/vendor/golang.org/x/net/http2/writesched.go @@ -6,14 +6,53 @@ package http2 import "fmt" -// frameWriteMsg is a request to write a frame. -type frameWriteMsg struct { +// WriteScheduler is the interface implemented by HTTP/2 write schedulers. +// Methods are never called concurrently. +type WriteScheduler interface { + // OpenStream opens a new stream in the write scheduler. + // It is illegal to call this with streamID=0 or with a streamID that is + // already open -- the call may panic. + OpenStream(streamID uint32, options OpenStreamOptions) + + // CloseStream closes a stream in the write scheduler. Any frames queued on + // this stream should be discarded. It is illegal to call this on a stream + // that is not open -- the call may panic. + CloseStream(streamID uint32) + + // AdjustStream adjusts the priority of the given stream. 
This may be called + // on a stream that has not yet been opened or has been closed. Note that + // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See: + // https://tools.ietf.org/html/rfc7540#section-5.1 + AdjustStream(streamID uint32, priority PriorityParam) + + // Push queues a frame in the scheduler. In most cases, this will not be + // called with wr.StreamID()!=0 unless that stream is currently open. The one + // exception is RST_STREAM frames, which may be sent on idle or closed streams. + Push(wr FrameWriteRequest) + + // Pop dequeues the next frame to write. Returns false if no frames can + // be written. Frames with a given wr.StreamID() are Pop'd in the same + // order they are Push'd. + Pop() (wr FrameWriteRequest, ok bool) +} + +// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream. +type OpenStreamOptions struct { + // PusherID is zero if the stream was initiated by the client. Otherwise, + // PusherID names the stream that pushed the newly opened stream. + PusherID uint32 +} + +// FrameWriteRequest is a request to write a frame. +type FrameWriteRequest struct { // write is the interface value that does the writing, once the - // writeScheduler (below) has decided to select this frame - // to write. The write functions are all defined in write.go. + // WriteScheduler has selected this frame to write. The write + // functions are all defined in write.go. write writeFramer - stream *stream // used for prioritization. nil for non-stream frames. + // stream is the stream on which this frame will be written. + // nil for non-stream frames like PING and SETTINGS. 
+ stream *stream // done, if non-nil, must be a buffered channel with space for // 1 message and is sent the return value from write (or an @@ -21,263 +60,183 @@ type frameWriteMsg struct { done chan error } -// for debugging only: -func (wm frameWriteMsg) String() string { - var streamID uint32 - if wm.stream != nil { - streamID = wm.stream.id - } - var des string - if s, ok := wm.write.(fmt.Stringer); ok { - des = s.String() - } else { - des = fmt.Sprintf("%T", wm.write) - } - return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des) -} - -// writeScheduler tracks pending frames to write, priorities, and decides -// the next one to use. It is not thread-safe. -type writeScheduler struct { - // zero are frames not associated with a specific stream. - // They're sent before any stream-specific freams. - zero writeQueue - - // maxFrameSize is the maximum size of a DATA frame - // we'll write. Must be non-zero and between 16K-16M. - maxFrameSize uint32 - - // sq contains the stream-specific queues, keyed by stream ID. - // when a stream is idle, it's deleted from the map. - sq map[uint32]*writeQueue - - // canSend is a slice of memory that's reused between frame - // scheduling decisions to hold the list of writeQueues (from sq) - // which have enough flow control data to send. After canSend is - // built, the best is selected. - canSend []*writeQueue - - // pool of empty queues for reuse. 
- queuePool []*writeQueue -} - -func (ws *writeScheduler) putEmptyQueue(q *writeQueue) { - if len(q.s) != 0 { - panic("queue must be empty") - } - ws.queuePool = append(ws.queuePool, q) -} - -func (ws *writeScheduler) getEmptyQueue() *writeQueue { - ln := len(ws.queuePool) - if ln == 0 { - return new(writeQueue) - } - q := ws.queuePool[ln-1] - ws.queuePool = ws.queuePool[:ln-1] - return q -} - -func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 } - -func (ws *writeScheduler) add(wm frameWriteMsg) { - st := wm.stream - if st == nil { - ws.zero.push(wm) - } else { - ws.streamQueue(st.id).push(wm) - } -} - -func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue { - if q, ok := ws.sq[streamID]; ok { - return q - } - if ws.sq == nil { - ws.sq = make(map[uint32]*writeQueue) - } - q := ws.getEmptyQueue() - ws.sq[streamID] = q - return q -} - -// take returns the most important frame to write and removes it from the scheduler. -// It is illegal to call this if the scheduler is empty or if there are no connection-level -// flow control bytes available. -func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) { - if ws.maxFrameSize == 0 { - panic("internal error: ws.maxFrameSize not initialized or invalid") - } - - // If there any frames not associated with streams, prefer those first. - // These are usually SETTINGS, etc. - if !ws.zero.empty() { - return ws.zero.shift(), true - } - if len(ws.sq) == 0 { - return - } - - // Next, prioritize frames on streams that aren't DATA frames (no cost). - for id, q := range ws.sq { - if q.firstIsNoCost() { - return ws.takeFrom(id, q) +// StreamID returns the id of the stream this frame will be written to. +// 0 is used for non-stream frames such as PING and SETTINGS. +func (wr FrameWriteRequest) StreamID() uint32 { + if wr.stream == nil { + if se, ok := wr.write.(StreamError); ok { + // (*serverConn).resetStream doesn't set + // stream because it doesn't necessarily have + // one. 
So special case this type of write + // message. + return se.StreamID } - } - - // Now, all that remains are DATA frames with non-zero bytes to - // send. So pick the best one. - if len(ws.canSend) != 0 { - panic("should be empty") - } - for _, q := range ws.sq { - if n := ws.streamWritableBytes(q); n > 0 { - ws.canSend = append(ws.canSend, q) - } - } - if len(ws.canSend) == 0 { - return - } - defer ws.zeroCanSend() - - // TODO: find the best queue - q := ws.canSend[0] - - return ws.takeFrom(q.streamID(), q) -} - -// zeroCanSend is defered from take. -func (ws *writeScheduler) zeroCanSend() { - for i := range ws.canSend { - ws.canSend[i] = nil - } - ws.canSend = ws.canSend[:0] -} - -// streamWritableBytes returns the number of DATA bytes we could write -// from the given queue's stream, if this stream/queue were -// selected. It is an error to call this if q's head isn't a -// *writeData. -func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 { - wm := q.head() - ret := wm.stream.flow.available() // max we can write - if ret == 0 { return 0 } - if int32(ws.maxFrameSize) < ret { - ret = int32(ws.maxFrameSize) - } - if ret == 0 { - panic("internal error: ws.maxFrameSize not initialized or invalid") - } - wd := wm.write.(*writeData) - if len(wd.p) < int(ret) { - ret = int32(len(wd.p)) - } - return ret + return wr.stream.id } -func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) { - wm = q.head() - // If the first item in this queue costs flow control tokens - // and we don't have enough, write as much as we can. - if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 { - allowed := wm.stream.flow.available() // max we can write - if allowed == 0 { - // No quota available. Caller can try the next stream. 
- return frameWriteMsg{}, false - } - if int32(ws.maxFrameSize) < allowed { - allowed = int32(ws.maxFrameSize) - } - // TODO: further restrict the allowed size, because even if - // the peer says it's okay to write 16MB data frames, we might - // want to write smaller ones to properly weight competing - // streams' priorities. - - if len(wd.p) > int(allowed) { - wm.stream.flow.take(allowed) - chunk := wd.p[:allowed] - wd.p = wd.p[allowed:] - // Make up a new write message of a valid size, rather - // than shifting one off the queue. - return frameWriteMsg{ - stream: wm.stream, - write: &writeData{ - streamID: wd.streamID, - p: chunk, - // even if the original had endStream set, there - // arebytes remaining because len(wd.p) > allowed, - // so we know endStream is false: - endStream: false, - }, - // our caller is blocking on the final DATA frame, not - // these intermediates, so no need to wait: - done: nil, - }, true - } - wm.stream.flow.take(int32(len(wd.p))) +// DataSize returns the number of flow control bytes that must be consumed +// to write this entire frame. This is 0 for non-DATA frames. +func (wr FrameWriteRequest) DataSize() int { + if wd, ok := wr.write.(*writeData); ok { + return len(wd.p) } - - q.shift() - if q.empty() { - ws.putEmptyQueue(q) - delete(ws.sq, id) - } - return wm, true + return 0 } -func (ws *writeScheduler) forgetStream(id uint32) { - q, ok := ws.sq[id] - if !ok { +// Consume consumes min(n, available) bytes from this frame, where available +// is the number of flow control bytes available on the stream. Consume returns +// 0, 1, or 2 frames, where the integer return value gives the number of frames +// returned. +// +// If flow control prevents consuming any bytes, this returns (_, _, 0). If +// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this +// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and +// 'rest' contains the remaining bytes. 
The consumed bytes are deducted from the +// underlying stream's flow control budget. +func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) { + var empty FrameWriteRequest + + // Non-DATA frames are always consumed whole. + wd, ok := wr.write.(*writeData) + if !ok || len(wd.p) == 0 { + return wr, empty, 1 + } + + // Might need to split after applying limits. + allowed := wr.stream.flow.available() + if n < allowed { + allowed = n + } + if wr.stream.sc.maxFrameSize < allowed { + allowed = wr.stream.sc.maxFrameSize + } + if allowed <= 0 { + return empty, empty, 0 + } + if len(wd.p) > int(allowed) { + wr.stream.flow.take(allowed) + consumed := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[:allowed], + // Even if the original had endStream set, there + // are bytes remaining because len(wd.p) > allowed, + // so we know endStream is false. + endStream: false, + }, + // Our caller is blocking on the final DATA frame, not + // this intermediate frame, so no need to wait. + done: nil, + } + rest := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[allowed:], + endStream: wd.endStream, + }, + done: wr.done, + } + return consumed, rest, 2 + } + + // The frame is consumed whole. + // NB: This cast cannot overflow because allowed is <= math.MaxInt32. + wr.stream.flow.take(int32(len(wd.p))) + return wr, empty, 1 +} + +// String is for debugging only. +func (wr FrameWriteRequest) String() string { + var des string + if s, ok := wr.write.(fmt.Stringer); ok { + des = s.String() + } else { + des = fmt.Sprintf("%T", wr.write) + } + return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des) +} + +// replyToWriter sends err to wr.done and panics if the send must block +// This does nothing if wr.done is nil. 
+func (wr *FrameWriteRequest) replyToWriter(err error) { + if wr.done == nil { return } - delete(ws.sq, id) - - // But keep it for others later. - for i := range q.s { - q.s[i] = frameWriteMsg{} + select { + case wr.done <- err: + default: + panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write)) } - q.s = q.s[:0] - ws.putEmptyQueue(q) + wr.write = nil // prevent use (assume it's tainted after wr.done send) } +// writeQueue is used by implementations of WriteScheduler. type writeQueue struct { - s []frameWriteMsg + s []FrameWriteRequest } -// streamID returns the stream ID for a non-empty stream-specific queue. -func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id } - func (q *writeQueue) empty() bool { return len(q.s) == 0 } -func (q *writeQueue) push(wm frameWriteMsg) { - q.s = append(q.s, wm) +func (q *writeQueue) push(wr FrameWriteRequest) { + q.s = append(q.s, wr) } -// head returns the next item that would be removed by shift. -func (q *writeQueue) head() frameWriteMsg { +func (q *writeQueue) shift() FrameWriteRequest { if len(q.s) == 0 { panic("invalid use of queue") } - return q.s[0] -} - -func (q *writeQueue) shift() frameWriteMsg { - if len(q.s) == 0 { - panic("invalid use of queue") - } - wm := q.s[0] + wr := q.s[0] // TODO: less copy-happy queue. copy(q.s, q.s[1:]) - q.s[len(q.s)-1] = frameWriteMsg{} + q.s[len(q.s)-1] = FrameWriteRequest{} q.s = q.s[:len(q.s)-1] - return wm + return wr } -func (q *writeQueue) firstIsNoCost() bool { - if df, ok := q.s[0].write.(*writeData); ok { - return len(df.p) == 0 +// consume consumes up to n bytes from q.s[0]. If the frame is +// entirely consumed, it is removed from the queue. If the frame +// is partially consumed, the frame is kept with the consumed +// bytes removed. Returns true iff any bytes were consumed. 
+func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { + if len(q.s) == 0 { + return FrameWriteRequest{}, false } - return true + consumed, rest, numresult := q.s[0].Consume(n) + switch numresult { + case 0: + return FrameWriteRequest{}, false + case 1: + q.shift() + case 2: + q.s[0] = rest + } + return consumed, true +} + +type writeQueuePool []*writeQueue + +// put inserts an unused writeQueue into the pool. +func (p *writeQueuePool) put(q *writeQueue) { + for i := range q.s { + q.s[i] = FrameWriteRequest{} + } + q.s = q.s[:0] + *p = append(*p, q) +} + +// get returns an empty writeQueue. +func (p *writeQueuePool) get() *writeQueue { + ln := len(*p) + if ln == 0 { + return new(writeQueue) + } + x := ln - 1 + q := (*p)[x] + (*p)[x] = nil + *p = (*p)[:x] + return q } diff --git a/fn/vendor/golang.org/x/net/http2/writesched_priority.go b/fn/vendor/golang.org/x/net/http2/writesched_priority.go new file mode 100644 index 000000000..848fed6ec --- /dev/null +++ b/fn/vendor/golang.org/x/net/http2/writesched_priority.go @@ -0,0 +1,452 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" + "sort" +) + +// RFC 7540, Section 5.3.5: the default weight is 16. +const priorityDefaultWeight = 15 // 16 = 15 + 1 + +// PriorityWriteSchedulerConfig configures a priorityWriteScheduler. +type PriorityWriteSchedulerConfig struct { + // MaxClosedNodesInTree controls the maximum number of closed streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // "It is possible for a stream to become closed while prioritization + // information ... is in transit. ... This potentially creates suboptimal + // prioritization, since the stream could be given a priority that is + // different from what is intended. 
To avoid these problems, an endpoint + // SHOULD retain stream prioritization state for a period after streams + // become closed. The longer state is retained, the lower the chance that + // streams are assigned incorrect or default priority values." + MaxClosedNodesInTree int + + // MaxIdleNodesInTree controls the maximum number of idle streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // Similarly, streams that are in the "idle" state can be assigned + // priority or become a parent of other streams. This allows for the + // creation of a grouping node in the dependency tree, which enables + // more flexible expressions of priority. Idle streams begin with a + // default priority (Section 5.3.5). + MaxIdleNodesInTree int + + // ThrottleOutOfOrderWrites enables write throttling to help ensure that + // data is delivered in priority order. This works around a race where + // stream B depends on stream A and both streams are about to call Write + // to queue DATA frames. If B wins the race, a naive scheduler would eagerly + // write as much data from B as possible, but this is suboptimal because A + // is a higher-priority stream. With throttling enabled, we write a small + // amount of data from B to minimize the amount of bandwidth that B can + // steal from A. + ThrottleOutOfOrderWrites bool +} + +// NewPriorityWriteScheduler constructs a WriteScheduler that schedules +// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. +// If cfg is nil, default options are used. 
+func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { + if cfg == nil { + // For justification of these defaults, see: + // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY + cfg = &PriorityWriteSchedulerConfig{ + MaxClosedNodesInTree: 10, + MaxIdleNodesInTree: 10, + ThrottleOutOfOrderWrites: false, + } + } + + ws := &priorityWriteScheduler{ + nodes: make(map[uint32]*priorityNode), + maxClosedNodesInTree: cfg.MaxClosedNodesInTree, + maxIdleNodesInTree: cfg.MaxIdleNodesInTree, + enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, + } + ws.nodes[0] = &ws.root + if cfg.ThrottleOutOfOrderWrites { + ws.writeThrottleLimit = 1024 + } else { + ws.writeThrottleLimit = math.MaxInt32 + } + return ws +} + +type priorityNodeState int + +const ( + priorityNodeOpen priorityNodeState = iota + priorityNodeClosed + priorityNodeIdle +) + +// priorityNode is a node in an HTTP/2 priority tree. +// Each node is associated with a single stream ID. +// See RFC 7540, Section 5.3. +type priorityNode struct { + q writeQueue // queue of pending frames to write + id uint32 // id of the stream, or 0 for the root of the tree + weight uint8 // the actual weight is weight+1, so the value is in [1,256] + state priorityNodeState // open | closed | idle + bytes int64 // number of bytes written by this node, or 0 if closed + subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree + + // These links form the priority tree. + parent *priorityNode + kids *priorityNode // start of the kids list + prev, next *priorityNode // doubly-linked list of siblings +} + +func (n *priorityNode) setParent(parent *priorityNode) { + if n == parent { + panic("setParent to self") + } + if n.parent == parent { + return + } + // Unlink from current parent. + if parent := n.parent; parent != nil { + if n.prev == nil { + parent.kids = n.next + } else { + n.prev.next = n.next + } + if n.next != nil { + n.next.prev = n.prev + } + } + // Link to new parent. 
+ // If parent=nil, remove n from the tree. + // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder). + n.parent = parent + if parent == nil { + n.next = nil + n.prev = nil + } else { + n.next = parent.kids + n.prev = nil + if n.next != nil { + n.next.prev = n + } + parent.kids = n + } +} + +func (n *priorityNode) addBytes(b int64) { + n.bytes += b + for ; n != nil; n = n.parent { + n.subtreeBytes += b + } +} + +// walkReadyInOrder iterates over the tree in priority order, calling f for each node +// with a non-empty write queue. When f returns true, this funcion returns true and the +// walk halts. tmp is used as scratch space for sorting. +// +// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true +// if any ancestor p of n is still open (ignoring the root node). +func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { + if !n.q.empty() && f(n, openParent) { + return true + } + if n.kids == nil { + return false + } + + // Don't consider the root "open" when updating openParent since + // we can't send data frames on the root stream (only control frames). + if n.id != 0 { + openParent = openParent || (n.state == priorityNodeOpen) + } + + // Common case: only one kid or all kids have the same weight. + // Some clients don't use weights; other clients (like web browsers) + // use mostly-linear priority trees. + w := n.kids.weight + needSort := false + for k := n.kids.next; k != nil; k = k.next { + if k.weight != w { + needSort = true + break + } + } + if !needSort { + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false + } + + // Uncommon case: sort the child nodes. We remove the kids from the parent, + // then re-insert after sorting so we can reuse tmp for future sort calls. 
+ *tmp = (*tmp)[:0] + for n.kids != nil { + *tmp = append(*tmp, n.kids) + n.kids.setParent(nil) + } + sort.Sort(sortPriorityNodeSiblings(*tmp)) + for i := len(*tmp) - 1; i >= 0; i-- { + (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids + } + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false +} + +type sortPriorityNodeSiblings []*priorityNode + +func (z sortPriorityNodeSiblings) Len() int { return len(z) } +func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } +func (z sortPriorityNodeSiblings) Less(i, k int) bool { + // Prefer the subtree that has sent fewer bytes relative to its weight. + // See sections 5.3.2 and 5.3.4. + wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) + wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) + if bi == 0 && bk == 0 { + return wi >= wk + } + if bk == 0 { + return false + } + return bi/bk <= wi/wk +} + +type priorityWriteScheduler struct { + // root is the root of the priority tree, where root.id = 0. + // The root queues control frames that are not associated with any stream. + root priorityNode + + // nodes maps stream ids to priority tree nodes. + nodes map[uint32]*priorityNode + + // maxID is the maximum stream id in nodes. + maxID uint32 + + // lists of nodes that have been closed or are idle, but are kept in + // the tree for improved prioritization. When the lengths exceed either + // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. + closedNodes, idleNodes []*priorityNode + + // From the config. + maxClosedNodesInTree int + maxIdleNodesInTree int + writeThrottleLimit int32 + enableWriteThrottle bool + + // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. + tmp []*priorityNode + + // pool of empty queues for reuse. 
+ queuePool writeQueuePool +} + +func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // The stream may be currently idle but cannot be opened or closed. + if curr := ws.nodes[streamID]; curr != nil { + if curr.state != priorityNodeIdle { + panic(fmt.Sprintf("stream %d already opened", streamID)) + } + curr.state = priorityNodeOpen + return + } + + // RFC 7540, Section 5.3.5: + // "All streams are initially assigned a non-exclusive dependency on stream 0x0. + // Pushed streams initially depend on their associated stream. In both cases, + // streams are assigned a default weight of 16." + parent := ws.nodes[options.PusherID] + if parent == nil { + parent = &ws.root + } + n := &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeOpen, + } + n.setParent(parent) + ws.nodes[streamID] = n + if streamID > ws.maxID { + ws.maxID = streamID + } +} + +func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { + if streamID == 0 { + panic("violation of WriteScheduler interface: cannot close stream 0") + } + if ws.nodes[streamID] == nil { + panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) + } + if ws.nodes[streamID].state != priorityNodeOpen { + panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) + } + + n := ws.nodes[streamID] + n.state = priorityNodeClosed + n.addBytes(-n.bytes) + + q := n.q + ws.queuePool.put(&q) + n.q.s = nil + if ws.maxClosedNodesInTree > 0 { + ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) + } else { + ws.removeNode(n) + } +} + +func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + if streamID == 0 { + panic("adjustPriority on root") + } + + // If streamID does not exist, there are two cases: + // - A closed stream that has been removed (this will have ID <= maxID) + // - An idle stream that is being used for 
"grouping" (this will have ID > maxID) + n := ws.nodes[streamID] + if n == nil { + if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { + return + } + ws.maxID = streamID + n = &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeIdle, + } + n.setParent(&ws.root) + ws.nodes[streamID] = n + ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) + } + + // Section 5.3.1: A dependency on a stream that is not currently in the tree + // results in that stream being given a default priority (Section 5.3.5). + parent := ws.nodes[priority.StreamDep] + if parent == nil { + n.setParent(&ws.root) + n.weight = priorityDefaultWeight + return + } + + // Ignore if the client tries to make a node its own parent. + if n == parent { + return + } + + // Section 5.3.3: + // "If a stream is made dependent on one of its own dependencies, the + // formerly dependent stream is first moved to be dependent on the + // reprioritized stream's previous parent. The moved dependency retains + // its weight." + // + // That is: if parent depends on n, move parent to depend on n.parent. + for x := parent.parent; x != nil; x = x.parent { + if x == n { + parent.setParent(n.parent) + break + } + } + + // Section 5.3.3: The exclusive flag causes the stream to become the sole + // dependency of its parent stream, causing other dependencies to become + // dependent on the exclusive stream. + if priority.Exclusive { + k := parent.kids + for k != nil { + next := k.next + if k != n { + k.setParent(n) + } + k = next + } + } + + n.setParent(parent) + n.weight = priority.Weight +} + +func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { + var n *priorityNode + if id := wr.StreamID(); id == 0 { + n = &ws.root + } else { + n = ws.nodes[id] + if n == nil { + // id is an idle or closed stream. wr should not be a HEADERS or + // DATA frame. However, wr can be a RST_STREAM. 
In this case, we + // push wr onto the root, rather than creating a new priorityNode, + // since RST_STREAM is tiny and the stream's priority is unknown + // anyway. See issue #17919. + if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + n = &ws.root + } + } + n.q.push(wr) +} + +func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { + ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { + limit := int32(math.MaxInt32) + if openParent { + limit = ws.writeThrottleLimit + } + wr, ok = n.q.consume(limit) + if !ok { + return false + } + n.addBytes(int64(wr.DataSize())) + // If B depends on A and B continuously has data available but A + // does not, gradually increase the throttling limit to allow B to + // steal more and more bandwidth from A. + if openParent { + ws.writeThrottleLimit += 1024 + if ws.writeThrottleLimit < 0 { + ws.writeThrottleLimit = math.MaxInt32 + } + } else if ws.enableWriteThrottle { + ws.writeThrottleLimit = 1024 + } + return true + }) + return wr, ok +} + +func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { + if maxSize == 0 { + return + } + if len(*list) == maxSize { + // Remove the oldest node, then shift left. + ws.removeNode((*list)[0]) + x := (*list)[1:] + copy(*list, x) + *list = (*list)[:len(x)] + } + *list = append(*list, n) +} + +func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { + for k := n.kids; k != nil; k = k.next { + k.setParent(n.parent) + } + n.setParent(nil) + delete(ws.nodes, n.id) +} diff --git a/fn/vendor/golang.org/x/net/http2/writesched_priority_test.go b/fn/vendor/golang.org/x/net/http2/writesched_priority_test.go new file mode 100644 index 000000000..f2b535a2c --- /dev/null +++ b/fn/vendor/golang.org/x/net/http2/writesched_priority_test.go @@ -0,0 +1,541 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "fmt" + "sort" + "testing" +) + +func defaultPriorityWriteScheduler() *priorityWriteScheduler { + return NewPriorityWriteScheduler(nil).(*priorityWriteScheduler) +} + +func checkPriorityWellFormed(ws *priorityWriteScheduler) error { + for id, n := range ws.nodes { + if id != n.id { + return fmt.Errorf("bad ws.nodes: ws.nodes[%d] = %d", id, n.id) + } + if n.parent == nil { + if n.next != nil || n.prev != nil { + return fmt.Errorf("bad node %d: nil parent but prev/next not nil", id) + } + continue + } + found := false + for k := n.parent.kids; k != nil; k = k.next { + if k.id == id { + found = true + break + } + } + if !found { + return fmt.Errorf("bad node %d: not found in parent %d kids list", id, n.parent.id) + } + } + return nil +} + +func fmtTree(ws *priorityWriteScheduler, fmtNode func(*priorityNode) string) string { + var ids []int + for _, n := range ws.nodes { + ids = append(ids, int(n.id)) + } + sort.Ints(ids) + + var buf bytes.Buffer + for _, id := range ids { + if buf.Len() != 0 { + buf.WriteString(" ") + } + if id == 0 { + buf.WriteString(fmtNode(&ws.root)) + } else { + buf.WriteString(fmtNode(ws.nodes[uint32(id)])) + } + } + return buf.String() +} + +func fmtNodeParentSkipRoot(n *priorityNode) string { + switch { + case n.id == 0: + return "" + case n.parent == nil: + return fmt.Sprintf("%d{parent:nil}", n.id) + default: + return fmt.Sprintf("%d{parent:%d}", n.id, n.parent.id) + } +} + +func fmtNodeWeightParentSkipRoot(n *priorityNode) string { + switch { + case n.id == 0: + return "" + case n.parent == nil: + return fmt.Sprintf("%d{weight:%d,parent:nil}", n.id, n.weight) + default: + return fmt.Sprintf("%d{weight:%d,parent:%d}", n.id, n.weight, n.parent.id) + } +} + +func TestPriorityTwoStreams(t *testing.T) { + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, 
OpenStreamOptions{}) + + want := "1{weight:15,parent:0} 2{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After open\ngot %q\nwant %q", got, want) + } + + // Move 1's parent to 2. + ws.AdjustStream(1, PriorityParam{ + StreamDep: 2, + Weight: 32, + Exclusive: false, + }) + want = "1{weight:32,parent:2} 2{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityAdjustExclusiveZero(t *testing.T) { + // 1, 2, and 3 are all children of the 0 stream. + // Exclusive reprioritization to any of the streams should bring + // the rest of the streams under the reprioritized stream. + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{}) + ws.OpenStream(3, OpenStreamOptions{}) + + want := "1{weight:15,parent:0} 2{weight:15,parent:0} 3{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After open\ngot %q\nwant %q", got, want) + } + + ws.AdjustStream(2, PriorityParam{ + StreamDep: 0, + Weight: 20, + Exclusive: true, + }) + want = "1{weight:15,parent:2} 2{weight:20,parent:0} 3{weight:15,parent:2}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityAdjustOwnParent(t *testing.T) { + // Assigning a node as its own parent should have no effect. 
+ ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{}) + ws.AdjustStream(2, PriorityParam{ + StreamDep: 2, + Weight: 20, + Exclusive: true, + }) + want := "1{weight:15,parent:0} 2{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityClosedStreams(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{MaxClosedNodesInTree: 2}).(*priorityWriteScheduler) + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 2}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + + // Close the first three streams. We lose 1, but keep 2 and 3. + ws.CloseStream(1) + ws.CloseStream(2) + ws.CloseStream(3) + + want := "2{weight:15,parent:0} 3{weight:15,parent:2} 4{weight:15,parent:3}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After close\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } + + // Adding a stream as an exclusive child of 1 gives it default + // priorities, since 1 is gone. + ws.OpenStream(5, OpenStreamOptions{}) + ws.AdjustStream(5, PriorityParam{StreamDep: 1, Weight: 15, Exclusive: true}) + + // Adding a stream as an exclusive child of 2 should work, since 2 is not gone. 
+ ws.OpenStream(6, OpenStreamOptions{}) + ws.AdjustStream(6, PriorityParam{StreamDep: 2, Weight: 15, Exclusive: true}) + + want = "2{weight:15,parent:0} 3{weight:15,parent:6} 4{weight:15,parent:3} 5{weight:15,parent:0} 6{weight:15,parent:2}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After add streams\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityClosedStreamsDisabled(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{}).(*priorityWriteScheduler) + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 2}) + + // Close the first two streams. We keep only 3. + ws.CloseStream(1) + ws.CloseStream(2) + + want := "3{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After close\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityIdleStreams(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{MaxIdleNodesInTree: 2}).(*priorityWriteScheduler) + ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 15}) // idle + ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 15}) // idle + ws.AdjustStream(3, PriorityParam{StreamDep: 2, Weight: 20}) // idle + ws.OpenStream(4, OpenStreamOptions{}) + ws.OpenStream(5, OpenStreamOptions{}) + ws.OpenStream(6, OpenStreamOptions{}) + ws.AdjustStream(4, PriorityParam{StreamDep: 1, Weight: 15}) + ws.AdjustStream(5, PriorityParam{StreamDep: 2, Weight: 15}) + ws.AdjustStream(6, PriorityParam{StreamDep: 3, Weight: 15}) + + want := "2{weight:15,parent:0} 3{weight:20,parent:2} 4{weight:15,parent:0} 5{weight:15,parent:2} 6{weight:15,parent:3}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After open\ngot %q\nwant %q", got, want) + } 
+ if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityIdleStreamsDisabled(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{}).(*priorityWriteScheduler) + ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 15}) // idle + ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 15}) // idle + ws.AdjustStream(3, PriorityParam{StreamDep: 2, Weight: 20}) // idle + ws.OpenStream(4, OpenStreamOptions{}) + + want := "4{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After open\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPrioritySection531NonExclusive(t *testing.T) { + // Example from RFC 7540 Section 5.3.1. + // A,B,C,D = 1,2,3,4 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{}) + ws.AdjustStream(4, PriorityParam{ + StreamDep: 1, + Weight: 15, + Exclusive: false, + }) + want := "1{parent:0} 2{parent:1} 3{parent:1} 4{parent:1}" + if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPrioritySection531Exclusive(t *testing.T) { + // Example from RFC 7540 Section 5.3.1. 
+ // A,B,C,D = 1,2,3,4 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{}) + ws.AdjustStream(4, PriorityParam{ + StreamDep: 1, + Weight: 15, + Exclusive: true, + }) + want := "1{parent:0} 2{parent:4} 3{parent:4} 4{parent:1}" + if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func makeSection533Tree() *priorityWriteScheduler { + // Initial tree from RFC 7540 Section 5.3.3. + // A,B,C,D,E,F = 1,2,3,4,5,6 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(5, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(6, OpenStreamOptions{PusherID: 4}) + return ws +} + +func TestPrioritySection533NonExclusive(t *testing.T) { + // Example from RFC 7540 Section 5.3.3. 
+ // A,B,C,D,E,F = 1,2,3,4,5,6 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(5, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(6, OpenStreamOptions{PusherID: 4}) + ws.AdjustStream(1, PriorityParam{ + StreamDep: 4, + Weight: 15, + Exclusive: false, + }) + want := "1{parent:4} 2{parent:1} 3{parent:1} 4{parent:0} 5{parent:3} 6{parent:4}" + if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPrioritySection533Exclusive(t *testing.T) { + // Example from RFC 7540 Section 5.3.3. + // A,B,C,D,E,F = 1,2,3,4,5,6 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(5, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(6, OpenStreamOptions{PusherID: 4}) + ws.AdjustStream(1, PriorityParam{ + StreamDep: 4, + Weight: 15, + Exclusive: true, + }) + want := "1{parent:4} 2{parent:1} 3{parent:1} 4{parent:0} 5{parent:3} 6{parent:1}" + if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func checkPopAll(ws WriteScheduler, order []uint32) error { + for k, id := range order { + wr, ok := ws.Pop() + if !ok { + return fmt.Errorf("Pop[%d]: got ok=false, want %d (order=%v)", k, id, order) + } + if got := wr.StreamID(); got != id { + return fmt.Errorf("Pop[%d]: got %v, want %d (order=%v)", k, got, id, order) + } + } + wr, ok := ws.Pop() + if ok { + return fmt.Errorf("Pop[%d]: got %v, want ok=false 
(order=%v)", len(order), wr.StreamID(), order) + } + return nil +} + +func TestPriorityPopFrom533Tree(t *testing.T) { + ws := makeSection533Tree() + + ws.Push(makeWriteHeadersRequest(3 /*C*/)) + ws.Push(makeWriteNonStreamRequest()) + ws.Push(makeWriteHeadersRequest(5 /*E*/)) + ws.Push(makeWriteHeadersRequest(1 /*A*/)) + t.Log("tree:", fmtTree(ws, fmtNodeParentSkipRoot)) + + if err := checkPopAll(ws, []uint32{0 /*NonStream*/, 1, 3, 5}); err != nil { + t.Error(err) + } +} + +func TestPriorityPopFromLinearTree(t *testing.T) { + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 2}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + + ws.Push(makeWriteHeadersRequest(3)) + ws.Push(makeWriteHeadersRequest(4)) + ws.Push(makeWriteHeadersRequest(1)) + ws.Push(makeWriteHeadersRequest(2)) + ws.Push(makeWriteNonStreamRequest()) + ws.Push(makeWriteNonStreamRequest()) + t.Log("tree:", fmtTree(ws, fmtNodeParentSkipRoot)) + + if err := checkPopAll(ws, []uint32{0, 0 /*NonStreams*/, 1, 2, 3, 4}); err != nil { + t.Error(err) + } +} + +func TestPriorityFlowControl(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ThrottleOutOfOrderWrites: false}) + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + + sc := &serverConn{maxFrameSize: 16} + st1 := &stream{id: 1, sc: sc} + st2 := &stream{id: 2, sc: sc} + + ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 16), false}, st1, nil}) + ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 16), false}, st2, nil}) + ws.AdjustStream(2, PriorityParam{StreamDep: 1}) + + // No flow-control bytes available. + if wr, ok := ws.Pop(); ok { + t.Fatalf("Pop(limited by flow control)=%v,true, want false", wr) + } + + // Add enough flow-control bytes to write st2 in two Pop calls. + // Should write data from st2 even though it's lower priority than st1. 
+ for i := 1; i <= 2; i++ { + st2.flow.add(8) + wr, ok := ws.Pop() + if !ok { + t.Fatalf("Pop(%d)=false, want true", i) + } + if got, want := wr.DataSize(), 8; got != want { + t.Fatalf("Pop(%d)=%d bytes, want %d bytes", i, got, want) + } + } +} + +func TestPriorityThrottleOutOfOrderWrites(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ThrottleOutOfOrderWrites: true}) + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + + sc := &serverConn{maxFrameSize: 4096} + st1 := &stream{id: 1, sc: sc} + st2 := &stream{id: 2, sc: sc} + st1.flow.add(4096) + st2.flow.add(4096) + ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 4096), false}, st2, nil}) + ws.AdjustStream(2, PriorityParam{StreamDep: 1}) + + // We have enough flow-control bytes to write st2 in a single Pop call. + // However, due to out-of-order write throttling, the first call should + // only write 1KB. + wr, ok := ws.Pop() + if !ok { + t.Fatalf("Pop(st2.first)=false, want true") + } + if got, want := wr.StreamID(), uint32(2); got != want { + t.Fatalf("Pop(st2.first)=stream %d, want stream %d", got, want) + } + if got, want := wr.DataSize(), 1024; got != want { + t.Fatalf("Pop(st2.first)=%d bytes, want %d bytes", got, want) + } + + // Now add data on st1. This should take precedence. + ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 4096), false}, st1, nil}) + wr, ok = ws.Pop() + if !ok { + t.Fatalf("Pop(st1)=false, want true") + } + if got, want := wr.StreamID(), uint32(1); got != want { + t.Fatalf("Pop(st1)=stream %d, want stream %d", got, want) + } + if got, want := wr.DataSize(), 4096; got != want { + t.Fatalf("Pop(st1)=%d bytes, want %d bytes", got, want) + } + + // Should go back to writing 1KB from st2. 
+ wr, ok = ws.Pop() + if !ok { + t.Fatalf("Pop(st2.last)=false, want true") + } + if got, want := wr.StreamID(), uint32(2); got != want { + t.Fatalf("Pop(st2.last)=stream %d, want stream %d", got, want) + } + if got, want := wr.DataSize(), 1024; got != want { + t.Fatalf("Pop(st2.last)=%d bytes, want %d bytes", got, want) + } +} + +func TestPriorityWeights(t *testing.T) { + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{}) + + sc := &serverConn{maxFrameSize: 8} + st1 := &stream{id: 1, sc: sc} + st2 := &stream{id: 2, sc: sc} + st1.flow.add(40) + st2.flow.add(40) + + ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 40), false}, st1, nil}) + ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 40), false}, st2, nil}) + ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 34}) + ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 9}) + + // st1 gets 3.5x the bandwidth of st2 (3.5 = (34+1)/(9+1)). + // The maximum frame size is 8 bytes. 
The write sequence should be: + // st1, total bytes so far is (st1=8, st=0) + // st2, total bytes so far is (st1=8, st=8) + // st1, total bytes so far is (st1=16, st=8) + // st1, total bytes so far is (st1=24, st=8) // 3x bandwidth + // st1, total bytes so far is (st1=32, st=8) // 4x bandwidth + // st2, total bytes so far is (st1=32, st=16) // 2x bandwidth + // st1, total bytes so far is (st1=40, st=16) + // st2, total bytes so far is (st1=40, st=24) + // st2, total bytes so far is (st1=40, st=32) + // st2, total bytes so far is (st1=40, st=40) + if err := checkPopAll(ws, []uint32{1, 2, 1, 1, 1, 2, 1, 2, 2, 2}); err != nil { + t.Error(err) + } +} + +func TestPriorityRstStreamOnNonOpenStreams(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ + MaxClosedNodesInTree: 0, + MaxIdleNodesInTree: 0, + }) + ws.OpenStream(1, OpenStreamOptions{}) + ws.CloseStream(1) + ws.Push(FrameWriteRequest{write: streamError(1, ErrCodeProtocol)}) + ws.Push(FrameWriteRequest{write: streamError(2, ErrCodeProtocol)}) + + if err := checkPopAll(ws, []uint32{1, 2}); err != nil { + t.Error(err) + } +} diff --git a/fn/vendor/golang.org/x/net/http2/writesched_random.go b/fn/vendor/golang.org/x/net/http2/writesched_random.go new file mode 100644 index 000000000..36d7919f1 --- /dev/null +++ b/fn/vendor/golang.org/x/net/http2/writesched_random.go @@ -0,0 +1,72 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "math" + +// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2 +// priorities. Control frames like SETTINGS and PING are written before DATA +// frames, but if no control frames are queued and multiple streams have queued +// HEADERS or DATA frames, Pop selects a ready stream arbitrarily. 
+func NewRandomWriteScheduler() WriteScheduler { + return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)} +} + +type randomWriteScheduler struct { + // zero are frames not associated with a specific stream. + zero writeQueue + + // sq contains the stream-specific queues, keyed by stream ID. + // When a stream is idle or closed, it's deleted from the map. + sq map[uint32]*writeQueue + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // no-op: idle streams are not tracked +} + +func (ws *randomWriteScheduler) CloseStream(streamID uint32) { + q, ok := ws.sq[streamID] + if !ok { + return + } + delete(ws.sq, streamID) + ws.queuePool.put(q) +} + +func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + // no-op: priorities are ignored +} + +func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { + id := wr.StreamID() + if id == 0 { + ws.zero.push(wr) + return + } + q, ok := ws.sq[id] + if !ok { + q = ws.queuePool.get() + ws.sq[id] = q + } + q.push(wr) +} + +func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { + // Control frames first. + if !ws.zero.empty() { + return ws.zero.shift(), true + } + // Iterate over all non-idle streams until finding one that can be consumed. + for _, q := range ws.sq { + if wr, ok := q.consume(math.MaxInt32); ok { + return wr, true + } + } + return FrameWriteRequest{}, false +} diff --git a/fn/vendor/golang.org/x/net/http2/writesched_random_test.go b/fn/vendor/golang.org/x/net/http2/writesched_random_test.go new file mode 100644 index 000000000..3bf4aa36a --- /dev/null +++ b/fn/vendor/golang.org/x/net/http2/writesched_random_test.go @@ -0,0 +1,44 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package http2 + +import "testing" + +func TestRandomScheduler(t *testing.T) { + ws := NewRandomWriteScheduler() + ws.Push(makeWriteHeadersRequest(3)) + ws.Push(makeWriteHeadersRequest(4)) + ws.Push(makeWriteHeadersRequest(1)) + ws.Push(makeWriteHeadersRequest(2)) + ws.Push(makeWriteNonStreamRequest()) + ws.Push(makeWriteNonStreamRequest()) + + // Pop all frames. Should get the non-stream requests first, + // followed by the stream requests in any order. + var order []FrameWriteRequest + for { + wr, ok := ws.Pop() + if !ok { + break + } + order = append(order, wr) + } + t.Logf("got frames: %v", order) + if len(order) != 6 { + t.Fatalf("got %d frames, expected 6", len(order)) + } + if order[0].StreamID() != 0 || order[1].StreamID() != 0 { + t.Fatal("expected non-stream frames first", order[0], order[1]) + } + got := make(map[uint32]bool) + for _, wr := range order[2:] { + got[wr.StreamID()] = true + } + for id := uint32(1); id <= 4; id++ { + if !got[id] { + t.Errorf("frame not found for stream %d", id) + } + } +} diff --git a/fn/vendor/golang.org/x/net/http2/writesched_test.go b/fn/vendor/golang.org/x/net/http2/writesched_test.go new file mode 100644 index 000000000..0807056bc --- /dev/null +++ b/fn/vendor/golang.org/x/net/http2/writesched_test.go @@ -0,0 +1,125 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package http2 + +import ( + "fmt" + "math" + "reflect" + "testing" +) + +func makeWriteNonStreamRequest() FrameWriteRequest { + return FrameWriteRequest{writeSettingsAck{}, nil, nil} +} + +func makeWriteHeadersRequest(streamID uint32) FrameWriteRequest { + st := &stream{id: streamID} + return FrameWriteRequest{&writeResHeaders{streamID: streamID, httpResCode: 200}, st, nil} +} + +func checkConsume(wr FrameWriteRequest, nbytes int32, want []FrameWriteRequest) error { + consumed, rest, n := wr.Consume(nbytes) + var wantConsumed, wantRest FrameWriteRequest + switch len(want) { + case 0: + case 1: + wantConsumed = want[0] + case 2: + wantConsumed = want[0] + wantRest = want[1] + } + if !reflect.DeepEqual(consumed, wantConsumed) || !reflect.DeepEqual(rest, wantRest) || n != len(want) { + return fmt.Errorf("got %v, %v, %v\nwant %v, %v, %v", consumed, rest, n, wantConsumed, wantRest, len(want)) + } + return nil +} + +func TestFrameWriteRequestNonData(t *testing.T) { + wr := makeWriteNonStreamRequest() + if got, want := wr.DataSize(), 0; got != want { + t.Errorf("DataSize: got %v, want %v", got, want) + } + + // Non-DATA frames are always consumed whole. + if err := checkConsume(wr, 0, []FrameWriteRequest{wr}); err != nil { + t.Errorf("Consume:\n%v", err) + } +} + +func TestFrameWriteRequestData(t *testing.T) { + st := &stream{ + id: 1, + sc: &serverConn{maxFrameSize: 16}, + } + const size = 32 + wr := FrameWriteRequest{&writeData{st.id, make([]byte, size), true}, st, make(chan error)} + if got, want := wr.DataSize(), size; got != want { + t.Errorf("DataSize: got %v, want %v", got, want) + } + + // No flow-control bytes available: cannot consume anything. + if err := checkConsume(wr, math.MaxInt32, []FrameWriteRequest{}); err != nil { + t.Errorf("Consume(limited by flow control):\n%v", err) + } + + // Add enough flow-control bytes to consume the entire frame, + // but we're now restricted by st.sc.maxFrameSize. 
+ st.flow.add(size) + want := []FrameWriteRequest{ + { + write: &writeData{st.id, make([]byte, st.sc.maxFrameSize), false}, + stream: st, + done: nil, + }, + { + write: &writeData{st.id, make([]byte, size-st.sc.maxFrameSize), true}, + stream: st, + done: wr.done, + }, + } + if err := checkConsume(wr, math.MaxInt32, want); err != nil { + t.Errorf("Consume(limited by maxFrameSize):\n%v", err) + } + rest := want[1] + + // Consume 8 bytes from the remaining frame. + want = []FrameWriteRequest{ + { + write: &writeData{st.id, make([]byte, 8), false}, + stream: st, + done: nil, + }, + { + write: &writeData{st.id, make([]byte, size-st.sc.maxFrameSize-8), true}, + stream: st, + done: wr.done, + }, + } + if err := checkConsume(rest, 8, want); err != nil { + t.Errorf("Consume(8):\n%v", err) + } + rest = want[1] + + // Consume all remaining bytes. + want = []FrameWriteRequest{ + { + write: &writeData{st.id, make([]byte, size-st.sc.maxFrameSize-8), true}, + stream: st, + done: wr.done, + }, + } + if err := checkConsume(rest, math.MaxInt32, want); err != nil { + t.Errorf("Consume(remainder):\n%v", err) + } +} + +func TestFrameWriteRequest_StreamID(t *testing.T) { + const streamID = 123 + wr := FrameWriteRequest{write: streamError(streamID, ErrCodeNo)} + if got := wr.StreamID(); got != streamID { + t.Errorf("FrameWriteRequest(StreamError) = %v; want %v", got, streamID) + } +} diff --git a/fn/vendor/golang.org/x/net/http2/z_spec_test.go b/fn/vendor/golang.org/x/net/http2/z_spec_test.go index e0f420a18..610b2cdbc 100644 --- a/fn/vendor/golang.org/x/net/http2/z_spec_test.go +++ b/fn/vendor/golang.org/x/net/http2/z_spec_test.go @@ -37,7 +37,7 @@ func loadSpec() { } // covers marks all sentences for section sec in defaultSpecCoverage. Sentences not -// "covered" will be included in report outputed by TestSpecCoverage. +// "covered" will be included in report outputted by TestSpecCoverage. 
func covers(sec, sentences string) { loadSpecOnce.Do(loadSpec) defaultSpecCoverage.cover(sec, sentences) @@ -311,7 +311,7 @@ func TestSpecCoverage(t *testing.T) { t.Errorf("\tSECTION %s: %s", p.section, p.sentence) } - t.Logf("%d/%d (%d%%) sentances covered", complete, total, (complete/total)*100) + t.Logf("%d/%d (%d%%) sentences covered", complete, total, (complete/total)*100) } func attrSig(se xml.StartElement) string { diff --git a/fn/vendor/golang.org/x/net/icmp/echo.go b/fn/vendor/golang.org/x/net/icmp/echo.go index 8943eab34..e6f15efd7 100644 --- a/fn/vendor/golang.org/x/net/icmp/echo.go +++ b/fn/vendor/golang.org/x/net/icmp/echo.go @@ -1,9 +1,11 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package icmp +import "encoding/binary" + // An Echo represents an ICMP echo request or reply message body. type Echo struct { ID int // identifier @@ -22,8 +24,8 @@ func (p *Echo) Len(proto int) int { // Marshal implements the Marshal method of MessageBody interface. 
func (p *Echo) Marshal(proto int) ([]byte, error) { b := make([]byte, 4+len(p.Data)) - b[0], b[1] = byte(p.ID>>8), byte(p.ID) - b[2], b[3] = byte(p.Seq>>8), byte(p.Seq) + binary.BigEndian.PutUint16(b[:2], uint16(p.ID)) + binary.BigEndian.PutUint16(b[2:4], uint16(p.Seq)) copy(b[4:], p.Data) return b, nil } @@ -34,7 +36,7 @@ func parseEcho(proto int, b []byte) (MessageBody, error) { if bodyLen < 4 { return nil, errMessageTooShort } - p := &Echo{ID: int(b[0])<<8 | int(b[1]), Seq: int(b[2])<<8 | int(b[3])} + p := &Echo{ID: int(binary.BigEndian.Uint16(b[:2])), Seq: int(binary.BigEndian.Uint16(b[2:4]))} if bodyLen > 4 { p.Data = make([]byte, bodyLen-4) copy(p.Data, b[4:]) diff --git a/fn/vendor/golang.org/x/net/icmp/endpoint.go b/fn/vendor/golang.org/x/net/icmp/endpoint.go index 0213d1a13..a68bfb010 100644 --- a/fn/vendor/golang.org/x/net/icmp/endpoint.go +++ b/fn/vendor/golang.org/x/net/icmp/endpoint.go @@ -51,7 +51,7 @@ func (c *PacketConn) ReadFrom(b []byte) (int, net.Addr, error) { } // Please be informed that ipv4.NewPacketConn enables // IP_STRIPHDR option by default on Darwin. - // See golang.org/issue/9395 for futher information. + // See golang.org/issue/9395 for further information. if runtime.GOOS == "darwin" && c.p4 != nil { n, _, peer, err := c.p4.ReadFrom(b) return n, peer, err diff --git a/fn/vendor/golang.org/x/net/icmp/extension.go b/fn/vendor/golang.org/x/net/icmp/extension.go index b47529b17..402a7514b 100644 --- a/fn/vendor/golang.org/x/net/icmp/extension.go +++ b/fn/vendor/golang.org/x/net/icmp/extension.go @@ -4,6 +4,8 @@ package icmp +import "encoding/binary" + // An Extension represents an ICMP extension. type Extension interface { // Len returns the length of ICMP extension. 
@@ -19,7 +21,7 @@ const extensionVersion = 2 func validExtensionHeader(b []byte) bool { v := int(b[0]&0xf0) >> 4 - s := uint16(b[2])<<8 | uint16(b[3]) + s := binary.BigEndian.Uint16(b[2:4]) if s != 0 { s = checksum(b) } @@ -63,7 +65,7 @@ func parseExtensions(b []byte, l int) ([]Extension, int, error) { } var exts []Extension for b = b[l+4:]; len(b) >= 4; { - ol := int(b[0])<<8 | int(b[1]) + ol := int(binary.BigEndian.Uint16(b[:2])) if 4 > ol || ol > len(b) { break } diff --git a/fn/vendor/golang.org/x/net/icmp/interface.go b/fn/vendor/golang.org/x/net/icmp/interface.go index c7bf8dd1a..78b5b98bf 100644 --- a/fn/vendor/golang.org/x/net/icmp/interface.go +++ b/fn/vendor/golang.org/x/net/icmp/interface.go @@ -5,6 +5,7 @@ package icmp import ( + "encoding/binary" "net" "strings" @@ -89,7 +90,7 @@ func (ifi *InterfaceInfo) Marshal(proto int) ([]byte, error) { } func (ifi *InterfaceInfo) marshal(proto int, b []byte, attrs, l int) error { - b[0], b[1] = byte(l>>8), byte(l) + binary.BigEndian.PutUint16(b[:2], uint16(l)) b[2], b[3] = classInterfaceInfo, byte(ifi.Type) for b = b[4:]; len(b) > 0 && attrs != 0; { switch { @@ -111,7 +112,7 @@ func (ifi *InterfaceInfo) marshal(proto int, b []byte, attrs, l int) error { } func (ifi *InterfaceInfo) marshalIfIndex(proto int, b []byte) []byte { - b[0], b[1], b[2], b[3] = byte(ifi.Interface.Index>>24), byte(ifi.Interface.Index>>16), byte(ifi.Interface.Index>>8), byte(ifi.Interface.Index) + binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.Index)) return b[4:] } @@ -119,18 +120,18 @@ func (ifi *InterfaceInfo) parseIfIndex(b []byte) ([]byte, error) { if len(b) < 4 { return nil, errMessageTooShort } - ifi.Interface.Index = int(b[0])<<24 | int(b[1])<<16 | int(b[2])<<8 | int(b[3]) + ifi.Interface.Index = int(binary.BigEndian.Uint32(b[:4])) return b[4:], nil } func (ifi *InterfaceInfo) marshalIPAddr(proto int, b []byte) []byte { switch proto { case iana.ProtocolICMP: - b[0], b[1] = byte(afiIPv4>>8), byte(afiIPv4) + 
binary.BigEndian.PutUint16(b[:2], uint16(afiIPv4)) copy(b[4:4+net.IPv4len], ifi.Addr.IP.To4()) b = b[4+net.IPv4len:] case iana.ProtocolIPv6ICMP: - b[0], b[1] = byte(afiIPv6>>8), byte(afiIPv6) + binary.BigEndian.PutUint16(b[:2], uint16(afiIPv6)) copy(b[4:4+net.IPv6len], ifi.Addr.IP.To16()) b = b[4+net.IPv6len:] } @@ -141,7 +142,7 @@ func (ifi *InterfaceInfo) parseIPAddr(b []byte) ([]byte, error) { if len(b) < 4 { return nil, errMessageTooShort } - afi := int(b[0])<<8 | int(b[1]) + afi := int(binary.BigEndian.Uint16(b[:2])) b = b[4:] switch afi { case afiIPv4: @@ -184,7 +185,7 @@ func (ifi *InterfaceInfo) parseName(b []byte) ([]byte, error) { } func (ifi *InterfaceInfo) marshalMTU(proto int, b []byte) []byte { - b[0], b[1], b[2], b[3] = byte(ifi.Interface.MTU>>24), byte(ifi.Interface.MTU>>16), byte(ifi.Interface.MTU>>8), byte(ifi.Interface.MTU) + binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.MTU)) return b[4:] } @@ -192,7 +193,7 @@ func (ifi *InterfaceInfo) parseMTU(b []byte) ([]byte, error) { if len(b) < 4 { return nil, errMessageTooShort } - ifi.Interface.MTU = int(b[0])<<24 | int(b[1])<<16 | int(b[2])<<8 | int(b[3]) + ifi.Interface.MTU = int(binary.BigEndian.Uint32(b[:4])) return b[4:], nil } diff --git a/fn/vendor/golang.org/x/net/icmp/ipv4.go b/fn/vendor/golang.org/x/net/icmp/ipv4.go index a252d730e..ffc66ed4d 100644 --- a/fn/vendor/golang.org/x/net/icmp/ipv4.go +++ b/fn/vendor/golang.org/x/net/icmp/ipv4.go @@ -5,13 +5,15 @@ package icmp import ( + "encoding/binary" "net" "runtime" - "unsafe" + "golang.org/x/net/internal/socket" "golang.org/x/net/ipv4" ) +// freebsdVersion is set in sys_freebsd.go. // See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. 
var freebsdVersion uint32 @@ -29,27 +31,25 @@ func ParseIPv4Header(b []byte) (*ipv4.Header, error) { Version: int(b[0] >> 4), Len: hdrlen, TOS: int(b[1]), - ID: int(b[4])<<8 | int(b[5]), - FragOff: int(b[6])<<8 | int(b[7]), + ID: int(binary.BigEndian.Uint16(b[4:6])), + FragOff: int(binary.BigEndian.Uint16(b[6:8])), TTL: int(b[8]), Protocol: int(b[9]), - Checksum: int(b[10])<<8 | int(b[11]), + Checksum: int(binary.BigEndian.Uint16(b[10:12])), Src: net.IPv4(b[12], b[13], b[14], b[15]), Dst: net.IPv4(b[16], b[17], b[18], b[19]), } switch runtime.GOOS { case "darwin": - // TODO(mikio): fix potential misaligned memory access - h.TotalLen = int(*(*uint16)(unsafe.Pointer(&b[2:3][0]))) + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) case "freebsd": if freebsdVersion >= 1000000 { - h.TotalLen = int(b[2])<<8 | int(b[3]) + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) } else { - // TODO(mikio): fix potential misaligned memory access - h.TotalLen = int(*(*uint16)(unsafe.Pointer(&b[2:3][0]))) + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) } default: - h.TotalLen = int(b[2])<<8 | int(b[3]) + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) } h.Flags = ipv4.HeaderFlags(h.FragOff&0xe000) >> 13 h.FragOff = h.FragOff & 0x1fff diff --git a/fn/vendor/golang.org/x/net/icmp/ipv4_test.go b/fn/vendor/golang.org/x/net/icmp/ipv4_test.go index b05c69739..058953f43 100644 --- a/fn/vendor/golang.org/x/net/icmp/ipv4_test.go +++ b/fn/vendor/golang.org/x/net/icmp/ipv4_test.go @@ -5,33 +5,40 @@ package icmp import ( + "encoding/binary" "net" "reflect" "runtime" "testing" + "golang.org/x/net/internal/socket" "golang.org/x/net/ipv4" ) -var ( - wireHeaderFromKernel = [ipv4.HeaderLen]byte{ +type ipv4HeaderTest struct { + wireHeaderFromKernel [ipv4.HeaderLen]byte + wireHeaderFromTradBSDKernel [ipv4.HeaderLen]byte + Header *ipv4.Header +} + +var ipv4HeaderLittleEndianTest = ipv4HeaderTest{ + // TODO(mikio): Add platform dependent wire header formats when + // we support new 
platforms. + wireHeaderFromKernel: [ipv4.HeaderLen]byte{ 0x45, 0x01, 0xbe, 0xef, 0xca, 0xfe, 0x45, 0xdc, 0xff, 0x01, 0xde, 0xad, 172, 16, 254, 254, 192, 168, 0, 1, - } - wireHeaderFromTradBSDKernel = [ipv4.HeaderLen]byte{ + }, + wireHeaderFromTradBSDKernel: [ipv4.HeaderLen]byte{ 0x45, 0x01, 0xef, 0xbe, 0xca, 0xfe, 0x45, 0xdc, 0xff, 0x01, 0xde, 0xad, 172, 16, 254, 254, 192, 168, 0, 1, - } - // TODO(mikio): Add platform dependent wire header formats when - // we support new platforms. - - testHeader = &ipv4.Header{ + }, + Header: &ipv4.Header{ Version: ipv4.Version, Len: ipv4.HeaderLen, TOS: 1, @@ -44,28 +51,33 @@ var ( Checksum: 0xdead, Src: net.IPv4(172, 16, 254, 254), Dst: net.IPv4(192, 168, 0, 1), - } -) + }, +} func TestParseIPv4Header(t *testing.T) { + tt := &ipv4HeaderLittleEndianTest + if socket.NativeEndian != binary.LittleEndian { + t.Skip("no test for non-little endian machine yet") + } + var wh []byte switch runtime.GOOS { case "darwin": - wh = wireHeaderFromTradBSDKernel[:] + wh = tt.wireHeaderFromTradBSDKernel[:] case "freebsd": if freebsdVersion >= 1000000 { - wh = wireHeaderFromKernel[:] + wh = tt.wireHeaderFromKernel[:] } else { - wh = wireHeaderFromTradBSDKernel[:] + wh = tt.wireHeaderFromTradBSDKernel[:] } default: - wh = wireHeaderFromKernel[:] + wh = tt.wireHeaderFromKernel[:] } h, err := ParseIPv4Header(wh) if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(h, testHeader) { - t.Fatalf("got %#v; want %#v", h, testHeader) + if !reflect.DeepEqual(h, tt.Header) { + t.Fatalf("got %#v; want %#v", h, tt.Header) } } diff --git a/fn/vendor/golang.org/x/net/icmp/ipv6.go b/fn/vendor/golang.org/x/net/icmp/ipv6.go index 58eaa77d0..2e8cfeb13 100644 --- a/fn/vendor/golang.org/x/net/icmp/ipv6.go +++ b/fn/vendor/golang.org/x/net/icmp/ipv6.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/fn/vendor/golang.org/x/net/icmp/listen_posix.go b/fn/vendor/golang.org/x/net/icmp/listen_posix.go index b9f260796..7fac4f965 100644 --- a/fn/vendor/golang.org/x/net/icmp/listen_posix.go +++ b/fn/vendor/golang.org/x/net/icmp/listen_posix.go @@ -65,22 +65,24 @@ func ListenPacket(network, address string) (*PacketConn, error) { if err != nil { return nil, os.NewSyscallError("socket", err) } - defer syscall.Close(s) if runtime.GOOS == "darwin" && family == syscall.AF_INET { if err := syscall.SetsockoptInt(s, iana.ProtocolIP, sysIP_STRIPHDR, 1); err != nil { + syscall.Close(s) return nil, os.NewSyscallError("setsockopt", err) } } sa, err := sockaddr(family, address) if err != nil { + syscall.Close(s) return nil, err } if err := syscall.Bind(s, sa); err != nil { + syscall.Close(s) return nil, os.NewSyscallError("bind", err) } f := os.NewFile(uintptr(s), "datagram-oriented icmp") - defer f.Close() c, cerr = net.FilePacketConn(f) + f.Close() default: c, cerr = net.ListenPacket(network, address) } diff --git a/fn/vendor/golang.org/x/net/icmp/message.go b/fn/vendor/golang.org/x/net/icmp/message.go index a20adacd1..81140b0df 100644 --- a/fn/vendor/golang.org/x/net/icmp/message.go +++ b/fn/vendor/golang.org/x/net/icmp/message.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -14,6 +14,7 @@ package icmp // import "golang.org/x/net/icmp" import ( + "encoding/binary" "errors" "net" "syscall" @@ -23,6 +24,8 @@ import ( "golang.org/x/net/ipv6" ) +// BUG(mikio): This package is not implemented on NaCl and Plan 9. 
+ var ( errMessageTooShort = errors.New("message too short") errHeaderTooShort = errors.New("header too short") @@ -94,7 +97,7 @@ func (m *Message) Marshal(psh []byte) ([]byte, error) { return b, nil } off, l := 2*net.IPv6len, len(b)-len(psh) - b[off], b[off+1], b[off+2], b[off+3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + binary.BigEndian.PutUint32(b[off:off+4], uint32(l)) } s := checksum(b) // Place checksum back in header; using ^= avoids the @@ -128,7 +131,7 @@ func ParseMessage(proto int, b []byte) (*Message, error) { return nil, errMessageTooShort } var err error - m := &Message{Code: int(b[1]), Checksum: int(b[2])<<8 | int(b[3])} + m := &Message{Code: int(b[1]), Checksum: int(binary.BigEndian.Uint16(b[2:4]))} switch proto { case iana.ProtocolICMP: m.Type = ipv4.ICMPType(b[0]) diff --git a/fn/vendor/golang.org/x/net/icmp/messagebody.go b/fn/vendor/golang.org/x/net/icmp/messagebody.go index 2121a17be..2463730ae 100644 --- a/fn/vendor/golang.org/x/net/icmp/messagebody.go +++ b/fn/vendor/golang.org/x/net/icmp/messagebody.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/fn/vendor/golang.org/x/net/icmp/mpls.go b/fn/vendor/golang.org/x/net/icmp/mpls.go index 31bcfe8da..c31491748 100644 --- a/fn/vendor/golang.org/x/net/icmp/mpls.go +++ b/fn/vendor/golang.org/x/net/icmp/mpls.go @@ -4,6 +4,8 @@ package icmp +import "encoding/binary" + // A MPLSLabel represents a MPLS label stack entry. 
type MPLSLabel struct { Label int // label value @@ -40,7 +42,7 @@ func (ls *MPLSLabelStack) Marshal(proto int) ([]byte, error) { func (ls *MPLSLabelStack) marshal(proto int, b []byte) error { l := ls.Len(proto) - b[0], b[1] = byte(l>>8), byte(l) + binary.BigEndian.PutUint16(b[:2], uint16(l)) b[2], b[3] = classMPLSLabelStack, typeIncomingMPLSLabelStack off := 4 for _, ll := range ls.Labels { diff --git a/fn/vendor/golang.org/x/net/icmp/packettoobig.go b/fn/vendor/golang.org/x/net/icmp/packettoobig.go index 91d289b23..a1c9df7bf 100644 --- a/fn/vendor/golang.org/x/net/icmp/packettoobig.go +++ b/fn/vendor/golang.org/x/net/icmp/packettoobig.go @@ -4,6 +4,8 @@ package icmp +import "encoding/binary" + // A PacketTooBig represents an ICMP packet too big message body. type PacketTooBig struct { MTU int // maximum transmission unit of the nexthop link @@ -21,7 +23,7 @@ func (p *PacketTooBig) Len(proto int) int { // Marshal implements the Marshal method of MessageBody interface. func (p *PacketTooBig) Marshal(proto int) ([]byte, error) { b := make([]byte, 4+len(p.Data)) - b[0], b[1], b[2], b[3] = byte(p.MTU>>24), byte(p.MTU>>16), byte(p.MTU>>8), byte(p.MTU) + binary.BigEndian.PutUint32(b[:4], uint32(p.MTU)) copy(b[4:], p.Data) return b, nil } @@ -32,7 +34,7 @@ func parsePacketTooBig(proto int, b []byte) (MessageBody, error) { if bodyLen < 4 { return nil, errMessageTooShort } - p := &PacketTooBig{MTU: int(b[0])<<24 | int(b[1])<<16 | int(b[2])<<8 | int(b[3])} + p := &PacketTooBig{MTU: int(binary.BigEndian.Uint32(b[:4]))} if bodyLen > 4 { p.Data = make([]byte, bodyLen-4) copy(p.Data, b[4:]) diff --git a/fn/vendor/golang.org/x/net/icmp/paramprob.go b/fn/vendor/golang.org/x/net/icmp/paramprob.go index 05cc311eb..0a2548daa 100644 --- a/fn/vendor/golang.org/x/net/icmp/paramprob.go +++ b/fn/vendor/golang.org/x/net/icmp/paramprob.go @@ -4,7 +4,10 @@ package icmp -import "golang.org/x/net/internal/iana" +import ( + "encoding/binary" + "golang.org/x/net/internal/iana" +) // A ParamProb 
represents an ICMP parameter problem message body. type ParamProb struct { @@ -26,7 +29,7 @@ func (p *ParamProb) Len(proto int) int { func (p *ParamProb) Marshal(proto int) ([]byte, error) { if proto == iana.ProtocolIPv6ICMP { b := make([]byte, p.Len(proto)) - b[0], b[1], b[2], b[3] = byte(p.Pointer>>24), byte(p.Pointer>>16), byte(p.Pointer>>8), byte(p.Pointer) + binary.BigEndian.PutUint32(b[:4], uint32(p.Pointer)) copy(b[4:], p.Data) return b, nil } @@ -45,7 +48,7 @@ func parseParamProb(proto int, b []byte) (MessageBody, error) { } p := &ParamProb{} if proto == iana.ProtocolIPv6ICMP { - p.Pointer = uintptr(b[0])<<24 | uintptr(b[1])<<16 | uintptr(b[2])<<8 | uintptr(b[3]) + p.Pointer = uintptr(binary.BigEndian.Uint32(b[:4])) p.Data = make([]byte, len(b)-4) copy(p.Data, b[4:]) return p, nil diff --git a/fn/vendor/golang.org/x/net/icmp/ping_test.go b/fn/vendor/golang.org/x/net/icmp/ping_test.go index 4ec269284..3171dad11 100644 --- a/fn/vendor/golang.org/x/net/icmp/ping_test.go +++ b/fn/vendor/golang.org/x/net/icmp/ping_test.go @@ -10,6 +10,7 @@ import ( "net" "os" "runtime" + "sync" "testing" "time" @@ -164,3 +165,36 @@ func doPing(tt pingTest, seq int) error { return fmt.Errorf("got %+v from %v; want echo reply", rm, peer) } } + +func TestConcurrentNonPrivilegedListenPacket(t *testing.T) { + if testing.Short() { + t.Skip("avoid external network") + } + switch runtime.GOOS { + case "darwin": + case "linux": + t.Log("you may need to adjust the net.ipv4.ping_group_range kernel state") + default: + t.Skipf("not supported on %s", runtime.GOOS) + } + + network, address := "udp4", "127.0.0.1" + if !nettest.SupportsIPv4() { + network, address = "udp6", "::1" + } + const N = 1000 + var wg sync.WaitGroup + wg.Add(N) + for i := 0; i < N; i++ { + go func() { + defer wg.Done() + c, err := icmp.ListenPacket(network, address) + if err != nil { + t.Error(err) + return + } + c.Close() + }() + } + wg.Wait() +} diff --git a/fn/vendor/golang.org/x/net/idna/example_test.go 
b/fn/vendor/golang.org/x/net/idna/example_test.go new file mode 100644 index 000000000..948f6eb20 --- /dev/null +++ b/fn/vendor/golang.org/x/net/idna/example_test.go @@ -0,0 +1,70 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna_test + +import ( + "fmt" + + "golang.org/x/net/idna" +) + +func ExampleProfile() { + // Raw Punycode has no restrictions and does no mappings. + fmt.Println(idna.ToASCII("")) + fmt.Println(idna.ToASCII("*.faß.com")) + fmt.Println(idna.Punycode.ToASCII("*.faß.com")) + + // Rewrite IDN for lookup. This (currently) uses transitional mappings to + // find a balance between IDNA2003 and IDNA2008 compatibility. + fmt.Println(idna.Lookup.ToASCII("")) + fmt.Println(idna.Lookup.ToASCII("www.faß.com")) + + // Convert an IDN to ASCII for registration purposes. This changes the + // encoding, but reports an error if the input was illformed. + fmt.Println(idna.Registration.ToASCII("")) + fmt.Println(idna.Registration.ToASCII("www.faß.com")) + + // Output: + // + // *.xn--fa-hia.com + // *.xn--fa-hia.com + // + // www.fass.com + // idna: invalid label "" + // www.xn--fa-hia.com +} + +func ExampleNew() { + var p *idna.Profile + + // Raw Punycode has no restrictions and does no mappings. + p = idna.New() + fmt.Println(p.ToASCII("*.faß.com")) + + // Do mappings. Note that star is not allowed in a DNS lookup. + p = idna.New( + idna.MapForLookup(), + idna.Transitional(true)) // Map ß -> ss + fmt.Println(p.ToASCII("*.faß.com")) + + // Lookup for registration. Also does not allow '*'. + p = idna.New(idna.ValidateForRegistration()) + fmt.Println(p.ToUnicode("*.faß.com")) + + // Set up a profile maps for lookup, but allows wild cards. 
+ p = idna.New( + idna.MapForLookup(), + idna.Transitional(true), // Map ß -> ss + idna.StrictDomainName(false)) // Set more permissive ASCII rules. + fmt.Println(p.ToASCII("*.faß.com")) + + // Output: + // *.xn--fa-hia.com + // *.fass.com idna: disallowed rune U+002A + // *.faß.com idna: disallowed rune U+002A + // *.fass.com +} diff --git a/fn/vendor/golang.org/x/net/idna/idna.go b/fn/vendor/golang.org/x/net/idna/idna.go index 3daa8979e..eb2473507 100644 --- a/fn/vendor/golang.org/x/net/idna/idna.go +++ b/fn/vendor/golang.org/x/net/idna/idna.go @@ -1,61 +1,673 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package idna implements IDNA2008 (Internationalized Domain Names for -// Applications), defined in RFC 5890, RFC 5891, RFC 5892, RFC 5893 and -// RFC 5894. +// Package idna implements IDNA2008 using the compatibility processing +// defined by UTS (Unicode Technical Standard) #46, which defines a standard to +// deal with the transition from IDNA2003. +// +// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC +// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894. +// UTS #46 is defined in http://www.unicode.org/reports/tr46. +// See http://unicode.org/cldr/utility/idna.jsp for a visualization of the +// differences between these two standards. package idna // import "golang.org/x/net/idna" import ( + "fmt" "strings" "unicode/utf8" + + "golang.org/x/text/secure/bidirule" + "golang.org/x/text/unicode/norm" ) -// TODO(nigeltao): specify when errors occur. For example, is ToASCII(".") or -// ToASCII("foo\x00") an error? See also http://www.unicode.org/faq/idn.html#11 +// NOTE: Unlike common practice in Go APIs, the functions will return a +// sanitized domain name in case of errors. 
Browsers sometimes use a partially +// evaluated string as lookup. +// TODO: the current error handling is, in my opinion, the least opinionated. +// Other strategies are also viable, though: +// Option 1) Return an empty string in case of error, but allow the user to +// specify explicitly which errors to ignore. +// Option 2) Return the partially evaluated string if it is itself a valid +// string, otherwise return the empty string in case of error. +// Option 3) Option 1 and 2. +// Option 4) Always return an empty string for now and implement Option 1 as +// needed, and document that the return string may not be empty in case of +// error in the future. +// I think Option 1 is best, but it is quite opinionated. -// acePrefix is the ASCII Compatible Encoding prefix. -const acePrefix = "xn--" +// ToASCII is a wrapper for Punycode.ToASCII. +func ToASCII(s string) (string, error) { + return Punycode.process(s, true) +} + +// ToUnicode is a wrapper for Punycode.ToUnicode. +func ToUnicode(s string) (string, error) { + return Punycode.process(s, false) +} + +// An Option configures a Profile at creation time. +type Option func(*options) + +// Transitional sets a Profile to use the Transitional mapping as defined in UTS +// #46. This will cause, for example, "ß" to be mapped to "ss". Using the +// transitional mapping provides a compromise between IDNA2003 and IDNA2008 +// compatibility. It is used by most browsers when resolving domain names. This +// option is only meaningful if combined with MapForLookup. +func Transitional(transitional bool) Option { + return func(o *options) { o.transitional = true } +} + +// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts +// are longer than allowed by the RFC. +func VerifyDNSLength(verify bool) Option { + return func(o *options) { o.verifyDNSLength = verify } +} + +// RemoveLeadingDots removes leading label separators. Leading runes that map to +// dots, such as U+3002, are removed as well. 
+// +// This is the behavior suggested by the UTS #46 and is adopted by some +// browsers. +func RemoveLeadingDots(remove bool) Option { + return func(o *options) { o.removeLeadingDots = remove } +} + +// ValidateLabels sets whether to check the mandatory label validation criteria +// as defined in Section 5.4 of RFC 5891. This includes testing for correct use +// of hyphens ('-'), normalization, validity of runes, and the context rules. +func ValidateLabels(enable bool) Option { + return func(o *options) { + // Don't override existing mappings, but set one that at least checks + // normalization if it is not set. + if o.mapping == nil && enable { + o.mapping = normalize + } + o.trie = trie + o.validateLabels = enable + o.fromPuny = validateFromPunycode + } +} + +// StrictDomainName limits the set of permissable ASCII characters to those +// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the +// hyphen). This is set by default for MapForLookup and ValidateForRegistration. +// +// This option is useful, for instance, for browsers that allow characters +// outside this range, for example a '_' (U+005F LOW LINE). See +// http://www.rfc-editor.org/std/std3.txt for more details This option +// corresponds to the UseSTD3ASCIIRules option in UTS #46. +func StrictDomainName(use bool) Option { + return func(o *options) { + o.trie = trie + o.useSTD3Rules = use + o.fromPuny = validateFromPunycode + } +} + +// NOTE: the following options pull in tables. The tables should not be linked +// in as long as the options are not used. + +// BidiRule enables the Bidi rule as defined in RFC 5893. Any application +// that relies on proper validation of labels should include this rule. +func BidiRule() Option { + return func(o *options) { o.bidirule = bidirule.ValidString } +} + +// ValidateForRegistration sets validation options to verify that a given IDN is +// properly formatted for registration as defined by Section 4 of RFC 5891. 
+func ValidateForRegistration() Option { + return func(o *options) { + o.mapping = validateRegistration + StrictDomainName(true)(o) + ValidateLabels(true)(o) + VerifyDNSLength(true)(o) + BidiRule()(o) + } +} + +// MapForLookup sets validation and mapping options such that a given IDN is +// transformed for domain name lookup according to the requirements set out in +// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894, +// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option +// to add this check. +// +// The mappings include normalization and mapping case, width and other +// compatibility mappings. +func MapForLookup() Option { + return func(o *options) { + o.mapping = validateAndMap + StrictDomainName(true)(o) + ValidateLabels(true)(o) + RemoveLeadingDots(true)(o) + } +} + +type options struct { + transitional bool + useSTD3Rules bool + validateLabels bool + verifyDNSLength bool + removeLeadingDots bool + + trie *idnaTrie + + // fromPuny calls validation rules when converting A-labels to U-labels. + fromPuny func(p *Profile, s string) error + + // mapping implements a validation and mapping step as defined in RFC 5895 + // or UTS 46, tailored to, for example, domain registration or lookup. + mapping func(p *Profile, s string) (string, error) + + // bidirule, if specified, checks whether s conforms to the Bidi Rule + // defined in RFC 5893. + bidirule func(s string) bool +} + +// A Profile defines the configuration of a IDNA mapper. +type Profile struct { + options +} + +func apply(o *options, opts []Option) { + for _, f := range opts { + f(o) + } +} + +// New creates a new Profile. +// +// With no options, the returned Profile is the most permissive and equals the +// Punycode Profile. Options can be passed to further restrict the Profile. 
The +// MapForLookup and ValidateForRegistration options set a collection of options, +// for lookup and registration purposes respectively, which can be tailored by +// adding more fine-grained options, where later options override earlier +// options. +func New(o ...Option) *Profile { + p := &Profile{} + apply(&p.options, o) + return p +} // ToASCII converts a domain or domain label to its ASCII form. For example, // ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and -// ToASCII("golang") is "golang". -func ToASCII(s string) (string, error) { - if ascii(s) { - return s, nil - } - labels := strings.Split(s, ".") - for i, label := range labels { - if !ascii(label) { - a, err := encode(acePrefix, label) - if err != nil { - return "", err - } - labels[i] = a - } - } - return strings.Join(labels, "."), nil +// ToASCII("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToASCII(s string) (string, error) { + return p.process(s, true) } // ToUnicode converts a domain or domain label to its Unicode form. For example, // ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and -// ToUnicode("golang") is "golang". -func ToUnicode(s string) (string, error) { - if !strings.Contains(s, acePrefix) { - return s, nil +// ToUnicode("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToUnicode(s string) (string, error) { + pp := *p + pp.transitional = false + return pp.process(s, false) +} + +// String reports a string with a description of the profile for debugging +// purposes. The string format may change with different versions. 
+func (p *Profile) String() string { + s := "" + if p.transitional { + s = "Transitional" + } else { + s = "NonTransitional" } - labels := strings.Split(s, ".") - for i, label := range labels { - if strings.HasPrefix(label, acePrefix) { - u, err := decode(label[len(acePrefix):]) - if err != nil { - return "", err - } - labels[i] = u + if p.useSTD3Rules { + s += ":UseSTD3Rules" + } + if p.validateLabels { + s += ":ValidateLabels" + } + if p.verifyDNSLength { + s += ":VerifyDNSLength" + } + return s +} + +var ( + // Punycode is a Profile that does raw punycode processing with a minimum + // of validation. + Punycode *Profile = punycode + + // Lookup is the recommended profile for looking up domain names, according + // to Section 5 of RFC 5891. The exact configuration of this profile may + // change over time. + Lookup *Profile = lookup + + // Display is the recommended profile for displaying domain names. + // The configuration of this profile may change over time. + Display *Profile = display + + // Registration is the recommended profile for checking whether a given + // IDN is valid for registration, according to Section 4 of RFC 5891. 
+ Registration *Profile = registration + + punycode = &Profile{} + lookup = &Profile{options{ + transitional: true, + useSTD3Rules: true, + validateLabels: true, + removeLeadingDots: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + display = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + removeLeadingDots: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + registration = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + verifyDNSLength: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateRegistration, + bidirule: bidirule.ValidString, + }} + + // TODO: profiles + // Register: recommended for approving domain names: don't do any mappings + // but rather reject on invalid input. Bundle or block deviation characters. +) + +type labelError struct{ label, code_ string } + +func (e labelError) code() string { return e.code_ } +func (e labelError) Error() string { + return fmt.Sprintf("idna: invalid label %q", e.label) +} + +type runeError rune + +func (e runeError) code() string { return "P1" } +func (e runeError) Error() string { + return fmt.Sprintf("idna: disallowed rune %U", e) +} + +// process implements the algorithm described in section 4 of UTS #46, +// see http://www.unicode.org/reports/tr46. +func (p *Profile) process(s string, toASCII bool) (string, error) { + var err error + if p.mapping != nil { + s, err = p.mapping(p, s) + } + // Remove leading empty labels. + if p.removeLeadingDots { + for ; len(s) > 0 && s[0] == '.'; s = s[1:] { } } - return strings.Join(labels, "."), nil + // It seems like we should only create this error on ToASCII, but the + // UTS 46 conformance tests suggests we should always check this. 
+ if err == nil && p.verifyDNSLength && s == "" { + err = &labelError{s, "A4"} + } + labels := labelIter{orig: s} + for ; !labels.done(); labels.next() { + label := labels.label() + if label == "" { + // Empty labels are not okay. The label iterator skips the last + // label if it is empty. + if err == nil && p.verifyDNSLength { + err = &labelError{s, "A4"} + } + continue + } + if strings.HasPrefix(label, acePrefix) { + u, err2 := decode(label[len(acePrefix):]) + if err2 != nil { + if err == nil { + err = err2 + } + // Spec says keep the old label. + continue + } + labels.set(u) + if err == nil && p.validateLabels { + err = p.fromPuny(p, u) + } + if err == nil { + // This should be called on NonTransitional, according to the + // spec, but that currently does not have any effect. Use the + // original profile to preserve options. + err = p.validateLabel(u) + } + } else if err == nil { + err = p.validateLabel(label) + } + } + if toASCII { + for labels.reset(); !labels.done(); labels.next() { + label := labels.label() + if !ascii(label) { + a, err2 := encode(acePrefix, label) + if err == nil { + err = err2 + } + label = a + labels.set(a) + } + n := len(label) + if p.verifyDNSLength && err == nil && (n == 0 || n > 63) { + err = &labelError{label, "A4"} + } + } + } + s = labels.result() + if toASCII && p.verifyDNSLength && err == nil { + // Compute the length of the domain name minus the root label and its dot. + n := len(s) + if n > 0 && s[n-1] == '.' { + n-- + } + if len(s) < 1 || n > 253 { + err = &labelError{s, "A4"} + } + } + return s, err +} + +func normalize(p *Profile, s string) (string, error) { + return norm.NFC.String(s), nil +} + +func validateRegistration(p *Profile, s string) (string, error) { + if !norm.NFC.IsNormalString(s) { + return s, &labelError{s, "V1"} + } + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + // Copy bytes not copied so far. 
+ switch p.simplify(info(v).category()) { + // TODO: handle the NV8 defined in the Unicode idna data set to allow + // for strict conformance to IDNA2008. + case valid, deviation: + case disallowed, mapped, unknown, ignored: + r, _ := utf8.DecodeRuneInString(s[i:]) + return s, runeError(r) + } + i += sz + } + return s, nil +} + +func validateAndMap(p *Profile, s string) (string, error) { + var ( + err error + b []byte + k int + ) + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + start := i + i += sz + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + case valid: + continue + case disallowed: + if err == nil { + r, _ := utf8.DecodeRuneInString(s[start:]) + err = runeError(r) + } + continue + case mapped, deviation: + b = append(b, s[k:start]...) + b = info(v).appendMapping(b, s[start:i]) + case ignored: + b = append(b, s[k:start]...) + // drop the rune + case unknown: + b = append(b, s[k:start]...) + b = append(b, "\ufffd"...) + } + k = i + } + if k == 0 { + // No changes so far. + s = norm.NFC.String(s) + } else { + b = append(b, s[k:]...) + if norm.NFC.QuickSpan(b) != len(b) { + b = norm.NFC.Bytes(b) + } + // TODO: the punycode converters require strings as input. + s = string(b) + } + return s, err +} + +// A labelIter allows iterating over domain name labels. +type labelIter struct { + orig string + slice []string + curStart int + curEnd int + i int +} + +func (l *labelIter) reset() { + l.curStart = 0 + l.curEnd = 0 + l.i = 0 +} + +func (l *labelIter) done() bool { + return l.curStart >= len(l.orig) +} + +func (l *labelIter) result() string { + if l.slice != nil { + return strings.Join(l.slice, ".") + } + return l.orig +} + +func (l *labelIter) label() string { + if l.slice != nil { + return l.slice[l.i] + } + p := strings.IndexByte(l.orig[l.curStart:], '.') + l.curEnd = l.curStart + p + if p == -1 { + l.curEnd = len(l.orig) + } + return l.orig[l.curStart:l.curEnd] +} + +// next sets the value to the next label. 
It skips the last label if it is empty. +func (l *labelIter) next() { + l.i++ + if l.slice != nil { + if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { + l.curStart = len(l.orig) + } + } else { + l.curStart = l.curEnd + 1 + if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' { + l.curStart = len(l.orig) + } + } +} + +func (l *labelIter) set(s string) { + if l.slice == nil { + l.slice = strings.Split(l.orig, ".") + } + l.slice[l.i] = s +} + +// acePrefix is the ASCII Compatible Encoding prefix. +const acePrefix = "xn--" + +func (p *Profile) simplify(cat category) category { + switch cat { + case disallowedSTD3Mapped: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = mapped + } + case disallowedSTD3Valid: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = valid + } + case deviation: + if !p.transitional { + cat = valid + } + case validNV8, validXV8: + // TODO: handle V2008 + cat = valid + } + return cat +} + +func validateFromPunycode(p *Profile, s string) error { + if !norm.NFC.IsNormalString(s) { + return &labelError{s, "V1"} + } + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if c := p.simplify(info(v).category()); c != valid && c != deviation { + return &labelError{s, "V6"} + } + i += sz + } + return nil +} + +const ( + zwnj = "\u200c" + zwj = "\u200d" +) + +type joinState int8 + +const ( + stateStart joinState = iota + stateVirama + stateBefore + stateBeforeVirama + stateAfter + stateFAIL +) + +var joinStates = [][numJoinTypes]joinState{ + stateStart: { + joiningL: stateBefore, + joiningD: stateBefore, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateVirama, + }, + stateVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + }, + stateBefore: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + joinZWNJ: stateAfter, + joinZWJ: stateFAIL, + joinVirama: stateBeforeVirama, + }, + stateBeforeVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + 
joiningT: stateBefore, + }, + stateAfter: { + joiningL: stateFAIL, + joiningD: stateBefore, + joiningT: stateAfter, + joiningR: stateStart, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateAfter, // no-op as we can't accept joiners here + }, + stateFAIL: { + 0: stateFAIL, + joiningL: stateFAIL, + joiningD: stateFAIL, + joiningT: stateFAIL, + joiningR: stateFAIL, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateFAIL, + }, +} + +// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are +// already implicitly satisfied by the overall implementation. +func (p *Profile) validateLabel(s string) error { + if s == "" { + if p.verifyDNSLength { + return &labelError{s, "A4"} + } + return nil + } + if p.bidirule != nil && !p.bidirule(s) { + return &labelError{s, "B"} + } + if !p.validateLabels { + return nil + } + trie := p.trie // p.validateLabels is only set if trie is set. + if len(s) > 4 && s[2] == '-' && s[3] == '-' { + return &labelError{s, "V2"} + } + if s[0] == '-' || s[len(s)-1] == '-' { + return &labelError{s, "V3"} + } + // TODO: merge the use of this in the trie. + v, sz := trie.lookupString(s) + x := info(v) + if x.isModifier() { + return &labelError{s, "V5"} + } + // Quickly return in the absence of zero-width (non) joiners. 
+ if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 { + return nil + } + st := stateStart + for i := 0; ; { + jt := x.joinType() + if s[i:i+sz] == zwj { + jt = joinZWJ + } else if s[i:i+sz] == zwnj { + jt = joinZWNJ + } + st = joinStates[st][jt] + if x.isViramaModifier() { + st = joinStates[st][joinVirama] + } + if i += sz; i == len(s) { + break + } + v, sz = trie.lookupString(s[i:]) + x = info(v) + } + if st == stateFAIL || st == stateAfter { + return &labelError{s, "C"} + } + return nil } func ascii(s string) bool { diff --git a/fn/vendor/golang.org/x/net/idna/punycode.go b/fn/vendor/golang.org/x/net/idna/punycode.go index 92e733f6a..02c7d59af 100644 --- a/fn/vendor/golang.org/x/net/idna/punycode.go +++ b/fn/vendor/golang.org/x/net/idna/punycode.go @@ -1,4 +1,6 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -7,7 +9,6 @@ package idna // This file implements the Punycode algorithm from RFC 3492. import ( - "fmt" "math" "strings" "unicode/utf8" @@ -27,6 +28,8 @@ const ( tmin int32 = 1 ) +func punyError(s string) error { return &labelError{s, "A3"} } + // decode decodes a string as specified in section 6.2. 
func decode(encoded string) (string, error) { if encoded == "" { @@ -34,7 +37,7 @@ func decode(encoded string) (string, error) { } pos := 1 + strings.LastIndex(encoded, "-") if pos == 1 { - return "", fmt.Errorf("idna: invalid label %q", encoded) + return "", punyError(encoded) } if pos == len(encoded) { return encoded[:len(encoded)-1], nil @@ -50,16 +53,16 @@ func decode(encoded string) (string, error) { oldI, w := i, int32(1) for k := base; ; k += base { if pos == len(encoded) { - return "", fmt.Errorf("idna: invalid label %q", encoded) + return "", punyError(encoded) } digit, ok := decodeDigit(encoded[pos]) if !ok { - return "", fmt.Errorf("idna: invalid label %q", encoded) + return "", punyError(encoded) } pos++ i += digit * w if i < 0 { - return "", fmt.Errorf("idna: invalid label %q", encoded) + return "", punyError(encoded) } t := k - bias if t < tmin { @@ -72,7 +75,7 @@ func decode(encoded string) (string, error) { } w *= base - t if w >= math.MaxInt32/base { - return "", fmt.Errorf("idna: invalid label %q", encoded) + return "", punyError(encoded) } } x := int32(len(output) + 1) @@ -80,7 +83,7 @@ func decode(encoded string) (string, error) { n += i / x i %= x if n > utf8.MaxRune || len(output) >= 1024 { - return "", fmt.Errorf("idna: invalid label %q", encoded) + return "", punyError(encoded) } output = append(output, 0) copy(output[i+1:], output[i:]) @@ -121,14 +124,14 @@ func encode(prefix, s string) (string, error) { } delta += (m - n) * (h + 1) if delta < 0 { - return "", fmt.Errorf("idna: invalid label %q", s) + return "", punyError(s) } n = m for _, r := range s { if r < n { delta++ if delta < 0 { - return "", fmt.Errorf("idna: invalid label %q", s) + return "", punyError(s) } continue } diff --git a/fn/vendor/golang.org/x/net/idna/tables.go b/fn/vendor/golang.org/x/net/idna/tables.go new file mode 100644 index 000000000..d2819345f --- /dev/null +++ b/fn/vendor/golang.org/x/net/idna/tables.go @@ -0,0 +1,4477 @@ +// Code generated by running "go 
generate" in golang.org/x/text. DO NOT EDIT. + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "9.0.0" + +var mappings string = "" + // Size: 8176 bytes + "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + + "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + + "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + + "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + + "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + + "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + + "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + + "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + + "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + + "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + + "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + + "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + + "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + + "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + + "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" 
+ + "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + + "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + + "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + + "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + + "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + + "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + + "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + + "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + + "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + + ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + + "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + + "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + + "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + + "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + + "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + + "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + + "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + + "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + + "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + + "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + + "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + + "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + + "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + + "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + + "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + + 
"ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + + "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + + "\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + + "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + + "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + + "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + + "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + + "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + + "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + + "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + + "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + + "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + + "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + + "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + + "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + + "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + + "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + + "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + + "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + + "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + + "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + + "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + + "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + + "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + + "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + + 
"\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + + "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + + "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + + "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + + "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + + "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + + " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + + "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + + "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + + "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + + "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + + "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + + "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + + "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + + "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + + "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + + "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + + "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + + "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + + "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + + "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + + "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + + "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل جلاله\x08ریال\x01,\x01:\x01!" 
+ + "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + + "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + + "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + + "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + + "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + + "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + + "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + + "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + + "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + + "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + + "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + + "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + + "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" + + "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" + + "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" + + "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + + "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" + + "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" + + "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + + "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" + + "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" + + "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" + + "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" + + "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" + + "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" + + 
"庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" + + "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + + "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" + + "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" + + "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" + + "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" + + "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" + + "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" + + "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" + + "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" + + "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" + + "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" + + "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" + + "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" + + "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" + + "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" + + "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" + + "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" + + "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" + + "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" + + "頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" + + "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" + +var xorData string = "" + // Size: 4855 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + 
"\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + + "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + + "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + + "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + + "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + + "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + + "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + + "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + + "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + + "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + + "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + + "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + + "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + + "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + + "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + + "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + + "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + + "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + + "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + + 
"\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + + "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + + "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + + "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + + "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + + "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + + "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + + "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + + "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + + "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + + "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + + "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + + "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + + "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + + ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + + "\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + + "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + + "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + + "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + + "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + + "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + + "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + + "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + + "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + + "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + + 
"\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + + "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + + "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + + "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + + "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + + "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + + "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + + "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + + "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + 
"\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" + + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + 
"\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + + "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + + "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + + "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + + "\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + + "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + + "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + + "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + + "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + + "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + + "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + + "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + + "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + + "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + + "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + + "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + + "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + + "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + + "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + + "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + + "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + + "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." 
+ + "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + 
"\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 
0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" 
+ + "\x04\x03\x0c?\x05\x03\x0c" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + + "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + + "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + + "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + + "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + + "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + + "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + + "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + + "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + + "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" 
+ + "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + + "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + + "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + + "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + + "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + + "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + + "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + + "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + + "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + + "\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// idnaTrie. Total size: 28496 bytes (27.83 KiB). Checksum: 43288b883596640e. +type idnaTrie struct{} + +func newIdnaTrie(i int) *idnaTrie { + return &idnaTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 123: + return uint16(idnaValues[n<<6+uint32(b)]) + default: + n -= 123 + return uint16(idnaSparse.lookup(n, b)) + } +} + +// idnaValues: 125 blocks, 8000 entries, 16000 bytes +// The third block is the zero block. 
+var idnaValues = [8000]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 
0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, + 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, + 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + // Block 0x5, offset 0x140 + 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 
0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 
0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, + 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 
0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x1308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x1308, 0x287: 0x1308, 0x288: 0x1308, 0x289: 0x1308, 0x28a: 0x1308, 0x28b: 0x1308, + 0x28c: 0x1308, 0x28d: 0x1308, 0x28e: 0x1308, 0x28f: 0x13c0, 0x290: 0x1308, 0x291: 0x1308, + 0x292: 0x1308, 0x293: 0x1308, 0x294: 0x1308, 0x295: 0x1308, 0x296: 0x1308, 0x297: 0x1308, + 0x298: 0x1308, 0x299: 0x1308, 0x29a: 0x1308, 0x29b: 0x1308, 0x29c: 0x1308, 0x29d: 0x1308, + 0x29e: 0x1308, 0x29f: 0x1308, 0x2a0: 0x1308, 0x2a1: 0x1308, 0x2a2: 0x1308, 0x2a3: 0x1308, + 0x2a4: 0x1308, 0x2a5: 0x1308, 0x2a6: 0x1308, 0x2a7: 0x1308, 0x2a8: 0x1308, 0x2a9: 0x1308, + 0x2aa: 0x1308, 0x2ab: 0x1308, 0x2ac: 0x1308, 0x2ad: 0x1308, 0x2ae: 0x1308, 0x2af: 0x1308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + // Block 0xb, 
offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 
0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x1308, 0x384: 0x1308, 0x385: 0x1308, + 0x386: 0x1308, 0x387: 0x1308, 0x388: 0x1318, 0x389: 0x1318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 
0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 
0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0040, 0x441: 0x0040, 0x442: 0x0040, 0x443: 0x0040, 0x444: 0x0040, 0x445: 0x0040, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0018, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0018, + 0x44c: 0x0018, 0x44d: 0x0018, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x1308, 0x451: 0x1308, + 0x452: 0x1308, 0x453: 0x1308, 0x454: 0x1308, 0x455: 0x1308, 0x456: 0x1308, 0x457: 0x1308, + 0x458: 0x1308, 0x459: 0x1308, 0x45a: 0x1308, 0x45b: 0x0018, 0x45c: 0x0340, 0x45d: 0x0040, + 0x45e: 0x0018, 0x45f: 0x0018, 0x460: 0x0208, 0x461: 0x0008, 0x462: 0x0408, 0x463: 0x0408, + 0x464: 0x0408, 0x465: 0x0408, 0x466: 0x0208, 0x467: 0x0408, 0x468: 0x0208, 0x469: 0x0408, + 0x46a: 0x0208, 0x46b: 0x0208, 0x46c: 0x0208, 0x46d: 0x0208, 0x46e: 0x0208, 0x46f: 0x0408, + 0x470: 0x0408, 0x471: 0x0408, 0x472: 0x0408, 0x473: 0x0208, 0x474: 0x0208, 0x475: 0x0208, + 0x476: 0x0208, 0x477: 0x0208, 0x478: 0x0208, 0x479: 0x0208, 0x47a: 0x0208, 0x47b: 0x0208, + 0x47c: 0x0208, 0x47d: 0x0208, 0x47e: 0x0208, 0x47f: 0x0208, + // Block 0x12, offset 0x480 + 0x480: 0x0408, 0x481: 0x0208, 0x482: 0x0208, 0x483: 0x0408, 0x484: 0x0408, 0x485: 0x0408, + 0x486: 0x0408, 0x487: 0x0408, 0x488: 0x0408, 0x489: 0x0408, 0x48a: 0x0408, 0x48b: 0x0408, + 0x48c: 0x0208, 0x48d: 0x0408, 0x48e: 0x0208, 0x48f: 0x0408, 0x490: 0x0208, 0x491: 0x0208, + 0x492: 0x0408, 0x493: 0x0408, 0x494: 0x0018, 0x495: 0x0408, 0x496: 0x1308, 0x497: 0x1308, + 0x498: 0x1308, 0x499: 0x1308, 0x49a: 0x1308, 0x49b: 0x1308, 0x49c: 0x1308, 0x49d: 0x0040, + 0x49e: 0x0018, 0x49f: 0x1308, 0x4a0: 0x1308, 0x4a1: 0x1308, 0x4a2: 0x1308, 0x4a3: 0x1308, + 0x4a4: 0x1308, 0x4a5: 0x0008, 0x4a6: 0x0008, 0x4a7: 0x1308, 0x4a8: 0x1308, 0x4a9: 0x0018, + 0x4aa: 0x1308, 0x4ab: 0x1308, 0x4ac: 0x1308, 0x4ad: 0x1308, 0x4ae: 0x0408, 0x4af: 0x0408, + 0x4b0: 0x0008, 0x4b1: 0x0008, 0x4b2: 0x0008, 0x4b3: 0x0008, 0x4b4: 0x0008, 0x4b5: 0x0008, + 0x4b6: 0x0008, 0x4b7: 0x0008, 0x4b8: 0x0008, 0x4b9: 0x0008, 0x4ba: 0x0208, 
0x4bb: 0x0208, + 0x4bc: 0x0208, 0x4bd: 0x0008, 0x4be: 0x0008, 0x4bf: 0x0208, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0018, 0x4c1: 0x0018, 0x4c2: 0x0018, 0x4c3: 0x0018, 0x4c4: 0x0018, 0x4c5: 0x0018, + 0x4c6: 0x0018, 0x4c7: 0x0018, 0x4c8: 0x0018, 0x4c9: 0x0018, 0x4ca: 0x0018, 0x4cb: 0x0018, + 0x4cc: 0x0018, 0x4cd: 0x0018, 0x4ce: 0x0040, 0x4cf: 0x0340, 0x4d0: 0x0408, 0x4d1: 0x1308, + 0x4d2: 0x0208, 0x4d3: 0x0208, 0x4d4: 0x0208, 0x4d5: 0x0408, 0x4d6: 0x0408, 0x4d7: 0x0408, + 0x4d8: 0x0408, 0x4d9: 0x0408, 0x4da: 0x0208, 0x4db: 0x0208, 0x4dc: 0x0208, 0x4dd: 0x0208, + 0x4de: 0x0408, 0x4df: 0x0208, 0x4e0: 0x0208, 0x4e1: 0x0208, 0x4e2: 0x0208, 0x4e3: 0x0208, + 0x4e4: 0x0208, 0x4e5: 0x0208, 0x4e6: 0x0208, 0x4e7: 0x0208, 0x4e8: 0x0408, 0x4e9: 0x0208, + 0x4ea: 0x0408, 0x4eb: 0x0208, 0x4ec: 0x0408, 0x4ed: 0x0208, 0x4ee: 0x0208, 0x4ef: 0x0408, + 0x4f0: 0x1308, 0x4f1: 0x1308, 0x4f2: 0x1308, 0x4f3: 0x1308, 0x4f4: 0x1308, 0x4f5: 0x1308, + 0x4f6: 0x1308, 0x4f7: 0x1308, 0x4f8: 0x1308, 0x4f9: 0x1308, 0x4fa: 0x1308, 0x4fb: 0x1308, + 0x4fc: 0x1308, 0x4fd: 0x1308, 0x4fe: 0x1308, 0x4ff: 0x1308, + // Block 0x14, offset 0x500 + 0x500: 0x1008, 0x501: 0x1308, 0x502: 0x1308, 0x503: 0x1308, 0x504: 0x1308, 0x505: 0x1308, + 0x506: 0x1308, 0x507: 0x1308, 0x508: 0x1308, 0x509: 0x1008, 0x50a: 0x1008, 0x50b: 0x1008, + 0x50c: 0x1008, 0x50d: 0x1b08, 0x50e: 0x1008, 0x50f: 0x1008, 0x510: 0x0008, 0x511: 0x1308, + 0x512: 0x1308, 0x513: 0x1308, 0x514: 0x1308, 0x515: 0x1308, 0x516: 0x1308, 0x517: 0x1308, + 0x518: 0x04c9, 0x519: 0x0501, 0x51a: 0x0539, 0x51b: 0x0571, 0x51c: 0x05a9, 0x51d: 0x05e1, + 0x51e: 0x0619, 0x51f: 0x0651, 0x520: 0x0008, 0x521: 0x0008, 0x522: 0x1308, 0x523: 0x1308, + 0x524: 0x0018, 0x525: 0x0018, 0x526: 0x0008, 0x527: 0x0008, 0x528: 0x0008, 0x529: 0x0008, + 0x52a: 0x0008, 0x52b: 0x0008, 0x52c: 0x0008, 0x52d: 0x0008, 0x52e: 0x0008, 0x52f: 0x0008, + 0x530: 0x0018, 0x531: 0x0008, 0x532: 0x0008, 0x533: 0x0008, 0x534: 0x0008, 0x535: 0x0008, + 0x536: 0x0008, 0x537: 0x0008, 0x538: 0x0008, 
0x539: 0x0008, 0x53a: 0x0008, 0x53b: 0x0008, + 0x53c: 0x0008, 0x53d: 0x0008, 0x53e: 0x0008, 0x53f: 0x0008, + // Block 0x15, offset 0x540 + 0x540: 0x0008, 0x541: 0x1308, 0x542: 0x1008, 0x543: 0x1008, 0x544: 0x0040, 0x545: 0x0008, + 0x546: 0x0008, 0x547: 0x0008, 0x548: 0x0008, 0x549: 0x0008, 0x54a: 0x0008, 0x54b: 0x0008, + 0x54c: 0x0008, 0x54d: 0x0040, 0x54e: 0x0040, 0x54f: 0x0008, 0x550: 0x0008, 0x551: 0x0040, + 0x552: 0x0040, 0x553: 0x0008, 0x554: 0x0008, 0x555: 0x0008, 0x556: 0x0008, 0x557: 0x0008, + 0x558: 0x0008, 0x559: 0x0008, 0x55a: 0x0008, 0x55b: 0x0008, 0x55c: 0x0008, 0x55d: 0x0008, + 0x55e: 0x0008, 0x55f: 0x0008, 0x560: 0x0008, 0x561: 0x0008, 0x562: 0x0008, 0x563: 0x0008, + 0x564: 0x0008, 0x565: 0x0008, 0x566: 0x0008, 0x567: 0x0008, 0x568: 0x0008, 0x569: 0x0040, + 0x56a: 0x0008, 0x56b: 0x0008, 0x56c: 0x0008, 0x56d: 0x0008, 0x56e: 0x0008, 0x56f: 0x0008, + 0x570: 0x0008, 0x571: 0x0040, 0x572: 0x0008, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, + 0x576: 0x0008, 0x577: 0x0008, 0x578: 0x0008, 0x579: 0x0008, 0x57a: 0x0040, 0x57b: 0x0040, + 0x57c: 0x1308, 0x57d: 0x0008, 0x57e: 0x1008, 0x57f: 0x1008, + // Block 0x16, offset 0x580 + 0x580: 0x1008, 0x581: 0x1308, 0x582: 0x1308, 0x583: 0x1308, 0x584: 0x1308, 0x585: 0x0040, + 0x586: 0x0040, 0x587: 0x1008, 0x588: 0x1008, 0x589: 0x0040, 0x58a: 0x0040, 0x58b: 0x1008, + 0x58c: 0x1008, 0x58d: 0x1b08, 0x58e: 0x0008, 0x58f: 0x0040, 0x590: 0x0040, 0x591: 0x0040, + 0x592: 0x0040, 0x593: 0x0040, 0x594: 0x0040, 0x595: 0x0040, 0x596: 0x0040, 0x597: 0x1008, + 0x598: 0x0040, 0x599: 0x0040, 0x59a: 0x0040, 0x59b: 0x0040, 0x59c: 0x0689, 0x59d: 0x06c1, + 0x59e: 0x0040, 0x59f: 0x06f9, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x1308, 0x5a3: 0x1308, + 0x5a4: 0x0040, 0x5a5: 0x0040, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, + 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, + 0x5b0: 0x0008, 0x5b1: 0x0008, 0x5b2: 0x0018, 0x5b3: 0x0018, 0x5b4: 0x0018, 0x5b5: 0x0018, + 0x5b6: 0x0018, 
0x5b7: 0x0018, 0x5b8: 0x0018, 0x5b9: 0x0018, 0x5ba: 0x0018, 0x5bb: 0x0018, + 0x5bc: 0x0040, 0x5bd: 0x0040, 0x5be: 0x0040, 0x5bf: 0x0040, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0040, 0x5c1: 0x1308, 0x5c2: 0x1308, 0x5c3: 0x1008, 0x5c4: 0x0040, 0x5c5: 0x0008, + 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0040, + 0x5cc: 0x0040, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, + 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, + 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, + 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, + 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0731, 0x5f4: 0x0040, 0x5f5: 0x0008, + 0x5f6: 0x0769, 0x5f7: 0x0040, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, + 0x5fc: 0x1308, 0x5fd: 0x0040, 0x5fe: 0x1008, 0x5ff: 0x1008, + // Block 0x18, offset 0x600 + 0x600: 0x1008, 0x601: 0x1308, 0x602: 0x1308, 0x603: 0x0040, 0x604: 0x0040, 0x605: 0x0040, + 0x606: 0x0040, 0x607: 0x1308, 0x608: 0x1308, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x1308, + 0x60c: 0x1308, 0x60d: 0x1b08, 0x60e: 0x0040, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x1308, + 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x0040, + 0x618: 0x0040, 0x619: 0x07a1, 0x61a: 0x07d9, 0x61b: 0x0811, 0x61c: 0x0008, 0x61d: 0x0040, + 0x61e: 0x0849, 0x61f: 0x0040, 0x620: 0x0040, 0x621: 0x0040, 0x622: 0x0040, 0x623: 0x0040, + 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x1308, 0x631: 0x1308, 0x632: 0x0008, 0x633: 0x0008, 0x634: 0x0008, 0x635: 
0x1308, + 0x636: 0x0040, 0x637: 0x0040, 0x638: 0x0040, 0x639: 0x0040, 0x63a: 0x0040, 0x63b: 0x0040, + 0x63c: 0x0040, 0x63d: 0x0040, 0x63e: 0x0040, 0x63f: 0x0040, + // Block 0x19, offset 0x640 + 0x640: 0x0040, 0x641: 0x1308, 0x642: 0x1308, 0x643: 0x1008, 0x644: 0x0040, 0x645: 0x0008, + 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0008, + 0x64c: 0x0008, 0x64d: 0x0008, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0008, + 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, + 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, + 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, + 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0008, 0x674: 0x0040, 0x675: 0x0008, + 0x676: 0x0008, 0x677: 0x0008, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x1308, 0x67d: 0x0008, 0x67e: 0x1008, 0x67f: 0x1008, + // Block 0x1a, offset 0x680 + 0x680: 0x1008, 0x681: 0x1308, 0x682: 0x1308, 0x683: 0x1308, 0x684: 0x1308, 0x685: 0x1308, + 0x686: 0x0040, 0x687: 0x1308, 0x688: 0x1308, 0x689: 0x1008, 0x68a: 0x0040, 0x68b: 0x1008, + 0x68c: 0x1008, 0x68d: 0x1b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0008, 0x691: 0x0040, + 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, + 0x698: 0x0040, 0x699: 0x0040, 0x69a: 0x0040, 0x69b: 0x0040, 0x69c: 0x0040, 0x69d: 0x0040, + 0x69e: 0x0040, 0x69f: 0x0040, 0x6a0: 0x0008, 0x6a1: 0x0008, 0x6a2: 0x1308, 0x6a3: 0x1308, + 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x0018, 0x6b1: 0x0018, 0x6b2: 0x0040, 0x6b3: 
0x0040, 0x6b4: 0x0040, 0x6b5: 0x0040, + 0x6b6: 0x0040, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0008, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x0040, 0x6c1: 0x1308, 0x6c2: 0x1008, 0x6c3: 0x1008, 0x6c4: 0x0040, 0x6c5: 0x0008, + 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, + 0x6cc: 0x0008, 0x6cd: 0x0040, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0040, + 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, + 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, + 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, + 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, + 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x1308, 0x6fd: 0x0008, 0x6fe: 0x1008, 0x6ff: 0x1308, + // Block 0x1c, offset 0x700 + 0x700: 0x1008, 0x701: 0x1308, 0x702: 0x1308, 0x703: 0x1308, 0x704: 0x1308, 0x705: 0x0040, + 0x706: 0x0040, 0x707: 0x1008, 0x708: 0x1008, 0x709: 0x0040, 0x70a: 0x0040, 0x70b: 0x1008, + 0x70c: 0x1008, 0x70d: 0x1b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0040, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x1308, 0x717: 0x1008, + 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0881, 0x71d: 0x08b9, + 0x71e: 0x0040, 0x71f: 0x0008, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x1308, 0x723: 0x1308, + 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0018, 0x731: 
0x0008, 0x732: 0x0018, 0x733: 0x0018, 0x734: 0x0018, 0x735: 0x0018, + 0x736: 0x0018, 0x737: 0x0018, 0x738: 0x0040, 0x739: 0x0040, 0x73a: 0x0040, 0x73b: 0x0040, + 0x73c: 0x0040, 0x73d: 0x0040, 0x73e: 0x0040, 0x73f: 0x0040, + // Block 0x1d, offset 0x740 + 0x740: 0x0040, 0x741: 0x0040, 0x742: 0x1308, 0x743: 0x0008, 0x744: 0x0040, 0x745: 0x0008, + 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0040, + 0x74c: 0x0040, 0x74d: 0x0040, 0x74e: 0x0008, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, + 0x752: 0x0008, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0040, 0x757: 0x0040, + 0x758: 0x0040, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0040, 0x75c: 0x0008, 0x75d: 0x0040, + 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0040, 0x761: 0x0040, 0x762: 0x0040, 0x763: 0x0008, + 0x764: 0x0008, 0x765: 0x0040, 0x766: 0x0040, 0x767: 0x0040, 0x768: 0x0008, 0x769: 0x0008, + 0x76a: 0x0008, 0x76b: 0x0040, 0x76c: 0x0040, 0x76d: 0x0040, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0008, 0x771: 0x0008, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0008, 0x775: 0x0008, + 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040, + 0x77c: 0x0040, 0x77d: 0x0040, 0x77e: 0x1008, 0x77f: 0x1008, + // Block 0x1e, offset 0x780 + 0x780: 0x1308, 0x781: 0x1008, 0x782: 0x1008, 0x783: 0x1008, 0x784: 0x1008, 0x785: 0x0040, + 0x786: 0x1308, 0x787: 0x1308, 0x788: 0x1308, 0x789: 0x0040, 0x78a: 0x1308, 0x78b: 0x1308, + 0x78c: 0x1308, 0x78d: 0x1b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, + 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x1308, 0x796: 0x1308, 0x797: 0x0040, + 0x798: 0x0008, 0x799: 0x0008, 0x79a: 0x0008, 0x79b: 0x0040, 0x79c: 0x0040, 0x79d: 0x0040, + 0x79e: 0x0040, 0x79f: 0x0040, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x1308, 0x7a3: 0x1308, + 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, + 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, 
+ 0x7b0: 0x0040, 0x7b1: 0x0040, 0x7b2: 0x0040, 0x7b3: 0x0040, 0x7b4: 0x0040, 0x7b5: 0x0040, + 0x7b6: 0x0040, 0x7b7: 0x0040, 0x7b8: 0x0018, 0x7b9: 0x0018, 0x7ba: 0x0018, 0x7bb: 0x0018, + 0x7bc: 0x0018, 0x7bd: 0x0018, 0x7be: 0x0018, 0x7bf: 0x0018, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0008, 0x7c1: 0x1308, 0x7c2: 0x1008, 0x7c3: 0x1008, 0x7c4: 0x0040, 0x7c5: 0x0008, + 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0008, + 0x7cc: 0x0008, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, + 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0008, 0x7d7: 0x0008, + 0x7d8: 0x0008, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0008, 0x7dc: 0x0008, 0x7dd: 0x0008, + 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0008, 0x7e1: 0x0008, 0x7e2: 0x0008, 0x7e3: 0x0008, + 0x7e4: 0x0008, 0x7e5: 0x0008, 0x7e6: 0x0008, 0x7e7: 0x0008, 0x7e8: 0x0008, 0x7e9: 0x0040, + 0x7ea: 0x0008, 0x7eb: 0x0008, 0x7ec: 0x0008, 0x7ed: 0x0008, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0040, 0x7f5: 0x0008, + 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, + 0x7fc: 0x1308, 0x7fd: 0x0008, 0x7fe: 0x1008, 0x7ff: 0x1308, + // Block 0x20, offset 0x800 + 0x800: 0x1008, 0x801: 0x1008, 0x802: 0x1008, 0x803: 0x1008, 0x804: 0x1008, 0x805: 0x0040, + 0x806: 0x1308, 0x807: 0x1008, 0x808: 0x1008, 0x809: 0x0040, 0x80a: 0x1008, 0x80b: 0x1008, + 0x80c: 0x1308, 0x80d: 0x1b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, + 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x1008, 0x816: 0x1008, 0x817: 0x0040, + 0x818: 0x0040, 0x819: 0x0040, 0x81a: 0x0040, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, + 0x81e: 0x0008, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x1308, 0x823: 0x1308, + 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, + 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 
0x82e: 0x0008, 0x82f: 0x0008, + 0x830: 0x0040, 0x831: 0x0008, 0x832: 0x0008, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, + 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0040, 0x839: 0x0040, 0x83a: 0x0040, 0x83b: 0x0040, + 0x83c: 0x0040, 0x83d: 0x0040, 0x83e: 0x0040, 0x83f: 0x0040, + // Block 0x21, offset 0x840 + 0x840: 0x1008, 0x841: 0x1308, 0x842: 0x1308, 0x843: 0x1308, 0x844: 0x1308, 0x845: 0x0040, + 0x846: 0x1008, 0x847: 0x1008, 0x848: 0x1008, 0x849: 0x0040, 0x84a: 0x1008, 0x84b: 0x1008, + 0x84c: 0x1008, 0x84d: 0x1b08, 0x84e: 0x0008, 0x84f: 0x0018, 0x850: 0x0040, 0x851: 0x0040, + 0x852: 0x0040, 0x853: 0x0040, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x1008, + 0x858: 0x0018, 0x859: 0x0018, 0x85a: 0x0018, 0x85b: 0x0018, 0x85c: 0x0018, 0x85d: 0x0018, + 0x85e: 0x0018, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x1308, 0x863: 0x1308, + 0x864: 0x0040, 0x865: 0x0040, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0008, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0018, 0x871: 0x0018, 0x872: 0x0018, 0x873: 0x0018, 0x874: 0x0018, 0x875: 0x0018, + 0x876: 0x0018, 0x877: 0x0018, 0x878: 0x0018, 0x879: 0x0018, 0x87a: 0x0008, 0x87b: 0x0008, + 0x87c: 0x0008, 0x87d: 0x0008, 0x87e: 0x0008, 0x87f: 0x0008, + // Block 0x22, offset 0x880 + 0x880: 0x0040, 0x881: 0x0008, 0x882: 0x0008, 0x883: 0x0040, 0x884: 0x0008, 0x885: 0x0040, + 0x886: 0x0040, 0x887: 0x0008, 0x888: 0x0008, 0x889: 0x0040, 0x88a: 0x0008, 0x88b: 0x0040, + 0x88c: 0x0040, 0x88d: 0x0008, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0008, 0x895: 0x0008, 0x896: 0x0008, 0x897: 0x0008, + 0x898: 0x0040, 0x899: 0x0008, 0x89a: 0x0008, 0x89b: 0x0008, 0x89c: 0x0008, 0x89d: 0x0008, + 0x89e: 0x0008, 0x89f: 0x0008, 0x8a0: 0x0040, 0x8a1: 0x0008, 0x8a2: 0x0008, 0x8a3: 0x0008, + 0x8a4: 0x0040, 0x8a5: 0x0008, 0x8a6: 0x0040, 0x8a7: 0x0008, 0x8a8: 0x0040, 0x8a9: 0x0040, + 0x8aa: 0x0008, 0x8ab: 0x0008, 
0x8ac: 0x0040, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0008, 0x8b1: 0x1308, 0x8b2: 0x0008, 0x8b3: 0x0929, 0x8b4: 0x1308, 0x8b5: 0x1308, + 0x8b6: 0x1308, 0x8b7: 0x1308, 0x8b8: 0x1308, 0x8b9: 0x1308, 0x8ba: 0x0040, 0x8bb: 0x1308, + 0x8bc: 0x1308, 0x8bd: 0x0008, 0x8be: 0x0040, 0x8bf: 0x0040, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0008, 0x8c1: 0x0008, 0x8c2: 0x0008, 0x8c3: 0x09d1, 0x8c4: 0x0008, 0x8c5: 0x0008, + 0x8c6: 0x0008, 0x8c7: 0x0008, 0x8c8: 0x0040, 0x8c9: 0x0008, 0x8ca: 0x0008, 0x8cb: 0x0008, + 0x8cc: 0x0008, 0x8cd: 0x0a09, 0x8ce: 0x0008, 0x8cf: 0x0008, 0x8d0: 0x0008, 0x8d1: 0x0008, + 0x8d2: 0x0a41, 0x8d3: 0x0008, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x0a79, + 0x8d8: 0x0008, 0x8d9: 0x0008, 0x8da: 0x0008, 0x8db: 0x0008, 0x8dc: 0x0ab1, 0x8dd: 0x0008, + 0x8de: 0x0008, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x0008, 0x8e3: 0x0008, + 0x8e4: 0x0008, 0x8e5: 0x0008, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0ae9, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0040, 0x8ee: 0x0040, 0x8ef: 0x0040, + 0x8f0: 0x0040, 0x8f1: 0x1308, 0x8f2: 0x1308, 0x8f3: 0x0b21, 0x8f4: 0x1308, 0x8f5: 0x0b59, + 0x8f6: 0x0b91, 0x8f7: 0x0bc9, 0x8f8: 0x0c19, 0x8f9: 0x0c51, 0x8fa: 0x1308, 0x8fb: 0x1308, + 0x8fc: 0x1308, 0x8fd: 0x1308, 0x8fe: 0x1308, 0x8ff: 0x1008, + // Block 0x24, offset 0x900 + 0x900: 0x1308, 0x901: 0x0ca1, 0x902: 0x1308, 0x903: 0x1308, 0x904: 0x1b08, 0x905: 0x0018, + 0x906: 0x1308, 0x907: 0x1308, 0x908: 0x0008, 0x909: 0x0008, 0x90a: 0x0008, 0x90b: 0x0008, + 0x90c: 0x0008, 0x90d: 0x1308, 0x90e: 0x1308, 0x90f: 0x1308, 0x910: 0x1308, 0x911: 0x1308, + 0x912: 0x1308, 0x913: 0x0cd9, 0x914: 0x1308, 0x915: 0x1308, 0x916: 0x1308, 0x917: 0x1308, + 0x918: 0x0040, 0x919: 0x1308, 0x91a: 0x1308, 0x91b: 0x1308, 0x91c: 0x1308, 0x91d: 0x0d11, + 0x91e: 0x1308, 0x91f: 0x1308, 0x920: 0x1308, 0x921: 0x1308, 0x922: 0x0d49, 0x923: 0x1308, + 0x924: 0x1308, 0x925: 0x1308, 0x926: 0x1308, 0x927: 0x0d81, 0x928: 0x1308, 0x929: 0x1308, + 
0x92a: 0x1308, 0x92b: 0x1308, 0x92c: 0x0db9, 0x92d: 0x1308, 0x92e: 0x1308, 0x92f: 0x1308, + 0x930: 0x1308, 0x931: 0x1308, 0x932: 0x1308, 0x933: 0x1308, 0x934: 0x1308, 0x935: 0x1308, + 0x936: 0x1308, 0x937: 0x1308, 0x938: 0x1308, 0x939: 0x0df1, 0x93a: 0x1308, 0x93b: 0x1308, + 0x93c: 0x1308, 0x93d: 0x0040, 0x93e: 0x0018, 0x93f: 0x0018, + // Block 0x25, offset 0x940 + 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x0008, 0x944: 0x0008, 0x945: 0x0008, + 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0008, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x0008, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0008, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0008, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0008, 0x95d: 0x0008, + 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, + 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0008, + 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0039, 0x96d: 0x0ed1, 0x96e: 0x0ee9, 0x96f: 0x0008, + 0x970: 0x0ef9, 0x971: 0x0f09, 0x972: 0x0f19, 0x973: 0x0f31, 0x974: 0x0249, 0x975: 0x0f41, + 0x976: 0x0259, 0x977: 0x0f51, 0x978: 0x0359, 0x979: 0x0f61, 0x97a: 0x0f71, 0x97b: 0x0008, + 0x97c: 0x00d9, 0x97d: 0x0f81, 0x97e: 0x0f99, 0x97f: 0x0269, + // Block 0x26, offset 0x980 + 0x980: 0x0fa9, 0x981: 0x0fb9, 0x982: 0x0279, 0x983: 0x0039, 0x984: 0x0fc9, 0x985: 0x0fe1, + 0x986: 0x059d, 0x987: 0x0ee9, 0x988: 0x0ef9, 0x989: 0x0f09, 0x98a: 0x0ff9, 0x98b: 0x1011, + 0x98c: 0x1029, 0x98d: 0x0f31, 0x98e: 0x0008, 0x98f: 0x0f51, 0x990: 0x0f61, 0x991: 0x1041, + 0x992: 0x00d9, 0x993: 0x1059, 0x994: 0x05b5, 0x995: 0x05b5, 0x996: 0x0f99, 0x997: 0x0fa9, + 0x998: 0x0fb9, 0x999: 0x059d, 0x99a: 0x1071, 0x99b: 0x1089, 0x99c: 0x05cd, 0x99d: 0x1099, + 0x99e: 0x10b1, 0x99f: 0x10c9, 0x9a0: 0x10e1, 0x9a1: 0x10f9, 0x9a2: 0x0f41, 0x9a3: 0x0269, + 0x9a4: 0x0fb9, 0x9a5: 0x1089, 0x9a6: 0x1099, 0x9a7: 0x10b1, 0x9a8: 
0x1111, 0x9a9: 0x10e1, + 0x9aa: 0x10f9, 0x9ab: 0x0008, 0x9ac: 0x0008, 0x9ad: 0x0008, 0x9ae: 0x0008, 0x9af: 0x0008, + 0x9b0: 0x0008, 0x9b1: 0x0008, 0x9b2: 0x0008, 0x9b3: 0x0008, 0x9b4: 0x0008, 0x9b5: 0x0008, + 0x9b6: 0x0008, 0x9b7: 0x0008, 0x9b8: 0x1129, 0x9b9: 0x0008, 0x9ba: 0x0008, 0x9bb: 0x0008, + 0x9bc: 0x0008, 0x9bd: 0x0008, 0x9be: 0x0008, 0x9bf: 0x0008, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, + 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, + 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, + 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, + 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x1141, 0x9dc: 0x1159, 0x9dd: 0x1169, + 0x9de: 0x1181, 0x9df: 0x1029, 0x9e0: 0x1199, 0x9e1: 0x11a9, 0x9e2: 0x11c1, 0x9e3: 0x11d9, + 0x9e4: 0x11f1, 0x9e5: 0x1209, 0x9e6: 0x1221, 0x9e7: 0x05e5, 0x9e8: 0x1239, 0x9e9: 0x1251, + 0x9ea: 0xe17d, 0x9eb: 0x1269, 0x9ec: 0x1281, 0x9ed: 0x1299, 0x9ee: 0x12b1, 0x9ef: 0x12c9, + 0x9f0: 0x12e1, 0x9f1: 0x12f9, 0x9f2: 0x1311, 0x9f3: 0x1329, 0x9f4: 0x1341, 0x9f5: 0x1359, + 0x9f6: 0x1371, 0x9f7: 0x1389, 0x9f8: 0x05fd, 0x9f9: 0x13a1, 0x9fa: 0x13b9, 0x9fb: 0x13d1, + 0x9fc: 0x13e1, 0x9fd: 0x13f9, 0x9fe: 0x1411, 0x9ff: 0x1429, + // Block 0x28, offset 0xa00 + 0xa00: 0xe00d, 0xa01: 0x0008, 0xa02: 0xe00d, 0xa03: 0x0008, 0xa04: 0xe00d, 0xa05: 0x0008, + 0xa06: 0xe00d, 0xa07: 0x0008, 0xa08: 0xe00d, 0xa09: 0x0008, 0xa0a: 0xe00d, 0xa0b: 0x0008, + 0xa0c: 0xe00d, 0xa0d: 0x0008, 0xa0e: 0xe00d, 0xa0f: 0x0008, 0xa10: 0xe00d, 0xa11: 0x0008, + 0xa12: 0xe00d, 0xa13: 0x0008, 0xa14: 0xe00d, 0xa15: 0x0008, 0xa16: 0xe00d, 0xa17: 0x0008, + 0xa18: 0xe00d, 0xa19: 0x0008, 0xa1a: 0xe00d, 0xa1b: 0x0008, 0xa1c: 0xe00d, 0xa1d: 0x0008, + 0xa1e: 0xe00d, 0xa1f: 0x0008, 0xa20: 0xe00d, 0xa21: 0x0008, 0xa22: 0xe00d, 0xa23: 0x0008, + 0xa24: 0xe00d, 0xa25: 0x0008, 0xa26: 
0xe00d, 0xa27: 0x0008, 0xa28: 0xe00d, 0xa29: 0x0008, + 0xa2a: 0xe00d, 0xa2b: 0x0008, 0xa2c: 0xe00d, 0xa2d: 0x0008, 0xa2e: 0xe00d, 0xa2f: 0x0008, + 0xa30: 0xe00d, 0xa31: 0x0008, 0xa32: 0xe00d, 0xa33: 0x0008, 0xa34: 0xe00d, 0xa35: 0x0008, + 0xa36: 0xe00d, 0xa37: 0x0008, 0xa38: 0xe00d, 0xa39: 0x0008, 0xa3a: 0xe00d, 0xa3b: 0x0008, + 0xa3c: 0xe00d, 0xa3d: 0x0008, 0xa3e: 0xe00d, 0xa3f: 0x0008, + // Block 0x29, offset 0xa40 + 0xa40: 0xe00d, 0xa41: 0x0008, 0xa42: 0xe00d, 0xa43: 0x0008, 0xa44: 0xe00d, 0xa45: 0x0008, + 0xa46: 0xe00d, 0xa47: 0x0008, 0xa48: 0xe00d, 0xa49: 0x0008, 0xa4a: 0xe00d, 0xa4b: 0x0008, + 0xa4c: 0xe00d, 0xa4d: 0x0008, 0xa4e: 0xe00d, 0xa4f: 0x0008, 0xa50: 0xe00d, 0xa51: 0x0008, + 0xa52: 0xe00d, 0xa53: 0x0008, 0xa54: 0xe00d, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, + 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0615, 0xa5b: 0x0635, 0xa5c: 0x0008, 0xa5d: 0x0008, + 0xa5e: 0x1441, 0xa5f: 0x0008, 0xa60: 0xe00d, 0xa61: 0x0008, 0xa62: 0xe00d, 0xa63: 0x0008, + 0xa64: 0xe00d, 0xa65: 0x0008, 0xa66: 0xe00d, 0xa67: 0x0008, 0xa68: 0xe00d, 0xa69: 0x0008, + 0xa6a: 0xe00d, 0xa6b: 0x0008, 0xa6c: 0xe00d, 0xa6d: 0x0008, 0xa6e: 0xe00d, 0xa6f: 0x0008, + 0xa70: 0xe00d, 0xa71: 0x0008, 0xa72: 0xe00d, 0xa73: 0x0008, 0xa74: 0xe00d, 0xa75: 0x0008, + 0xa76: 0xe00d, 0xa77: 0x0008, 0xa78: 0xe00d, 0xa79: 0x0008, 0xa7a: 0xe00d, 0xa7b: 0x0008, + 0xa7c: 0xe00d, 0xa7d: 0x0008, 0xa7e: 0xe00d, 0xa7f: 0x0008, + // Block 0x2a, offset 0xa80 + 0xa80: 0x0008, 0xa81: 0x0008, 0xa82: 0x0008, 0xa83: 0x0008, 0xa84: 0x0008, 0xa85: 0x0008, + 0xa86: 0x0040, 0xa87: 0x0040, 0xa88: 0xe045, 0xa89: 0xe045, 0xa8a: 0xe045, 0xa8b: 0xe045, + 0xa8c: 0xe045, 0xa8d: 0xe045, 0xa8e: 0x0040, 0xa8f: 0x0040, 0xa90: 0x0008, 0xa91: 0x0008, + 0xa92: 0x0008, 0xa93: 0x0008, 0xa94: 0x0008, 0xa95: 0x0008, 0xa96: 0x0008, 0xa97: 0x0008, + 0xa98: 0x0040, 0xa99: 0xe045, 0xa9a: 0x0040, 0xa9b: 0xe045, 0xa9c: 0x0040, 0xa9d: 0xe045, + 0xa9e: 0x0040, 0xa9f: 0xe045, 0xaa0: 0x0008, 0xaa1: 0x0008, 0xaa2: 0x0008, 0xaa3: 0x0008, + 0xaa4: 
0x0008, 0xaa5: 0x0008, 0xaa6: 0x0008, 0xaa7: 0x0008, 0xaa8: 0xe045, 0xaa9: 0xe045, + 0xaaa: 0xe045, 0xaab: 0xe045, 0xaac: 0xe045, 0xaad: 0xe045, 0xaae: 0xe045, 0xaaf: 0xe045, + 0xab0: 0x0008, 0xab1: 0x1459, 0xab2: 0x0008, 0xab3: 0x1471, 0xab4: 0x0008, 0xab5: 0x1489, + 0xab6: 0x0008, 0xab7: 0x14a1, 0xab8: 0x0008, 0xab9: 0x14b9, 0xaba: 0x0008, 0xabb: 0x14d1, + 0xabc: 0x0008, 0xabd: 0x14e9, 0xabe: 0x0040, 0xabf: 0x0040, + // Block 0x2b, offset 0xac0 + 0xac0: 0x1501, 0xac1: 0x1531, 0xac2: 0x1561, 0xac3: 0x1591, 0xac4: 0x15c1, 0xac5: 0x15f1, + 0xac6: 0x1621, 0xac7: 0x1651, 0xac8: 0x1501, 0xac9: 0x1531, 0xaca: 0x1561, 0xacb: 0x1591, + 0xacc: 0x15c1, 0xacd: 0x15f1, 0xace: 0x1621, 0xacf: 0x1651, 0xad0: 0x1681, 0xad1: 0x16b1, + 0xad2: 0x16e1, 0xad3: 0x1711, 0xad4: 0x1741, 0xad5: 0x1771, 0xad6: 0x17a1, 0xad7: 0x17d1, + 0xad8: 0x1681, 0xad9: 0x16b1, 0xada: 0x16e1, 0xadb: 0x1711, 0xadc: 0x1741, 0xadd: 0x1771, + 0xade: 0x17a1, 0xadf: 0x17d1, 0xae0: 0x1801, 0xae1: 0x1831, 0xae2: 0x1861, 0xae3: 0x1891, + 0xae4: 0x18c1, 0xae5: 0x18f1, 0xae6: 0x1921, 0xae7: 0x1951, 0xae8: 0x1801, 0xae9: 0x1831, + 0xaea: 0x1861, 0xaeb: 0x1891, 0xaec: 0x18c1, 0xaed: 0x18f1, 0xaee: 0x1921, 0xaef: 0x1951, + 0xaf0: 0x0008, 0xaf1: 0x0008, 0xaf2: 0x1981, 0xaf3: 0x19b1, 0xaf4: 0x19d9, 0xaf5: 0x0040, + 0xaf6: 0x0008, 0xaf7: 0x1a01, 0xaf8: 0xe045, 0xaf9: 0xe045, 0xafa: 0x064d, 0xafb: 0x1459, + 0xafc: 0x19b1, 0xafd: 0x0666, 0xafe: 0x1a31, 0xaff: 0x0686, + // Block 0x2c, offset 0xb00 + 0xb00: 0x06a6, 0xb01: 0x1a4a, 0xb02: 0x1a79, 0xb03: 0x1aa9, 0xb04: 0x1ad1, 0xb05: 0x0040, + 0xb06: 0x0008, 0xb07: 0x1af9, 0xb08: 0x06c5, 0xb09: 0x1471, 0xb0a: 0x06dd, 0xb0b: 0x1489, + 0xb0c: 0x1aa9, 0xb0d: 0x1b2a, 0xb0e: 0x1b5a, 0xb0f: 0x1b8a, 0xb10: 0x0008, 0xb11: 0x0008, + 0xb12: 0x0008, 0xb13: 0x1bb9, 0xb14: 0x0040, 0xb15: 0x0040, 0xb16: 0x0008, 0xb17: 0x0008, + 0xb18: 0xe045, 0xb19: 0xe045, 0xb1a: 0x06f5, 0xb1b: 0x14a1, 0xb1c: 0x0040, 0xb1d: 0x1bd2, + 0xb1e: 0x1c02, 0xb1f: 0x1c32, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 
0xb23: 0x1c61, + 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, + 0xb2a: 0x070d, 0xb2b: 0x14d1, 0xb2c: 0xe04d, 0xb2d: 0x1c7a, 0xb2e: 0x03d2, 0xb2f: 0x1caa, + 0xb30: 0x0040, 0xb31: 0x0040, 0xb32: 0x1cb9, 0xb33: 0x1ce9, 0xb34: 0x1d11, 0xb35: 0x0040, + 0xb36: 0x0008, 0xb37: 0x1d39, 0xb38: 0x0725, 0xb39: 0x14b9, 0xb3a: 0x0515, 0xb3b: 0x14e9, + 0xb3c: 0x1ce9, 0xb3d: 0x073e, 0xb3e: 0x075e, 0xb3f: 0x0040, + // Block 0x2d, offset 0xb40 + 0xb40: 0x000a, 0xb41: 0x000a, 0xb42: 0x000a, 0xb43: 0x000a, 0xb44: 0x000a, 0xb45: 0x000a, + 0xb46: 0x000a, 0xb47: 0x000a, 0xb48: 0x000a, 0xb49: 0x000a, 0xb4a: 0x000a, 0xb4b: 0x03c0, + 0xb4c: 0x0003, 0xb4d: 0x0003, 0xb4e: 0x0340, 0xb4f: 0x0340, 0xb50: 0x0018, 0xb51: 0xe00d, + 0xb52: 0x0018, 0xb53: 0x0018, 0xb54: 0x0018, 0xb55: 0x0018, 0xb56: 0x0018, 0xb57: 0x077e, + 0xb58: 0x0018, 0xb59: 0x0018, 0xb5a: 0x0018, 0xb5b: 0x0018, 0xb5c: 0x0018, 0xb5d: 0x0018, + 0xb5e: 0x0018, 0xb5f: 0x0018, 0xb60: 0x0018, 0xb61: 0x0018, 0xb62: 0x0018, 0xb63: 0x0018, + 0xb64: 0x0040, 0xb65: 0x0040, 0xb66: 0x0040, 0xb67: 0x0018, 0xb68: 0x0040, 0xb69: 0x0040, + 0xb6a: 0x0340, 0xb6b: 0x0340, 0xb6c: 0x0340, 0xb6d: 0x0340, 0xb6e: 0x0340, 0xb6f: 0x000a, + 0xb70: 0x0018, 0xb71: 0x0018, 0xb72: 0x0018, 0xb73: 0x1d69, 0xb74: 0x1da1, 0xb75: 0x0018, + 0xb76: 0x1df1, 0xb77: 0x1e29, 0xb78: 0x0018, 0xb79: 0x0018, 0xb7a: 0x0018, 0xb7b: 0x0018, + 0xb7c: 0x1e7a, 0xb7d: 0x0018, 0xb7e: 0x079e, 0xb7f: 0x0018, + // Block 0x2e, offset 0xb80 + 0xb80: 0x0018, 0xb81: 0x0018, 0xb82: 0x0018, 0xb83: 0x0018, 0xb84: 0x0018, 0xb85: 0x0018, + 0xb86: 0x0018, 0xb87: 0x1e92, 0xb88: 0x1eaa, 0xb89: 0x1ec2, 0xb8a: 0x0018, 0xb8b: 0x0018, + 0xb8c: 0x0018, 0xb8d: 0x0018, 0xb8e: 0x0018, 0xb8f: 0x0018, 0xb90: 0x0018, 0xb91: 0x0018, + 0xb92: 0x0018, 0xb93: 0x0018, 0xb94: 0x0018, 0xb95: 0x0018, 0xb96: 0x0018, 0xb97: 0x1ed9, + 0xb98: 0x0018, 0xb99: 0x0018, 0xb9a: 0x0018, 0xb9b: 0x0018, 0xb9c: 0x0018, 0xb9d: 0x0018, + 0xb9e: 0x0018, 0xb9f: 0x000a, 0xba0: 0x03c0, 
0xba1: 0x0340, 0xba2: 0x0340, 0xba3: 0x0340, + 0xba4: 0x03c0, 0xba5: 0x0040, 0xba6: 0x0040, 0xba7: 0x0040, 0xba8: 0x0040, 0xba9: 0x0040, + 0xbaa: 0x0340, 0xbab: 0x0340, 0xbac: 0x0340, 0xbad: 0x0340, 0xbae: 0x0340, 0xbaf: 0x0340, + 0xbb0: 0x1f41, 0xbb1: 0x0f41, 0xbb2: 0x0040, 0xbb3: 0x0040, 0xbb4: 0x1f51, 0xbb5: 0x1f61, + 0xbb6: 0x1f71, 0xbb7: 0x1f81, 0xbb8: 0x1f91, 0xbb9: 0x1fa1, 0xbba: 0x1fb2, 0xbbb: 0x07bd, + 0xbbc: 0x1fc2, 0xbbd: 0x1fd2, 0xbbe: 0x1fe2, 0xbbf: 0x0f71, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x1f41, 0xbc1: 0x00c9, 0xbc2: 0x0069, 0xbc3: 0x0079, 0xbc4: 0x1f51, 0xbc5: 0x1f61, + 0xbc6: 0x1f71, 0xbc7: 0x1f81, 0xbc8: 0x1f91, 0xbc9: 0x1fa1, 0xbca: 0x1fb2, 0xbcb: 0x07d5, + 0xbcc: 0x1fc2, 0xbcd: 0x1fd2, 0xbce: 0x1fe2, 0xbcf: 0x0040, 0xbd0: 0x0039, 0xbd1: 0x0f09, + 0xbd2: 0x00d9, 0xbd3: 0x0369, 0xbd4: 0x0ff9, 0xbd5: 0x0249, 0xbd6: 0x0f51, 0xbd7: 0x0359, + 0xbd8: 0x0f61, 0xbd9: 0x0f71, 0xbda: 0x0f99, 0xbdb: 0x01d9, 0xbdc: 0x0fa9, 0xbdd: 0x0040, + 0xbde: 0x0040, 0xbdf: 0x0040, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, + 0xbe4: 0x0018, 0xbe5: 0x0018, 0xbe6: 0x0018, 0xbe7: 0x0018, 0xbe8: 0x1ff1, 0xbe9: 0x0018, + 0xbea: 0x0018, 0xbeb: 0x0018, 0xbec: 0x0018, 0xbed: 0x0018, 0xbee: 0x0018, 0xbef: 0x0018, + 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x0018, 0xbf4: 0x0018, 0xbf5: 0x0018, + 0xbf6: 0x0018, 0xbf7: 0x0018, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, + 0xbfc: 0x0018, 0xbfd: 0x0018, 0xbfe: 0x0018, 0xbff: 0x0040, + // Block 0x30, offset 0xc00 + 0xc00: 0x07ee, 0xc01: 0x080e, 0xc02: 0x1159, 0xc03: 0x082d, 0xc04: 0x0018, 0xc05: 0x084e, + 0xc06: 0x086e, 0xc07: 0x1011, 0xc08: 0x0018, 0xc09: 0x088d, 0xc0a: 0x0f31, 0xc0b: 0x0249, + 0xc0c: 0x0249, 0xc0d: 0x0249, 0xc0e: 0x0249, 0xc0f: 0x2009, 0xc10: 0x0f41, 0xc11: 0x0f41, + 0xc12: 0x0359, 0xc13: 0x0359, 0xc14: 0x0018, 0xc15: 0x0f71, 0xc16: 0x2021, 0xc17: 0x0018, + 0xc18: 0x0018, 0xc19: 0x0f99, 0xc1a: 0x2039, 0xc1b: 0x0269, 0xc1c: 0x0269, 0xc1d: 0x0269, + 0xc1e: 0x0018, 
0xc1f: 0x0018, 0xc20: 0x2049, 0xc21: 0x08ad, 0xc22: 0x2061, 0xc23: 0x0018, + 0xc24: 0x13d1, 0xc25: 0x0018, 0xc26: 0x2079, 0xc27: 0x0018, 0xc28: 0x13d1, 0xc29: 0x0018, + 0xc2a: 0x0f51, 0xc2b: 0x2091, 0xc2c: 0x0ee9, 0xc2d: 0x1159, 0xc2e: 0x0018, 0xc2f: 0x0f09, + 0xc30: 0x0f09, 0xc31: 0x1199, 0xc32: 0x0040, 0xc33: 0x0f61, 0xc34: 0x00d9, 0xc35: 0x20a9, + 0xc36: 0x20c1, 0xc37: 0x20d9, 0xc38: 0x20f1, 0xc39: 0x0f41, 0xc3a: 0x0018, 0xc3b: 0x08cd, + 0xc3c: 0x2109, 0xc3d: 0x10b1, 0xc3e: 0x10b1, 0xc3f: 0x2109, + // Block 0x31, offset 0xc40 + 0xc40: 0x08ed, 0xc41: 0x0018, 0xc42: 0x0018, 0xc43: 0x0018, 0xc44: 0x0018, 0xc45: 0x0ef9, + 0xc46: 0x0ef9, 0xc47: 0x0f09, 0xc48: 0x0f41, 0xc49: 0x0259, 0xc4a: 0x0018, 0xc4b: 0x0018, + 0xc4c: 0x0018, 0xc4d: 0x0018, 0xc4e: 0x0008, 0xc4f: 0x0018, 0xc50: 0x2121, 0xc51: 0x2151, + 0xc52: 0x2181, 0xc53: 0x21b9, 0xc54: 0x21e9, 0xc55: 0x2219, 0xc56: 0x2249, 0xc57: 0x2279, + 0xc58: 0x22a9, 0xc59: 0x22d9, 0xc5a: 0x2309, 0xc5b: 0x2339, 0xc5c: 0x2369, 0xc5d: 0x2399, + 0xc5e: 0x23c9, 0xc5f: 0x23f9, 0xc60: 0x0f41, 0xc61: 0x2421, 0xc62: 0x0905, 0xc63: 0x2439, + 0xc64: 0x1089, 0xc65: 0x2451, 0xc66: 0x0925, 0xc67: 0x2469, 0xc68: 0x2491, 0xc69: 0x0369, + 0xc6a: 0x24a9, 0xc6b: 0x0945, 0xc6c: 0x0359, 0xc6d: 0x1159, 0xc6e: 0x0ef9, 0xc6f: 0x0f61, + 0xc70: 0x0f41, 0xc71: 0x2421, 0xc72: 0x0965, 0xc73: 0x2439, 0xc74: 0x1089, 0xc75: 0x2451, + 0xc76: 0x0985, 0xc77: 0x2469, 0xc78: 0x2491, 0xc79: 0x0369, 0xc7a: 0x24a9, 0xc7b: 0x09a5, + 0xc7c: 0x0359, 0xc7d: 0x1159, 0xc7e: 0x0ef9, 0xc7f: 0x0f61, + // Block 0x32, offset 0xc80 + 0xc80: 0x0018, 0xc81: 0x0018, 0xc82: 0x0018, 0xc83: 0x0018, 0xc84: 0x0018, 0xc85: 0x0018, + 0xc86: 0x0018, 0xc87: 0x0018, 0xc88: 0x0018, 0xc89: 0x0018, 0xc8a: 0x0018, 0xc8b: 0x0040, + 0xc8c: 0x0040, 0xc8d: 0x0040, 0xc8e: 0x0040, 0xc8f: 0x0040, 0xc90: 0x0040, 0xc91: 0x0040, + 0xc92: 0x0040, 0xc93: 0x0040, 0xc94: 0x0040, 0xc95: 0x0040, 0xc96: 0x0040, 0xc97: 0x0040, + 0xc98: 0x0040, 0xc99: 0x0040, 0xc9a: 0x0040, 0xc9b: 0x0040, 0xc9c: 0x0040, 0xc9d: 
0x0040, + 0xc9e: 0x0040, 0xc9f: 0x0040, 0xca0: 0x00c9, 0xca1: 0x0069, 0xca2: 0x0079, 0xca3: 0x1f51, + 0xca4: 0x1f61, 0xca5: 0x1f71, 0xca6: 0x1f81, 0xca7: 0x1f91, 0xca8: 0x1fa1, 0xca9: 0x2601, + 0xcaa: 0x2619, 0xcab: 0x2631, 0xcac: 0x2649, 0xcad: 0x2661, 0xcae: 0x2679, 0xcaf: 0x2691, + 0xcb0: 0x26a9, 0xcb1: 0x26c1, 0xcb2: 0x26d9, 0xcb3: 0x26f1, 0xcb4: 0x0a06, 0xcb5: 0x0a26, + 0xcb6: 0x0a46, 0xcb7: 0x0a66, 0xcb8: 0x0a86, 0xcb9: 0x0aa6, 0xcba: 0x0ac6, 0xcbb: 0x0ae6, + 0xcbc: 0x0b06, 0xcbd: 0x270a, 0xcbe: 0x2732, 0xcbf: 0x275a, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x2782, 0xcc1: 0x27aa, 0xcc2: 0x27d2, 0xcc3: 0x27fa, 0xcc4: 0x2822, 0xcc5: 0x284a, + 0xcc6: 0x2872, 0xcc7: 0x289a, 0xcc8: 0x0040, 0xcc9: 0x0040, 0xcca: 0x0040, 0xccb: 0x0040, + 0xccc: 0x0040, 0xccd: 0x0040, 0xcce: 0x0040, 0xccf: 0x0040, 0xcd0: 0x0040, 0xcd1: 0x0040, + 0xcd2: 0x0040, 0xcd3: 0x0040, 0xcd4: 0x0040, 0xcd5: 0x0040, 0xcd6: 0x0040, 0xcd7: 0x0040, + 0xcd8: 0x0040, 0xcd9: 0x0040, 0xcda: 0x0040, 0xcdb: 0x0040, 0xcdc: 0x0b26, 0xcdd: 0x0b46, + 0xcde: 0x0b66, 0xcdf: 0x0b86, 0xce0: 0x0ba6, 0xce1: 0x0bc6, 0xce2: 0x0be6, 0xce3: 0x0c06, + 0xce4: 0x0c26, 0xce5: 0x0c46, 0xce6: 0x0c66, 0xce7: 0x0c86, 0xce8: 0x0ca6, 0xce9: 0x0cc6, + 0xcea: 0x0ce6, 0xceb: 0x0d06, 0xcec: 0x0d26, 0xced: 0x0d46, 0xcee: 0x0d66, 0xcef: 0x0d86, + 0xcf0: 0x0da6, 0xcf1: 0x0dc6, 0xcf2: 0x0de6, 0xcf3: 0x0e06, 0xcf4: 0x0e26, 0xcf5: 0x0e46, + 0xcf6: 0x0039, 0xcf7: 0x0ee9, 0xcf8: 0x1159, 0xcf9: 0x0ef9, 0xcfa: 0x0f09, 0xcfb: 0x1199, + 0xcfc: 0x0f31, 0xcfd: 0x0249, 0xcfe: 0x0f41, 0xcff: 0x0259, + // Block 0x34, offset 0xd00 + 0xd00: 0x0f51, 0xd01: 0x0359, 0xd02: 0x0f61, 0xd03: 0x0f71, 0xd04: 0x00d9, 0xd05: 0x0f99, + 0xd06: 0x2039, 0xd07: 0x0269, 0xd08: 0x01d9, 0xd09: 0x0fa9, 0xd0a: 0x0fb9, 0xd0b: 0x1089, + 0xd0c: 0x0279, 0xd0d: 0x0369, 0xd0e: 0x0289, 0xd0f: 0x13d1, 0xd10: 0x0039, 0xd11: 0x0ee9, + 0xd12: 0x1159, 0xd13: 0x0ef9, 0xd14: 0x0f09, 0xd15: 0x1199, 0xd16: 0x0f31, 0xd17: 0x0249, + 0xd18: 0x0f41, 0xd19: 0x0259, 0xd1a: 0x0f51, 0xd1b: 
0x0359, 0xd1c: 0x0f61, 0xd1d: 0x0f71, + 0xd1e: 0x00d9, 0xd1f: 0x0f99, 0xd20: 0x2039, 0xd21: 0x0269, 0xd22: 0x01d9, 0xd23: 0x0fa9, + 0xd24: 0x0fb9, 0xd25: 0x1089, 0xd26: 0x0279, 0xd27: 0x0369, 0xd28: 0x0289, 0xd29: 0x13d1, + 0xd2a: 0x1f41, 0xd2b: 0x0018, 0xd2c: 0x0018, 0xd2d: 0x0018, 0xd2e: 0x0018, 0xd2f: 0x0018, + 0xd30: 0x0018, 0xd31: 0x0018, 0xd32: 0x0018, 0xd33: 0x0018, 0xd34: 0x0018, 0xd35: 0x0018, + 0xd36: 0x0018, 0xd37: 0x0018, 0xd38: 0x0018, 0xd39: 0x0018, 0xd3a: 0x0018, 0xd3b: 0x0018, + 0xd3c: 0x0018, 0xd3d: 0x0018, 0xd3e: 0x0018, 0xd3f: 0x0018, + // Block 0x35, offset 0xd40 + 0xd40: 0x0008, 0xd41: 0x0008, 0xd42: 0x0008, 0xd43: 0x0008, 0xd44: 0x0008, 0xd45: 0x0008, + 0xd46: 0x0008, 0xd47: 0x0008, 0xd48: 0x0008, 0xd49: 0x0008, 0xd4a: 0x0008, 0xd4b: 0x0008, + 0xd4c: 0x0008, 0xd4d: 0x0008, 0xd4e: 0x0008, 0xd4f: 0x0008, 0xd50: 0x0008, 0xd51: 0x0008, + 0xd52: 0x0008, 0xd53: 0x0008, 0xd54: 0x0008, 0xd55: 0x0008, 0xd56: 0x0008, 0xd57: 0x0008, + 0xd58: 0x0008, 0xd59: 0x0008, 0xd5a: 0x0008, 0xd5b: 0x0008, 0xd5c: 0x0008, 0xd5d: 0x0008, + 0xd5e: 0x0008, 0xd5f: 0x0040, 0xd60: 0xe00d, 0xd61: 0x0008, 0xd62: 0x2971, 0xd63: 0x0ebd, + 0xd64: 0x2989, 0xd65: 0x0008, 0xd66: 0x0008, 0xd67: 0xe07d, 0xd68: 0x0008, 0xd69: 0xe01d, + 0xd6a: 0x0008, 0xd6b: 0xe03d, 0xd6c: 0x0008, 0xd6d: 0x0fe1, 0xd6e: 0x1281, 0xd6f: 0x0fc9, + 0xd70: 0x1141, 0xd71: 0x0008, 0xd72: 0xe00d, 0xd73: 0x0008, 0xd74: 0x0008, 0xd75: 0xe01d, + 0xd76: 0x0008, 0xd77: 0x0008, 0xd78: 0x0008, 0xd79: 0x0008, 0xd7a: 0x0008, 0xd7b: 0x0008, + 0xd7c: 0x0259, 0xd7d: 0x1089, 0xd7e: 0x29a1, 0xd7f: 0x29b9, + // Block 0x36, offset 0xd80 + 0xd80: 0xe00d, 0xd81: 0x0008, 0xd82: 0xe00d, 0xd83: 0x0008, 0xd84: 0xe00d, 0xd85: 0x0008, + 0xd86: 0xe00d, 0xd87: 0x0008, 0xd88: 0xe00d, 0xd89: 0x0008, 0xd8a: 0xe00d, 0xd8b: 0x0008, + 0xd8c: 0xe00d, 0xd8d: 0x0008, 0xd8e: 0xe00d, 0xd8f: 0x0008, 0xd90: 0xe00d, 0xd91: 0x0008, + 0xd92: 0xe00d, 0xd93: 0x0008, 0xd94: 0xe00d, 0xd95: 0x0008, 0xd96: 0xe00d, 0xd97: 0x0008, + 0xd98: 0xe00d, 0xd99: 
0x0008, 0xd9a: 0xe00d, 0xd9b: 0x0008, 0xd9c: 0xe00d, 0xd9d: 0x0008, + 0xd9e: 0xe00d, 0xd9f: 0x0008, 0xda0: 0xe00d, 0xda1: 0x0008, 0xda2: 0xe00d, 0xda3: 0x0008, + 0xda4: 0x0008, 0xda5: 0x0018, 0xda6: 0x0018, 0xda7: 0x0018, 0xda8: 0x0018, 0xda9: 0x0018, + 0xdaa: 0x0018, 0xdab: 0xe03d, 0xdac: 0x0008, 0xdad: 0xe01d, 0xdae: 0x0008, 0xdaf: 0x1308, + 0xdb0: 0x1308, 0xdb1: 0x1308, 0xdb2: 0xe00d, 0xdb3: 0x0008, 0xdb4: 0x0040, 0xdb5: 0x0040, + 0xdb6: 0x0040, 0xdb7: 0x0040, 0xdb8: 0x0040, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, + 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x26fd, 0xdc1: 0x271d, 0xdc2: 0x273d, 0xdc3: 0x275d, 0xdc4: 0x277d, 0xdc5: 0x279d, + 0xdc6: 0x27bd, 0xdc7: 0x27dd, 0xdc8: 0x27fd, 0xdc9: 0x281d, 0xdca: 0x283d, 0xdcb: 0x285d, + 0xdcc: 0x287d, 0xdcd: 0x289d, 0xdce: 0x28bd, 0xdcf: 0x28dd, 0xdd0: 0x28fd, 0xdd1: 0x291d, + 0xdd2: 0x293d, 0xdd3: 0x295d, 0xdd4: 0x297d, 0xdd5: 0x299d, 0xdd6: 0x0040, 0xdd7: 0x0040, + 0xdd8: 0x0040, 0xdd9: 0x0040, 0xdda: 0x0040, 0xddb: 0x0040, 0xddc: 0x0040, 0xddd: 0x0040, + 0xdde: 0x0040, 0xddf: 0x0040, 0xde0: 0x0040, 0xde1: 0x0040, 0xde2: 0x0040, 0xde3: 0x0040, + 0xde4: 0x0040, 0xde5: 0x0040, 0xde6: 0x0040, 0xde7: 0x0040, 0xde8: 0x0040, 0xde9: 0x0040, + 0xdea: 0x0040, 0xdeb: 0x0040, 0xdec: 0x0040, 0xded: 0x0040, 0xdee: 0x0040, 0xdef: 0x0040, + 0xdf0: 0x0040, 0xdf1: 0x0040, 0xdf2: 0x0040, 0xdf3: 0x0040, 0xdf4: 0x0040, 0xdf5: 0x0040, + 0xdf6: 0x0040, 0xdf7: 0x0040, 0xdf8: 0x0040, 0xdf9: 0x0040, 0xdfa: 0x0040, 0xdfb: 0x0040, + 0xdfc: 0x0040, 0xdfd: 0x0040, 0xdfe: 0x0040, 0xdff: 0x0040, + // Block 0x38, offset 0xe00 + 0xe00: 0x000a, 0xe01: 0x0018, 0xe02: 0x29d1, 0xe03: 0x0018, 0xe04: 0x0018, 0xe05: 0x0008, + 0xe06: 0x0008, 0xe07: 0x0008, 0xe08: 0x0018, 0xe09: 0x0018, 0xe0a: 0x0018, 0xe0b: 0x0018, + 0xe0c: 0x0018, 0xe0d: 0x0018, 0xe0e: 0x0018, 0xe0f: 0x0018, 0xe10: 0x0018, 0xe11: 0x0018, + 0xe12: 0x0018, 0xe13: 0x0018, 0xe14: 0x0018, 0xe15: 0x0018, 0xe16: 0x0018, 0xe17: 0x0018, 
+ 0xe18: 0x0018, 0xe19: 0x0018, 0xe1a: 0x0018, 0xe1b: 0x0018, 0xe1c: 0x0018, 0xe1d: 0x0018, + 0xe1e: 0x0018, 0xe1f: 0x0018, 0xe20: 0x0018, 0xe21: 0x0018, 0xe22: 0x0018, 0xe23: 0x0018, + 0xe24: 0x0018, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, + 0xe2a: 0x1308, 0xe2b: 0x1308, 0xe2c: 0x1308, 0xe2d: 0x1308, 0xe2e: 0x1018, 0xe2f: 0x1018, + 0xe30: 0x0018, 0xe31: 0x0018, 0xe32: 0x0018, 0xe33: 0x0018, 0xe34: 0x0018, 0xe35: 0x0018, + 0xe36: 0xe125, 0xe37: 0x0018, 0xe38: 0x29bd, 0xe39: 0x29dd, 0xe3a: 0x29fd, 0xe3b: 0x0018, + 0xe3c: 0x0008, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018, + // Block 0x39, offset 0xe40 + 0xe40: 0x2b3d, 0xe41: 0x2b5d, 0xe42: 0x2b7d, 0xe43: 0x2b9d, 0xe44: 0x2bbd, 0xe45: 0x2bdd, + 0xe46: 0x2bdd, 0xe47: 0x2bdd, 0xe48: 0x2bfd, 0xe49: 0x2bfd, 0xe4a: 0x2bfd, 0xe4b: 0x2bfd, + 0xe4c: 0x2c1d, 0xe4d: 0x2c1d, 0xe4e: 0x2c1d, 0xe4f: 0x2c3d, 0xe50: 0x2c5d, 0xe51: 0x2c5d, + 0xe52: 0x2a7d, 0xe53: 0x2a7d, 0xe54: 0x2c5d, 0xe55: 0x2c5d, 0xe56: 0x2c7d, 0xe57: 0x2c7d, + 0xe58: 0x2c5d, 0xe59: 0x2c5d, 0xe5a: 0x2a7d, 0xe5b: 0x2a7d, 0xe5c: 0x2c5d, 0xe5d: 0x2c5d, + 0xe5e: 0x2c3d, 0xe5f: 0x2c3d, 0xe60: 0x2c9d, 0xe61: 0x2c9d, 0xe62: 0x2cbd, 0xe63: 0x2cbd, + 0xe64: 0x0040, 0xe65: 0x2cdd, 0xe66: 0x2cfd, 0xe67: 0x2d1d, 0xe68: 0x2d1d, 0xe69: 0x2d3d, + 0xe6a: 0x2d5d, 0xe6b: 0x2d7d, 0xe6c: 0x2d9d, 0xe6d: 0x2dbd, 0xe6e: 0x2ddd, 0xe6f: 0x2dfd, + 0xe70: 0x2e1d, 0xe71: 0x2e3d, 0xe72: 0x2e3d, 0xe73: 0x2e5d, 0xe74: 0x2e7d, 0xe75: 0x2e7d, + 0xe76: 0x2e9d, 0xe77: 0x2ebd, 0xe78: 0x2e5d, 0xe79: 0x2edd, 0xe7a: 0x2efd, 0xe7b: 0x2edd, + 0xe7c: 0x2e5d, 0xe7d: 0x2f1d, 0xe7e: 0x2f3d, 0xe7f: 0x2f5d, + // Block 0x3a, offset 0xe80 + 0xe80: 0x2f7d, 0xe81: 0x2f9d, 0xe82: 0x2cfd, 0xe83: 0x2cdd, 0xe84: 0x2fbd, 0xe85: 0x2fdd, + 0xe86: 0x2ffd, 0xe87: 0x301d, 0xe88: 0x303d, 0xe89: 0x305d, 0xe8a: 0x307d, 0xe8b: 0x309d, + 0xe8c: 0x30bd, 0xe8d: 0x30dd, 0xe8e: 0x30fd, 0xe8f: 0x0040, 0xe90: 0x0018, 0xe91: 0x0018, + 0xe92: 0x311d, 0xe93: 0x313d, 0xe94: 0x315d, 0xe95: 0x317d, 
0xe96: 0x319d, 0xe97: 0x31bd, + 0xe98: 0x31dd, 0xe99: 0x31fd, 0xe9a: 0x321d, 0xe9b: 0x323d, 0xe9c: 0x315d, 0xe9d: 0x325d, + 0xe9e: 0x327d, 0xe9f: 0x329d, 0xea0: 0x0008, 0xea1: 0x0008, 0xea2: 0x0008, 0xea3: 0x0008, + 0xea4: 0x0008, 0xea5: 0x0008, 0xea6: 0x0008, 0xea7: 0x0008, 0xea8: 0x0008, 0xea9: 0x0008, + 0xeaa: 0x0008, 0xeab: 0x0008, 0xeac: 0x0008, 0xead: 0x0008, 0xeae: 0x0008, 0xeaf: 0x0008, + 0xeb0: 0x0008, 0xeb1: 0x0008, 0xeb2: 0x0008, 0xeb3: 0x0008, 0xeb4: 0x0008, 0xeb5: 0x0008, + 0xeb6: 0x0008, 0xeb7: 0x0008, 0xeb8: 0x0008, 0xeb9: 0x0008, 0xeba: 0x0008, 0xebb: 0x0040, + 0xebc: 0x0040, 0xebd: 0x0040, 0xebe: 0x0040, 0xebf: 0x0040, + // Block 0x3b, offset 0xec0 + 0xec0: 0x36a2, 0xec1: 0x36d2, 0xec2: 0x3702, 0xec3: 0x3732, 0xec4: 0x32bd, 0xec5: 0x32dd, + 0xec6: 0x32fd, 0xec7: 0x331d, 0xec8: 0x0018, 0xec9: 0x0018, 0xeca: 0x0018, 0xecb: 0x0018, + 0xecc: 0x0018, 0xecd: 0x0018, 0xece: 0x0018, 0xecf: 0x0018, 0xed0: 0x333d, 0xed1: 0x3761, + 0xed2: 0x3779, 0xed3: 0x3791, 0xed4: 0x37a9, 0xed5: 0x37c1, 0xed6: 0x37d9, 0xed7: 0x37f1, + 0xed8: 0x3809, 0xed9: 0x3821, 0xeda: 0x3839, 0xedb: 0x3851, 0xedc: 0x3869, 0xedd: 0x3881, + 0xede: 0x3899, 0xedf: 0x38b1, 0xee0: 0x335d, 0xee1: 0x337d, 0xee2: 0x339d, 0xee3: 0x33bd, + 0xee4: 0x33dd, 0xee5: 0x33dd, 0xee6: 0x33fd, 0xee7: 0x341d, 0xee8: 0x343d, 0xee9: 0x345d, + 0xeea: 0x347d, 0xeeb: 0x349d, 0xeec: 0x34bd, 0xeed: 0x34dd, 0xeee: 0x34fd, 0xeef: 0x351d, + 0xef0: 0x353d, 0xef1: 0x355d, 0xef2: 0x357d, 0xef3: 0x359d, 0xef4: 0x35bd, 0xef5: 0x35dd, + 0xef6: 0x35fd, 0xef7: 0x361d, 0xef8: 0x363d, 0xef9: 0x365d, 0xefa: 0x367d, 0xefb: 0x369d, + 0xefc: 0x38c9, 0xefd: 0x3901, 0xefe: 0x36bd, 0xeff: 0x0018, + // Block 0x3c, offset 0xf00 + 0xf00: 0x36dd, 0xf01: 0x36fd, 0xf02: 0x371d, 0xf03: 0x373d, 0xf04: 0x375d, 0xf05: 0x377d, + 0xf06: 0x379d, 0xf07: 0x37bd, 0xf08: 0x37dd, 0xf09: 0x37fd, 0xf0a: 0x381d, 0xf0b: 0x383d, + 0xf0c: 0x385d, 0xf0d: 0x387d, 0xf0e: 0x389d, 0xf0f: 0x38bd, 0xf10: 0x38dd, 0xf11: 0x38fd, + 0xf12: 0x391d, 0xf13: 0x393d, 
0xf14: 0x395d, 0xf15: 0x397d, 0xf16: 0x399d, 0xf17: 0x39bd, + 0xf18: 0x39dd, 0xf19: 0x39fd, 0xf1a: 0x3a1d, 0xf1b: 0x3a3d, 0xf1c: 0x3a5d, 0xf1d: 0x3a7d, + 0xf1e: 0x3a9d, 0xf1f: 0x3abd, 0xf20: 0x3add, 0xf21: 0x3afd, 0xf22: 0x3b1d, 0xf23: 0x3b3d, + 0xf24: 0x3b5d, 0xf25: 0x3b7d, 0xf26: 0x127d, 0xf27: 0x3b9d, 0xf28: 0x3bbd, 0xf29: 0x3bdd, + 0xf2a: 0x3bfd, 0xf2b: 0x3c1d, 0xf2c: 0x3c3d, 0xf2d: 0x3c5d, 0xf2e: 0x239d, 0xf2f: 0x3c7d, + 0xf30: 0x3c9d, 0xf31: 0x3939, 0xf32: 0x3951, 0xf33: 0x3969, 0xf34: 0x3981, 0xf35: 0x3999, + 0xf36: 0x39b1, 0xf37: 0x39c9, 0xf38: 0x39e1, 0xf39: 0x39f9, 0xf3a: 0x3a11, 0xf3b: 0x3a29, + 0xf3c: 0x3a41, 0xf3d: 0x3a59, 0xf3e: 0x3a71, 0xf3f: 0x3a89, + // Block 0x3d, offset 0xf40 + 0xf40: 0x3aa1, 0xf41: 0x3ac9, 0xf42: 0x3af1, 0xf43: 0x3b19, 0xf44: 0x3b41, 0xf45: 0x3b69, + 0xf46: 0x3b91, 0xf47: 0x3bb9, 0xf48: 0x3be1, 0xf49: 0x3c09, 0xf4a: 0x3c39, 0xf4b: 0x3c69, + 0xf4c: 0x3c99, 0xf4d: 0x3cbd, 0xf4e: 0x3cb1, 0xf4f: 0x3cdd, 0xf50: 0x3cfd, 0xf51: 0x3d15, + 0xf52: 0x3d2d, 0xf53: 0x3d45, 0xf54: 0x3d5d, 0xf55: 0x3d5d, 0xf56: 0x3d45, 0xf57: 0x3d75, + 0xf58: 0x07bd, 0xf59: 0x3d8d, 0xf5a: 0x3da5, 0xf5b: 0x3dbd, 0xf5c: 0x3dd5, 0xf5d: 0x3ded, + 0xf5e: 0x3e05, 0xf5f: 0x3e1d, 0xf60: 0x3e35, 0xf61: 0x3e4d, 0xf62: 0x3e65, 0xf63: 0x3e7d, + 0xf64: 0x3e95, 0xf65: 0x3e95, 0xf66: 0x3ead, 0xf67: 0x3ead, 0xf68: 0x3ec5, 0xf69: 0x3ec5, + 0xf6a: 0x3edd, 0xf6b: 0x3ef5, 0xf6c: 0x3f0d, 0xf6d: 0x3f25, 0xf6e: 0x3f3d, 0xf6f: 0x3f3d, + 0xf70: 0x3f55, 0xf71: 0x3f55, 0xf72: 0x3f55, 0xf73: 0x3f6d, 0xf74: 0x3f85, 0xf75: 0x3f9d, + 0xf76: 0x3fb5, 0xf77: 0x3f9d, 0xf78: 0x3fcd, 0xf79: 0x3fe5, 0xf7a: 0x3f6d, 0xf7b: 0x3ffd, + 0xf7c: 0x4015, 0xf7d: 0x4015, 0xf7e: 0x4015, 0xf7f: 0x0040, + // Block 0x3e, offset 0xf80 + 0xf80: 0x3cc9, 0xf81: 0x3d31, 0xf82: 0x3d99, 0xf83: 0x3e01, 0xf84: 0x3e51, 0xf85: 0x3eb9, + 0xf86: 0x3f09, 0xf87: 0x3f59, 0xf88: 0x3fd9, 0xf89: 0x4041, 0xf8a: 0x4091, 0xf8b: 0x40e1, + 0xf8c: 0x4131, 0xf8d: 0x4199, 0xf8e: 0x4201, 0xf8f: 0x4251, 0xf90: 0x42a1, 0xf91: 0x42d9, + 
0xf92: 0x4329, 0xf93: 0x4391, 0xf94: 0x43f9, 0xf95: 0x4431, 0xf96: 0x44b1, 0xf97: 0x4549, + 0xf98: 0x45c9, 0xf99: 0x4619, 0xf9a: 0x4699, 0xf9b: 0x4719, 0xf9c: 0x4781, 0xf9d: 0x47d1, + 0xf9e: 0x4821, 0xf9f: 0x4871, 0xfa0: 0x48d9, 0xfa1: 0x4959, 0xfa2: 0x49c1, 0xfa3: 0x4a11, + 0xfa4: 0x4a61, 0xfa5: 0x4ab1, 0xfa6: 0x4ae9, 0xfa7: 0x4b21, 0xfa8: 0x4b59, 0xfa9: 0x4b91, + 0xfaa: 0x4be1, 0xfab: 0x4c31, 0xfac: 0x4cb1, 0xfad: 0x4d01, 0xfae: 0x4d69, 0xfaf: 0x4de9, + 0xfb0: 0x4e39, 0xfb1: 0x4e71, 0xfb2: 0x4ea9, 0xfb3: 0x4f29, 0xfb4: 0x4f91, 0xfb5: 0x5011, + 0xfb6: 0x5061, 0xfb7: 0x50e1, 0xfb8: 0x5119, 0xfb9: 0x5169, 0xfba: 0x51b9, 0xfbb: 0x5209, + 0xfbc: 0x5259, 0xfbd: 0x52a9, 0xfbe: 0x5311, 0xfbf: 0x5361, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x5399, 0xfc1: 0x53e9, 0xfc2: 0x5439, 0xfc3: 0x5489, 0xfc4: 0x54f1, 0xfc5: 0x5541, + 0xfc6: 0x5591, 0xfc7: 0x55e1, 0xfc8: 0x5661, 0xfc9: 0x56c9, 0xfca: 0x5701, 0xfcb: 0x5781, + 0xfcc: 0x57b9, 0xfcd: 0x5821, 0xfce: 0x5889, 0xfcf: 0x58d9, 0xfd0: 0x5929, 0xfd1: 0x5979, + 0xfd2: 0x59e1, 0xfd3: 0x5a19, 0xfd4: 0x5a69, 0xfd5: 0x5ad1, 0xfd6: 0x5b09, 0xfd7: 0x5b89, + 0xfd8: 0x5bd9, 0xfd9: 0x5c01, 0xfda: 0x5c29, 0xfdb: 0x5c51, 0xfdc: 0x5c79, 0xfdd: 0x5ca1, + 0xfde: 0x5cc9, 0xfdf: 0x5cf1, 0xfe0: 0x5d19, 0xfe1: 0x5d41, 0xfe2: 0x5d69, 0xfe3: 0x5d99, + 0xfe4: 0x5dc9, 0xfe5: 0x5df9, 0xfe6: 0x5e29, 0xfe7: 0x5e59, 0xfe8: 0x5e89, 0xfe9: 0x5eb9, + 0xfea: 0x5ee9, 0xfeb: 0x5f19, 0xfec: 0x5f49, 0xfed: 0x5f79, 0xfee: 0x5fa9, 0xfef: 0x5fd9, + 0xff0: 0x6009, 0xff1: 0x402d, 0xff2: 0x6039, 0xff3: 0x6051, 0xff4: 0x404d, 0xff5: 0x6069, + 0xff6: 0x6081, 0xff7: 0x6099, 0xff8: 0x406d, 0xff9: 0x406d, 0xffa: 0x60b1, 0xffb: 0x60c9, + 0xffc: 0x6101, 0xffd: 0x6139, 0xffe: 0x6171, 0xfff: 0x61a9, + // Block 0x40, offset 0x1000 + 0x1000: 0x6211, 0x1001: 0x6229, 0x1002: 0x408d, 0x1003: 0x6241, 0x1004: 0x6259, 0x1005: 0x6271, + 0x1006: 0x6289, 0x1007: 0x62a1, 0x1008: 0x40ad, 0x1009: 0x62b9, 0x100a: 0x62e1, 0x100b: 0x62f9, + 0x100c: 0x40cd, 0x100d: 0x40cd, 0x100e: 0x6311, 
0x100f: 0x6329, 0x1010: 0x6341, 0x1011: 0x40ed, + 0x1012: 0x410d, 0x1013: 0x412d, 0x1014: 0x414d, 0x1015: 0x416d, 0x1016: 0x6359, 0x1017: 0x6371, + 0x1018: 0x6389, 0x1019: 0x63a1, 0x101a: 0x63b9, 0x101b: 0x418d, 0x101c: 0x63d1, 0x101d: 0x63e9, + 0x101e: 0x6401, 0x101f: 0x41ad, 0x1020: 0x41cd, 0x1021: 0x6419, 0x1022: 0x41ed, 0x1023: 0x420d, + 0x1024: 0x422d, 0x1025: 0x6431, 0x1026: 0x424d, 0x1027: 0x6449, 0x1028: 0x6479, 0x1029: 0x6211, + 0x102a: 0x426d, 0x102b: 0x428d, 0x102c: 0x42ad, 0x102d: 0x42cd, 0x102e: 0x64b1, 0x102f: 0x64f1, + 0x1030: 0x6539, 0x1031: 0x6551, 0x1032: 0x42ed, 0x1033: 0x6569, 0x1034: 0x6581, 0x1035: 0x6599, + 0x1036: 0x430d, 0x1037: 0x65b1, 0x1038: 0x65c9, 0x1039: 0x65b1, 0x103a: 0x65e1, 0x103b: 0x65f9, + 0x103c: 0x432d, 0x103d: 0x6611, 0x103e: 0x6629, 0x103f: 0x6611, + // Block 0x41, offset 0x1040 + 0x1040: 0x434d, 0x1041: 0x436d, 0x1042: 0x0040, 0x1043: 0x6641, 0x1044: 0x6659, 0x1045: 0x6671, + 0x1046: 0x6689, 0x1047: 0x0040, 0x1048: 0x66c1, 0x1049: 0x66d9, 0x104a: 0x66f1, 0x104b: 0x6709, + 0x104c: 0x6721, 0x104d: 0x6739, 0x104e: 0x6401, 0x104f: 0x6751, 0x1050: 0x6769, 0x1051: 0x6781, + 0x1052: 0x438d, 0x1053: 0x6799, 0x1054: 0x6289, 0x1055: 0x43ad, 0x1056: 0x43cd, 0x1057: 0x67b1, + 0x1058: 0x0040, 0x1059: 0x43ed, 0x105a: 0x67c9, 0x105b: 0x67e1, 0x105c: 0x67f9, 0x105d: 0x6811, + 0x105e: 0x6829, 0x105f: 0x6859, 0x1060: 0x6889, 0x1061: 0x68b1, 0x1062: 0x68d9, 0x1063: 0x6901, + 0x1064: 0x6929, 0x1065: 0x6951, 0x1066: 0x6979, 0x1067: 0x69a1, 0x1068: 0x69c9, 0x1069: 0x69f1, + 0x106a: 0x6a21, 0x106b: 0x6a51, 0x106c: 0x6a81, 0x106d: 0x6ab1, 0x106e: 0x6ae1, 0x106f: 0x6b11, + 0x1070: 0x6b41, 0x1071: 0x6b71, 0x1072: 0x6ba1, 0x1073: 0x6bd1, 0x1074: 0x6c01, 0x1075: 0x6c31, + 0x1076: 0x6c61, 0x1077: 0x6c91, 0x1078: 0x6cc1, 0x1079: 0x6cf1, 0x107a: 0x6d21, 0x107b: 0x6d51, + 0x107c: 0x6d81, 0x107d: 0x6db1, 0x107e: 0x6de1, 0x107f: 0x440d, + // Block 0x42, offset 0x1080 + 0x1080: 0xe00d, 0x1081: 0x0008, 0x1082: 0xe00d, 0x1083: 0x0008, 0x1084: 0xe00d, 0x1085: 
0x0008, + 0x1086: 0xe00d, 0x1087: 0x0008, 0x1088: 0xe00d, 0x1089: 0x0008, 0x108a: 0xe00d, 0x108b: 0x0008, + 0x108c: 0xe00d, 0x108d: 0x0008, 0x108e: 0xe00d, 0x108f: 0x0008, 0x1090: 0xe00d, 0x1091: 0x0008, + 0x1092: 0xe00d, 0x1093: 0x0008, 0x1094: 0xe00d, 0x1095: 0x0008, 0x1096: 0xe00d, 0x1097: 0x0008, + 0x1098: 0xe00d, 0x1099: 0x0008, 0x109a: 0xe00d, 0x109b: 0x0008, 0x109c: 0xe00d, 0x109d: 0x0008, + 0x109e: 0xe00d, 0x109f: 0x0008, 0x10a0: 0xe00d, 0x10a1: 0x0008, 0x10a2: 0xe00d, 0x10a3: 0x0008, + 0x10a4: 0xe00d, 0x10a5: 0x0008, 0x10a6: 0xe00d, 0x10a7: 0x0008, 0x10a8: 0xe00d, 0x10a9: 0x0008, + 0x10aa: 0xe00d, 0x10ab: 0x0008, 0x10ac: 0xe00d, 0x10ad: 0x0008, 0x10ae: 0x0008, 0x10af: 0x1308, + 0x10b0: 0x1318, 0x10b1: 0x1318, 0x10b2: 0x1318, 0x10b3: 0x0018, 0x10b4: 0x1308, 0x10b5: 0x1308, + 0x10b6: 0x1308, 0x10b7: 0x1308, 0x10b8: 0x1308, 0x10b9: 0x1308, 0x10ba: 0x1308, 0x10bb: 0x1308, + 0x10bc: 0x1308, 0x10bd: 0x1308, 0x10be: 0x0018, 0x10bf: 0x0008, + // Block 0x43, offset 0x10c0 + 0x10c0: 0xe00d, 0x10c1: 0x0008, 0x10c2: 0xe00d, 0x10c3: 0x0008, 0x10c4: 0xe00d, 0x10c5: 0x0008, + 0x10c6: 0xe00d, 0x10c7: 0x0008, 0x10c8: 0xe00d, 0x10c9: 0x0008, 0x10ca: 0xe00d, 0x10cb: 0x0008, + 0x10cc: 0xe00d, 0x10cd: 0x0008, 0x10ce: 0xe00d, 0x10cf: 0x0008, 0x10d0: 0xe00d, 0x10d1: 0x0008, + 0x10d2: 0xe00d, 0x10d3: 0x0008, 0x10d4: 0xe00d, 0x10d5: 0x0008, 0x10d6: 0xe00d, 0x10d7: 0x0008, + 0x10d8: 0xe00d, 0x10d9: 0x0008, 0x10da: 0xe00d, 0x10db: 0x0008, 0x10dc: 0x0ea1, 0x10dd: 0x6e11, + 0x10de: 0x1308, 0x10df: 0x1308, 0x10e0: 0x0008, 0x10e1: 0x0008, 0x10e2: 0x0008, 0x10e3: 0x0008, + 0x10e4: 0x0008, 0x10e5: 0x0008, 0x10e6: 0x0008, 0x10e7: 0x0008, 0x10e8: 0x0008, 0x10e9: 0x0008, + 0x10ea: 0x0008, 0x10eb: 0x0008, 0x10ec: 0x0008, 0x10ed: 0x0008, 0x10ee: 0x0008, 0x10ef: 0x0008, + 0x10f0: 0x0008, 0x10f1: 0x0008, 0x10f2: 0x0008, 0x10f3: 0x0008, 0x10f4: 0x0008, 0x10f5: 0x0008, + 0x10f6: 0x0008, 0x10f7: 0x0008, 0x10f8: 0x0008, 0x10f9: 0x0008, 0x10fa: 0x0008, 0x10fb: 0x0008, + 0x10fc: 0x0008, 0x10fd: 
0x0008, 0x10fe: 0x0008, 0x10ff: 0x0008, + // Block 0x44, offset 0x1100 + 0x1100: 0x0018, 0x1101: 0x0018, 0x1102: 0x0018, 0x1103: 0x0018, 0x1104: 0x0018, 0x1105: 0x0018, + 0x1106: 0x0018, 0x1107: 0x0018, 0x1108: 0x0018, 0x1109: 0x0018, 0x110a: 0x0018, 0x110b: 0x0018, + 0x110c: 0x0018, 0x110d: 0x0018, 0x110e: 0x0018, 0x110f: 0x0018, 0x1110: 0x0018, 0x1111: 0x0018, + 0x1112: 0x0018, 0x1113: 0x0018, 0x1114: 0x0018, 0x1115: 0x0018, 0x1116: 0x0018, 0x1117: 0x0008, + 0x1118: 0x0008, 0x1119: 0x0008, 0x111a: 0x0008, 0x111b: 0x0008, 0x111c: 0x0008, 0x111d: 0x0008, + 0x111e: 0x0008, 0x111f: 0x0008, 0x1120: 0x0018, 0x1121: 0x0018, 0x1122: 0xe00d, 0x1123: 0x0008, + 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, + 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0xe00d, 0x112f: 0x0008, + 0x1130: 0x0008, 0x1131: 0x0008, 0x1132: 0xe00d, 0x1133: 0x0008, 0x1134: 0xe00d, 0x1135: 0x0008, + 0x1136: 0xe00d, 0x1137: 0x0008, 0x1138: 0xe00d, 0x1139: 0x0008, 0x113a: 0xe00d, 0x113b: 0x0008, + 0x113c: 0xe00d, 0x113d: 0x0008, 0x113e: 0xe00d, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, + 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, + 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, + 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, + 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0xe00d, 0x115d: 0x0008, + 0x115e: 0xe00d, 0x115f: 0x0008, 0x1160: 0xe00d, 0x1161: 0x0008, 0x1162: 0xe00d, 0x1163: 0x0008, + 0x1164: 0xe00d, 0x1165: 0x0008, 0x1166: 0xe00d, 0x1167: 0x0008, 0x1168: 0xe00d, 0x1169: 0x0008, + 0x116a: 0xe00d, 0x116b: 0x0008, 0x116c: 0xe00d, 0x116d: 0x0008, 0x116e: 0xe00d, 0x116f: 0x0008, + 0x1170: 0xe0fd, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 
0x1174: 0x0008, 0x1175: 0x0008, + 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0xe01d, 0x117a: 0x0008, 0x117b: 0xe03d, + 0x117c: 0x0008, 0x117d: 0x442d, 0x117e: 0xe00d, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0xe00d, 0x1181: 0x0008, 0x1182: 0xe00d, 0x1183: 0x0008, 0x1184: 0xe00d, 0x1185: 0x0008, + 0x1186: 0xe00d, 0x1187: 0x0008, 0x1188: 0x0008, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0xe03d, + 0x118c: 0x0008, 0x118d: 0x11d9, 0x118e: 0x0008, 0x118f: 0x0008, 0x1190: 0xe00d, 0x1191: 0x0008, + 0x1192: 0xe00d, 0x1193: 0x0008, 0x1194: 0x0008, 0x1195: 0x0008, 0x1196: 0xe00d, 0x1197: 0x0008, + 0x1198: 0xe00d, 0x1199: 0x0008, 0x119a: 0xe00d, 0x119b: 0x0008, 0x119c: 0xe00d, 0x119d: 0x0008, + 0x119e: 0xe00d, 0x119f: 0x0008, 0x11a0: 0xe00d, 0x11a1: 0x0008, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0x6e29, 0x11ab: 0x1029, 0x11ac: 0x11c1, 0x11ad: 0x6e41, 0x11ae: 0x1221, 0x11af: 0x0040, + 0x11b0: 0x6e59, 0x11b1: 0x6e71, 0x11b2: 0x1239, 0x11b3: 0x444d, 0x11b4: 0xe00d, 0x11b5: 0x0008, + 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0x0040, 0x11b9: 0x0040, 0x11ba: 0x0040, 0x11bb: 0x0040, + 0x11bc: 0x0040, 0x11bd: 0x0040, 0x11be: 0x0040, 0x11bf: 0x0040, + // Block 0x47, offset 0x11c0 + 0x11c0: 0x64d5, 0x11c1: 0x64f5, 0x11c2: 0x6515, 0x11c3: 0x6535, 0x11c4: 0x6555, 0x11c5: 0x6575, + 0x11c6: 0x6595, 0x11c7: 0x65b5, 0x11c8: 0x65d5, 0x11c9: 0x65f5, 0x11ca: 0x6615, 0x11cb: 0x6635, + 0x11cc: 0x6655, 0x11cd: 0x6675, 0x11ce: 0x0008, 0x11cf: 0x0008, 0x11d0: 0x6695, 0x11d1: 0x0008, + 0x11d2: 0x66b5, 0x11d3: 0x0008, 0x11d4: 0x0008, 0x11d5: 0x66d5, 0x11d6: 0x66f5, 0x11d7: 0x6715, + 0x11d8: 0x6735, 0x11d9: 0x6755, 0x11da: 0x6775, 0x11db: 0x6795, 0x11dc: 0x67b5, 0x11dd: 0x67d5, + 0x11de: 0x67f5, 0x11df: 0x0008, 0x11e0: 0x6815, 0x11e1: 0x0008, 0x11e2: 0x6835, 0x11e3: 0x0008, + 0x11e4: 0x0008, 0x11e5: 0x6855, 0x11e6: 0x6875, 0x11e7: 0x0008, 0x11e8: 0x0008, 0x11e9: 0x0008, + 0x11ea: 
0x6895, 0x11eb: 0x68b5, 0x11ec: 0x68d5, 0x11ed: 0x68f5, 0x11ee: 0x6915, 0x11ef: 0x6935, + 0x11f0: 0x6955, 0x11f1: 0x6975, 0x11f2: 0x6995, 0x11f3: 0x69b5, 0x11f4: 0x69d5, 0x11f5: 0x69f5, + 0x11f6: 0x6a15, 0x11f7: 0x6a35, 0x11f8: 0x6a55, 0x11f9: 0x6a75, 0x11fa: 0x6a95, 0x11fb: 0x6ab5, + 0x11fc: 0x6ad5, 0x11fd: 0x6af5, 0x11fe: 0x6b15, 0x11ff: 0x6b35, + // Block 0x48, offset 0x1200 + 0x1200: 0x7a95, 0x1201: 0x7ab5, 0x1202: 0x7ad5, 0x1203: 0x7af5, 0x1204: 0x7b15, 0x1205: 0x7b35, + 0x1206: 0x7b55, 0x1207: 0x7b75, 0x1208: 0x7b95, 0x1209: 0x7bb5, 0x120a: 0x7bd5, 0x120b: 0x7bf5, + 0x120c: 0x7c15, 0x120d: 0x7c35, 0x120e: 0x7c55, 0x120f: 0x6ec9, 0x1210: 0x6ef1, 0x1211: 0x6f19, + 0x1212: 0x7c75, 0x1213: 0x7c95, 0x1214: 0x7cb5, 0x1215: 0x6f41, 0x1216: 0x6f69, 0x1217: 0x6f91, + 0x1218: 0x7cd5, 0x1219: 0x7cf5, 0x121a: 0x0040, 0x121b: 0x0040, 0x121c: 0x0040, 0x121d: 0x0040, + 0x121e: 0x0040, 0x121f: 0x0040, 0x1220: 0x0040, 0x1221: 0x0040, 0x1222: 0x0040, 0x1223: 0x0040, + 0x1224: 0x0040, 0x1225: 0x0040, 0x1226: 0x0040, 0x1227: 0x0040, 0x1228: 0x0040, 0x1229: 0x0040, + 0x122a: 0x0040, 0x122b: 0x0040, 0x122c: 0x0040, 0x122d: 0x0040, 0x122e: 0x0040, 0x122f: 0x0040, + 0x1230: 0x0040, 0x1231: 0x0040, 0x1232: 0x0040, 0x1233: 0x0040, 0x1234: 0x0040, 0x1235: 0x0040, + 0x1236: 0x0040, 0x1237: 0x0040, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040, + 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040, + // Block 0x49, offset 0x1240 + 0x1240: 0x6fb9, 0x1241: 0x6fd1, 0x1242: 0x6fe9, 0x1243: 0x7d15, 0x1244: 0x7d35, 0x1245: 0x7001, + 0x1246: 0x7001, 0x1247: 0x0040, 0x1248: 0x0040, 0x1249: 0x0040, 0x124a: 0x0040, 0x124b: 0x0040, + 0x124c: 0x0040, 0x124d: 0x0040, 0x124e: 0x0040, 0x124f: 0x0040, 0x1250: 0x0040, 0x1251: 0x0040, + 0x1252: 0x0040, 0x1253: 0x7019, 0x1254: 0x7041, 0x1255: 0x7069, 0x1256: 0x7091, 0x1257: 0x70b9, + 0x1258: 0x0040, 0x1259: 0x0040, 0x125a: 0x0040, 0x125b: 0x0040, 0x125c: 0x0040, 0x125d: 0x70e1, + 0x125e: 0x1308, 0x125f: 0x7109, 0x1260: 0x7131, 
0x1261: 0x20a9, 0x1262: 0x20f1, 0x1263: 0x7149, + 0x1264: 0x7161, 0x1265: 0x7179, 0x1266: 0x7191, 0x1267: 0x71a9, 0x1268: 0x71c1, 0x1269: 0x1fb2, + 0x126a: 0x71d9, 0x126b: 0x7201, 0x126c: 0x7229, 0x126d: 0x7261, 0x126e: 0x7299, 0x126f: 0x72c1, + 0x1270: 0x72e9, 0x1271: 0x7311, 0x1272: 0x7339, 0x1273: 0x7361, 0x1274: 0x7389, 0x1275: 0x73b1, + 0x1276: 0x73d9, 0x1277: 0x0040, 0x1278: 0x7401, 0x1279: 0x7429, 0x127a: 0x7451, 0x127b: 0x7479, + 0x127c: 0x74a1, 0x127d: 0x0040, 0x127e: 0x74c9, 0x127f: 0x0040, + // Block 0x4a, offset 0x1280 + 0x1280: 0x74f1, 0x1281: 0x7519, 0x1282: 0x0040, 0x1283: 0x7541, 0x1284: 0x7569, 0x1285: 0x0040, + 0x1286: 0x7591, 0x1287: 0x75b9, 0x1288: 0x75e1, 0x1289: 0x7609, 0x128a: 0x7631, 0x128b: 0x7659, + 0x128c: 0x7681, 0x128d: 0x76a9, 0x128e: 0x76d1, 0x128f: 0x76f9, 0x1290: 0x7721, 0x1291: 0x7721, + 0x1292: 0x7739, 0x1293: 0x7739, 0x1294: 0x7739, 0x1295: 0x7739, 0x1296: 0x7751, 0x1297: 0x7751, + 0x1298: 0x7751, 0x1299: 0x7751, 0x129a: 0x7769, 0x129b: 0x7769, 0x129c: 0x7769, 0x129d: 0x7769, + 0x129e: 0x7781, 0x129f: 0x7781, 0x12a0: 0x7781, 0x12a1: 0x7781, 0x12a2: 0x7799, 0x12a3: 0x7799, + 0x12a4: 0x7799, 0x12a5: 0x7799, 0x12a6: 0x77b1, 0x12a7: 0x77b1, 0x12a8: 0x77b1, 0x12a9: 0x77b1, + 0x12aa: 0x77c9, 0x12ab: 0x77c9, 0x12ac: 0x77c9, 0x12ad: 0x77c9, 0x12ae: 0x77e1, 0x12af: 0x77e1, + 0x12b0: 0x77e1, 0x12b1: 0x77e1, 0x12b2: 0x77f9, 0x12b3: 0x77f9, 0x12b4: 0x77f9, 0x12b5: 0x77f9, + 0x12b6: 0x7811, 0x12b7: 0x7811, 0x12b8: 0x7811, 0x12b9: 0x7811, 0x12ba: 0x7829, 0x12bb: 0x7829, + 0x12bc: 0x7829, 0x12bd: 0x7829, 0x12be: 0x7841, 0x12bf: 0x7841, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x7841, 0x12c1: 0x7841, 0x12c2: 0x7859, 0x12c3: 0x7859, 0x12c4: 0x7871, 0x12c5: 0x7871, + 0x12c6: 0x7889, 0x12c7: 0x7889, 0x12c8: 0x78a1, 0x12c9: 0x78a1, 0x12ca: 0x78b9, 0x12cb: 0x78b9, + 0x12cc: 0x78d1, 0x12cd: 0x78d1, 0x12ce: 0x78e9, 0x12cf: 0x78e9, 0x12d0: 0x78e9, 0x12d1: 0x78e9, + 0x12d2: 0x7901, 0x12d3: 0x7901, 0x12d4: 0x7901, 0x12d5: 0x7901, 0x12d6: 0x7919, 0x12d7: 
0x7919, + 0x12d8: 0x7919, 0x12d9: 0x7919, 0x12da: 0x7931, 0x12db: 0x7931, 0x12dc: 0x7931, 0x12dd: 0x7931, + 0x12de: 0x7949, 0x12df: 0x7949, 0x12e0: 0x7961, 0x12e1: 0x7961, 0x12e2: 0x7961, 0x12e3: 0x7961, + 0x12e4: 0x7979, 0x12e5: 0x7979, 0x12e6: 0x7991, 0x12e7: 0x7991, 0x12e8: 0x7991, 0x12e9: 0x7991, + 0x12ea: 0x79a9, 0x12eb: 0x79a9, 0x12ec: 0x79a9, 0x12ed: 0x79a9, 0x12ee: 0x79c1, 0x12ef: 0x79c1, + 0x12f0: 0x79d9, 0x12f1: 0x79d9, 0x12f2: 0x0018, 0x12f3: 0x0018, 0x12f4: 0x0018, 0x12f5: 0x0018, + 0x12f6: 0x0018, 0x12f7: 0x0018, 0x12f8: 0x0018, 0x12f9: 0x0018, 0x12fa: 0x0018, 0x12fb: 0x0018, + 0x12fc: 0x0018, 0x12fd: 0x0018, 0x12fe: 0x0018, 0x12ff: 0x0018, + // Block 0x4c, offset 0x1300 + 0x1300: 0x0018, 0x1301: 0x0018, 0x1302: 0x0040, 0x1303: 0x0040, 0x1304: 0x0040, 0x1305: 0x0040, + 0x1306: 0x0040, 0x1307: 0x0040, 0x1308: 0x0040, 0x1309: 0x0040, 0x130a: 0x0040, 0x130b: 0x0040, + 0x130c: 0x0040, 0x130d: 0x0040, 0x130e: 0x0040, 0x130f: 0x0040, 0x1310: 0x0040, 0x1311: 0x0040, + 0x1312: 0x0040, 0x1313: 0x79f1, 0x1314: 0x79f1, 0x1315: 0x79f1, 0x1316: 0x79f1, 0x1317: 0x7a09, + 0x1318: 0x7a09, 0x1319: 0x7a21, 0x131a: 0x7a21, 0x131b: 0x7a39, 0x131c: 0x7a39, 0x131d: 0x0479, + 0x131e: 0x7a51, 0x131f: 0x7a51, 0x1320: 0x7a69, 0x1321: 0x7a69, 0x1322: 0x7a81, 0x1323: 0x7a81, + 0x1324: 0x7a99, 0x1325: 0x7a99, 0x1326: 0x7a99, 0x1327: 0x7a99, 0x1328: 0x7ab1, 0x1329: 0x7ab1, + 0x132a: 0x7ac9, 0x132b: 0x7ac9, 0x132c: 0x7af1, 0x132d: 0x7af1, 0x132e: 0x7b19, 0x132f: 0x7b19, + 0x1330: 0x7b41, 0x1331: 0x7b41, 0x1332: 0x7b69, 0x1333: 0x7b69, 0x1334: 0x7b91, 0x1335: 0x7b91, + 0x1336: 0x7bb9, 0x1337: 0x7bb9, 0x1338: 0x7bb9, 0x1339: 0x7be1, 0x133a: 0x7be1, 0x133b: 0x7be1, + 0x133c: 0x7c09, 0x133d: 0x7c09, 0x133e: 0x7c09, 0x133f: 0x7c09, + // Block 0x4d, offset 0x1340 + 0x1340: 0x85f9, 0x1341: 0x8621, 0x1342: 0x8649, 0x1343: 0x8671, 0x1344: 0x8699, 0x1345: 0x86c1, + 0x1346: 0x86e9, 0x1347: 0x8711, 0x1348: 0x8739, 0x1349: 0x8761, 0x134a: 0x8789, 0x134b: 0x87b1, + 0x134c: 0x87d9, 0x134d: 0x8801, 
0x134e: 0x8829, 0x134f: 0x8851, 0x1350: 0x8879, 0x1351: 0x88a1, + 0x1352: 0x88c9, 0x1353: 0x88f1, 0x1354: 0x8919, 0x1355: 0x8941, 0x1356: 0x8969, 0x1357: 0x8991, + 0x1358: 0x89b9, 0x1359: 0x89e1, 0x135a: 0x8a09, 0x135b: 0x8a31, 0x135c: 0x8a59, 0x135d: 0x8a81, + 0x135e: 0x8aaa, 0x135f: 0x8ada, 0x1360: 0x8b0a, 0x1361: 0x8b3a, 0x1362: 0x8b6a, 0x1363: 0x8b9a, + 0x1364: 0x8bc9, 0x1365: 0x8bf1, 0x1366: 0x7c71, 0x1367: 0x8c19, 0x1368: 0x7be1, 0x1369: 0x7c99, + 0x136a: 0x8c41, 0x136b: 0x8c69, 0x136c: 0x7d39, 0x136d: 0x8c91, 0x136e: 0x7d61, 0x136f: 0x7d89, + 0x1370: 0x8cb9, 0x1371: 0x8ce1, 0x1372: 0x7e29, 0x1373: 0x8d09, 0x1374: 0x7e51, 0x1375: 0x7e79, + 0x1376: 0x8d31, 0x1377: 0x8d59, 0x1378: 0x7ec9, 0x1379: 0x8d81, 0x137a: 0x7ef1, 0x137b: 0x7f19, + 0x137c: 0x83a1, 0x137d: 0x83c9, 0x137e: 0x8441, 0x137f: 0x8469, + // Block 0x4e, offset 0x1380 + 0x1380: 0x8491, 0x1381: 0x8531, 0x1382: 0x8559, 0x1383: 0x8581, 0x1384: 0x85a9, 0x1385: 0x8649, + 0x1386: 0x8671, 0x1387: 0x8699, 0x1388: 0x8da9, 0x1389: 0x8739, 0x138a: 0x8dd1, 0x138b: 0x8df9, + 0x138c: 0x8829, 0x138d: 0x8e21, 0x138e: 0x8851, 0x138f: 0x8879, 0x1390: 0x8a81, 0x1391: 0x8e49, + 0x1392: 0x8e71, 0x1393: 0x89b9, 0x1394: 0x8e99, 0x1395: 0x89e1, 0x1396: 0x8a09, 0x1397: 0x7c21, + 0x1398: 0x7c49, 0x1399: 0x8ec1, 0x139a: 0x7c71, 0x139b: 0x8ee9, 0x139c: 0x7cc1, 0x139d: 0x7ce9, + 0x139e: 0x7d11, 0x139f: 0x7d39, 0x13a0: 0x8f11, 0x13a1: 0x7db1, 0x13a2: 0x7dd9, 0x13a3: 0x7e01, + 0x13a4: 0x7e29, 0x13a5: 0x8f39, 0x13a6: 0x7ec9, 0x13a7: 0x7f41, 0x13a8: 0x7f69, 0x13a9: 0x7f91, + 0x13aa: 0x7fb9, 0x13ab: 0x7fe1, 0x13ac: 0x8031, 0x13ad: 0x8059, 0x13ae: 0x8081, 0x13af: 0x80a9, + 0x13b0: 0x80d1, 0x13b1: 0x80f9, 0x13b2: 0x8f61, 0x13b3: 0x8121, 0x13b4: 0x8149, 0x13b5: 0x8171, + 0x13b6: 0x8199, 0x13b7: 0x81c1, 0x13b8: 0x81e9, 0x13b9: 0x8239, 0x13ba: 0x8261, 0x13bb: 0x8289, + 0x13bc: 0x82b1, 0x13bd: 0x82d9, 0x13be: 0x8301, 0x13bf: 0x8329, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x8351, 0x13c1: 0x8379, 0x13c2: 0x83f1, 0x13c3: 0x8419, 0x13c4: 
0x84b9, 0x13c5: 0x84e1, + 0x13c6: 0x8509, 0x13c7: 0x8531, 0x13c8: 0x8559, 0x13c9: 0x85d1, 0x13ca: 0x85f9, 0x13cb: 0x8621, + 0x13cc: 0x8649, 0x13cd: 0x8f89, 0x13ce: 0x86c1, 0x13cf: 0x86e9, 0x13d0: 0x8711, 0x13d1: 0x8739, + 0x13d2: 0x87b1, 0x13d3: 0x87d9, 0x13d4: 0x8801, 0x13d5: 0x8829, 0x13d6: 0x8fb1, 0x13d7: 0x88a1, + 0x13d8: 0x88c9, 0x13d9: 0x8fd9, 0x13da: 0x8941, 0x13db: 0x8969, 0x13dc: 0x8991, 0x13dd: 0x89b9, + 0x13de: 0x9001, 0x13df: 0x7c71, 0x13e0: 0x8ee9, 0x13e1: 0x7d39, 0x13e2: 0x8f11, 0x13e3: 0x7e29, + 0x13e4: 0x8f39, 0x13e5: 0x7ec9, 0x13e6: 0x9029, 0x13e7: 0x80d1, 0x13e8: 0x9051, 0x13e9: 0x9079, + 0x13ea: 0x90a1, 0x13eb: 0x8531, 0x13ec: 0x8559, 0x13ed: 0x8649, 0x13ee: 0x8829, 0x13ef: 0x8fb1, + 0x13f0: 0x89b9, 0x13f1: 0x9001, 0x13f2: 0x90c9, 0x13f3: 0x9101, 0x13f4: 0x9139, 0x13f5: 0x9171, + 0x13f6: 0x9199, 0x13f7: 0x91c1, 0x13f8: 0x91e9, 0x13f9: 0x9211, 0x13fa: 0x9239, 0x13fb: 0x9261, + 0x13fc: 0x9289, 0x13fd: 0x92b1, 0x13fe: 0x92d9, 0x13ff: 0x9301, + // Block 0x50, offset 0x1400 + 0x1400: 0x9329, 0x1401: 0x9351, 0x1402: 0x9379, 0x1403: 0x93a1, 0x1404: 0x93c9, 0x1405: 0x93f1, + 0x1406: 0x9419, 0x1407: 0x9441, 0x1408: 0x9469, 0x1409: 0x9491, 0x140a: 0x94b9, 0x140b: 0x94e1, + 0x140c: 0x9079, 0x140d: 0x9509, 0x140e: 0x9531, 0x140f: 0x9559, 0x1410: 0x9581, 0x1411: 0x9171, + 0x1412: 0x9199, 0x1413: 0x91c1, 0x1414: 0x91e9, 0x1415: 0x9211, 0x1416: 0x9239, 0x1417: 0x9261, + 0x1418: 0x9289, 0x1419: 0x92b1, 0x141a: 0x92d9, 0x141b: 0x9301, 0x141c: 0x9329, 0x141d: 0x9351, + 0x141e: 0x9379, 0x141f: 0x93a1, 0x1420: 0x93c9, 0x1421: 0x93f1, 0x1422: 0x9419, 0x1423: 0x9441, + 0x1424: 0x9469, 0x1425: 0x9491, 0x1426: 0x94b9, 0x1427: 0x94e1, 0x1428: 0x9079, 0x1429: 0x9509, + 0x142a: 0x9531, 0x142b: 0x9559, 0x142c: 0x9581, 0x142d: 0x9491, 0x142e: 0x94b9, 0x142f: 0x94e1, + 0x1430: 0x9079, 0x1431: 0x9051, 0x1432: 0x90a1, 0x1433: 0x8211, 0x1434: 0x8059, 0x1435: 0x8081, + 0x1436: 0x80a9, 0x1437: 0x9491, 0x1438: 0x94b9, 0x1439: 0x94e1, 0x143a: 0x8211, 0x143b: 0x8239, + 0x143c: 
0x95a9, 0x143d: 0x95a9, 0x143e: 0x0018, 0x143f: 0x0018, + // Block 0x51, offset 0x1440 + 0x1440: 0x0040, 0x1441: 0x0040, 0x1442: 0x0040, 0x1443: 0x0040, 0x1444: 0x0040, 0x1445: 0x0040, + 0x1446: 0x0040, 0x1447: 0x0040, 0x1448: 0x0040, 0x1449: 0x0040, 0x144a: 0x0040, 0x144b: 0x0040, + 0x144c: 0x0040, 0x144d: 0x0040, 0x144e: 0x0040, 0x144f: 0x0040, 0x1450: 0x95d1, 0x1451: 0x9609, + 0x1452: 0x9609, 0x1453: 0x9641, 0x1454: 0x9679, 0x1455: 0x96b1, 0x1456: 0x96e9, 0x1457: 0x9721, + 0x1458: 0x9759, 0x1459: 0x9759, 0x145a: 0x9791, 0x145b: 0x97c9, 0x145c: 0x9801, 0x145d: 0x9839, + 0x145e: 0x9871, 0x145f: 0x98a9, 0x1460: 0x98a9, 0x1461: 0x98e1, 0x1462: 0x9919, 0x1463: 0x9919, + 0x1464: 0x9951, 0x1465: 0x9951, 0x1466: 0x9989, 0x1467: 0x99c1, 0x1468: 0x99c1, 0x1469: 0x99f9, + 0x146a: 0x9a31, 0x146b: 0x9a31, 0x146c: 0x9a69, 0x146d: 0x9a69, 0x146e: 0x9aa1, 0x146f: 0x9ad9, + 0x1470: 0x9ad9, 0x1471: 0x9b11, 0x1472: 0x9b11, 0x1473: 0x9b49, 0x1474: 0x9b81, 0x1475: 0x9bb9, + 0x1476: 0x9bf1, 0x1477: 0x9bf1, 0x1478: 0x9c29, 0x1479: 0x9c61, 0x147a: 0x9c99, 0x147b: 0x9cd1, + 0x147c: 0x9d09, 0x147d: 0x9d09, 0x147e: 0x9d41, 0x147f: 0x9d79, + // Block 0x52, offset 0x1480 + 0x1480: 0xa949, 0x1481: 0xa981, 0x1482: 0xa9b9, 0x1483: 0xa8a1, 0x1484: 0x9bb9, 0x1485: 0x9989, + 0x1486: 0xa9f1, 0x1487: 0xaa29, 0x1488: 0x0040, 0x1489: 0x0040, 0x148a: 0x0040, 0x148b: 0x0040, + 0x148c: 0x0040, 0x148d: 0x0040, 0x148e: 0x0040, 0x148f: 0x0040, 0x1490: 0x0040, 0x1491: 0x0040, + 0x1492: 0x0040, 0x1493: 0x0040, 0x1494: 0x0040, 0x1495: 0x0040, 0x1496: 0x0040, 0x1497: 0x0040, + 0x1498: 0x0040, 0x1499: 0x0040, 0x149a: 0x0040, 0x149b: 0x0040, 0x149c: 0x0040, 0x149d: 0x0040, + 0x149e: 0x0040, 0x149f: 0x0040, 0x14a0: 0x0040, 0x14a1: 0x0040, 0x14a2: 0x0040, 0x14a3: 0x0040, + 0x14a4: 0x0040, 0x14a5: 0x0040, 0x14a6: 0x0040, 0x14a7: 0x0040, 0x14a8: 0x0040, 0x14a9: 0x0040, + 0x14aa: 0x0040, 0x14ab: 0x0040, 0x14ac: 0x0040, 0x14ad: 0x0040, 0x14ae: 0x0040, 0x14af: 0x0040, + 0x14b0: 0xaa61, 0x14b1: 0xaa99, 0x14b2: 0xaad1, 
0x14b3: 0xab19, 0x14b4: 0xab61, 0x14b5: 0xaba9, + 0x14b6: 0xabf1, 0x14b7: 0xac39, 0x14b8: 0xac81, 0x14b9: 0xacc9, 0x14ba: 0xad02, 0x14bb: 0xae12, + 0x14bc: 0xae91, 0x14bd: 0x0018, 0x14be: 0x0040, 0x14bf: 0x0040, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x13c0, 0x14c1: 0x13c0, 0x14c2: 0x13c0, 0x14c3: 0x13c0, 0x14c4: 0x13c0, 0x14c5: 0x13c0, + 0x14c6: 0x13c0, 0x14c7: 0x13c0, 0x14c8: 0x13c0, 0x14c9: 0x13c0, 0x14ca: 0x13c0, 0x14cb: 0x13c0, + 0x14cc: 0x13c0, 0x14cd: 0x13c0, 0x14ce: 0x13c0, 0x14cf: 0x13c0, 0x14d0: 0xaeda, 0x14d1: 0x7d55, + 0x14d2: 0x0040, 0x14d3: 0xaeea, 0x14d4: 0x03c2, 0x14d5: 0xaefa, 0x14d6: 0xaf0a, 0x14d7: 0x7d75, + 0x14d8: 0x7d95, 0x14d9: 0x0040, 0x14da: 0x0040, 0x14db: 0x0040, 0x14dc: 0x0040, 0x14dd: 0x0040, + 0x14de: 0x0040, 0x14df: 0x0040, 0x14e0: 0x1308, 0x14e1: 0x1308, 0x14e2: 0x1308, 0x14e3: 0x1308, + 0x14e4: 0x1308, 0x14e5: 0x1308, 0x14e6: 0x1308, 0x14e7: 0x1308, 0x14e8: 0x1308, 0x14e9: 0x1308, + 0x14ea: 0x1308, 0x14eb: 0x1308, 0x14ec: 0x1308, 0x14ed: 0x1308, 0x14ee: 0x1308, 0x14ef: 0x1308, + 0x14f0: 0x0040, 0x14f1: 0x7db5, 0x14f2: 0x7dd5, 0x14f3: 0xaf1a, 0x14f4: 0xaf1a, 0x14f5: 0x1fd2, + 0x14f6: 0x1fe2, 0x14f7: 0xaf2a, 0x14f8: 0xaf3a, 0x14f9: 0x7df5, 0x14fa: 0x7e15, 0x14fb: 0x7e35, + 0x14fc: 0x7df5, 0x14fd: 0x7e55, 0x14fe: 0x7e75, 0x14ff: 0x7e55, + // Block 0x54, offset 0x1500 + 0x1500: 0x7e95, 0x1501: 0x7eb5, 0x1502: 0x7ed5, 0x1503: 0x7eb5, 0x1504: 0x7ef5, 0x1505: 0x0018, + 0x1506: 0x0018, 0x1507: 0xaf4a, 0x1508: 0xaf5a, 0x1509: 0x7f16, 0x150a: 0x7f36, 0x150b: 0x7f56, + 0x150c: 0x7f76, 0x150d: 0xaf1a, 0x150e: 0xaf1a, 0x150f: 0xaf1a, 0x1510: 0xaeda, 0x1511: 0x7f95, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x03c2, 0x1515: 0xaeea, 0x1516: 0xaf0a, 0x1517: 0xaefa, + 0x1518: 0x7fb5, 0x1519: 0x1fd2, 0x151a: 0x1fe2, 0x151b: 0xaf2a, 0x151c: 0xaf3a, 0x151d: 0x7e95, + 0x151e: 0x7ef5, 0x151f: 0xaf6a, 0x1520: 0xaf7a, 0x1521: 0xaf8a, 0x1522: 0x1fb2, 0x1523: 0xaf99, + 0x1524: 0xafaa, 0x1525: 0xafba, 0x1526: 0x1fc2, 0x1527: 0x0040, 0x1528: 0xafca, 0x1529: 
0xafda, + 0x152a: 0xafea, 0x152b: 0xaffa, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0x7fd6, 0x1531: 0xb009, 0x1532: 0x7ff6, 0x1533: 0x0008, 0x1534: 0x8016, 0x1535: 0x0040, + 0x1536: 0x8036, 0x1537: 0xb031, 0x1538: 0x8056, 0x1539: 0xb059, 0x153a: 0x8076, 0x153b: 0xb081, + 0x153c: 0x8096, 0x153d: 0xb0a9, 0x153e: 0x80b6, 0x153f: 0xb0d1, + // Block 0x55, offset 0x1540 + 0x1540: 0xb0f9, 0x1541: 0xb111, 0x1542: 0xb111, 0x1543: 0xb129, 0x1544: 0xb129, 0x1545: 0xb141, + 0x1546: 0xb141, 0x1547: 0xb159, 0x1548: 0xb159, 0x1549: 0xb171, 0x154a: 0xb171, 0x154b: 0xb171, + 0x154c: 0xb171, 0x154d: 0xb189, 0x154e: 0xb189, 0x154f: 0xb1a1, 0x1550: 0xb1a1, 0x1551: 0xb1a1, + 0x1552: 0xb1a1, 0x1553: 0xb1b9, 0x1554: 0xb1b9, 0x1555: 0xb1d1, 0x1556: 0xb1d1, 0x1557: 0xb1d1, + 0x1558: 0xb1d1, 0x1559: 0xb1e9, 0x155a: 0xb1e9, 0x155b: 0xb1e9, 0x155c: 0xb1e9, 0x155d: 0xb201, + 0x155e: 0xb201, 0x155f: 0xb201, 0x1560: 0xb201, 0x1561: 0xb219, 0x1562: 0xb219, 0x1563: 0xb219, + 0x1564: 0xb219, 0x1565: 0xb231, 0x1566: 0xb231, 0x1567: 0xb231, 0x1568: 0xb231, 0x1569: 0xb249, + 0x156a: 0xb249, 0x156b: 0xb261, 0x156c: 0xb261, 0x156d: 0xb279, 0x156e: 0xb279, 0x156f: 0xb291, + 0x1570: 0xb291, 0x1571: 0xb2a9, 0x1572: 0xb2a9, 0x1573: 0xb2a9, 0x1574: 0xb2a9, 0x1575: 0xb2c1, + 0x1576: 0xb2c1, 0x1577: 0xb2c1, 0x1578: 0xb2c1, 0x1579: 0xb2d9, 0x157a: 0xb2d9, 0x157b: 0xb2d9, + 0x157c: 0xb2d9, 0x157d: 0xb2f1, 0x157e: 0xb2f1, 0x157f: 0xb2f1, + // Block 0x56, offset 0x1580 + 0x1580: 0xb2f1, 0x1581: 0xb309, 0x1582: 0xb309, 0x1583: 0xb309, 0x1584: 0xb309, 0x1585: 0xb321, + 0x1586: 0xb321, 0x1587: 0xb321, 0x1588: 0xb321, 0x1589: 0xb339, 0x158a: 0xb339, 0x158b: 0xb339, + 0x158c: 0xb339, 0x158d: 0xb351, 0x158e: 0xb351, 0x158f: 0xb351, 0x1590: 0xb351, 0x1591: 0xb369, + 0x1592: 0xb369, 0x1593: 0xb369, 0x1594: 0xb369, 0x1595: 0xb381, 0x1596: 0xb381, 0x1597: 0xb381, + 0x1598: 0xb381, 0x1599: 0xb399, 0x159a: 0xb399, 0x159b: 0xb399, 0x159c: 0xb399, 0x159d: 0xb3b1, + 0x159e: 0xb3b1, 0x159f: 0xb3b1, 
0x15a0: 0xb3b1, 0x15a1: 0xb3c9, 0x15a2: 0xb3c9, 0x15a3: 0xb3c9, + 0x15a4: 0xb3c9, 0x15a5: 0xb3e1, 0x15a6: 0xb3e1, 0x15a7: 0xb3e1, 0x15a8: 0xb3e1, 0x15a9: 0xb3f9, + 0x15aa: 0xb3f9, 0x15ab: 0xb3f9, 0x15ac: 0xb3f9, 0x15ad: 0xb411, 0x15ae: 0xb411, 0x15af: 0x7ab1, + 0x15b0: 0x7ab1, 0x15b1: 0xb429, 0x15b2: 0xb429, 0x15b3: 0xb429, 0x15b4: 0xb429, 0x15b5: 0xb441, + 0x15b6: 0xb441, 0x15b7: 0xb469, 0x15b8: 0xb469, 0x15b9: 0xb491, 0x15ba: 0xb491, 0x15bb: 0xb4b9, + 0x15bc: 0xb4b9, 0x15bd: 0x0040, 0x15be: 0x0040, 0x15bf: 0x03c0, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x0040, 0x15c1: 0xaefa, 0x15c2: 0xb4e2, 0x15c3: 0xaf6a, 0x15c4: 0xafda, 0x15c5: 0xafea, + 0x15c6: 0xaf7a, 0x15c7: 0xb4f2, 0x15c8: 0x1fd2, 0x15c9: 0x1fe2, 0x15ca: 0xaf8a, 0x15cb: 0x1fb2, + 0x15cc: 0xaeda, 0x15cd: 0xaf99, 0x15ce: 0x29d1, 0x15cf: 0xb502, 0x15d0: 0x1f41, 0x15d1: 0x00c9, + 0x15d2: 0x0069, 0x15d3: 0x0079, 0x15d4: 0x1f51, 0x15d5: 0x1f61, 0x15d6: 0x1f71, 0x15d7: 0x1f81, + 0x15d8: 0x1f91, 0x15d9: 0x1fa1, 0x15da: 0xaeea, 0x15db: 0x03c2, 0x15dc: 0xafaa, 0x15dd: 0x1fc2, + 0x15de: 0xafba, 0x15df: 0xaf0a, 0x15e0: 0xaffa, 0x15e1: 0x0039, 0x15e2: 0x0ee9, 0x15e3: 0x1159, + 0x15e4: 0x0ef9, 0x15e5: 0x0f09, 0x15e6: 0x1199, 0x15e7: 0x0f31, 0x15e8: 0x0249, 0x15e9: 0x0f41, + 0x15ea: 0x0259, 0x15eb: 0x0f51, 0x15ec: 0x0359, 0x15ed: 0x0f61, 0x15ee: 0x0f71, 0x15ef: 0x00d9, + 0x15f0: 0x0f99, 0x15f1: 0x2039, 0x15f2: 0x0269, 0x15f3: 0x01d9, 0x15f4: 0x0fa9, 0x15f5: 0x0fb9, + 0x15f6: 0x1089, 0x15f7: 0x0279, 0x15f8: 0x0369, 0x15f9: 0x0289, 0x15fa: 0x13d1, 0x15fb: 0xaf4a, + 0x15fc: 0xafca, 0x15fd: 0xaf5a, 0x15fe: 0xb512, 0x15ff: 0xaf1a, + // Block 0x58, offset 0x1600 + 0x1600: 0x1caa, 0x1601: 0x0039, 0x1602: 0x0ee9, 0x1603: 0x1159, 0x1604: 0x0ef9, 0x1605: 0x0f09, + 0x1606: 0x1199, 0x1607: 0x0f31, 0x1608: 0x0249, 0x1609: 0x0f41, 0x160a: 0x0259, 0x160b: 0x0f51, + 0x160c: 0x0359, 0x160d: 0x0f61, 0x160e: 0x0f71, 0x160f: 0x00d9, 0x1610: 0x0f99, 0x1611: 0x2039, + 0x1612: 0x0269, 0x1613: 0x01d9, 0x1614: 0x0fa9, 0x1615: 0x0fb9, 0x1616: 
0x1089, 0x1617: 0x0279, + 0x1618: 0x0369, 0x1619: 0x0289, 0x161a: 0x13d1, 0x161b: 0xaf2a, 0x161c: 0xb522, 0x161d: 0xaf3a, + 0x161e: 0xb532, 0x161f: 0x80d5, 0x1620: 0x80f5, 0x1621: 0x29d1, 0x1622: 0x8115, 0x1623: 0x8115, + 0x1624: 0x8135, 0x1625: 0x8155, 0x1626: 0x8175, 0x1627: 0x8195, 0x1628: 0x81b5, 0x1629: 0x81d5, + 0x162a: 0x81f5, 0x162b: 0x8215, 0x162c: 0x8235, 0x162d: 0x8255, 0x162e: 0x8275, 0x162f: 0x8295, + 0x1630: 0x82b5, 0x1631: 0x82d5, 0x1632: 0x82f5, 0x1633: 0x8315, 0x1634: 0x8335, 0x1635: 0x8355, + 0x1636: 0x8375, 0x1637: 0x8395, 0x1638: 0x83b5, 0x1639: 0x83d5, 0x163a: 0x83f5, 0x163b: 0x8415, + 0x163c: 0x81b5, 0x163d: 0x8435, 0x163e: 0x8455, 0x163f: 0x8215, + // Block 0x59, offset 0x1640 + 0x1640: 0x8475, 0x1641: 0x8495, 0x1642: 0x84b5, 0x1643: 0x84d5, 0x1644: 0x84f5, 0x1645: 0x8515, + 0x1646: 0x8535, 0x1647: 0x8555, 0x1648: 0x84d5, 0x1649: 0x8575, 0x164a: 0x84d5, 0x164b: 0x8595, + 0x164c: 0x8595, 0x164d: 0x85b5, 0x164e: 0x85b5, 0x164f: 0x85d5, 0x1650: 0x8515, 0x1651: 0x85f5, + 0x1652: 0x8615, 0x1653: 0x85f5, 0x1654: 0x8635, 0x1655: 0x8615, 0x1656: 0x8655, 0x1657: 0x8655, + 0x1658: 0x8675, 0x1659: 0x8675, 0x165a: 0x8695, 0x165b: 0x8695, 0x165c: 0x8615, 0x165d: 0x8115, + 0x165e: 0x86b5, 0x165f: 0x86d5, 0x1660: 0x0040, 0x1661: 0x86f5, 0x1662: 0x8715, 0x1663: 0x8735, + 0x1664: 0x8755, 0x1665: 0x8735, 0x1666: 0x8775, 0x1667: 0x8795, 0x1668: 0x87b5, 0x1669: 0x87b5, + 0x166a: 0x87d5, 0x166b: 0x87d5, 0x166c: 0x87f5, 0x166d: 0x87f5, 0x166e: 0x87d5, 0x166f: 0x87d5, + 0x1670: 0x8815, 0x1671: 0x8835, 0x1672: 0x8855, 0x1673: 0x8875, 0x1674: 0x8895, 0x1675: 0x88b5, + 0x1676: 0x88b5, 0x1677: 0x88b5, 0x1678: 0x88d5, 0x1679: 0x88d5, 0x167a: 0x88d5, 0x167b: 0x88d5, + 0x167c: 0x87b5, 0x167d: 0x87b5, 0x167e: 0x87b5, 0x167f: 0x0040, + // Block 0x5a, offset 0x1680 + 0x1680: 0x0040, 0x1681: 0x0040, 0x1682: 0x8715, 0x1683: 0x86f5, 0x1684: 0x88f5, 0x1685: 0x86f5, + 0x1686: 0x8715, 0x1687: 0x86f5, 0x1688: 0x0040, 0x1689: 0x0040, 0x168a: 0x8915, 0x168b: 0x8715, + 0x168c: 0x8935, 
0x168d: 0x88f5, 0x168e: 0x8935, 0x168f: 0x8715, 0x1690: 0x0040, 0x1691: 0x0040, + 0x1692: 0x8955, 0x1693: 0x8975, 0x1694: 0x8875, 0x1695: 0x8935, 0x1696: 0x88f5, 0x1697: 0x8935, + 0x1698: 0x0040, 0x1699: 0x0040, 0x169a: 0x8995, 0x169b: 0x89b5, 0x169c: 0x8995, 0x169d: 0x0040, + 0x169e: 0x0040, 0x169f: 0x0040, 0x16a0: 0xb541, 0x16a1: 0xb559, 0x16a2: 0xb571, 0x16a3: 0x89d6, + 0x16a4: 0xb589, 0x16a5: 0xb5a1, 0x16a6: 0x89f5, 0x16a7: 0x0040, 0x16a8: 0x8a15, 0x16a9: 0x8a35, + 0x16aa: 0x8a55, 0x16ab: 0x8a35, 0x16ac: 0x8a75, 0x16ad: 0x8a95, 0x16ae: 0x8ab5, 0x16af: 0x0040, + 0x16b0: 0x0040, 0x16b1: 0x0040, 0x16b2: 0x0040, 0x16b3: 0x0040, 0x16b4: 0x0040, 0x16b5: 0x0040, + 0x16b6: 0x0040, 0x16b7: 0x0040, 0x16b8: 0x0040, 0x16b9: 0x0340, 0x16ba: 0x0340, 0x16bb: 0x0340, + 0x16bc: 0x0040, 0x16bd: 0x0040, 0x16be: 0x0040, 0x16bf: 0x0040, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x0208, 0x16c1: 0x0208, 0x16c2: 0x0208, 0x16c3: 0x0208, 0x16c4: 0x0208, 0x16c5: 0x0408, + 0x16c6: 0x0008, 0x16c7: 0x0408, 0x16c8: 0x0018, 0x16c9: 0x0408, 0x16ca: 0x0408, 0x16cb: 0x0008, + 0x16cc: 0x0008, 0x16cd: 0x0108, 0x16ce: 0x0408, 0x16cf: 0x0408, 0x16d0: 0x0408, 0x16d1: 0x0408, + 0x16d2: 0x0408, 0x16d3: 0x0208, 0x16d4: 0x0208, 0x16d5: 0x0208, 0x16d6: 0x0208, 0x16d7: 0x0108, + 0x16d8: 0x0208, 0x16d9: 0x0208, 0x16da: 0x0208, 0x16db: 0x0208, 0x16dc: 0x0208, 0x16dd: 0x0408, + 0x16de: 0x0208, 0x16df: 0x0208, 0x16e0: 0x0208, 0x16e1: 0x0408, 0x16e2: 0x0008, 0x16e3: 0x0008, + 0x16e4: 0x0408, 0x16e5: 0x1308, 0x16e6: 0x1308, 0x16e7: 0x0040, 0x16e8: 0x0040, 0x16e9: 0x0040, + 0x16ea: 0x0040, 0x16eb: 0x0218, 0x16ec: 0x0218, 0x16ed: 0x0218, 0x16ee: 0x0218, 0x16ef: 0x0418, + 0x16f0: 0x0018, 0x16f1: 0x0018, 0x16f2: 0x0018, 0x16f3: 0x0018, 0x16f4: 0x0018, 0x16f5: 0x0018, + 0x16f6: 0x0018, 0x16f7: 0x0040, 0x16f8: 0x0040, 0x16f9: 0x0040, 0x16fa: 0x0040, 0x16fb: 0x0040, + 0x16fc: 0x0040, 0x16fd: 0x0040, 0x16fe: 0x0040, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0208, 0x1701: 0x0408, 0x1702: 0x0208, 0x1703: 
0x0408, 0x1704: 0x0408, 0x1705: 0x0408, + 0x1706: 0x0208, 0x1707: 0x0208, 0x1708: 0x0208, 0x1709: 0x0408, 0x170a: 0x0208, 0x170b: 0x0208, + 0x170c: 0x0408, 0x170d: 0x0208, 0x170e: 0x0408, 0x170f: 0x0408, 0x1710: 0x0208, 0x1711: 0x0408, + 0x1712: 0x0040, 0x1713: 0x0040, 0x1714: 0x0040, 0x1715: 0x0040, 0x1716: 0x0040, 0x1717: 0x0040, + 0x1718: 0x0040, 0x1719: 0x0018, 0x171a: 0x0018, 0x171b: 0x0018, 0x171c: 0x0018, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0x0040, 0x1721: 0x0040, 0x1722: 0x0040, 0x1723: 0x0040, + 0x1724: 0x0040, 0x1725: 0x0040, 0x1726: 0x0040, 0x1727: 0x0040, 0x1728: 0x0040, 0x1729: 0x0418, + 0x172a: 0x0418, 0x172b: 0x0418, 0x172c: 0x0418, 0x172d: 0x0218, 0x172e: 0x0218, 0x172f: 0x0018, + 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0040, 0x173a: 0x0040, 0x173b: 0x0040, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x1308, 0x1741: 0x1308, 0x1742: 0x1008, 0x1743: 0x1008, 0x1744: 0x0040, 0x1745: 0x0008, + 0x1746: 0x0008, 0x1747: 0x0008, 0x1748: 0x0008, 0x1749: 0x0008, 0x174a: 0x0008, 0x174b: 0x0008, + 0x174c: 0x0008, 0x174d: 0x0040, 0x174e: 0x0040, 0x174f: 0x0008, 0x1750: 0x0008, 0x1751: 0x0040, + 0x1752: 0x0040, 0x1753: 0x0008, 0x1754: 0x0008, 0x1755: 0x0008, 0x1756: 0x0008, 0x1757: 0x0008, + 0x1758: 0x0008, 0x1759: 0x0008, 0x175a: 0x0008, 0x175b: 0x0008, 0x175c: 0x0008, 0x175d: 0x0008, + 0x175e: 0x0008, 0x175f: 0x0008, 0x1760: 0x0008, 0x1761: 0x0008, 0x1762: 0x0008, 0x1763: 0x0008, + 0x1764: 0x0008, 0x1765: 0x0008, 0x1766: 0x0008, 0x1767: 0x0008, 0x1768: 0x0008, 0x1769: 0x0040, + 0x176a: 0x0008, 0x176b: 0x0008, 0x176c: 0x0008, 0x176d: 0x0008, 0x176e: 0x0008, 0x176f: 0x0008, + 0x1770: 0x0008, 0x1771: 0x0040, 0x1772: 0x0008, 0x1773: 0x0008, 0x1774: 0x0040, 0x1775: 0x0008, + 0x1776: 0x0008, 0x1777: 0x0008, 0x1778: 0x0008, 0x1779: 0x0008, 0x177a: 0x0040, 0x177b: 0x0040, 
+ 0x177c: 0x1308, 0x177d: 0x0008, 0x177e: 0x1008, 0x177f: 0x1008, + // Block 0x5e, offset 0x1780 + 0x1780: 0x1308, 0x1781: 0x1008, 0x1782: 0x1008, 0x1783: 0x1008, 0x1784: 0x1008, 0x1785: 0x0040, + 0x1786: 0x0040, 0x1787: 0x1008, 0x1788: 0x1008, 0x1789: 0x0040, 0x178a: 0x0040, 0x178b: 0x1008, + 0x178c: 0x1008, 0x178d: 0x1808, 0x178e: 0x0040, 0x178f: 0x0040, 0x1790: 0x0008, 0x1791: 0x0040, + 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x1008, + 0x1798: 0x0040, 0x1799: 0x0040, 0x179a: 0x0040, 0x179b: 0x0040, 0x179c: 0x0040, 0x179d: 0x0008, + 0x179e: 0x0008, 0x179f: 0x0008, 0x17a0: 0x0008, 0x17a1: 0x0008, 0x17a2: 0x1008, 0x17a3: 0x1008, + 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x1308, 0x17a7: 0x1308, 0x17a8: 0x1308, 0x17a9: 0x1308, + 0x17aa: 0x1308, 0x17ab: 0x1308, 0x17ac: 0x1308, 0x17ad: 0x0040, 0x17ae: 0x0040, 0x17af: 0x0040, + 0x17b0: 0x1308, 0x17b1: 0x1308, 0x17b2: 0x1308, 0x17b3: 0x1308, 0x17b4: 0x1308, 0x17b5: 0x0040, + 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, + 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x0039, 0x17c1: 0x0ee9, 0x17c2: 0x1159, 0x17c3: 0x0ef9, 0x17c4: 0x0f09, 0x17c5: 0x1199, + 0x17c6: 0x0f31, 0x17c7: 0x0249, 0x17c8: 0x0f41, 0x17c9: 0x0259, 0x17ca: 0x0f51, 0x17cb: 0x0359, + 0x17cc: 0x0f61, 0x17cd: 0x0f71, 0x17ce: 0x00d9, 0x17cf: 0x0f99, 0x17d0: 0x2039, 0x17d1: 0x0269, + 0x17d2: 0x01d9, 0x17d3: 0x0fa9, 0x17d4: 0x0fb9, 0x17d5: 0x1089, 0x17d6: 0x0279, 0x17d7: 0x0369, + 0x17d8: 0x0289, 0x17d9: 0x13d1, 0x17da: 0x0039, 0x17db: 0x0ee9, 0x17dc: 0x1159, 0x17dd: 0x0ef9, + 0x17de: 0x0f09, 0x17df: 0x1199, 0x17e0: 0x0f31, 0x17e1: 0x0249, 0x17e2: 0x0f41, 0x17e3: 0x0259, + 0x17e4: 0x0f51, 0x17e5: 0x0359, 0x17e6: 0x0f61, 0x17e7: 0x0f71, 0x17e8: 0x00d9, 0x17e9: 0x0f99, + 0x17ea: 0x2039, 0x17eb: 0x0269, 0x17ec: 0x01d9, 0x17ed: 0x0fa9, 0x17ee: 0x0fb9, 0x17ef: 0x1089, + 0x17f0: 0x0279, 0x17f1: 0x0369, 0x17f2: 
0x0289, 0x17f3: 0x13d1, 0x17f4: 0x0039, 0x17f5: 0x0ee9, + 0x17f6: 0x1159, 0x17f7: 0x0ef9, 0x17f8: 0x0f09, 0x17f9: 0x1199, 0x17fa: 0x0f31, 0x17fb: 0x0249, + 0x17fc: 0x0f41, 0x17fd: 0x0259, 0x17fe: 0x0f51, 0x17ff: 0x0359, + // Block 0x60, offset 0x1800 + 0x1800: 0x0f61, 0x1801: 0x0f71, 0x1802: 0x00d9, 0x1803: 0x0f99, 0x1804: 0x2039, 0x1805: 0x0269, + 0x1806: 0x01d9, 0x1807: 0x0fa9, 0x1808: 0x0fb9, 0x1809: 0x1089, 0x180a: 0x0279, 0x180b: 0x0369, + 0x180c: 0x0289, 0x180d: 0x13d1, 0x180e: 0x0039, 0x180f: 0x0ee9, 0x1810: 0x1159, 0x1811: 0x0ef9, + 0x1812: 0x0f09, 0x1813: 0x1199, 0x1814: 0x0f31, 0x1815: 0x0040, 0x1816: 0x0f41, 0x1817: 0x0259, + 0x1818: 0x0f51, 0x1819: 0x0359, 0x181a: 0x0f61, 0x181b: 0x0f71, 0x181c: 0x00d9, 0x181d: 0x0f99, + 0x181e: 0x2039, 0x181f: 0x0269, 0x1820: 0x01d9, 0x1821: 0x0fa9, 0x1822: 0x0fb9, 0x1823: 0x1089, + 0x1824: 0x0279, 0x1825: 0x0369, 0x1826: 0x0289, 0x1827: 0x13d1, 0x1828: 0x0039, 0x1829: 0x0ee9, + 0x182a: 0x1159, 0x182b: 0x0ef9, 0x182c: 0x0f09, 0x182d: 0x1199, 0x182e: 0x0f31, 0x182f: 0x0249, + 0x1830: 0x0f41, 0x1831: 0x0259, 0x1832: 0x0f51, 0x1833: 0x0359, 0x1834: 0x0f61, 0x1835: 0x0f71, + 0x1836: 0x00d9, 0x1837: 0x0f99, 0x1838: 0x2039, 0x1839: 0x0269, 0x183a: 0x01d9, 0x183b: 0x0fa9, + 0x183c: 0x0fb9, 0x183d: 0x1089, 0x183e: 0x0279, 0x183f: 0x0369, + // Block 0x61, offset 0x1840 + 0x1840: 0x0289, 0x1841: 0x13d1, 0x1842: 0x0039, 0x1843: 0x0ee9, 0x1844: 0x1159, 0x1845: 0x0ef9, + 0x1846: 0x0f09, 0x1847: 0x1199, 0x1848: 0x0f31, 0x1849: 0x0249, 0x184a: 0x0f41, 0x184b: 0x0259, + 0x184c: 0x0f51, 0x184d: 0x0359, 0x184e: 0x0f61, 0x184f: 0x0f71, 0x1850: 0x00d9, 0x1851: 0x0f99, + 0x1852: 0x2039, 0x1853: 0x0269, 0x1854: 0x01d9, 0x1855: 0x0fa9, 0x1856: 0x0fb9, 0x1857: 0x1089, + 0x1858: 0x0279, 0x1859: 0x0369, 0x185a: 0x0289, 0x185b: 0x13d1, 0x185c: 0x0039, 0x185d: 0x0040, + 0x185e: 0x1159, 0x185f: 0x0ef9, 0x1860: 0x0040, 0x1861: 0x0040, 0x1862: 0x0f31, 0x1863: 0x0040, + 0x1864: 0x0040, 0x1865: 0x0259, 0x1866: 0x0f51, 0x1867: 0x0040, 0x1868: 0x0040, 
0x1869: 0x0f71, + 0x186a: 0x00d9, 0x186b: 0x0f99, 0x186c: 0x2039, 0x186d: 0x0040, 0x186e: 0x01d9, 0x186f: 0x0fa9, + 0x1870: 0x0fb9, 0x1871: 0x1089, 0x1872: 0x0279, 0x1873: 0x0369, 0x1874: 0x0289, 0x1875: 0x13d1, + 0x1876: 0x0039, 0x1877: 0x0ee9, 0x1878: 0x1159, 0x1879: 0x0ef9, 0x187a: 0x0040, 0x187b: 0x1199, + 0x187c: 0x0040, 0x187d: 0x0249, 0x187e: 0x0f41, 0x187f: 0x0259, + // Block 0x62, offset 0x1880 + 0x1880: 0x0f51, 0x1881: 0x0359, 0x1882: 0x0f61, 0x1883: 0x0f71, 0x1884: 0x0040, 0x1885: 0x0f99, + 0x1886: 0x2039, 0x1887: 0x0269, 0x1888: 0x01d9, 0x1889: 0x0fa9, 0x188a: 0x0fb9, 0x188b: 0x1089, + 0x188c: 0x0279, 0x188d: 0x0369, 0x188e: 0x0289, 0x188f: 0x13d1, 0x1890: 0x0039, 0x1891: 0x0ee9, + 0x1892: 0x1159, 0x1893: 0x0ef9, 0x1894: 0x0f09, 0x1895: 0x1199, 0x1896: 0x0f31, 0x1897: 0x0249, + 0x1898: 0x0f41, 0x1899: 0x0259, 0x189a: 0x0f51, 0x189b: 0x0359, 0x189c: 0x0f61, 0x189d: 0x0f71, + 0x189e: 0x00d9, 0x189f: 0x0f99, 0x18a0: 0x2039, 0x18a1: 0x0269, 0x18a2: 0x01d9, 0x18a3: 0x0fa9, + 0x18a4: 0x0fb9, 0x18a5: 0x1089, 0x18a6: 0x0279, 0x18a7: 0x0369, 0x18a8: 0x0289, 0x18a9: 0x13d1, + 0x18aa: 0x0039, 0x18ab: 0x0ee9, 0x18ac: 0x1159, 0x18ad: 0x0ef9, 0x18ae: 0x0f09, 0x18af: 0x1199, + 0x18b0: 0x0f31, 0x18b1: 0x0249, 0x18b2: 0x0f41, 0x18b3: 0x0259, 0x18b4: 0x0f51, 0x18b5: 0x0359, + 0x18b6: 0x0f61, 0x18b7: 0x0f71, 0x18b8: 0x00d9, 0x18b9: 0x0f99, 0x18ba: 0x2039, 0x18bb: 0x0269, + 0x18bc: 0x01d9, 0x18bd: 0x0fa9, 0x18be: 0x0fb9, 0x18bf: 0x1089, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x0279, 0x18c1: 0x0369, 0x18c2: 0x0289, 0x18c3: 0x13d1, 0x18c4: 0x0039, 0x18c5: 0x0ee9, + 0x18c6: 0x0040, 0x18c7: 0x0ef9, 0x18c8: 0x0f09, 0x18c9: 0x1199, 0x18ca: 0x0f31, 0x18cb: 0x0040, + 0x18cc: 0x0040, 0x18cd: 0x0259, 0x18ce: 0x0f51, 0x18cf: 0x0359, 0x18d0: 0x0f61, 0x18d1: 0x0f71, + 0x18d2: 0x00d9, 0x18d3: 0x0f99, 0x18d4: 0x2039, 0x18d5: 0x0040, 0x18d6: 0x01d9, 0x18d7: 0x0fa9, + 0x18d8: 0x0fb9, 0x18d9: 0x1089, 0x18da: 0x0279, 0x18db: 0x0369, 0x18dc: 0x0289, 0x18dd: 0x0040, + 0x18de: 0x0039, 0x18df: 
0x0ee9, 0x18e0: 0x1159, 0x18e1: 0x0ef9, 0x18e2: 0x0f09, 0x18e3: 0x1199, + 0x18e4: 0x0f31, 0x18e5: 0x0249, 0x18e6: 0x0f41, 0x18e7: 0x0259, 0x18e8: 0x0f51, 0x18e9: 0x0359, + 0x18ea: 0x0f61, 0x18eb: 0x0f71, 0x18ec: 0x00d9, 0x18ed: 0x0f99, 0x18ee: 0x2039, 0x18ef: 0x0269, + 0x18f0: 0x01d9, 0x18f1: 0x0fa9, 0x18f2: 0x0fb9, 0x18f3: 0x1089, 0x18f4: 0x0279, 0x18f5: 0x0369, + 0x18f6: 0x0289, 0x18f7: 0x13d1, 0x18f8: 0x0039, 0x18f9: 0x0ee9, 0x18fa: 0x0040, 0x18fb: 0x0ef9, + 0x18fc: 0x0f09, 0x18fd: 0x1199, 0x18fe: 0x0f31, 0x18ff: 0x0040, + // Block 0x64, offset 0x1900 + 0x1900: 0x0f41, 0x1901: 0x0259, 0x1902: 0x0f51, 0x1903: 0x0359, 0x1904: 0x0f61, 0x1905: 0x0040, + 0x1906: 0x00d9, 0x1907: 0x0040, 0x1908: 0x0040, 0x1909: 0x0040, 0x190a: 0x01d9, 0x190b: 0x0fa9, + 0x190c: 0x0fb9, 0x190d: 0x1089, 0x190e: 0x0279, 0x190f: 0x0369, 0x1910: 0x0289, 0x1911: 0x0040, + 0x1912: 0x0039, 0x1913: 0x0ee9, 0x1914: 0x1159, 0x1915: 0x0ef9, 0x1916: 0x0f09, 0x1917: 0x1199, + 0x1918: 0x0f31, 0x1919: 0x0249, 0x191a: 0x0f41, 0x191b: 0x0259, 0x191c: 0x0f51, 0x191d: 0x0359, + 0x191e: 0x0f61, 0x191f: 0x0f71, 0x1920: 0x00d9, 0x1921: 0x0f99, 0x1922: 0x2039, 0x1923: 0x0269, + 0x1924: 0x01d9, 0x1925: 0x0fa9, 0x1926: 0x0fb9, 0x1927: 0x1089, 0x1928: 0x0279, 0x1929: 0x0369, + 0x192a: 0x0289, 0x192b: 0x13d1, 0x192c: 0x0039, 0x192d: 0x0ee9, 0x192e: 0x1159, 0x192f: 0x0ef9, + 0x1930: 0x0f09, 0x1931: 0x1199, 0x1932: 0x0f31, 0x1933: 0x0249, 0x1934: 0x0f41, 0x1935: 0x0259, + 0x1936: 0x0f51, 0x1937: 0x0359, 0x1938: 0x0f61, 0x1939: 0x0f71, 0x193a: 0x00d9, 0x193b: 0x0f99, + 0x193c: 0x2039, 0x193d: 0x0269, 0x193e: 0x01d9, 0x193f: 0x0fa9, + // Block 0x65, offset 0x1940 + 0x1940: 0x0fb9, 0x1941: 0x1089, 0x1942: 0x0279, 0x1943: 0x0369, 0x1944: 0x0289, 0x1945: 0x13d1, + 0x1946: 0x0039, 0x1947: 0x0ee9, 0x1948: 0x1159, 0x1949: 0x0ef9, 0x194a: 0x0f09, 0x194b: 0x1199, + 0x194c: 0x0f31, 0x194d: 0x0249, 0x194e: 0x0f41, 0x194f: 0x0259, 0x1950: 0x0f51, 0x1951: 0x0359, + 0x1952: 0x0f61, 0x1953: 0x0f71, 0x1954: 0x00d9, 0x1955: 0x0f99, 
0x1956: 0x2039, 0x1957: 0x0269, + 0x1958: 0x01d9, 0x1959: 0x0fa9, 0x195a: 0x0fb9, 0x195b: 0x1089, 0x195c: 0x0279, 0x195d: 0x0369, + 0x195e: 0x0289, 0x195f: 0x13d1, 0x1960: 0x0039, 0x1961: 0x0ee9, 0x1962: 0x1159, 0x1963: 0x0ef9, + 0x1964: 0x0f09, 0x1965: 0x1199, 0x1966: 0x0f31, 0x1967: 0x0249, 0x1968: 0x0f41, 0x1969: 0x0259, + 0x196a: 0x0f51, 0x196b: 0x0359, 0x196c: 0x0f61, 0x196d: 0x0f71, 0x196e: 0x00d9, 0x196f: 0x0f99, + 0x1970: 0x2039, 0x1971: 0x0269, 0x1972: 0x01d9, 0x1973: 0x0fa9, 0x1974: 0x0fb9, 0x1975: 0x1089, + 0x1976: 0x0279, 0x1977: 0x0369, 0x1978: 0x0289, 0x1979: 0x13d1, 0x197a: 0x0039, 0x197b: 0x0ee9, + 0x197c: 0x1159, 0x197d: 0x0ef9, 0x197e: 0x0f09, 0x197f: 0x1199, + // Block 0x66, offset 0x1980 + 0x1980: 0x0f31, 0x1981: 0x0249, 0x1982: 0x0f41, 0x1983: 0x0259, 0x1984: 0x0f51, 0x1985: 0x0359, + 0x1986: 0x0f61, 0x1987: 0x0f71, 0x1988: 0x00d9, 0x1989: 0x0f99, 0x198a: 0x2039, 0x198b: 0x0269, + 0x198c: 0x01d9, 0x198d: 0x0fa9, 0x198e: 0x0fb9, 0x198f: 0x1089, 0x1990: 0x0279, 0x1991: 0x0369, + 0x1992: 0x0289, 0x1993: 0x13d1, 0x1994: 0x0039, 0x1995: 0x0ee9, 0x1996: 0x1159, 0x1997: 0x0ef9, + 0x1998: 0x0f09, 0x1999: 0x1199, 0x199a: 0x0f31, 0x199b: 0x0249, 0x199c: 0x0f41, 0x199d: 0x0259, + 0x199e: 0x0f51, 0x199f: 0x0359, 0x19a0: 0x0f61, 0x19a1: 0x0f71, 0x19a2: 0x00d9, 0x19a3: 0x0f99, + 0x19a4: 0x2039, 0x19a5: 0x0269, 0x19a6: 0x01d9, 0x19a7: 0x0fa9, 0x19a8: 0x0fb9, 0x19a9: 0x1089, + 0x19aa: 0x0279, 0x19ab: 0x0369, 0x19ac: 0x0289, 0x19ad: 0x13d1, 0x19ae: 0x0039, 0x19af: 0x0ee9, + 0x19b0: 0x1159, 0x19b1: 0x0ef9, 0x19b2: 0x0f09, 0x19b3: 0x1199, 0x19b4: 0x0f31, 0x19b5: 0x0249, + 0x19b6: 0x0f41, 0x19b7: 0x0259, 0x19b8: 0x0f51, 0x19b9: 0x0359, 0x19ba: 0x0f61, 0x19bb: 0x0f71, + 0x19bc: 0x00d9, 0x19bd: 0x0f99, 0x19be: 0x2039, 0x19bf: 0x0269, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x01d9, 0x19c1: 0x0fa9, 0x19c2: 0x0fb9, 0x19c3: 0x1089, 0x19c4: 0x0279, 0x19c5: 0x0369, + 0x19c6: 0x0289, 0x19c7: 0x13d1, 0x19c8: 0x0039, 0x19c9: 0x0ee9, 0x19ca: 0x1159, 0x19cb: 0x0ef9, + 0x19cc: 
0x0f09, 0x19cd: 0x1199, 0x19ce: 0x0f31, 0x19cf: 0x0249, 0x19d0: 0x0f41, 0x19d1: 0x0259, + 0x19d2: 0x0f51, 0x19d3: 0x0359, 0x19d4: 0x0f61, 0x19d5: 0x0f71, 0x19d6: 0x00d9, 0x19d7: 0x0f99, + 0x19d8: 0x2039, 0x19d9: 0x0269, 0x19da: 0x01d9, 0x19db: 0x0fa9, 0x19dc: 0x0fb9, 0x19dd: 0x1089, + 0x19de: 0x0279, 0x19df: 0x0369, 0x19e0: 0x0289, 0x19e1: 0x13d1, 0x19e2: 0x0039, 0x19e3: 0x0ee9, + 0x19e4: 0x1159, 0x19e5: 0x0ef9, 0x19e6: 0x0f09, 0x19e7: 0x1199, 0x19e8: 0x0f31, 0x19e9: 0x0249, + 0x19ea: 0x0f41, 0x19eb: 0x0259, 0x19ec: 0x0f51, 0x19ed: 0x0359, 0x19ee: 0x0f61, 0x19ef: 0x0f71, + 0x19f0: 0x00d9, 0x19f1: 0x0f99, 0x19f2: 0x2039, 0x19f3: 0x0269, 0x19f4: 0x01d9, 0x19f5: 0x0fa9, + 0x19f6: 0x0fb9, 0x19f7: 0x1089, 0x19f8: 0x0279, 0x19f9: 0x0369, 0x19fa: 0x0289, 0x19fb: 0x13d1, + 0x19fc: 0x0039, 0x19fd: 0x0ee9, 0x19fe: 0x1159, 0x19ff: 0x0ef9, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x0f09, 0x1a01: 0x1199, 0x1a02: 0x0f31, 0x1a03: 0x0249, 0x1a04: 0x0f41, 0x1a05: 0x0259, + 0x1a06: 0x0f51, 0x1a07: 0x0359, 0x1a08: 0x0f61, 0x1a09: 0x0f71, 0x1a0a: 0x00d9, 0x1a0b: 0x0f99, + 0x1a0c: 0x2039, 0x1a0d: 0x0269, 0x1a0e: 0x01d9, 0x1a0f: 0x0fa9, 0x1a10: 0x0fb9, 0x1a11: 0x1089, + 0x1a12: 0x0279, 0x1a13: 0x0369, 0x1a14: 0x0289, 0x1a15: 0x13d1, 0x1a16: 0x0039, 0x1a17: 0x0ee9, + 0x1a18: 0x1159, 0x1a19: 0x0ef9, 0x1a1a: 0x0f09, 0x1a1b: 0x1199, 0x1a1c: 0x0f31, 0x1a1d: 0x0249, + 0x1a1e: 0x0f41, 0x1a1f: 0x0259, 0x1a20: 0x0f51, 0x1a21: 0x0359, 0x1a22: 0x0f61, 0x1a23: 0x0f71, + 0x1a24: 0x00d9, 0x1a25: 0x0f99, 0x1a26: 0x2039, 0x1a27: 0x0269, 0x1a28: 0x01d9, 0x1a29: 0x0fa9, + 0x1a2a: 0x0fb9, 0x1a2b: 0x1089, 0x1a2c: 0x0279, 0x1a2d: 0x0369, 0x1a2e: 0x0289, 0x1a2f: 0x13d1, + 0x1a30: 0x0039, 0x1a31: 0x0ee9, 0x1a32: 0x1159, 0x1a33: 0x0ef9, 0x1a34: 0x0f09, 0x1a35: 0x1199, + 0x1a36: 0x0f31, 0x1a37: 0x0249, 0x1a38: 0x0f41, 0x1a39: 0x0259, 0x1a3a: 0x0f51, 0x1a3b: 0x0359, + 0x1a3c: 0x0f61, 0x1a3d: 0x0f71, 0x1a3e: 0x00d9, 0x1a3f: 0x0f99, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x2039, 0x1a41: 0x0269, 0x1a42: 0x01d9, 
0x1a43: 0x0fa9, 0x1a44: 0x0fb9, 0x1a45: 0x1089, + 0x1a46: 0x0279, 0x1a47: 0x0369, 0x1a48: 0x0289, 0x1a49: 0x13d1, 0x1a4a: 0x0039, 0x1a4b: 0x0ee9, + 0x1a4c: 0x1159, 0x1a4d: 0x0ef9, 0x1a4e: 0x0f09, 0x1a4f: 0x1199, 0x1a50: 0x0f31, 0x1a51: 0x0249, + 0x1a52: 0x0f41, 0x1a53: 0x0259, 0x1a54: 0x0f51, 0x1a55: 0x0359, 0x1a56: 0x0f61, 0x1a57: 0x0f71, + 0x1a58: 0x00d9, 0x1a59: 0x0f99, 0x1a5a: 0x2039, 0x1a5b: 0x0269, 0x1a5c: 0x01d9, 0x1a5d: 0x0fa9, + 0x1a5e: 0x0fb9, 0x1a5f: 0x1089, 0x1a60: 0x0279, 0x1a61: 0x0369, 0x1a62: 0x0289, 0x1a63: 0x13d1, + 0x1a64: 0xba81, 0x1a65: 0xba99, 0x1a66: 0x0040, 0x1a67: 0x0040, 0x1a68: 0xbab1, 0x1a69: 0x1099, + 0x1a6a: 0x10b1, 0x1a6b: 0x10c9, 0x1a6c: 0xbac9, 0x1a6d: 0xbae1, 0x1a6e: 0xbaf9, 0x1a6f: 0x1429, + 0x1a70: 0x1a31, 0x1a71: 0xbb11, 0x1a72: 0xbb29, 0x1a73: 0xbb41, 0x1a74: 0xbb59, 0x1a75: 0xbb71, + 0x1a76: 0xbb89, 0x1a77: 0x2109, 0x1a78: 0x1111, 0x1a79: 0x1429, 0x1a7a: 0xbba1, 0x1a7b: 0xbbb9, + 0x1a7c: 0xbbd1, 0x1a7d: 0x10e1, 0x1a7e: 0x10f9, 0x1a7f: 0xbbe9, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x2079, 0x1a81: 0xbc01, 0x1a82: 0xbab1, 0x1a83: 0x1099, 0x1a84: 0x10b1, 0x1a85: 0x10c9, + 0x1a86: 0xbac9, 0x1a87: 0xbae1, 0x1a88: 0xbaf9, 0x1a89: 0x1429, 0x1a8a: 0x1a31, 0x1a8b: 0xbb11, + 0x1a8c: 0xbb29, 0x1a8d: 0xbb41, 0x1a8e: 0xbb59, 0x1a8f: 0xbb71, 0x1a90: 0xbb89, 0x1a91: 0x2109, + 0x1a92: 0x1111, 0x1a93: 0xbba1, 0x1a94: 0xbba1, 0x1a95: 0xbbb9, 0x1a96: 0xbbd1, 0x1a97: 0x10e1, + 0x1a98: 0x10f9, 0x1a99: 0xbbe9, 0x1a9a: 0x2079, 0x1a9b: 0xbc21, 0x1a9c: 0xbac9, 0x1a9d: 0x1429, + 0x1a9e: 0xbb11, 0x1a9f: 0x10e1, 0x1aa0: 0x1111, 0x1aa1: 0x2109, 0x1aa2: 0xbab1, 0x1aa3: 0x1099, + 0x1aa4: 0x10b1, 0x1aa5: 0x10c9, 0x1aa6: 0xbac9, 0x1aa7: 0xbae1, 0x1aa8: 0xbaf9, 0x1aa9: 0x1429, + 0x1aaa: 0x1a31, 0x1aab: 0xbb11, 0x1aac: 0xbb29, 0x1aad: 0xbb41, 0x1aae: 0xbb59, 0x1aaf: 0xbb71, + 0x1ab0: 0xbb89, 0x1ab1: 0x2109, 0x1ab2: 0x1111, 0x1ab3: 0x1429, 0x1ab4: 0xbba1, 0x1ab5: 0xbbb9, + 0x1ab6: 0xbbd1, 0x1ab7: 0x10e1, 0x1ab8: 0x10f9, 0x1ab9: 0xbbe9, 0x1aba: 0x2079, 0x1abb: 
0xbc01, + 0x1abc: 0xbab1, 0x1abd: 0x1099, 0x1abe: 0x10b1, 0x1abf: 0x10c9, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0xbac9, 0x1ac1: 0xbae1, 0x1ac2: 0xbaf9, 0x1ac3: 0x1429, 0x1ac4: 0x1a31, 0x1ac5: 0xbb11, + 0x1ac6: 0xbb29, 0x1ac7: 0xbb41, 0x1ac8: 0xbb59, 0x1ac9: 0xbb71, 0x1aca: 0xbb89, 0x1acb: 0x2109, + 0x1acc: 0x1111, 0x1acd: 0xbba1, 0x1ace: 0xbba1, 0x1acf: 0xbbb9, 0x1ad0: 0xbbd1, 0x1ad1: 0x10e1, + 0x1ad2: 0x10f9, 0x1ad3: 0xbbe9, 0x1ad4: 0x2079, 0x1ad5: 0xbc21, 0x1ad6: 0xbac9, 0x1ad7: 0x1429, + 0x1ad8: 0xbb11, 0x1ad9: 0x10e1, 0x1ada: 0x1111, 0x1adb: 0x2109, 0x1adc: 0xbab1, 0x1add: 0x1099, + 0x1ade: 0x10b1, 0x1adf: 0x10c9, 0x1ae0: 0xbac9, 0x1ae1: 0xbae1, 0x1ae2: 0xbaf9, 0x1ae3: 0x1429, + 0x1ae4: 0x1a31, 0x1ae5: 0xbb11, 0x1ae6: 0xbb29, 0x1ae7: 0xbb41, 0x1ae8: 0xbb59, 0x1ae9: 0xbb71, + 0x1aea: 0xbb89, 0x1aeb: 0x2109, 0x1aec: 0x1111, 0x1aed: 0x1429, 0x1aee: 0xbba1, 0x1aef: 0xbbb9, + 0x1af0: 0xbbd1, 0x1af1: 0x10e1, 0x1af2: 0x10f9, 0x1af3: 0xbbe9, 0x1af4: 0x2079, 0x1af5: 0xbc01, + 0x1af6: 0xbab1, 0x1af7: 0x1099, 0x1af8: 0x10b1, 0x1af9: 0x10c9, 0x1afa: 0xbac9, 0x1afb: 0xbae1, + 0x1afc: 0xbaf9, 0x1afd: 0x1429, 0x1afe: 0x1a31, 0x1aff: 0xbb11, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0xbb29, 0x1b01: 0xbb41, 0x1b02: 0xbb59, 0x1b03: 0xbb71, 0x1b04: 0xbb89, 0x1b05: 0x2109, + 0x1b06: 0x1111, 0x1b07: 0xbba1, 0x1b08: 0xbba1, 0x1b09: 0xbbb9, 0x1b0a: 0xbbd1, 0x1b0b: 0x10e1, + 0x1b0c: 0x10f9, 0x1b0d: 0xbbe9, 0x1b0e: 0x2079, 0x1b0f: 0xbc21, 0x1b10: 0xbac9, 0x1b11: 0x1429, + 0x1b12: 0xbb11, 0x1b13: 0x10e1, 0x1b14: 0x1111, 0x1b15: 0x2109, 0x1b16: 0xbab1, 0x1b17: 0x1099, + 0x1b18: 0x10b1, 0x1b19: 0x10c9, 0x1b1a: 0xbac9, 0x1b1b: 0xbae1, 0x1b1c: 0xbaf9, 0x1b1d: 0x1429, + 0x1b1e: 0x1a31, 0x1b1f: 0xbb11, 0x1b20: 0xbb29, 0x1b21: 0xbb41, 0x1b22: 0xbb59, 0x1b23: 0xbb71, + 0x1b24: 0xbb89, 0x1b25: 0x2109, 0x1b26: 0x1111, 0x1b27: 0x1429, 0x1b28: 0xbba1, 0x1b29: 0xbbb9, + 0x1b2a: 0xbbd1, 0x1b2b: 0x10e1, 0x1b2c: 0x10f9, 0x1b2d: 0xbbe9, 0x1b2e: 0x2079, 0x1b2f: 0xbc01, + 0x1b30: 0xbab1, 0x1b31: 0x1099, 
0x1b32: 0x10b1, 0x1b33: 0x10c9, 0x1b34: 0xbac9, 0x1b35: 0xbae1, + 0x1b36: 0xbaf9, 0x1b37: 0x1429, 0x1b38: 0x1a31, 0x1b39: 0xbb11, 0x1b3a: 0xbb29, 0x1b3b: 0xbb41, + 0x1b3c: 0xbb59, 0x1b3d: 0xbb71, 0x1b3e: 0xbb89, 0x1b3f: 0x2109, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0x1111, 0x1b41: 0xbba1, 0x1b42: 0xbba1, 0x1b43: 0xbbb9, 0x1b44: 0xbbd1, 0x1b45: 0x10e1, + 0x1b46: 0x10f9, 0x1b47: 0xbbe9, 0x1b48: 0x2079, 0x1b49: 0xbc21, 0x1b4a: 0xbac9, 0x1b4b: 0x1429, + 0x1b4c: 0xbb11, 0x1b4d: 0x10e1, 0x1b4e: 0x1111, 0x1b4f: 0x2109, 0x1b50: 0xbab1, 0x1b51: 0x1099, + 0x1b52: 0x10b1, 0x1b53: 0x10c9, 0x1b54: 0xbac9, 0x1b55: 0xbae1, 0x1b56: 0xbaf9, 0x1b57: 0x1429, + 0x1b58: 0x1a31, 0x1b59: 0xbb11, 0x1b5a: 0xbb29, 0x1b5b: 0xbb41, 0x1b5c: 0xbb59, 0x1b5d: 0xbb71, + 0x1b5e: 0xbb89, 0x1b5f: 0x2109, 0x1b60: 0x1111, 0x1b61: 0x1429, 0x1b62: 0xbba1, 0x1b63: 0xbbb9, + 0x1b64: 0xbbd1, 0x1b65: 0x10e1, 0x1b66: 0x10f9, 0x1b67: 0xbbe9, 0x1b68: 0x2079, 0x1b69: 0xbc01, + 0x1b6a: 0xbab1, 0x1b6b: 0x1099, 0x1b6c: 0x10b1, 0x1b6d: 0x10c9, 0x1b6e: 0xbac9, 0x1b6f: 0xbae1, + 0x1b70: 0xbaf9, 0x1b71: 0x1429, 0x1b72: 0x1a31, 0x1b73: 0xbb11, 0x1b74: 0xbb29, 0x1b75: 0xbb41, + 0x1b76: 0xbb59, 0x1b77: 0xbb71, 0x1b78: 0xbb89, 0x1b79: 0x2109, 0x1b7a: 0x1111, 0x1b7b: 0xbba1, + 0x1b7c: 0xbba1, 0x1b7d: 0xbbb9, 0x1b7e: 0xbbd1, 0x1b7f: 0x10e1, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x10f9, 0x1b81: 0xbbe9, 0x1b82: 0x2079, 0x1b83: 0xbc21, 0x1b84: 0xbac9, 0x1b85: 0x1429, + 0x1b86: 0xbb11, 0x1b87: 0x10e1, 0x1b88: 0x1111, 0x1b89: 0x2109, 0x1b8a: 0xbc41, 0x1b8b: 0xbc41, + 0x1b8c: 0x0040, 0x1b8d: 0x0040, 0x1b8e: 0x1f41, 0x1b8f: 0x00c9, 0x1b90: 0x0069, 0x1b91: 0x0079, + 0x1b92: 0x1f51, 0x1b93: 0x1f61, 0x1b94: 0x1f71, 0x1b95: 0x1f81, 0x1b96: 0x1f91, 0x1b97: 0x1fa1, + 0x1b98: 0x1f41, 0x1b99: 0x00c9, 0x1b9a: 0x0069, 0x1b9b: 0x0079, 0x1b9c: 0x1f51, 0x1b9d: 0x1f61, + 0x1b9e: 0x1f71, 0x1b9f: 0x1f81, 0x1ba0: 0x1f91, 0x1ba1: 0x1fa1, 0x1ba2: 0x1f41, 0x1ba3: 0x00c9, + 0x1ba4: 0x0069, 0x1ba5: 0x0079, 0x1ba6: 0x1f51, 0x1ba7: 0x1f61, 0x1ba8: 
0x1f71, 0x1ba9: 0x1f81, + 0x1baa: 0x1f91, 0x1bab: 0x1fa1, 0x1bac: 0x1f41, 0x1bad: 0x00c9, 0x1bae: 0x0069, 0x1baf: 0x0079, + 0x1bb0: 0x1f51, 0x1bb1: 0x1f61, 0x1bb2: 0x1f71, 0x1bb3: 0x1f81, 0x1bb4: 0x1f91, 0x1bb5: 0x1fa1, + 0x1bb6: 0x1f41, 0x1bb7: 0x00c9, 0x1bb8: 0x0069, 0x1bb9: 0x0079, 0x1bba: 0x1f51, 0x1bbb: 0x1f61, + 0x1bbc: 0x1f71, 0x1bbd: 0x1f81, 0x1bbe: 0x1f91, 0x1bbf: 0x1fa1, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0xe115, 0x1bc1: 0xe115, 0x1bc2: 0xe135, 0x1bc3: 0xe135, 0x1bc4: 0xe115, 0x1bc5: 0xe115, + 0x1bc6: 0xe175, 0x1bc7: 0xe175, 0x1bc8: 0xe115, 0x1bc9: 0xe115, 0x1bca: 0xe135, 0x1bcb: 0xe135, + 0x1bcc: 0xe115, 0x1bcd: 0xe115, 0x1bce: 0xe1f5, 0x1bcf: 0xe1f5, 0x1bd0: 0xe115, 0x1bd1: 0xe115, + 0x1bd2: 0xe135, 0x1bd3: 0xe135, 0x1bd4: 0xe115, 0x1bd5: 0xe115, 0x1bd6: 0xe175, 0x1bd7: 0xe175, + 0x1bd8: 0xe115, 0x1bd9: 0xe115, 0x1bda: 0xe135, 0x1bdb: 0xe135, 0x1bdc: 0xe115, 0x1bdd: 0xe115, + 0x1bde: 0x8b05, 0x1bdf: 0x8b05, 0x1be0: 0x04b5, 0x1be1: 0x04b5, 0x1be2: 0x0208, 0x1be3: 0x0208, + 0x1be4: 0x0208, 0x1be5: 0x0208, 0x1be6: 0x0208, 0x1be7: 0x0208, 0x1be8: 0x0208, 0x1be9: 0x0208, + 0x1bea: 0x0208, 0x1beb: 0x0208, 0x1bec: 0x0208, 0x1bed: 0x0208, 0x1bee: 0x0208, 0x1bef: 0x0208, + 0x1bf0: 0x0208, 0x1bf1: 0x0208, 0x1bf2: 0x0208, 0x1bf3: 0x0208, 0x1bf4: 0x0208, 0x1bf5: 0x0208, + 0x1bf6: 0x0208, 0x1bf7: 0x0208, 0x1bf8: 0x0208, 0x1bf9: 0x0208, 0x1bfa: 0x0208, 0x1bfb: 0x0208, + 0x1bfc: 0x0208, 0x1bfd: 0x0208, 0x1bfe: 0x0208, 0x1bff: 0x0208, + // Block 0x70, offset 0x1c00 + 0x1c00: 0xb189, 0x1c01: 0xb1a1, 0x1c02: 0xb201, 0x1c03: 0xb249, 0x1c04: 0x0040, 0x1c05: 0xb411, + 0x1c06: 0xb291, 0x1c07: 0xb219, 0x1c08: 0xb309, 0x1c09: 0xb429, 0x1c0a: 0xb399, 0x1c0b: 0xb3b1, + 0x1c0c: 0xb3c9, 0x1c0d: 0xb3e1, 0x1c0e: 0xb2a9, 0x1c0f: 0xb339, 0x1c10: 0xb369, 0x1c11: 0xb2d9, + 0x1c12: 0xb381, 0x1c13: 0xb279, 0x1c14: 0xb2c1, 0x1c15: 0xb1d1, 0x1c16: 0xb1e9, 0x1c17: 0xb231, + 0x1c18: 0xb261, 0x1c19: 0xb2f1, 0x1c1a: 0xb321, 0x1c1b: 0xb351, 0x1c1c: 0xbc59, 0x1c1d: 0x7949, + 0x1c1e: 0xbc71, 
0x1c1f: 0xbc89, 0x1c20: 0x0040, 0x1c21: 0xb1a1, 0x1c22: 0xb201, 0x1c23: 0x0040, + 0x1c24: 0xb3f9, 0x1c25: 0x0040, 0x1c26: 0x0040, 0x1c27: 0xb219, 0x1c28: 0x0040, 0x1c29: 0xb429, + 0x1c2a: 0xb399, 0x1c2b: 0xb3b1, 0x1c2c: 0xb3c9, 0x1c2d: 0xb3e1, 0x1c2e: 0xb2a9, 0x1c2f: 0xb339, + 0x1c30: 0xb369, 0x1c31: 0xb2d9, 0x1c32: 0xb381, 0x1c33: 0x0040, 0x1c34: 0xb2c1, 0x1c35: 0xb1d1, + 0x1c36: 0xb1e9, 0x1c37: 0xb231, 0x1c38: 0x0040, 0x1c39: 0xb2f1, 0x1c3a: 0x0040, 0x1c3b: 0xb351, + 0x1c3c: 0x0040, 0x1c3d: 0x0040, 0x1c3e: 0x0040, 0x1c3f: 0x0040, + // Block 0x71, offset 0x1c40 + 0x1c40: 0x0040, 0x1c41: 0x0040, 0x1c42: 0xb201, 0x1c43: 0x0040, 0x1c44: 0x0040, 0x1c45: 0x0040, + 0x1c46: 0x0040, 0x1c47: 0xb219, 0x1c48: 0x0040, 0x1c49: 0xb429, 0x1c4a: 0x0040, 0x1c4b: 0xb3b1, + 0x1c4c: 0x0040, 0x1c4d: 0xb3e1, 0x1c4e: 0xb2a9, 0x1c4f: 0xb339, 0x1c50: 0x0040, 0x1c51: 0xb2d9, + 0x1c52: 0xb381, 0x1c53: 0x0040, 0x1c54: 0xb2c1, 0x1c55: 0x0040, 0x1c56: 0x0040, 0x1c57: 0xb231, + 0x1c58: 0x0040, 0x1c59: 0xb2f1, 0x1c5a: 0x0040, 0x1c5b: 0xb351, 0x1c5c: 0x0040, 0x1c5d: 0x7949, + 0x1c5e: 0x0040, 0x1c5f: 0xbc89, 0x1c60: 0x0040, 0x1c61: 0xb1a1, 0x1c62: 0xb201, 0x1c63: 0x0040, + 0x1c64: 0xb3f9, 0x1c65: 0x0040, 0x1c66: 0x0040, 0x1c67: 0xb219, 0x1c68: 0xb309, 0x1c69: 0xb429, + 0x1c6a: 0xb399, 0x1c6b: 0x0040, 0x1c6c: 0xb3c9, 0x1c6d: 0xb3e1, 0x1c6e: 0xb2a9, 0x1c6f: 0xb339, + 0x1c70: 0xb369, 0x1c71: 0xb2d9, 0x1c72: 0xb381, 0x1c73: 0x0040, 0x1c74: 0xb2c1, 0x1c75: 0xb1d1, + 0x1c76: 0xb1e9, 0x1c77: 0xb231, 0x1c78: 0x0040, 0x1c79: 0xb2f1, 0x1c7a: 0xb321, 0x1c7b: 0xb351, + 0x1c7c: 0xbc59, 0x1c7d: 0x0040, 0x1c7e: 0xbc71, 0x1c7f: 0x0040, + // Block 0x72, offset 0x1c80 + 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0xb3f9, 0x1c85: 0xb411, + 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0x0040, 0x1c8b: 0xb3b1, + 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9, + 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 
0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231, + 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0x0040, 0x1c9d: 0x0040, + 0x1c9e: 0x0040, 0x1c9f: 0x0040, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0xb249, + 0x1ca4: 0x0040, 0x1ca5: 0xb411, 0x1ca6: 0xb291, 0x1ca7: 0xb219, 0x1ca8: 0xb309, 0x1ca9: 0xb429, + 0x1caa: 0x0040, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, + 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0xb279, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, + 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0xb261, 0x1cb9: 0xb2f1, 0x1cba: 0xb321, 0x1cbb: 0xb351, + 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x0040, 0x1cc1: 0xbca2, 0x1cc2: 0xbcba, 0x1cc3: 0xbcd2, 0x1cc4: 0xbcea, 0x1cc5: 0xbd02, + 0x1cc6: 0xbd1a, 0x1cc7: 0xbd32, 0x1cc8: 0xbd4a, 0x1cc9: 0xbd62, 0x1cca: 0xbd7a, 0x1ccb: 0x0018, + 0x1ccc: 0x0018, 0x1ccd: 0x0040, 0x1cce: 0x0040, 0x1ccf: 0x0040, 0x1cd0: 0xbd92, 0x1cd1: 0xbdb2, + 0x1cd2: 0xbdd2, 0x1cd3: 0xbdf2, 0x1cd4: 0xbe12, 0x1cd5: 0xbe32, 0x1cd6: 0xbe52, 0x1cd7: 0xbe72, + 0x1cd8: 0xbe92, 0x1cd9: 0xbeb2, 0x1cda: 0xbed2, 0x1cdb: 0xbef2, 0x1cdc: 0xbf12, 0x1cdd: 0xbf32, + 0x1cde: 0xbf52, 0x1cdf: 0xbf72, 0x1ce0: 0xbf92, 0x1ce1: 0xbfb2, 0x1ce2: 0xbfd2, 0x1ce3: 0xbff2, + 0x1ce4: 0xc012, 0x1ce5: 0xc032, 0x1ce6: 0xc052, 0x1ce7: 0xc072, 0x1ce8: 0xc092, 0x1ce9: 0xc0b2, + 0x1cea: 0xc0d1, 0x1ceb: 0x1159, 0x1cec: 0x0269, 0x1ced: 0x6671, 0x1cee: 0xc111, 0x1cef: 0x0040, + 0x1cf0: 0x0039, 0x1cf1: 0x0ee9, 0x1cf2: 0x1159, 0x1cf3: 0x0ef9, 0x1cf4: 0x0f09, 0x1cf5: 0x1199, + 0x1cf6: 0x0f31, 0x1cf7: 0x0249, 0x1cf8: 0x0f41, 0x1cf9: 0x0259, 0x1cfa: 0x0f51, 0x1cfb: 0x0359, + 0x1cfc: 0x0f61, 0x1cfd: 0x0f71, 0x1cfe: 0x00d9, 0x1cff: 0x0f99, + // Block 0x74, offset 0x1d00 + 0x1d00: 0x2039, 0x1d01: 0x0269, 0x1d02: 0x01d9, 0x1d03: 0x0fa9, 0x1d04: 0x0fb9, 0x1d05: 0x1089, + 0x1d06: 0x0279, 0x1d07: 0x0369, 0x1d08: 0x0289, 0x1d09: 0x13d1, 0x1d0a: 0xc129, 0x1d0b: 0x65b1, + 
0x1d0c: 0xc141, 0x1d0d: 0x1441, 0x1d0e: 0xc159, 0x1d0f: 0xc179, 0x1d10: 0x0018, 0x1d11: 0x0018, + 0x1d12: 0x0018, 0x1d13: 0x0018, 0x1d14: 0x0018, 0x1d15: 0x0018, 0x1d16: 0x0018, 0x1d17: 0x0018, + 0x1d18: 0x0018, 0x1d19: 0x0018, 0x1d1a: 0x0018, 0x1d1b: 0x0018, 0x1d1c: 0x0018, 0x1d1d: 0x0018, + 0x1d1e: 0x0018, 0x1d1f: 0x0018, 0x1d20: 0x0018, 0x1d21: 0x0018, 0x1d22: 0x0018, 0x1d23: 0x0018, + 0x1d24: 0x0018, 0x1d25: 0x0018, 0x1d26: 0x0018, 0x1d27: 0x0018, 0x1d28: 0x0018, 0x1d29: 0x0018, + 0x1d2a: 0xc191, 0x1d2b: 0xc1a9, 0x1d2c: 0x0040, 0x1d2d: 0x0040, 0x1d2e: 0x0040, 0x1d2f: 0x0040, + 0x1d30: 0x0018, 0x1d31: 0x0018, 0x1d32: 0x0018, 0x1d33: 0x0018, 0x1d34: 0x0018, 0x1d35: 0x0018, + 0x1d36: 0x0018, 0x1d37: 0x0018, 0x1d38: 0x0018, 0x1d39: 0x0018, 0x1d3a: 0x0018, 0x1d3b: 0x0018, + 0x1d3c: 0x0018, 0x1d3d: 0x0018, 0x1d3e: 0x0018, 0x1d3f: 0x0018, + // Block 0x75, offset 0x1d40 + 0x1d40: 0xc1d9, 0x1d41: 0xc211, 0x1d42: 0xc249, 0x1d43: 0x0040, 0x1d44: 0x0040, 0x1d45: 0x0040, + 0x1d46: 0x0040, 0x1d47: 0x0040, 0x1d48: 0x0040, 0x1d49: 0x0040, 0x1d4a: 0x0040, 0x1d4b: 0x0040, + 0x1d4c: 0x0040, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xc269, 0x1d51: 0xc289, + 0x1d52: 0xc2a9, 0x1d53: 0xc2c9, 0x1d54: 0xc2e9, 0x1d55: 0xc309, 0x1d56: 0xc329, 0x1d57: 0xc349, + 0x1d58: 0xc369, 0x1d59: 0xc389, 0x1d5a: 0xc3a9, 0x1d5b: 0xc3c9, 0x1d5c: 0xc3e9, 0x1d5d: 0xc409, + 0x1d5e: 0xc429, 0x1d5f: 0xc449, 0x1d60: 0xc469, 0x1d61: 0xc489, 0x1d62: 0xc4a9, 0x1d63: 0xc4c9, + 0x1d64: 0xc4e9, 0x1d65: 0xc509, 0x1d66: 0xc529, 0x1d67: 0xc549, 0x1d68: 0xc569, 0x1d69: 0xc589, + 0x1d6a: 0xc5a9, 0x1d6b: 0xc5c9, 0x1d6c: 0xc5e9, 0x1d6d: 0xc609, 0x1d6e: 0xc629, 0x1d6f: 0xc649, + 0x1d70: 0xc669, 0x1d71: 0xc689, 0x1d72: 0xc6a9, 0x1d73: 0xc6c9, 0x1d74: 0xc6e9, 0x1d75: 0xc709, + 0x1d76: 0xc729, 0x1d77: 0xc749, 0x1d78: 0xc769, 0x1d79: 0xc789, 0x1d7a: 0xc7a9, 0x1d7b: 0xc7c9, + 0x1d7c: 0x0040, 0x1d7d: 0x0040, 0x1d7e: 0x0040, 0x1d7f: 0x0040, + // Block 0x76, offset 0x1d80 + 0x1d80: 0xcaf9, 0x1d81: 0xcb19, 0x1d82: 
0xcb39, 0x1d83: 0x8b1d, 0x1d84: 0xcb59, 0x1d85: 0xcb79, + 0x1d86: 0xcb99, 0x1d87: 0xcbb9, 0x1d88: 0xcbd9, 0x1d89: 0xcbf9, 0x1d8a: 0xcc19, 0x1d8b: 0xcc39, + 0x1d8c: 0xcc59, 0x1d8d: 0x8b3d, 0x1d8e: 0xcc79, 0x1d8f: 0xcc99, 0x1d90: 0xccb9, 0x1d91: 0xccd9, + 0x1d92: 0x8b5d, 0x1d93: 0xccf9, 0x1d94: 0xcd19, 0x1d95: 0xc429, 0x1d96: 0x8b7d, 0x1d97: 0xcd39, + 0x1d98: 0xcd59, 0x1d99: 0xcd79, 0x1d9a: 0xcd99, 0x1d9b: 0xcdb9, 0x1d9c: 0x8b9d, 0x1d9d: 0xcdd9, + 0x1d9e: 0xcdf9, 0x1d9f: 0xce19, 0x1da0: 0xce39, 0x1da1: 0xce59, 0x1da2: 0xc789, 0x1da3: 0xce79, + 0x1da4: 0xce99, 0x1da5: 0xceb9, 0x1da6: 0xced9, 0x1da7: 0xcef9, 0x1da8: 0xcf19, 0x1da9: 0xcf39, + 0x1daa: 0xcf59, 0x1dab: 0xcf79, 0x1dac: 0xcf99, 0x1dad: 0xcfb9, 0x1dae: 0xcfd9, 0x1daf: 0xcff9, + 0x1db0: 0xd019, 0x1db1: 0xd039, 0x1db2: 0xd039, 0x1db3: 0xd039, 0x1db4: 0x8bbd, 0x1db5: 0xd059, + 0x1db6: 0xd079, 0x1db7: 0xd099, 0x1db8: 0x8bdd, 0x1db9: 0xd0b9, 0x1dba: 0xd0d9, 0x1dbb: 0xd0f9, + 0x1dbc: 0xd119, 0x1dbd: 0xd139, 0x1dbe: 0xd159, 0x1dbf: 0xd179, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0xd199, 0x1dc1: 0xd1b9, 0x1dc2: 0xd1d9, 0x1dc3: 0xd1f9, 0x1dc4: 0xd219, 0x1dc5: 0xd239, + 0x1dc6: 0xd239, 0x1dc7: 0xd259, 0x1dc8: 0xd279, 0x1dc9: 0xd299, 0x1dca: 0xd2b9, 0x1dcb: 0xd2d9, + 0x1dcc: 0xd2f9, 0x1dcd: 0xd319, 0x1dce: 0xd339, 0x1dcf: 0xd359, 0x1dd0: 0xd379, 0x1dd1: 0xd399, + 0x1dd2: 0xd3b9, 0x1dd3: 0xd3d9, 0x1dd4: 0xd3f9, 0x1dd5: 0xd419, 0x1dd6: 0xd439, 0x1dd7: 0xd459, + 0x1dd8: 0xd479, 0x1dd9: 0x8bfd, 0x1dda: 0xd499, 0x1ddb: 0xd4b9, 0x1ddc: 0xd4d9, 0x1ddd: 0xc309, + 0x1dde: 0xd4f9, 0x1ddf: 0xd519, 0x1de0: 0x8c1d, 0x1de1: 0x8c3d, 0x1de2: 0xd539, 0x1de3: 0xd559, + 0x1de4: 0xd579, 0x1de5: 0xd599, 0x1de6: 0xd5b9, 0x1de7: 0xd5d9, 0x1de8: 0x0040, 0x1de9: 0xd5f9, + 0x1dea: 0xd619, 0x1deb: 0xd619, 0x1dec: 0x8c5d, 0x1ded: 0xd639, 0x1dee: 0xd659, 0x1def: 0xd679, + 0x1df0: 0xd699, 0x1df1: 0x8c7d, 0x1df2: 0xd6b9, 0x1df3: 0xd6d9, 0x1df4: 0x0040, 0x1df5: 0xd6f9, + 0x1df6: 0xd719, 0x1df7: 0xd739, 0x1df8: 0xd759, 0x1df9: 0xd779, 0x1dfa: 0xd799, 
0x1dfb: 0x8c9d, + 0x1dfc: 0xd7b9, 0x1dfd: 0x8cbd, 0x1dfe: 0xd7d9, 0x1dff: 0xd7f9, + // Block 0x78, offset 0x1e00 + 0x1e00: 0xd819, 0x1e01: 0xd839, 0x1e02: 0xd859, 0x1e03: 0xd879, 0x1e04: 0xd899, 0x1e05: 0xd8b9, + 0x1e06: 0xd8d9, 0x1e07: 0xd8f9, 0x1e08: 0xd919, 0x1e09: 0x8cdd, 0x1e0a: 0xd939, 0x1e0b: 0xd959, + 0x1e0c: 0xd979, 0x1e0d: 0xd999, 0x1e0e: 0xd9b9, 0x1e0f: 0x8cfd, 0x1e10: 0xd9d9, 0x1e11: 0x8d1d, + 0x1e12: 0x8d3d, 0x1e13: 0xd9f9, 0x1e14: 0xda19, 0x1e15: 0xda19, 0x1e16: 0xda39, 0x1e17: 0x8d5d, + 0x1e18: 0x8d7d, 0x1e19: 0xda59, 0x1e1a: 0xda79, 0x1e1b: 0xda99, 0x1e1c: 0xdab9, 0x1e1d: 0xdad9, + 0x1e1e: 0xdaf9, 0x1e1f: 0xdb19, 0x1e20: 0xdb39, 0x1e21: 0xdb59, 0x1e22: 0xdb79, 0x1e23: 0xdb99, + 0x1e24: 0x8d9d, 0x1e25: 0xdbb9, 0x1e26: 0xdbd9, 0x1e27: 0xdbf9, 0x1e28: 0xdc19, 0x1e29: 0xdbf9, + 0x1e2a: 0xdc39, 0x1e2b: 0xdc59, 0x1e2c: 0xdc79, 0x1e2d: 0xdc99, 0x1e2e: 0xdcb9, 0x1e2f: 0xdcd9, + 0x1e30: 0xdcf9, 0x1e31: 0xdd19, 0x1e32: 0xdd39, 0x1e33: 0xdd59, 0x1e34: 0xdd79, 0x1e35: 0xdd99, + 0x1e36: 0xddb9, 0x1e37: 0xddd9, 0x1e38: 0x8dbd, 0x1e39: 0xddf9, 0x1e3a: 0xde19, 0x1e3b: 0xde39, + 0x1e3c: 0xde59, 0x1e3d: 0xde79, 0x1e3e: 0x8ddd, 0x1e3f: 0xde99, + // Block 0x79, offset 0x1e40 + 0x1e40: 0xe599, 0x1e41: 0xe5b9, 0x1e42: 0xe5d9, 0x1e43: 0xe5f9, 0x1e44: 0xe619, 0x1e45: 0xe639, + 0x1e46: 0x8efd, 0x1e47: 0xe659, 0x1e48: 0xe679, 0x1e49: 0xe699, 0x1e4a: 0xe6b9, 0x1e4b: 0xe6d9, + 0x1e4c: 0xe6f9, 0x1e4d: 0x8f1d, 0x1e4e: 0xe719, 0x1e4f: 0xe739, 0x1e50: 0x8f3d, 0x1e51: 0x8f5d, + 0x1e52: 0xe759, 0x1e53: 0xe779, 0x1e54: 0xe799, 0x1e55: 0xe7b9, 0x1e56: 0xe7d9, 0x1e57: 0xe7f9, + 0x1e58: 0xe819, 0x1e59: 0xe839, 0x1e5a: 0xe859, 0x1e5b: 0x8f7d, 0x1e5c: 0xe879, 0x1e5d: 0x8f9d, + 0x1e5e: 0xe899, 0x1e5f: 0x0040, 0x1e60: 0xe8b9, 0x1e61: 0xe8d9, 0x1e62: 0xe8f9, 0x1e63: 0x8fbd, + 0x1e64: 0xe919, 0x1e65: 0xe939, 0x1e66: 0x8fdd, 0x1e67: 0x8ffd, 0x1e68: 0xe959, 0x1e69: 0xe979, + 0x1e6a: 0xe999, 0x1e6b: 0xe9b9, 0x1e6c: 0xe9d9, 0x1e6d: 0xe9d9, 0x1e6e: 0xe9f9, 0x1e6f: 0xea19, + 0x1e70: 0xea39, 0x1e71: 
0xea59, 0x1e72: 0xea79, 0x1e73: 0xea99, 0x1e74: 0xeab9, 0x1e75: 0x901d, + 0x1e76: 0xead9, 0x1e77: 0x903d, 0x1e78: 0xeaf9, 0x1e79: 0x905d, 0x1e7a: 0xeb19, 0x1e7b: 0x907d, + 0x1e7c: 0x909d, 0x1e7d: 0x90bd, 0x1e7e: 0xeb39, 0x1e7f: 0xeb59, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0xeb79, 0x1e81: 0x90dd, 0x1e82: 0x90fd, 0x1e83: 0x911d, 0x1e84: 0x913d, 0x1e85: 0xeb99, + 0x1e86: 0xebb9, 0x1e87: 0xebb9, 0x1e88: 0xebd9, 0x1e89: 0xebf9, 0x1e8a: 0xec19, 0x1e8b: 0xec39, + 0x1e8c: 0xec59, 0x1e8d: 0x915d, 0x1e8e: 0xec79, 0x1e8f: 0xec99, 0x1e90: 0xecb9, 0x1e91: 0xecd9, + 0x1e92: 0x917d, 0x1e93: 0xecf9, 0x1e94: 0x919d, 0x1e95: 0x91bd, 0x1e96: 0xed19, 0x1e97: 0xed39, + 0x1e98: 0xed59, 0x1e99: 0xed79, 0x1e9a: 0xed99, 0x1e9b: 0xedb9, 0x1e9c: 0x91dd, 0x1e9d: 0x91fd, + 0x1e9e: 0x921d, 0x1e9f: 0x0040, 0x1ea0: 0xedd9, 0x1ea1: 0x923d, 0x1ea2: 0xedf9, 0x1ea3: 0xee19, + 0x1ea4: 0xee39, 0x1ea5: 0x925d, 0x1ea6: 0xee59, 0x1ea7: 0xee79, 0x1ea8: 0xee99, 0x1ea9: 0xeeb9, + 0x1eaa: 0xeed9, 0x1eab: 0x927d, 0x1eac: 0xeef9, 0x1ead: 0xef19, 0x1eae: 0xef39, 0x1eaf: 0xef59, + 0x1eb0: 0xef79, 0x1eb1: 0xef99, 0x1eb2: 0x929d, 0x1eb3: 0x92bd, 0x1eb4: 0xefb9, 0x1eb5: 0x92dd, + 0x1eb6: 0xefd9, 0x1eb7: 0x92fd, 0x1eb8: 0xeff9, 0x1eb9: 0xf019, 0x1eba: 0xf039, 0x1ebb: 0x931d, + 0x1ebc: 0x933d, 0x1ebd: 0xf059, 0x1ebe: 0x935d, 0x1ebf: 0xf079, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0xf6b9, 0x1ec1: 0xf6d9, 0x1ec2: 0xf6f9, 0x1ec3: 0xf719, 0x1ec4: 0xf739, 0x1ec5: 0x951d, + 0x1ec6: 0xf759, 0x1ec7: 0xf779, 0x1ec8: 0xf799, 0x1ec9: 0xf7b9, 0x1eca: 0xf7d9, 0x1ecb: 0x953d, + 0x1ecc: 0x955d, 0x1ecd: 0xf7f9, 0x1ece: 0xf819, 0x1ecf: 0xf839, 0x1ed0: 0xf859, 0x1ed1: 0xf879, + 0x1ed2: 0xf899, 0x1ed3: 0x957d, 0x1ed4: 0xf8b9, 0x1ed5: 0xf8d9, 0x1ed6: 0xf8f9, 0x1ed7: 0xf919, + 0x1ed8: 0x959d, 0x1ed9: 0x95bd, 0x1eda: 0xf939, 0x1edb: 0xf959, 0x1edc: 0xf979, 0x1edd: 0x95dd, + 0x1ede: 0xf999, 0x1edf: 0xf9b9, 0x1ee0: 0x6815, 0x1ee1: 0x95fd, 0x1ee2: 0xf9d9, 0x1ee3: 0xf9f9, + 0x1ee4: 0xfa19, 0x1ee5: 0x961d, 0x1ee6: 0xfa39, 0x1ee7: 0xfa59, 
0x1ee8: 0xfa79, 0x1ee9: 0xfa99, + 0x1eea: 0xfab9, 0x1eeb: 0xfad9, 0x1eec: 0xfaf9, 0x1eed: 0x963d, 0x1eee: 0xfb19, 0x1eef: 0xfb39, + 0x1ef0: 0xfb59, 0x1ef1: 0x965d, 0x1ef2: 0xfb79, 0x1ef3: 0xfb99, 0x1ef4: 0xfbb9, 0x1ef5: 0xfbd9, + 0x1ef6: 0x7b35, 0x1ef7: 0x967d, 0x1ef8: 0xfbf9, 0x1ef9: 0xfc19, 0x1efa: 0xfc39, 0x1efb: 0x969d, + 0x1efc: 0xfc59, 0x1efd: 0x96bd, 0x1efe: 0xfc79, 0x1eff: 0xfc79, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0xfc99, 0x1f01: 0x96dd, 0x1f02: 0xfcb9, 0x1f03: 0xfcd9, 0x1f04: 0xfcf9, 0x1f05: 0xfd19, + 0x1f06: 0xfd39, 0x1f07: 0xfd59, 0x1f08: 0xfd79, 0x1f09: 0x96fd, 0x1f0a: 0xfd99, 0x1f0b: 0xfdb9, + 0x1f0c: 0xfdd9, 0x1f0d: 0xfdf9, 0x1f0e: 0xfe19, 0x1f0f: 0xfe39, 0x1f10: 0x971d, 0x1f11: 0xfe59, + 0x1f12: 0x973d, 0x1f13: 0x975d, 0x1f14: 0x977d, 0x1f15: 0xfe79, 0x1f16: 0xfe99, 0x1f17: 0xfeb9, + 0x1f18: 0xfed9, 0x1f19: 0xfef9, 0x1f1a: 0xff19, 0x1f1b: 0xff39, 0x1f1c: 0xff59, 0x1f1d: 0x979d, + 0x1f1e: 0x0040, 0x1f1f: 0x0040, 0x1f20: 0x0040, 0x1f21: 0x0040, 0x1f22: 0x0040, 0x1f23: 0x0040, + 0x1f24: 0x0040, 0x1f25: 0x0040, 0x1f26: 0x0040, 0x1f27: 0x0040, 0x1f28: 0x0040, 0x1f29: 0x0040, + 0x1f2a: 0x0040, 0x1f2b: 0x0040, 0x1f2c: 0x0040, 0x1f2d: 0x0040, 0x1f2e: 0x0040, 0x1f2f: 0x0040, + 0x1f30: 0x0040, 0x1f31: 0x0040, 0x1f32: 0x0040, 0x1f33: 0x0040, 0x1f34: 0x0040, 0x1f35: 0x0040, + 0x1f36: 0x0040, 0x1f37: 0x0040, 0x1f38: 0x0040, 0x1f39: 0x0040, 0x1f3a: 0x0040, 0x1f3b: 0x0040, + 0x1f3c: 0x0040, 0x1f3d: 0x0040, 0x1f3e: 0x0040, 0x1f3f: 0x0040, +} + +// idnaIndex: 35 blocks, 2240 entries, 4480 bytes +// Block 0 is the zero block. 
+var idnaIndex = [2240]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x7b, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7c, 0xca: 0x7d, 0xcb: 0x07, 0xcc: 0x7e, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x7f, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x80, 0xd6: 0x81, 0xd7: 0x82, + 0xd8: 0x0f, 0xd9: 0x83, 0xda: 0x84, 0xdb: 0x10, 0xdc: 0x11, 0xdd: 0x85, 0xde: 0x86, 0xdf: 0x87, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, + 0xf0: 0x1c, 0xf1: 0x1d, 0xf2: 0x1d, 0xf3: 0x1f, 0xf4: 0x20, + // Block 0x4, offset 0x100 + 0x120: 0x88, 0x121: 0x89, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x12, 0x126: 0x13, 0x127: 0x14, + 0x128: 0x15, 0x129: 0x16, 0x12a: 0x17, 0x12b: 0x18, 0x12c: 0x19, 0x12d: 0x1a, 0x12e: 0x1b, 0x12f: 0x8d, + 0x130: 0x8e, 0x131: 0x1c, 0x132: 0x1d, 0x133: 0x1e, 0x134: 0x8f, 0x135: 0x1f, 0x136: 0x90, 0x137: 0x91, + 0x138: 0x92, 0x139: 0x93, 0x13a: 0x20, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x21, 0x13e: 0x22, 0x13f: 0x96, + // Block 0x5, offset 0x140 + 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9b, 0x147: 0x9b, + 0x148: 0x9d, 0x149: 0x9e, 0x14a: 0x9f, 0x14b: 0xa0, 0x14c: 0xa1, 0x14d: 0xa2, 0x14e: 0xa3, 0x14f: 0xa4, + 0x150: 0xa5, 0x151: 0x9d, 0x152: 0x9d, 0x153: 0x9d, 0x154: 0x9d, 0x155: 0x9d, 0x156: 0x9d, 0x157: 0x9d, + 0x158: 0x9d, 0x159: 0xa6, 0x15a: 0xa7, 0x15b: 0xa8, 0x15c: 0xa9, 0x15d: 0xaa, 0x15e: 0xab, 0x15f: 0xac, + 0x160: 0xad, 0x161: 0xae, 0x162: 0xaf, 0x163: 0xb0, 0x164: 0xb1, 0x165: 0xb2, 0x166: 0xb3, 0x167: 0xb4, + 0x168: 0xb5, 0x169: 0xb6, 0x16a: 0xb7, 0x16b: 0xb8, 0x16c: 0xb9, 0x16d: 0xba, 0x16e: 0xbb, 0x16f: 0xbc, + 0x170: 0xbd, 0x171: 0xbe, 0x172: 0xbf, 0x173: 0xc0, 0x174: 0x23, 0x175: 0x24, 0x176: 0x25, 0x177: 0xc1, + 
0x178: 0x26, 0x179: 0x26, 0x17a: 0x27, 0x17b: 0x26, 0x17c: 0xc2, 0x17d: 0x28, 0x17e: 0x29, 0x17f: 0x2a, + // Block 0x6, offset 0x180 + 0x180: 0x2b, 0x181: 0x2c, 0x182: 0x2d, 0x183: 0xc3, 0x184: 0x2e, 0x185: 0x2f, 0x186: 0xc4, 0x187: 0x9b, + 0x188: 0xc5, 0x189: 0xc6, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc7, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0xc8, + 0x190: 0xc9, 0x191: 0x30, 0x192: 0x31, 0x193: 0x32, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, + 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, + 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, + 0x1a8: 0xca, 0x1a9: 0xcb, 0x1aa: 0x9b, 0x1ab: 0xcc, 0x1ac: 0x9b, 0x1ad: 0xcd, 0x1ae: 0xce, 0x1af: 0xcf, + 0x1b0: 0xd0, 0x1b1: 0x33, 0x1b2: 0x26, 0x1b3: 0x34, 0x1b4: 0xd1, 0x1b5: 0xd2, 0x1b6: 0xd3, 0x1b7: 0xd4, + 0x1b8: 0xd5, 0x1b9: 0xd6, 0x1ba: 0xd7, 0x1bb: 0xd8, 0x1bc: 0xd9, 0x1bd: 0xda, 0x1be: 0xdb, 0x1bf: 0x35, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x36, 0x1c1: 0xdc, 0x1c2: 0xdd, 0x1c3: 0xde, 0x1c4: 0xdf, 0x1c5: 0x37, 0x1c6: 0x38, 0x1c7: 0xe0, + 0x1c8: 0xe1, 0x1c9: 0x39, 0x1ca: 0x3a, 0x1cb: 0x3b, 0x1cc: 0x3c, 0x1cd: 0x3d, 0x1ce: 0x3e, 0x1cf: 0x3f, + 0x1d0: 0x9d, 0x1d1: 0x9d, 0x1d2: 0x9d, 0x1d3: 0x9d, 0x1d4: 0x9d, 0x1d5: 0x9d, 0x1d6: 0x9d, 0x1d7: 0x9d, + 0x1d8: 0x9d, 0x1d9: 0x9d, 0x1da: 0x9d, 0x1db: 0x9d, 0x1dc: 0x9d, 0x1dd: 0x9d, 0x1de: 0x9d, 0x1df: 0x9d, + 0x1e0: 0x9d, 0x1e1: 0x9d, 0x1e2: 0x9d, 0x1e3: 0x9d, 0x1e4: 0x9d, 0x1e5: 0x9d, 0x1e6: 0x9d, 0x1e7: 0x9d, + 0x1e8: 0x9d, 0x1e9: 0x9d, 0x1ea: 0x9d, 0x1eb: 0x9d, 0x1ec: 0x9d, 0x1ed: 0x9d, 0x1ee: 0x9d, 0x1ef: 0x9d, + 0x1f0: 0x9d, 0x1f1: 0x9d, 0x1f2: 0x9d, 0x1f3: 0x9d, 0x1f4: 0x9d, 0x1f5: 0x9d, 0x1f6: 0x9d, 0x1f7: 0x9d, + 0x1f8: 0x9d, 0x1f9: 0x9d, 0x1fa: 0x9d, 0x1fb: 0x9d, 0x1fc: 0x9d, 0x1fd: 0x9d, 0x1fe: 0x9d, 0x1ff: 0x9d, + // Block 0x8, offset 0x200 + 0x200: 0x9d, 0x201: 0x9d, 0x202: 0x9d, 0x203: 0x9d, 0x204: 0x9d, 0x205: 0x9d, 0x206: 0x9d, 0x207: 0x9d, + 
0x208: 0x9d, 0x209: 0x9d, 0x20a: 0x9d, 0x20b: 0x9d, 0x20c: 0x9d, 0x20d: 0x9d, 0x20e: 0x9d, 0x20f: 0x9d, + 0x210: 0x9d, 0x211: 0x9d, 0x212: 0x9d, 0x213: 0x9d, 0x214: 0x9d, 0x215: 0x9d, 0x216: 0x9d, 0x217: 0x9d, + 0x218: 0x9d, 0x219: 0x9d, 0x21a: 0x9d, 0x21b: 0x9d, 0x21c: 0x9d, 0x21d: 0x9d, 0x21e: 0x9d, 0x21f: 0x9d, + 0x220: 0x9d, 0x221: 0x9d, 0x222: 0x9d, 0x223: 0x9d, 0x224: 0x9d, 0x225: 0x9d, 0x226: 0x9d, 0x227: 0x9d, + 0x228: 0x9d, 0x229: 0x9d, 0x22a: 0x9d, 0x22b: 0x9d, 0x22c: 0x9d, 0x22d: 0x9d, 0x22e: 0x9d, 0x22f: 0x9d, + 0x230: 0x9d, 0x231: 0x9d, 0x232: 0x9d, 0x233: 0x9d, 0x234: 0x9d, 0x235: 0x9d, 0x236: 0xb0, 0x237: 0x9b, + 0x238: 0x9d, 0x239: 0x9d, 0x23a: 0x9d, 0x23b: 0x9d, 0x23c: 0x9d, 0x23d: 0x9d, 0x23e: 0x9d, 0x23f: 0x9d, + // Block 0x9, offset 0x240 + 0x240: 0x9d, 0x241: 0x9d, 0x242: 0x9d, 0x243: 0x9d, 0x244: 0x9d, 0x245: 0x9d, 0x246: 0x9d, 0x247: 0x9d, + 0x248: 0x9d, 0x249: 0x9d, 0x24a: 0x9d, 0x24b: 0x9d, 0x24c: 0x9d, 0x24d: 0x9d, 0x24e: 0x9d, 0x24f: 0x9d, + 0x250: 0x9d, 0x251: 0x9d, 0x252: 0x9d, 0x253: 0x9d, 0x254: 0x9d, 0x255: 0x9d, 0x256: 0x9d, 0x257: 0x9d, + 0x258: 0x9d, 0x259: 0x9d, 0x25a: 0x9d, 0x25b: 0x9d, 0x25c: 0x9d, 0x25d: 0x9d, 0x25e: 0x9d, 0x25f: 0x9d, + 0x260: 0x9d, 0x261: 0x9d, 0x262: 0x9d, 0x263: 0x9d, 0x264: 0x9d, 0x265: 0x9d, 0x266: 0x9d, 0x267: 0x9d, + 0x268: 0x9d, 0x269: 0x9d, 0x26a: 0x9d, 0x26b: 0x9d, 0x26c: 0x9d, 0x26d: 0x9d, 0x26e: 0x9d, 0x26f: 0x9d, + 0x270: 0x9d, 0x271: 0x9d, 0x272: 0x9d, 0x273: 0x9d, 0x274: 0x9d, 0x275: 0x9d, 0x276: 0x9d, 0x277: 0x9d, + 0x278: 0x9d, 0x279: 0x9d, 0x27a: 0x9d, 0x27b: 0x9d, 0x27c: 0x9d, 0x27d: 0x9d, 0x27e: 0x9d, 0x27f: 0x9d, + // Block 0xa, offset 0x280 + 0x280: 0x9d, 0x281: 0x9d, 0x282: 0x9d, 0x283: 0x9d, 0x284: 0x9d, 0x285: 0x9d, 0x286: 0x9d, 0x287: 0x9d, + 0x288: 0x9d, 0x289: 0x9d, 0x28a: 0x9d, 0x28b: 0x9d, 0x28c: 0x9d, 0x28d: 0x9d, 0x28e: 0x9d, 0x28f: 0x9d, + 0x290: 0x9d, 0x291: 0x9d, 0x292: 0x9d, 0x293: 0x9d, 0x294: 0x9d, 0x295: 0x9d, 0x296: 0x9d, 0x297: 0x9d, + 0x298: 0x9d, 0x299: 0x9d, 0x29a: 
0x9d, 0x29b: 0x9d, 0x29c: 0x9d, 0x29d: 0x9d, 0x29e: 0x9d, 0x29f: 0x9d, + 0x2a0: 0x9d, 0x2a1: 0x9d, 0x2a2: 0x9d, 0x2a3: 0x9d, 0x2a4: 0x9d, 0x2a5: 0x9d, 0x2a6: 0x9d, 0x2a7: 0x9d, + 0x2a8: 0x9d, 0x2a9: 0x9d, 0x2aa: 0x9d, 0x2ab: 0x9d, 0x2ac: 0x9d, 0x2ad: 0x9d, 0x2ae: 0x9d, 0x2af: 0x9d, + 0x2b0: 0x9d, 0x2b1: 0x9d, 0x2b2: 0x9d, 0x2b3: 0x9d, 0x2b4: 0x9d, 0x2b5: 0x9d, 0x2b6: 0x9d, 0x2b7: 0x9d, + 0x2b8: 0x9d, 0x2b9: 0x9d, 0x2ba: 0x9d, 0x2bb: 0x9d, 0x2bc: 0x9d, 0x2bd: 0x9d, 0x2be: 0x9d, 0x2bf: 0xe2, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x9d, 0x2c1: 0x9d, 0x2c2: 0x9d, 0x2c3: 0x9d, 0x2c4: 0x9d, 0x2c5: 0x9d, 0x2c6: 0x9d, 0x2c7: 0x9d, + 0x2c8: 0x9d, 0x2c9: 0x9d, 0x2ca: 0x9d, 0x2cb: 0x9d, 0x2cc: 0x9d, 0x2cd: 0x9d, 0x2ce: 0x9d, 0x2cf: 0x9d, + 0x2d0: 0x9d, 0x2d1: 0x9d, 0x2d2: 0xe3, 0x2d3: 0xe4, 0x2d4: 0x9d, 0x2d5: 0x9d, 0x2d6: 0x9d, 0x2d7: 0x9d, + 0x2d8: 0xe5, 0x2d9: 0x40, 0x2da: 0x41, 0x2db: 0xe6, 0x2dc: 0x42, 0x2dd: 0x43, 0x2de: 0x44, 0x2df: 0xe7, + 0x2e0: 0xe8, 0x2e1: 0xe9, 0x2e2: 0xea, 0x2e3: 0xeb, 0x2e4: 0xec, 0x2e5: 0xed, 0x2e6: 0xee, 0x2e7: 0xef, + 0x2e8: 0xf0, 0x2e9: 0xf1, 0x2ea: 0xf2, 0x2eb: 0xf3, 0x2ec: 0xf4, 0x2ed: 0xf5, 0x2ee: 0xf6, 0x2ef: 0xf7, + 0x2f0: 0x9d, 0x2f1: 0x9d, 0x2f2: 0x9d, 0x2f3: 0x9d, 0x2f4: 0x9d, 0x2f5: 0x9d, 0x2f6: 0x9d, 0x2f7: 0x9d, + 0x2f8: 0x9d, 0x2f9: 0x9d, 0x2fa: 0x9d, 0x2fb: 0x9d, 0x2fc: 0x9d, 0x2fd: 0x9d, 0x2fe: 0x9d, 0x2ff: 0x9d, + // Block 0xc, offset 0x300 + 0x300: 0x9d, 0x301: 0x9d, 0x302: 0x9d, 0x303: 0x9d, 0x304: 0x9d, 0x305: 0x9d, 0x306: 0x9d, 0x307: 0x9d, + 0x308: 0x9d, 0x309: 0x9d, 0x30a: 0x9d, 0x30b: 0x9d, 0x30c: 0x9d, 0x30d: 0x9d, 0x30e: 0x9d, 0x30f: 0x9d, + 0x310: 0x9d, 0x311: 0x9d, 0x312: 0x9d, 0x313: 0x9d, 0x314: 0x9d, 0x315: 0x9d, 0x316: 0x9d, 0x317: 0x9d, + 0x318: 0x9d, 0x319: 0x9d, 0x31a: 0x9d, 0x31b: 0x9d, 0x31c: 0x9d, 0x31d: 0x9d, 0x31e: 0xf8, 0x31f: 0xf9, + // Block 0xd, offset 0x340 + 0x340: 0xb8, 0x341: 0xb8, 0x342: 0xb8, 0x343: 0xb8, 0x344: 0xb8, 0x345: 0xb8, 0x346: 0xb8, 0x347: 0xb8, + 0x348: 0xb8, 0x349: 0xb8, 0x34a: 
0xb8, 0x34b: 0xb8, 0x34c: 0xb8, 0x34d: 0xb8, 0x34e: 0xb8, 0x34f: 0xb8, + 0x350: 0xb8, 0x351: 0xb8, 0x352: 0xb8, 0x353: 0xb8, 0x354: 0xb8, 0x355: 0xb8, 0x356: 0xb8, 0x357: 0xb8, + 0x358: 0xb8, 0x359: 0xb8, 0x35a: 0xb8, 0x35b: 0xb8, 0x35c: 0xb8, 0x35d: 0xb8, 0x35e: 0xb8, 0x35f: 0xb8, + 0x360: 0xb8, 0x361: 0xb8, 0x362: 0xb8, 0x363: 0xb8, 0x364: 0xb8, 0x365: 0xb8, 0x366: 0xb8, 0x367: 0xb8, + 0x368: 0xb8, 0x369: 0xb8, 0x36a: 0xb8, 0x36b: 0xb8, 0x36c: 0xb8, 0x36d: 0xb8, 0x36e: 0xb8, 0x36f: 0xb8, + 0x370: 0xb8, 0x371: 0xb8, 0x372: 0xb8, 0x373: 0xb8, 0x374: 0xb8, 0x375: 0xb8, 0x376: 0xb8, 0x377: 0xb8, + 0x378: 0xb8, 0x379: 0xb8, 0x37a: 0xb8, 0x37b: 0xb8, 0x37c: 0xb8, 0x37d: 0xb8, 0x37e: 0xb8, 0x37f: 0xb8, + // Block 0xe, offset 0x380 + 0x380: 0xb8, 0x381: 0xb8, 0x382: 0xb8, 0x383: 0xb8, 0x384: 0xb8, 0x385: 0xb8, 0x386: 0xb8, 0x387: 0xb8, + 0x388: 0xb8, 0x389: 0xb8, 0x38a: 0xb8, 0x38b: 0xb8, 0x38c: 0xb8, 0x38d: 0xb8, 0x38e: 0xb8, 0x38f: 0xb8, + 0x390: 0xb8, 0x391: 0xb8, 0x392: 0xb8, 0x393: 0xb8, 0x394: 0xb8, 0x395: 0xb8, 0x396: 0xb8, 0x397: 0xb8, + 0x398: 0xb8, 0x399: 0xb8, 0x39a: 0xb8, 0x39b: 0xb8, 0x39c: 0xb8, 0x39d: 0xb8, 0x39e: 0xb8, 0x39f: 0xb8, + 0x3a0: 0xb8, 0x3a1: 0xb8, 0x3a2: 0xb8, 0x3a3: 0xb8, 0x3a4: 0xfa, 0x3a5: 0xfb, 0x3a6: 0xfc, 0x3a7: 0xfd, + 0x3a8: 0x45, 0x3a9: 0xfe, 0x3aa: 0xff, 0x3ab: 0x46, 0x3ac: 0x47, 0x3ad: 0x48, 0x3ae: 0x49, 0x3af: 0x4a, + 0x3b0: 0x100, 0x3b1: 0x4b, 0x3b2: 0x4c, 0x3b3: 0x4d, 0x3b4: 0x4e, 0x3b5: 0x4f, 0x3b6: 0x101, 0x3b7: 0x50, + 0x3b8: 0x51, 0x3b9: 0x52, 0x3ba: 0x53, 0x3bb: 0x54, 0x3bc: 0x55, 0x3bd: 0x56, 0x3be: 0x57, 0x3bf: 0x58, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x102, 0x3c1: 0x103, 0x3c2: 0x9d, 0x3c3: 0x104, 0x3c4: 0x105, 0x3c5: 0x9b, 0x3c6: 0x106, 0x3c7: 0x107, + 0x3c8: 0xb8, 0x3c9: 0xb8, 0x3ca: 0x108, 0x3cb: 0x109, 0x3cc: 0x10a, 0x3cd: 0x10b, 0x3ce: 0x10c, 0x3cf: 0x10d, + 0x3d0: 0x10e, 0x3d1: 0x9d, 0x3d2: 0x10f, 0x3d3: 0x110, 0x3d4: 0x111, 0x3d5: 0x112, 0x3d6: 0xb8, 0x3d7: 0xb8, + 0x3d8: 0x9d, 0x3d9: 0x9d, 0x3da: 0x9d, 0x3db: 
0x9d, 0x3dc: 0x113, 0x3dd: 0x114, 0x3de: 0xb8, 0x3df: 0xb8, + 0x3e0: 0x115, 0x3e1: 0x116, 0x3e2: 0x117, 0x3e3: 0x118, 0x3e4: 0x119, 0x3e5: 0xb8, 0x3e6: 0x11a, 0x3e7: 0x11b, + 0x3e8: 0x11c, 0x3e9: 0x11d, 0x3ea: 0x11e, 0x3eb: 0x59, 0x3ec: 0x11f, 0x3ed: 0x120, 0x3ee: 0x5a, 0x3ef: 0xb8, + 0x3f0: 0x9d, 0x3f1: 0x121, 0x3f2: 0x122, 0x3f3: 0x123, 0x3f4: 0xb8, 0x3f5: 0xb8, 0x3f6: 0xb8, 0x3f7: 0xb8, + 0x3f8: 0xb8, 0x3f9: 0x124, 0x3fa: 0xb8, 0x3fb: 0xb8, 0x3fc: 0xb8, 0x3fd: 0xb8, 0x3fe: 0xb8, 0x3ff: 0xb8, + // Block 0x10, offset 0x400 + 0x400: 0x125, 0x401: 0x126, 0x402: 0x127, 0x403: 0x128, 0x404: 0x129, 0x405: 0x12a, 0x406: 0x12b, 0x407: 0x12c, + 0x408: 0x12d, 0x409: 0xb8, 0x40a: 0x12e, 0x40b: 0x12f, 0x40c: 0x5b, 0x40d: 0x5c, 0x40e: 0xb8, 0x40f: 0xb8, + 0x410: 0x130, 0x411: 0x131, 0x412: 0x132, 0x413: 0x133, 0x414: 0xb8, 0x415: 0xb8, 0x416: 0x134, 0x417: 0x135, + 0x418: 0x136, 0x419: 0x137, 0x41a: 0x138, 0x41b: 0x139, 0x41c: 0x13a, 0x41d: 0xb8, 0x41e: 0xb8, 0x41f: 0xb8, + 0x420: 0xb8, 0x421: 0xb8, 0x422: 0x13b, 0x423: 0x13c, 0x424: 0xb8, 0x425: 0xb8, 0x426: 0xb8, 0x427: 0xb8, + 0x428: 0xb8, 0x429: 0xb8, 0x42a: 0xb8, 0x42b: 0x13d, 0x42c: 0xb8, 0x42d: 0xb8, 0x42e: 0xb8, 0x42f: 0xb8, + 0x430: 0x13e, 0x431: 0x13f, 0x432: 0x140, 0x433: 0xb8, 0x434: 0xb8, 0x435: 0xb8, 0x436: 0xb8, 0x437: 0xb8, + 0x438: 0xb8, 0x439: 0xb8, 0x43a: 0xb8, 0x43b: 0xb8, 0x43c: 0xb8, 0x43d: 0xb8, 0x43e: 0xb8, 0x43f: 0xb8, + // Block 0x11, offset 0x440 + 0x440: 0x9d, 0x441: 0x9d, 0x442: 0x9d, 0x443: 0x9d, 0x444: 0x9d, 0x445: 0x9d, 0x446: 0x9d, 0x447: 0x9d, + 0x448: 0x9d, 0x449: 0x9d, 0x44a: 0x9d, 0x44b: 0x9d, 0x44c: 0x9d, 0x44d: 0x9d, 0x44e: 0x141, 0x44f: 0xb8, + 0x450: 0x9b, 0x451: 0x142, 0x452: 0x9d, 0x453: 0x9d, 0x454: 0x9d, 0x455: 0x143, 0x456: 0xb8, 0x457: 0xb8, + 0x458: 0xb8, 0x459: 0xb8, 0x45a: 0xb8, 0x45b: 0xb8, 0x45c: 0xb8, 0x45d: 0xb8, 0x45e: 0xb8, 0x45f: 0xb8, + 0x460: 0xb8, 0x461: 0xb8, 0x462: 0xb8, 0x463: 0xb8, 0x464: 0xb8, 0x465: 0xb8, 0x466: 0xb8, 0x467: 0xb8, + 0x468: 0xb8, 0x469: 0xb8, 
0x46a: 0xb8, 0x46b: 0xb8, 0x46c: 0xb8, 0x46d: 0xb8, 0x46e: 0xb8, 0x46f: 0xb8, + 0x470: 0xb8, 0x471: 0xb8, 0x472: 0xb8, 0x473: 0xb8, 0x474: 0xb8, 0x475: 0xb8, 0x476: 0xb8, 0x477: 0xb8, + 0x478: 0xb8, 0x479: 0xb8, 0x47a: 0xb8, 0x47b: 0xb8, 0x47c: 0xb8, 0x47d: 0xb8, 0x47e: 0xb8, 0x47f: 0xb8, + // Block 0x12, offset 0x480 + 0x480: 0x9d, 0x481: 0x9d, 0x482: 0x9d, 0x483: 0x9d, 0x484: 0x9d, 0x485: 0x9d, 0x486: 0x9d, 0x487: 0x9d, + 0x488: 0x9d, 0x489: 0x9d, 0x48a: 0x9d, 0x48b: 0x9d, 0x48c: 0x9d, 0x48d: 0x9d, 0x48e: 0x9d, 0x48f: 0x9d, + 0x490: 0x144, 0x491: 0xb8, 0x492: 0xb8, 0x493: 0xb8, 0x494: 0xb8, 0x495: 0xb8, 0x496: 0xb8, 0x497: 0xb8, + 0x498: 0xb8, 0x499: 0xb8, 0x49a: 0xb8, 0x49b: 0xb8, 0x49c: 0xb8, 0x49d: 0xb8, 0x49e: 0xb8, 0x49f: 0xb8, + 0x4a0: 0xb8, 0x4a1: 0xb8, 0x4a2: 0xb8, 0x4a3: 0xb8, 0x4a4: 0xb8, 0x4a5: 0xb8, 0x4a6: 0xb8, 0x4a7: 0xb8, + 0x4a8: 0xb8, 0x4a9: 0xb8, 0x4aa: 0xb8, 0x4ab: 0xb8, 0x4ac: 0xb8, 0x4ad: 0xb8, 0x4ae: 0xb8, 0x4af: 0xb8, + 0x4b0: 0xb8, 0x4b1: 0xb8, 0x4b2: 0xb8, 0x4b3: 0xb8, 0x4b4: 0xb8, 0x4b5: 0xb8, 0x4b6: 0xb8, 0x4b7: 0xb8, + 0x4b8: 0xb8, 0x4b9: 0xb8, 0x4ba: 0xb8, 0x4bb: 0xb8, 0x4bc: 0xb8, 0x4bd: 0xb8, 0x4be: 0xb8, 0x4bf: 0xb8, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xb8, 0x4c1: 0xb8, 0x4c2: 0xb8, 0x4c3: 0xb8, 0x4c4: 0xb8, 0x4c5: 0xb8, 0x4c6: 0xb8, 0x4c7: 0xb8, + 0x4c8: 0xb8, 0x4c9: 0xb8, 0x4ca: 0xb8, 0x4cb: 0xb8, 0x4cc: 0xb8, 0x4cd: 0xb8, 0x4ce: 0xb8, 0x4cf: 0xb8, + 0x4d0: 0x9d, 0x4d1: 0x9d, 0x4d2: 0x9d, 0x4d3: 0x9d, 0x4d4: 0x9d, 0x4d5: 0x9d, 0x4d6: 0x9d, 0x4d7: 0x9d, + 0x4d8: 0x9d, 0x4d9: 0x145, 0x4da: 0xb8, 0x4db: 0xb8, 0x4dc: 0xb8, 0x4dd: 0xb8, 0x4de: 0xb8, 0x4df: 0xb8, + 0x4e0: 0xb8, 0x4e1: 0xb8, 0x4e2: 0xb8, 0x4e3: 0xb8, 0x4e4: 0xb8, 0x4e5: 0xb8, 0x4e6: 0xb8, 0x4e7: 0xb8, + 0x4e8: 0xb8, 0x4e9: 0xb8, 0x4ea: 0xb8, 0x4eb: 0xb8, 0x4ec: 0xb8, 0x4ed: 0xb8, 0x4ee: 0xb8, 0x4ef: 0xb8, + 0x4f0: 0xb8, 0x4f1: 0xb8, 0x4f2: 0xb8, 0x4f3: 0xb8, 0x4f4: 0xb8, 0x4f5: 0xb8, 0x4f6: 0xb8, 0x4f7: 0xb8, + 0x4f8: 0xb8, 0x4f9: 0xb8, 0x4fa: 0xb8, 0x4fb: 0xb8, 
0x4fc: 0xb8, 0x4fd: 0xb8, 0x4fe: 0xb8, 0x4ff: 0xb8, + // Block 0x14, offset 0x500 + 0x500: 0xb8, 0x501: 0xb8, 0x502: 0xb8, 0x503: 0xb8, 0x504: 0xb8, 0x505: 0xb8, 0x506: 0xb8, 0x507: 0xb8, + 0x508: 0xb8, 0x509: 0xb8, 0x50a: 0xb8, 0x50b: 0xb8, 0x50c: 0xb8, 0x50d: 0xb8, 0x50e: 0xb8, 0x50f: 0xb8, + 0x510: 0xb8, 0x511: 0xb8, 0x512: 0xb8, 0x513: 0xb8, 0x514: 0xb8, 0x515: 0xb8, 0x516: 0xb8, 0x517: 0xb8, + 0x518: 0xb8, 0x519: 0xb8, 0x51a: 0xb8, 0x51b: 0xb8, 0x51c: 0xb8, 0x51d: 0xb8, 0x51e: 0xb8, 0x51f: 0xb8, + 0x520: 0x9d, 0x521: 0x9d, 0x522: 0x9d, 0x523: 0x9d, 0x524: 0x9d, 0x525: 0x9d, 0x526: 0x9d, 0x527: 0x9d, + 0x528: 0x13d, 0x529: 0x146, 0x52a: 0xb8, 0x52b: 0x147, 0x52c: 0x148, 0x52d: 0x149, 0x52e: 0x14a, 0x52f: 0xb8, + 0x530: 0xb8, 0x531: 0xb8, 0x532: 0xb8, 0x533: 0xb8, 0x534: 0xb8, 0x535: 0xb8, 0x536: 0xb8, 0x537: 0xb8, + 0x538: 0xb8, 0x539: 0xb8, 0x53a: 0xb8, 0x53b: 0xb8, 0x53c: 0x9d, 0x53d: 0x14b, 0x53e: 0x14c, 0x53f: 0x14d, + // Block 0x15, offset 0x540 + 0x540: 0x9d, 0x541: 0x9d, 0x542: 0x9d, 0x543: 0x9d, 0x544: 0x9d, 0x545: 0x9d, 0x546: 0x9d, 0x547: 0x9d, + 0x548: 0x9d, 0x549: 0x9d, 0x54a: 0x9d, 0x54b: 0x9d, 0x54c: 0x9d, 0x54d: 0x9d, 0x54e: 0x9d, 0x54f: 0x9d, + 0x550: 0x9d, 0x551: 0x9d, 0x552: 0x9d, 0x553: 0x9d, 0x554: 0x9d, 0x555: 0x9d, 0x556: 0x9d, 0x557: 0x9d, + 0x558: 0x9d, 0x559: 0x9d, 0x55a: 0x9d, 0x55b: 0x9d, 0x55c: 0x9d, 0x55d: 0x9d, 0x55e: 0x9d, 0x55f: 0x14e, + 0x560: 0x9d, 0x561: 0x9d, 0x562: 0x9d, 0x563: 0x9d, 0x564: 0x9d, 0x565: 0x9d, 0x566: 0x9d, 0x567: 0x9d, + 0x568: 0x9d, 0x569: 0x9d, 0x56a: 0x9d, 0x56b: 0x14f, 0x56c: 0xb8, 0x56d: 0xb8, 0x56e: 0xb8, 0x56f: 0xb8, + 0x570: 0xb8, 0x571: 0xb8, 0x572: 0xb8, 0x573: 0xb8, 0x574: 0xb8, 0x575: 0xb8, 0x576: 0xb8, 0x577: 0xb8, + 0x578: 0xb8, 0x579: 0xb8, 0x57a: 0xb8, 0x57b: 0xb8, 0x57c: 0xb8, 0x57d: 0xb8, 0x57e: 0xb8, 0x57f: 0xb8, + // Block 0x16, offset 0x580 + 0x580: 0x150, 0x581: 0xb8, 0x582: 0xb8, 0x583: 0xb8, 0x584: 0xb8, 0x585: 0xb8, 0x586: 0xb8, 0x587: 0xb8, + 0x588: 0xb8, 0x589: 0xb8, 0x58a: 0xb8, 
0x58b: 0xb8, 0x58c: 0xb8, 0x58d: 0xb8, 0x58e: 0xb8, 0x58f: 0xb8, + 0x590: 0xb8, 0x591: 0xb8, 0x592: 0xb8, 0x593: 0xb8, 0x594: 0xb8, 0x595: 0xb8, 0x596: 0xb8, 0x597: 0xb8, + 0x598: 0xb8, 0x599: 0xb8, 0x59a: 0xb8, 0x59b: 0xb8, 0x59c: 0xb8, 0x59d: 0xb8, 0x59e: 0xb8, 0x59f: 0xb8, + 0x5a0: 0xb8, 0x5a1: 0xb8, 0x5a2: 0xb8, 0x5a3: 0xb8, 0x5a4: 0xb8, 0x5a5: 0xb8, 0x5a6: 0xb8, 0x5a7: 0xb8, + 0x5a8: 0xb8, 0x5a9: 0xb8, 0x5aa: 0xb8, 0x5ab: 0xb8, 0x5ac: 0xb8, 0x5ad: 0xb8, 0x5ae: 0xb8, 0x5af: 0xb8, + 0x5b0: 0x9d, 0x5b1: 0x151, 0x5b2: 0x152, 0x5b3: 0xb8, 0x5b4: 0xb8, 0x5b5: 0xb8, 0x5b6: 0xb8, 0x5b7: 0xb8, + 0x5b8: 0xb8, 0x5b9: 0xb8, 0x5ba: 0xb8, 0x5bb: 0xb8, 0x5bc: 0xb8, 0x5bd: 0xb8, 0x5be: 0xb8, 0x5bf: 0xb8, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x153, 0x5c4: 0x154, 0x5c5: 0x155, 0x5c6: 0x156, 0x5c7: 0x157, + 0x5c8: 0x9b, 0x5c9: 0x158, 0x5ca: 0xb8, 0x5cb: 0xb8, 0x5cc: 0x9b, 0x5cd: 0x159, 0x5ce: 0xb8, 0x5cf: 0xb8, + 0x5d0: 0x5d, 0x5d1: 0x5e, 0x5d2: 0x5f, 0x5d3: 0x60, 0x5d4: 0x61, 0x5d5: 0x62, 0x5d6: 0x63, 0x5d7: 0x64, + 0x5d8: 0x65, 0x5d9: 0x66, 0x5da: 0x67, 0x5db: 0x68, 0x5dc: 0x69, 0x5dd: 0x6a, 0x5de: 0x6b, 0x5df: 0x6c, + 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, + 0x5e8: 0x15a, 0x5e9: 0x15b, 0x5ea: 0x15c, 0x5eb: 0xb8, 0x5ec: 0xb8, 0x5ed: 0xb8, 0x5ee: 0xb8, 0x5ef: 0xb8, + 0x5f0: 0xb8, 0x5f1: 0xb8, 0x5f2: 0xb8, 0x5f3: 0xb8, 0x5f4: 0xb8, 0x5f5: 0xb8, 0x5f6: 0xb8, 0x5f7: 0xb8, + 0x5f8: 0xb8, 0x5f9: 0xb8, 0x5fa: 0xb8, 0x5fb: 0xb8, 0x5fc: 0xb8, 0x5fd: 0xb8, 0x5fe: 0xb8, 0x5ff: 0xb8, + // Block 0x18, offset 0x600 + 0x600: 0x15d, 0x601: 0xb8, 0x602: 0xb8, 0x603: 0xb8, 0x604: 0xb8, 0x605: 0xb8, 0x606: 0xb8, 0x607: 0xb8, + 0x608: 0xb8, 0x609: 0xb8, 0x60a: 0xb8, 0x60b: 0xb8, 0x60c: 0xb8, 0x60d: 0xb8, 0x60e: 0xb8, 0x60f: 0xb8, + 0x610: 0xb8, 0x611: 0xb8, 0x612: 0xb8, 0x613: 0xb8, 0x614: 0xb8, 0x615: 0xb8, 0x616: 0xb8, 0x617: 0xb8, + 0x618: 0xb8, 0x619: 0xb8, 0x61a: 0xb8, 0x61b: 0xb8, 
0x61c: 0xb8, 0x61d: 0xb8, 0x61e: 0xb8, 0x61f: 0xb8, + 0x620: 0x9d, 0x621: 0x9d, 0x622: 0x9d, 0x623: 0x15e, 0x624: 0x6d, 0x625: 0x15f, 0x626: 0xb8, 0x627: 0xb8, + 0x628: 0xb8, 0x629: 0xb8, 0x62a: 0xb8, 0x62b: 0xb8, 0x62c: 0xb8, 0x62d: 0xb8, 0x62e: 0xb8, 0x62f: 0xb8, + 0x630: 0xb8, 0x631: 0xb8, 0x632: 0xb8, 0x633: 0xb8, 0x634: 0xb8, 0x635: 0xb8, 0x636: 0xb8, 0x637: 0xb8, + 0x638: 0x6e, 0x639: 0x6f, 0x63a: 0x70, 0x63b: 0x160, 0x63c: 0xb8, 0x63d: 0xb8, 0x63e: 0xb8, 0x63f: 0xb8, + // Block 0x19, offset 0x640 + 0x640: 0x161, 0x641: 0x9b, 0x642: 0x162, 0x643: 0x163, 0x644: 0x71, 0x645: 0x72, 0x646: 0x164, 0x647: 0x165, + 0x648: 0x73, 0x649: 0x166, 0x64a: 0xb8, 0x64b: 0xb8, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, + 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, + 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x167, 0x65c: 0x9b, 0x65d: 0x168, 0x65e: 0x9b, 0x65f: 0x169, + 0x660: 0x16a, 0x661: 0x16b, 0x662: 0x16c, 0x663: 0xb8, 0x664: 0x16d, 0x665: 0x16e, 0x666: 0x16f, 0x667: 0x170, + 0x668: 0xb8, 0x669: 0xb8, 0x66a: 0xb8, 0x66b: 0xb8, 0x66c: 0xb8, 0x66d: 0xb8, 0x66e: 0xb8, 0x66f: 0xb8, + 0x670: 0xb8, 0x671: 0xb8, 0x672: 0xb8, 0x673: 0xb8, 0x674: 0xb8, 0x675: 0xb8, 0x676: 0xb8, 0x677: 0xb8, + 0x678: 0xb8, 0x679: 0xb8, 0x67a: 0xb8, 0x67b: 0xb8, 0x67c: 0xb8, 0x67d: 0xb8, 0x67e: 0xb8, 0x67f: 0xb8, + // Block 0x1a, offset 0x680 + 0x680: 0x9d, 0x681: 0x9d, 0x682: 0x9d, 0x683: 0x9d, 0x684: 0x9d, 0x685: 0x9d, 0x686: 0x9d, 0x687: 0x9d, + 0x688: 0x9d, 0x689: 0x9d, 0x68a: 0x9d, 0x68b: 0x9d, 0x68c: 0x9d, 0x68d: 0x9d, 0x68e: 0x9d, 0x68f: 0x9d, + 0x690: 0x9d, 0x691: 0x9d, 0x692: 0x9d, 0x693: 0x9d, 0x694: 0x9d, 0x695: 0x9d, 0x696: 0x9d, 0x697: 0x9d, + 0x698: 0x9d, 0x699: 0x9d, 0x69a: 0x9d, 0x69b: 0x171, 0x69c: 0x9d, 0x69d: 0x9d, 0x69e: 0x9d, 0x69f: 0x9d, + 0x6a0: 0x9d, 0x6a1: 0x9d, 0x6a2: 0x9d, 0x6a3: 0x9d, 0x6a4: 0x9d, 0x6a5: 0x9d, 0x6a6: 0x9d, 0x6a7: 0x9d, + 0x6a8: 0x9d, 0x6a9: 0x9d, 0x6aa: 0x9d, 0x6ab: 0x9d, 0x6ac: 
0x9d, 0x6ad: 0x9d, 0x6ae: 0x9d, 0x6af: 0x9d, + 0x6b0: 0x9d, 0x6b1: 0x9d, 0x6b2: 0x9d, 0x6b3: 0x9d, 0x6b4: 0x9d, 0x6b5: 0x9d, 0x6b6: 0x9d, 0x6b7: 0x9d, + 0x6b8: 0x9d, 0x6b9: 0x9d, 0x6ba: 0x9d, 0x6bb: 0x9d, 0x6bc: 0x9d, 0x6bd: 0x9d, 0x6be: 0x9d, 0x6bf: 0x9d, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x9d, 0x6c1: 0x9d, 0x6c2: 0x9d, 0x6c3: 0x9d, 0x6c4: 0x9d, 0x6c5: 0x9d, 0x6c6: 0x9d, 0x6c7: 0x9d, + 0x6c8: 0x9d, 0x6c9: 0x9d, 0x6ca: 0x9d, 0x6cb: 0x9d, 0x6cc: 0x9d, 0x6cd: 0x9d, 0x6ce: 0x9d, 0x6cf: 0x9d, + 0x6d0: 0x9d, 0x6d1: 0x9d, 0x6d2: 0x9d, 0x6d3: 0x9d, 0x6d4: 0x9d, 0x6d5: 0x9d, 0x6d6: 0x9d, 0x6d7: 0x9d, + 0x6d8: 0x9d, 0x6d9: 0x9d, 0x6da: 0x9d, 0x6db: 0x9d, 0x6dc: 0x172, 0x6dd: 0x9d, 0x6de: 0x9d, 0x6df: 0x9d, + 0x6e0: 0x173, 0x6e1: 0x9d, 0x6e2: 0x9d, 0x6e3: 0x9d, 0x6e4: 0x9d, 0x6e5: 0x9d, 0x6e6: 0x9d, 0x6e7: 0x9d, + 0x6e8: 0x9d, 0x6e9: 0x9d, 0x6ea: 0x9d, 0x6eb: 0x9d, 0x6ec: 0x9d, 0x6ed: 0x9d, 0x6ee: 0x9d, 0x6ef: 0x9d, + 0x6f0: 0x9d, 0x6f1: 0x9d, 0x6f2: 0x9d, 0x6f3: 0x9d, 0x6f4: 0x9d, 0x6f5: 0x9d, 0x6f6: 0x9d, 0x6f7: 0x9d, + 0x6f8: 0x9d, 0x6f9: 0x9d, 0x6fa: 0x9d, 0x6fb: 0x9d, 0x6fc: 0x9d, 0x6fd: 0x9d, 0x6fe: 0x9d, 0x6ff: 0x9d, + // Block 0x1c, offset 0x700 + 0x700: 0x9d, 0x701: 0x9d, 0x702: 0x9d, 0x703: 0x9d, 0x704: 0x9d, 0x705: 0x9d, 0x706: 0x9d, 0x707: 0x9d, + 0x708: 0x9d, 0x709: 0x9d, 0x70a: 0x9d, 0x70b: 0x9d, 0x70c: 0x9d, 0x70d: 0x9d, 0x70e: 0x9d, 0x70f: 0x9d, + 0x710: 0x9d, 0x711: 0x9d, 0x712: 0x9d, 0x713: 0x9d, 0x714: 0x9d, 0x715: 0x9d, 0x716: 0x9d, 0x717: 0x9d, + 0x718: 0x9d, 0x719: 0x9d, 0x71a: 0x9d, 0x71b: 0x9d, 0x71c: 0x9d, 0x71d: 0x9d, 0x71e: 0x9d, 0x71f: 0x9d, + 0x720: 0x9d, 0x721: 0x9d, 0x722: 0x9d, 0x723: 0x9d, 0x724: 0x9d, 0x725: 0x9d, 0x726: 0x9d, 0x727: 0x9d, + 0x728: 0x9d, 0x729: 0x9d, 0x72a: 0x9d, 0x72b: 0x9d, 0x72c: 0x9d, 0x72d: 0x9d, 0x72e: 0x9d, 0x72f: 0x9d, + 0x730: 0x9d, 0x731: 0x9d, 0x732: 0x9d, 0x733: 0x9d, 0x734: 0x9d, 0x735: 0x9d, 0x736: 0x9d, 0x737: 0x9d, + 0x738: 0x9d, 0x739: 0x9d, 0x73a: 0x174, 0x73b: 0xb8, 0x73c: 0xb8, 0x73d: 0xb8, 0x73e: 
0xb8, 0x73f: 0xb8, + // Block 0x1d, offset 0x740 + 0x740: 0xb8, 0x741: 0xb8, 0x742: 0xb8, 0x743: 0xb8, 0x744: 0xb8, 0x745: 0xb8, 0x746: 0xb8, 0x747: 0xb8, + 0x748: 0xb8, 0x749: 0xb8, 0x74a: 0xb8, 0x74b: 0xb8, 0x74c: 0xb8, 0x74d: 0xb8, 0x74e: 0xb8, 0x74f: 0xb8, + 0x750: 0xb8, 0x751: 0xb8, 0x752: 0xb8, 0x753: 0xb8, 0x754: 0xb8, 0x755: 0xb8, 0x756: 0xb8, 0x757: 0xb8, + 0x758: 0xb8, 0x759: 0xb8, 0x75a: 0xb8, 0x75b: 0xb8, 0x75c: 0xb8, 0x75d: 0xb8, 0x75e: 0xb8, 0x75f: 0xb8, + 0x760: 0x74, 0x761: 0x75, 0x762: 0x76, 0x763: 0x175, 0x764: 0x77, 0x765: 0x78, 0x766: 0x176, 0x767: 0x79, + 0x768: 0x7a, 0x769: 0xb8, 0x76a: 0xb8, 0x76b: 0xb8, 0x76c: 0xb8, 0x76d: 0xb8, 0x76e: 0xb8, 0x76f: 0xb8, + 0x770: 0xb8, 0x771: 0xb8, 0x772: 0xb8, 0x773: 0xb8, 0x774: 0xb8, 0x775: 0xb8, 0x776: 0xb8, 0x777: 0xb8, + 0x778: 0xb8, 0x779: 0xb8, 0x77a: 0xb8, 0x77b: 0xb8, 0x77c: 0xb8, 0x77d: 0xb8, 0x77e: 0xb8, 0x77f: 0xb8, + // Block 0x1e, offset 0x780 + 0x790: 0x0d, 0x791: 0x0e, 0x792: 0x0f, 0x793: 0x10, 0x794: 0x11, 0x795: 0x0b, 0x796: 0x12, 0x797: 0x07, + 0x798: 0x13, 0x799: 0x0b, 0x79a: 0x0b, 0x79b: 0x14, 0x79c: 0x0b, 0x79d: 0x15, 0x79e: 0x16, 0x79f: 0x17, + 0x7a0: 0x07, 0x7a1: 0x07, 0x7a2: 0x07, 0x7a3: 0x07, 0x7a4: 0x07, 0x7a5: 0x07, 0x7a6: 0x07, 0x7a7: 0x07, + 0x7a8: 0x07, 0x7a9: 0x07, 0x7aa: 0x18, 0x7ab: 0x19, 0x7ac: 0x1a, 0x7ad: 0x0b, 0x7ae: 0x0b, 0x7af: 0x1b, + 0x7b0: 0x0b, 0x7b1: 0x0b, 0x7b2: 0x0b, 0x7b3: 0x0b, 0x7b4: 0x0b, 0x7b5: 0x0b, 0x7b6: 0x0b, 0x7b7: 0x0b, + 0x7b8: 0x0b, 0x7b9: 0x0b, 0x7ba: 0x0b, 0x7bb: 0x0b, 0x7bc: 0x0b, 0x7bd: 0x0b, 0x7be: 0x0b, 0x7bf: 0x0b, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0b, 0x7c1: 0x0b, 0x7c2: 0x0b, 0x7c3: 0x0b, 0x7c4: 0x0b, 0x7c5: 0x0b, 0x7c6: 0x0b, 0x7c7: 0x0b, + 0x7c8: 0x0b, 0x7c9: 0x0b, 0x7ca: 0x0b, 0x7cb: 0x0b, 0x7cc: 0x0b, 0x7cd: 0x0b, 0x7ce: 0x0b, 0x7cf: 0x0b, + 0x7d0: 0x0b, 0x7d1: 0x0b, 0x7d2: 0x0b, 0x7d3: 0x0b, 0x7d4: 0x0b, 0x7d5: 0x0b, 0x7d6: 0x0b, 0x7d7: 0x0b, + 0x7d8: 0x0b, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x0b, 0x7dc: 0x0b, 0x7dd: 0x0b, 0x7de: 
0x0b, 0x7df: 0x0b, + 0x7e0: 0x0b, 0x7e1: 0x0b, 0x7e2: 0x0b, 0x7e3: 0x0b, 0x7e4: 0x0b, 0x7e5: 0x0b, 0x7e6: 0x0b, 0x7e7: 0x0b, + 0x7e8: 0x0b, 0x7e9: 0x0b, 0x7ea: 0x0b, 0x7eb: 0x0b, 0x7ec: 0x0b, 0x7ed: 0x0b, 0x7ee: 0x0b, 0x7ef: 0x0b, + 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, + 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, + // Block 0x20, offset 0x800 + 0x800: 0x177, 0x801: 0x178, 0x802: 0xb8, 0x803: 0xb8, 0x804: 0x179, 0x805: 0x179, 0x806: 0x179, 0x807: 0x17a, + 0x808: 0xb8, 0x809: 0xb8, 0x80a: 0xb8, 0x80b: 0xb8, 0x80c: 0xb8, 0x80d: 0xb8, 0x80e: 0xb8, 0x80f: 0xb8, + 0x810: 0xb8, 0x811: 0xb8, 0x812: 0xb8, 0x813: 0xb8, 0x814: 0xb8, 0x815: 0xb8, 0x816: 0xb8, 0x817: 0xb8, + 0x818: 0xb8, 0x819: 0xb8, 0x81a: 0xb8, 0x81b: 0xb8, 0x81c: 0xb8, 0x81d: 0xb8, 0x81e: 0xb8, 0x81f: 0xb8, + 0x820: 0xb8, 0x821: 0xb8, 0x822: 0xb8, 0x823: 0xb8, 0x824: 0xb8, 0x825: 0xb8, 0x826: 0xb8, 0x827: 0xb8, + 0x828: 0xb8, 0x829: 0xb8, 0x82a: 0xb8, 0x82b: 0xb8, 0x82c: 0xb8, 0x82d: 0xb8, 0x82e: 0xb8, 0x82f: 0xb8, + 0x830: 0xb8, 0x831: 0xb8, 0x832: 0xb8, 0x833: 0xb8, 0x834: 0xb8, 0x835: 0xb8, 0x836: 0xb8, 0x837: 0xb8, + 0x838: 0xb8, 0x839: 0xb8, 0x83a: 0xb8, 0x83b: 0xb8, 0x83c: 0xb8, 0x83d: 0xb8, 0x83e: 0xb8, 0x83f: 0xb8, + // Block 0x21, offset 0x840 + 0x840: 0x0b, 0x841: 0x0b, 0x842: 0x0b, 0x843: 0x0b, 0x844: 0x0b, 0x845: 0x0b, 0x846: 0x0b, 0x847: 0x0b, + 0x848: 0x0b, 0x849: 0x0b, 0x84a: 0x0b, 0x84b: 0x0b, 0x84c: 0x0b, 0x84d: 0x0b, 0x84e: 0x0b, 0x84f: 0x0b, + 0x850: 0x0b, 0x851: 0x0b, 0x852: 0x0b, 0x853: 0x0b, 0x854: 0x0b, 0x855: 0x0b, 0x856: 0x0b, 0x857: 0x0b, + 0x858: 0x0b, 0x859: 0x0b, 0x85a: 0x0b, 0x85b: 0x0b, 0x85c: 0x0b, 0x85d: 0x0b, 0x85e: 0x0b, 0x85f: 0x0b, + 0x860: 0x1e, 0x861: 0x0b, 0x862: 0x0b, 0x863: 0x0b, 0x864: 0x0b, 0x865: 0x0b, 0x866: 0x0b, 0x867: 0x0b, + 0x868: 0x0b, 0x869: 0x0b, 0x86a: 0x0b, 0x86b: 0x0b, 0x86c: 0x0b, 0x86d: 0x0b, 0x86e: 0x0b, 0x86f: 0x0b, + 
0x870: 0x0b, 0x871: 0x0b, 0x872: 0x0b, 0x873: 0x0b, 0x874: 0x0b, 0x875: 0x0b, 0x876: 0x0b, 0x877: 0x0b, + 0x878: 0x0b, 0x879: 0x0b, 0x87a: 0x0b, 0x87b: 0x0b, 0x87c: 0x0b, 0x87d: 0x0b, 0x87e: 0x0b, 0x87f: 0x0b, + // Block 0x22, offset 0x880 + 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, + 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, +} + +// idnaSparseOffset: 256 entries, 512 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x5c, 0x60, 0x6f, 0x74, 0x7b, 0x87, 0x95, 0xa3, 0xa8, 0xb1, 0xc1, 0xcf, 0xdc, 0xe8, 0xf9, 0x103, 0x10a, 0x117, 0x128, 0x12f, 0x13a, 0x149, 0x157, 0x161, 0x163, 0x167, 0x169, 0x175, 0x180, 0x188, 0x18e, 0x194, 0x199, 0x19e, 0x1a1, 0x1a5, 0x1ab, 0x1b0, 0x1bc, 0x1c6, 0x1cc, 0x1dd, 0x1e7, 0x1ea, 0x1f2, 0x1f5, 0x202, 0x20a, 0x20e, 0x215, 0x21d, 0x22d, 0x239, 0x23b, 0x245, 0x251, 0x25d, 0x269, 0x271, 0x276, 0x280, 0x291, 0x295, 0x2a0, 0x2a4, 0x2ad, 0x2b5, 0x2bb, 0x2c0, 0x2c3, 0x2c6, 0x2ca, 0x2d0, 0x2d4, 0x2d8, 0x2de, 0x2e5, 0x2eb, 0x2f3, 0x2fa, 0x305, 0x30f, 0x313, 0x316, 0x31c, 0x320, 0x322, 0x325, 0x327, 0x32a, 0x334, 0x337, 0x346, 0x34a, 0x34f, 0x352, 0x356, 0x35b, 0x360, 0x366, 0x36c, 0x37b, 0x381, 0x385, 0x394, 0x399, 0x3a1, 0x3ab, 0x3b6, 0x3be, 0x3cf, 0x3d8, 0x3e8, 0x3f5, 0x3ff, 0x404, 0x411, 0x415, 0x41a, 0x41c, 0x420, 0x422, 0x426, 0x42f, 0x435, 0x439, 0x449, 0x453, 0x458, 0x45b, 0x461, 0x468, 0x46d, 0x471, 0x477, 0x47c, 0x485, 0x48a, 0x490, 0x497, 0x49e, 0x4a5, 0x4a9, 0x4ae, 0x4b1, 0x4b6, 0x4c2, 0x4c8, 0x4cd, 0x4d4, 0x4dc, 0x4e1, 0x4e5, 0x4f5, 0x4fc, 0x500, 0x504, 0x50b, 0x50e, 0x511, 0x515, 0x519, 0x51f, 0x528, 0x534, 0x53b, 0x544, 0x54c, 0x553, 0x561, 0x56e, 0x57b, 0x584, 0x588, 0x596, 0x59e, 0x5a9, 0x5b2, 0x5b8, 0x5c0, 0x5c9, 0x5d3, 0x5d6, 0x5e2, 0x5e5, 0x5ea, 0x5ed, 0x5f7, 0x600, 0x60c, 0x60f, 0x614, 0x617, 0x61a, 0x61d, 0x624, 0x62b, 0x62f, 0x63a, 0x63d, 0x643, 0x648, 0x64c, 0x64f, 
0x652, 0x655, 0x65a, 0x664, 0x667, 0x66b, 0x67a, 0x686, 0x68a, 0x68f, 0x694, 0x698, 0x69d, 0x6a6, 0x6b1, 0x6b7, 0x6bf, 0x6c3, 0x6c7, 0x6cd, 0x6d3, 0x6d8, 0x6db, 0x6e9, 0x6f0, 0x6f3, 0x6f6, 0x6fa, 0x700, 0x705, 0x70f, 0x714, 0x717, 0x71a, 0x71d, 0x720, 0x724, 0x727, 0x737, 0x748, 0x74d, 0x74f, 0x751} + +// idnaSparseValues: 1876 entries, 7504 bytes +var idnaSparseValues = [1876]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x07}, + {value: 0xe105, lo: 0x80, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x97}, + {value: 0xe105, lo: 0x98, hi: 0x9e}, + {value: 0x001f, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbf}, + // Block 0x1, offset 0x8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0xe01d, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0335, lo: 0x83, hi: 0x83}, + {value: 0x034d, lo: 0x84, hi: 0x84}, + {value: 0x0365, lo: 0x85, hi: 0x85}, + {value: 0xe00d, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0xe00d, lo: 0x88, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x89}, + {value: 0xe00d, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe00d, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0x8d}, + {value: 0xe00d, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0xbf}, + // Block 0x2, offset 0x19 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0249, lo: 0xb0, hi: 0xb0}, + {value: 0x037d, lo: 0xb1, hi: 0xb1}, + {value: 0x0259, lo: 0xb2, hi: 0xb2}, + {value: 0x0269, lo: 0xb3, hi: 0xb3}, + {value: 0x034d, lo: 0xb4, hi: 0xb4}, + {value: 0x0395, lo: 0xb5, hi: 0xb5}, + {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, + {value: 0x0279, lo: 0xb7, hi: 0xb7}, + {value: 0x0289, lo: 0xb8, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbf}, + // Block 0x3, offset 0x25 + {value: 0x0000, lo: 0x01}, + {value: 0x1308, lo: 0x80, hi: 0xbf}, + // Block 0x4, 
offset 0x27 + {value: 0x0000, lo: 0x04}, + {value: 0x03f5, lo: 0x80, hi: 0x8f}, + {value: 0xe105, lo: 0x90, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x5, offset 0x2c + {value: 0x0000, lo: 0x07}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x0545, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x6, offset 0x34 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0401, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x88}, + {value: 0x0018, lo: 0x89, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x1308, lo: 0x91, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbe}, + {value: 0x1308, lo: 0xbf, hi: 0xbf}, + // Block 0x7, offset 0x3f + {value: 0x0000, lo: 0x0b}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x1308, lo: 0x81, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x83}, + {value: 0x1308, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x1308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x8, offset 0x4b + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0208, lo: 0x81, hi: 0x87}, + {value: 0x0408, lo: 0x88, hi: 0x88}, + {value: 0x0208, lo: 0x89, hi: 0x8a}, + {value: 0x1308, lo: 0x8b, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xad}, + {value: 0x0208, lo: 0xae, hi: 0xaf}, + {value: 0x1308, lo: 0xb0, hi: 0xb0}, + {value: 0x0408, lo: 0xb1, hi: 0xb3}, + {value: 0x0008, lo: 0xb4, hi: 0xb4}, + {value: 0x0429, lo: 0xb5, hi: 0xb5}, + {value: 
0x0451, lo: 0xb6, hi: 0xb6}, + {value: 0x0479, lo: 0xb7, hi: 0xb7}, + {value: 0x04a1, lo: 0xb8, hi: 0xb8}, + {value: 0x0208, lo: 0xb9, hi: 0xbf}, + // Block 0x9, offset 0x5c + {value: 0x0000, lo: 0x03}, + {value: 0x0208, lo: 0x80, hi: 0x87}, + {value: 0x0408, lo: 0x88, hi: 0x99}, + {value: 0x0208, lo: 0x9a, hi: 0xbf}, + // Block 0xa, offset 0x60 + {value: 0x0000, lo: 0x0e}, + {value: 0x1308, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0408, lo: 0x8d, hi: 0x8d}, + {value: 0x0208, lo: 0x8e, hi: 0x98}, + {value: 0x0408, lo: 0x99, hi: 0x9b}, + {value: 0x0208, lo: 0x9c, hi: 0xaa}, + {value: 0x0408, lo: 0xab, hi: 0xac}, + {value: 0x0208, lo: 0xad, hi: 0xb0}, + {value: 0x0408, lo: 0xb1, hi: 0xb1}, + {value: 0x0208, lo: 0xb2, hi: 0xb2}, + {value: 0x0408, lo: 0xb3, hi: 0xb4}, + {value: 0x0208, lo: 0xb5, hi: 0xb7}, + {value: 0x0408, lo: 0xb8, hi: 0xb9}, + {value: 0x0208, lo: 0xba, hi: 0xbf}, + // Block 0xb, offset 0x6f + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x1308, lo: 0xa6, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xc, offset 0x74 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0208, lo: 0x8a, hi: 0xaa}, + {value: 0x1308, lo: 0xab, hi: 0xb3}, + {value: 0x0008, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0xd, offset 0x7b + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x1308, lo: 0x96, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9a}, + {value: 0x1308, lo: 0x9b, hi: 0xa3}, + {value: 0x0008, lo: 0xa4, hi: 0xa4}, + {value: 0x1308, lo: 0xa5, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xa8}, + {value: 0x1308, lo: 0xa9, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xe, offset 0x87 + {value: 0x0000, lo: 0x0d}, + {value: 0x0408, lo: 0x80, 
hi: 0x80}, + {value: 0x0208, lo: 0x81, hi: 0x85}, + {value: 0x0408, lo: 0x86, hi: 0x87}, + {value: 0x0208, lo: 0x88, hi: 0x88}, + {value: 0x0408, lo: 0x89, hi: 0x89}, + {value: 0x0208, lo: 0x8a, hi: 0x93}, + {value: 0x0408, lo: 0x94, hi: 0x94}, + {value: 0x0208, lo: 0x95, hi: 0x95}, + {value: 0x0008, lo: 0x96, hi: 0x98}, + {value: 0x1308, lo: 0x99, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xf, offset 0x95 + {value: 0x0000, lo: 0x0d}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xa9}, + {value: 0x0408, lo: 0xaa, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0408, lo: 0xae, hi: 0xae}, + {value: 0x0208, lo: 0xaf, hi: 0xb0}, + {value: 0x0408, lo: 0xb1, hi: 0xb2}, + {value: 0x0208, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0208, lo: 0xb6, hi: 0xb8}, + {value: 0x0408, lo: 0xb9, hi: 0xb9}, + {value: 0x0208, lo: 0xba, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x10, offset 0xa3 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x93}, + {value: 0x1308, lo: 0x94, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x1308, lo: 0xa3, hi: 0xbf}, + // Block 0x11, offset 0xa8 + {value: 0x0000, lo: 0x08}, + {value: 0x1308, lo: 0x80, hi: 0x82}, + {value: 0x1008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb9}, + {value: 0x1308, lo: 0xba, hi: 0xba}, + {value: 0x1008, lo: 0xbb, hi: 0xbb}, + {value: 0x1308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x1008, lo: 0xbe, hi: 0xbf}, + // Block 0x12, offset 0xb1 + {value: 0x0000, lo: 0x0f}, + {value: 0x1308, lo: 0x80, hi: 0x80}, + {value: 0x1008, lo: 0x81, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x85}, + {value: 0x1008, lo: 0x86, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x1008, lo: 0x8a, hi: 0x8c}, + {value: 0x1b08, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + 
{value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x96}, + {value: 0x1008, lo: 0x97, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x13, offset 0xc1 + {value: 0x0000, lo: 0x0d}, + {value: 0x1308, lo: 0x80, hi: 0x80}, + {value: 0x1008, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x1308, lo: 0xbe, hi: 0xbf}, + // Block 0x14, offset 0xcf + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x1308, lo: 0x81, hi: 0x81}, + {value: 0x1008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x1008, lo: 0xbe, hi: 0xbf}, + // Block 0x15, offset 0xdc + {value: 0x0000, lo: 0x0b}, + {value: 0x0040, lo: 0x80, hi: 0x81}, + {value: 0x1008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x16, offset 0xe8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x89}, + {value: 
0x1b08, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x1008, lo: 0x8f, hi: 0x91}, + {value: 0x1308, lo: 0x92, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x95}, + {value: 0x1308, lo: 0x96, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x1008, lo: 0x98, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb1}, + {value: 0x1008, lo: 0xb2, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x17, offset 0xf9 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xb0}, + {value: 0x1308, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x08f1, lo: 0xb3, hi: 0xb3}, + {value: 0x1308, lo: 0xb4, hi: 0xb9}, + {value: 0x1b08, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x18, offset 0x103 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x1308, lo: 0x87, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0xbf}, + // Block 0x19, offset 0x10a + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x1308, lo: 0x88, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0961, lo: 0x9c, hi: 0x9c}, + {value: 0x0999, lo: 0x9d, hi: 0x9d}, + {value: 0x0008, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1a, offset 0x117 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe03d, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, 
lo: 0x8d, hi: 0x97}, + {value: 0x1308, lo: 0x98, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb4}, + {value: 0x1308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb6}, + {value: 0x1308, lo: 0xb7, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xb8}, + {value: 0x1308, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbd}, + {value: 0x1008, lo: 0xbe, hi: 0xbf}, + // Block 0x1b, offset 0x128 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x1308, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0x1c, offset 0x12f + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x1008, lo: 0xab, hi: 0xac}, + {value: 0x1308, lo: 0xad, hi: 0xb0}, + {value: 0x1008, lo: 0xb1, hi: 0xb1}, + {value: 0x1308, lo: 0xb2, hi: 0xb7}, + {value: 0x1008, lo: 0xb8, hi: 0xb8}, + {value: 0x1b08, lo: 0xb9, hi: 0xba}, + {value: 0x1008, lo: 0xbb, hi: 0xbc}, + {value: 0x1308, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x1d, offset 0x13a + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x1008, lo: 0x96, hi: 0x97}, + {value: 0x1308, lo: 0x98, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x1308, lo: 0x9e, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x1008, lo: 0xa2, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xa6}, + {value: 0x1008, lo: 0xa7, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x1308, lo: 0xb1, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbf}, + // Block 0x1e, offset 0x149 + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x1308, lo: 0x82, hi: 0x82}, + {value: 0x1008, lo: 0x83, hi: 0x84}, + {value: 0x1308, lo: 
0x85, hi: 0x86}, + {value: 0x1008, lo: 0x87, hi: 0x8c}, + {value: 0x1308, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x1008, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x1008, lo: 0x9a, hi: 0x9c}, + {value: 0x1308, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1f, offset 0x157 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x86}, + {value: 0x055d, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8c}, + {value: 0x055d, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0xe105, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0x20, offset 0x161 + {value: 0x0000, lo: 0x01}, + {value: 0x0018, lo: 0x80, hi: 0xbf}, + // Block 0x21, offset 0x163 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbf}, + // Block 0x22, offset 0x167 + {value: 0x0000, lo: 0x01}, + {value: 0x0008, lo: 0x80, hi: 0xbf}, + // Block 0x23, offset 0x169 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x24, offset 0x175 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, 
hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x25, offset 0x180 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x26, offset 0x188 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x27, offset 0x18e + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x1308, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x28, offset 0x194 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x29, offset 0x199 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x2a, offset 0x19e + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x2b, offset 0x1a1 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xbf}, + // Block 0x2c, offset 0x1a5 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2d, offset 0x1ab + {value: 0x0000, lo: 0x04}, + {value: 
0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x2e, offset 0x1b0 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x91}, + {value: 0x1308, lo: 0x92, hi: 0x93}, + {value: 0x1b08, lo: 0x94, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb1}, + {value: 0x1308, lo: 0xb2, hi: 0xb3}, + {value: 0x1b08, lo: 0xb4, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x2f, offset 0x1bc + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x1308, lo: 0x92, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x1308, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x30, offset 0x1c6 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xb3}, + {value: 0x1340, lo: 0xb4, hi: 0xb5}, + {value: 0x1008, lo: 0xb6, hi: 0xb6}, + {value: 0x1308, lo: 0xb7, hi: 0xbd}, + {value: 0x1008, lo: 0xbe, hi: 0xbf}, + // Block 0x31, offset 0x1cc + {value: 0x0000, lo: 0x10}, + {value: 0x1008, lo: 0x80, hi: 0x85}, + {value: 0x1308, lo: 0x86, hi: 0x86}, + {value: 0x1008, lo: 0x87, hi: 0x88}, + {value: 0x1308, lo: 0x89, hi: 0x91}, + {value: 0x1b08, lo: 0x92, hi: 0x92}, + {value: 0x1308, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0x96}, + {value: 0x0008, lo: 0x97, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x1308, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x32, 
offset 0x1dd + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x86}, + {value: 0x0218, lo: 0x87, hi: 0x87}, + {value: 0x0018, lo: 0x88, hi: 0x8a}, + {value: 0x13c0, lo: 0x8b, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xbf}, + // Block 0x33, offset 0x1e7 + {value: 0x0000, lo: 0x02}, + {value: 0x0208, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x34, offset 0x1ea + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x1308, lo: 0x85, hi: 0x86}, + {value: 0x0208, lo: 0x87, hi: 0xa8}, + {value: 0x1308, lo: 0xa9, hi: 0xa9}, + {value: 0x0208, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x35, offset 0x1f2 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x36, offset 0x1f5 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x1308, lo: 0xa0, hi: 0xa2}, + {value: 0x1008, lo: 0xa3, hi: 0xa6}, + {value: 0x1308, lo: 0xa7, hi: 0xa8}, + {value: 0x1008, lo: 0xa9, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x1008, lo: 0xb0, hi: 0xb1}, + {value: 0x1308, lo: 0xb2, hi: 0xb2}, + {value: 0x1008, lo: 0xb3, hi: 0xb8}, + {value: 0x1308, lo: 0xb9, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x37, offset 0x202 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x38, offset 0x20a + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + 
{value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x39, offset 0x20e + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0028, lo: 0x9a, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xbf}, + // Block 0x3a, offset 0x215 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x1308, lo: 0x97, hi: 0x98}, + {value: 0x1008, lo: 0x99, hi: 0x9a}, + {value: 0x1308, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x3b, offset 0x21d + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x94}, + {value: 0x1008, lo: 0x95, hi: 0x95}, + {value: 0x1308, lo: 0x96, hi: 0x96}, + {value: 0x1008, lo: 0x97, hi: 0x97}, + {value: 0x1308, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x1b08, lo: 0xa0, hi: 0xa0}, + {value: 0x1008, lo: 0xa1, hi: 0xa1}, + {value: 0x1308, lo: 0xa2, hi: 0xa2}, + {value: 0x1008, lo: 0xa3, hi: 0xa4}, + {value: 0x1308, lo: 0xa5, hi: 0xac}, + {value: 0x1008, lo: 0xad, hi: 0xb2}, + {value: 0x1308, lo: 0xb3, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x1308, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x22d + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x1308, lo: 0xb0, hi: 0xbd}, + {value: 0x1318, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x239 + {value: 0x0000, lo: 0x01}, + {value: 0x0040, lo: 0x80, hi: 0xbf}, + // Block 0x3e, offset 0x23b + {value: 0x0000, lo: 0x09}, + {value: 0x1308, lo: 0x80, hi: 0x83}, + {value: 
0x1008, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xb3}, + {value: 0x1308, lo: 0xb4, hi: 0xb4}, + {value: 0x1008, lo: 0xb5, hi: 0xb5}, + {value: 0x1308, lo: 0xb6, hi: 0xba}, + {value: 0x1008, lo: 0xbb, hi: 0xbb}, + {value: 0x1308, lo: 0xbc, hi: 0xbc}, + {value: 0x1008, lo: 0xbd, hi: 0xbf}, + // Block 0x3f, offset 0x245 + {value: 0x0000, lo: 0x0b}, + {value: 0x1008, lo: 0x80, hi: 0x81}, + {value: 0x1308, lo: 0x82, hi: 0x82}, + {value: 0x1008, lo: 0x83, hi: 0x83}, + {value: 0x1808, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xaa}, + {value: 0x1308, lo: 0xab, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x40, offset 0x251 + {value: 0x0000, lo: 0x0b}, + {value: 0x1308, lo: 0x80, hi: 0x81}, + {value: 0x1008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa0}, + {value: 0x1008, lo: 0xa1, hi: 0xa1}, + {value: 0x1308, lo: 0xa2, hi: 0xa5}, + {value: 0x1008, lo: 0xa6, hi: 0xa7}, + {value: 0x1308, lo: 0xa8, hi: 0xa9}, + {value: 0x1808, lo: 0xaa, hi: 0xaa}, + {value: 0x1b08, lo: 0xab, hi: 0xab}, + {value: 0x1308, lo: 0xac, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xbf}, + // Block 0x41, offset 0x25d + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x1308, lo: 0xa6, hi: 0xa6}, + {value: 0x1008, lo: 0xa7, hi: 0xa7}, + {value: 0x1308, lo: 0xa8, hi: 0xa9}, + {value: 0x1008, lo: 0xaa, hi: 0xac}, + {value: 0x1308, lo: 0xad, hi: 0xad}, + {value: 0x1008, lo: 0xae, hi: 0xae}, + {value: 0x1308, lo: 0xaf, hi: 0xb1}, + {value: 0x1808, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x42, offset 0x269 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x1008, lo: 0xa4, hi: 0xab}, + {value: 0x1308, lo: 0xac, hi: 0xb3}, + {value: 0x1008, lo: 0xb4, hi: 0xb5}, + {value: 0x1308, 
lo: 0xb6, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x43, offset 0x271 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x44, offset 0x276 + {value: 0x0000, lo: 0x09}, + {value: 0x0e29, lo: 0x80, hi: 0x80}, + {value: 0x0e41, lo: 0x81, hi: 0x81}, + {value: 0x0e59, lo: 0x82, hi: 0x82}, + {value: 0x0e71, lo: 0x83, hi: 0x83}, + {value: 0x0e89, lo: 0x84, hi: 0x85}, + {value: 0x0ea1, lo: 0x86, hi: 0x86}, + {value: 0x0eb9, lo: 0x87, hi: 0x87}, + {value: 0x057d, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0x45, offset 0x280 + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x1308, lo: 0x90, hi: 0x92}, + {value: 0x0018, lo: 0x93, hi: 0x93}, + {value: 0x1308, lo: 0x94, hi: 0xa0}, + {value: 0x1008, lo: 0xa1, hi: 0xa1}, + {value: 0x1308, lo: 0xa2, hi: 0xa8}, + {value: 0x0008, lo: 0xa9, hi: 0xac}, + {value: 0x1308, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb1}, + {value: 0x1008, lo: 0xb2, hi: 0xb3}, + {value: 0x1308, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x1308, lo: 0xb8, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x46, offset 0x291 + {value: 0x0000, lo: 0x03}, + {value: 0x1308, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x1308, lo: 0xbb, hi: 0xbf}, + // Block 0x47, offset 0x295 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x87}, + {value: 0xe045, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0xe045, lo: 0x98, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0xe045, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb7}, + 
{value: 0xe045, lo: 0xb8, hi: 0xbf}, + // Block 0x48, offset 0x2a0 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x1318, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x49, offset 0x2a4 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x24c1, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x4a, offset 0x2ad + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x24f1, lo: 0xac, hi: 0xac}, + {value: 0x2529, lo: 0xad, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xae}, + {value: 0x2579, lo: 0xaf, hi: 0xaf}, + {value: 0x25b1, lo: 0xb0, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x4b, offset 0x2b5 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x9f}, + {value: 0x0080, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xad}, + {value: 0x0080, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x4c, offset 0x2bb + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xa8}, + {value: 0x09c5, lo: 0xa9, hi: 0xa9}, + {value: 0x09e5, lo: 0xaa, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xbf}, + // Block 0x4d, offset 0x2c0 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x4e, offset 0x2c3 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0x4f, offset 0x2c6 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x28c1, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x50, offset 0x2ca + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0e66, lo: 0xb4, hi: 0xb4}, + {value: 0x292a, lo: 0xb5, 
hi: 0xb5}, + {value: 0x0e86, lo: 0xb6, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x51, offset 0x2d0 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9b}, + {value: 0x2941, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0xbf}, + // Block 0x52, offset 0x2d4 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x53, offset 0x2d8 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0018, lo: 0xbd, hi: 0xbf}, + // Block 0x54, offset 0x2de + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0xab}, + {value: 0x0018, lo: 0xac, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x55, offset 0x2e5 + {value: 0x0000, lo: 0x05}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x03f5, lo: 0x90, hi: 0x9f}, + {value: 0x0ea5, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x56, offset 0x2eb + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x57, offset 0x2f3 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xae}, + {value: 0xe075, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbe}, + {value: 0x1b08, lo: 0xbf, hi: 0xbf}, + // Block 0x58, offset 0x2fa + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 
0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x59, offset 0x305 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x1308, lo: 0xa0, hi: 0xbf}, + // Block 0x5a, offset 0x30f + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x5b, offset 0x313 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0xbf}, + // Block 0x5c, offset 0x316 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9e}, + {value: 0x0edd, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x5d, offset 0x31c + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb2}, + {value: 0x0efd, lo: 0xb3, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x5e, offset 0x320 + {value: 0x0020, lo: 0x01}, + {value: 0x0f1d, lo: 0x80, hi: 0xbf}, + // Block 0x5f, offset 0x322 + {value: 0x0020, lo: 0x02}, + {value: 0x171d, lo: 0x80, hi: 0x8f}, + {value: 0x18fd, lo: 0x90, hi: 0xbf}, + // Block 0x60, offset 0x325 + {value: 0x0020, lo: 0x01}, + {value: 0x1efd, lo: 0x80, hi: 0xbf}, + // Block 0x61, offset 0x327 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x62, offset 0x32a + {value: 0x0000, lo: 
0x09}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x1308, lo: 0x99, hi: 0x9a}, + {value: 0x29e2, lo: 0x9b, hi: 0x9b}, + {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9e}, + {value: 0x2a31, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x63, offset 0x334 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x2a69, lo: 0xbf, hi: 0xbf}, + // Block 0x64, offset 0x337 + {value: 0x0000, lo: 0x0e}, + {value: 0x0040, lo: 0x80, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xb0}, + {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, + {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, + {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, + {value: 0x2a7d, lo: 0xb4, hi: 0xb4}, + {value: 0x2a5d, lo: 0xb5, hi: 0xb5}, + {value: 0x2a9d, lo: 0xb6, hi: 0xb6}, + {value: 0x2abd, lo: 0xb7, hi: 0xb7}, + {value: 0x2add, lo: 0xb8, hi: 0xb9}, + {value: 0x2afd, lo: 0xba, hi: 0xbb}, + {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, + {value: 0x2afd, lo: 0xbe, hi: 0xbf}, + // Block 0x65, offset 0x346 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x66, offset 0x34a + {value: 0x0030, lo: 0x04}, + {value: 0x2aa2, lo: 0x80, hi: 0x9d}, + {value: 0x305a, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x30a2, lo: 0xa0, hi: 0xbf}, + // Block 0x67, offset 0x34f + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x68, offset 0x352 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x69, offset 0x356 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xbd}, + {value: 0x0018, 
lo: 0xbe, hi: 0xbf}, + // Block 0x6a, offset 0x35b + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x6b, offset 0x360 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xaf}, + {value: 0x1308, lo: 0xb0, hi: 0xb1}, + {value: 0x0018, lo: 0xb2, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6c, offset 0x366 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xb7}, + {value: 0x2009, lo: 0xb8, hi: 0xb8}, + {value: 0x6e89, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xbf}, + // Block 0x6d, offset 0x36c + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x1308, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x85}, + {value: 0x1b08, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x1308, lo: 0x8b, hi: 0x8b}, + {value: 0x0008, lo: 0x8c, hi: 0xa2}, + {value: 0x1008, lo: 0xa3, hi: 0xa4}, + {value: 0x1308, lo: 0xa5, hi: 0xa6}, + {value: 0x1008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x6e, offset 0x37b + {value: 0x0000, lo: 0x05}, + {value: 0x0208, lo: 0x80, hi: 0xb1}, + {value: 0x0108, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6f, offset 0x381 + {value: 0x0000, lo: 0x03}, + {value: 0x1008, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0xb3}, + {value: 0x1008, lo: 0xb4, hi: 0xbf}, + // Block 0x70, offset 0x385 + {value: 0x0000, lo: 0x0e}, + {value: 0x1008, lo: 0x80, hi: 0x83}, + {value: 0x1b08, lo: 0x84, hi: 0x84}, + {value: 0x1308, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + 
{value: 0x0018, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x1308, lo: 0xa0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0008, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x71, offset 0x394 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x1308, lo: 0xa6, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x72, offset 0x399 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x1308, lo: 0x87, hi: 0x91}, + {value: 0x1008, lo: 0x92, hi: 0x92}, + {value: 0x1808, lo: 0x93, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x73, offset 0x3a1 + {value: 0x0000, lo: 0x09}, + {value: 0x1308, lo: 0x80, hi: 0x82}, + {value: 0x1008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb2}, + {value: 0x1308, lo: 0xb3, hi: 0xb3}, + {value: 0x1008, lo: 0xb4, hi: 0xb5}, + {value: 0x1308, lo: 0xb6, hi: 0xb9}, + {value: 0x1008, lo: 0xba, hi: 0xbb}, + {value: 0x1308, lo: 0xbc, hi: 0xbc}, + {value: 0x1008, lo: 0xbd, hi: 0xbf}, + // Block 0x74, offset 0x3ab + {value: 0x0000, lo: 0x0a}, + {value: 0x1808, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa4}, + {value: 0x1308, lo: 0xa5, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x75, offset 0x3b6 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa8}, + {value: 0x1308, lo: 0xa9, hi: 0xae}, + {value: 0x1008, lo: 0xaf, hi: 0xb0}, + {value: 0x1308, lo: 0xb1, 
hi: 0xb2}, + {value: 0x1008, lo: 0xb3, hi: 0xb4}, + {value: 0x1308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x76, offset 0x3be + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x82}, + {value: 0x1308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8b}, + {value: 0x1308, lo: 0x8c, hi: 0x8c}, + {value: 0x1008, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x1008, lo: 0xbb, hi: 0xbb}, + {value: 0x1308, lo: 0xbc, hi: 0xbc}, + {value: 0x1008, lo: 0xbd, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0x77, offset 0x3cf + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x1308, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x1308, lo: 0xb2, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x1308, lo: 0xb7, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbd}, + {value: 0x1308, lo: 0xbe, hi: 0xbf}, + // Block 0x78, offset 0x3d8 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x1308, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x9a}, + {value: 0x0008, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xaa}, + {value: 0x1008, lo: 0xab, hi: 0xab}, + {value: 0x1308, lo: 0xac, hi: 0xad}, + {value: 0x1008, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb4}, + {value: 0x1008, lo: 0xb5, hi: 0xb5}, + {value: 0x1b08, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x79, offset 0x3e8 + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 
0x88}, + {value: 0x0008, lo: 0x89, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x90}, + {value: 0x0008, lo: 0x91, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x7a, offset 0x3f5 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x4465, lo: 0x9c, hi: 0x9c}, + {value: 0x447d, lo: 0x9d, hi: 0x9d}, + {value: 0x2971, lo: 0x9e, hi: 0x9e}, + {value: 0xe06d, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xaf}, + {value: 0x4495, lo: 0xb0, hi: 0xbf}, + // Block 0x7b, offset 0x3ff + {value: 0x0000, lo: 0x04}, + {value: 0x44b5, lo: 0x80, hi: 0x8f}, + {value: 0x44d5, lo: 0x90, hi: 0x9f}, + {value: 0x44f5, lo: 0xa0, hi: 0xaf}, + {value: 0x44d5, lo: 0xb0, hi: 0xbf}, + // Block 0x7c, offset 0x404 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 0x1008, lo: 0xa3, hi: 0xa4}, + {value: 0x1308, lo: 0xa5, hi: 0xa5}, + {value: 0x1008, lo: 0xa6, hi: 0xa7}, + {value: 0x1308, lo: 0xa8, hi: 0xa8}, + {value: 0x1008, lo: 0xa9, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xab}, + {value: 0x1008, lo: 0xac, hi: 0xac}, + {value: 0x1b08, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x7d, offset 0x411 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x7e, offset 0x415 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x7f, offset 0x41a + {value: 0x0020, lo: 0x01}, + {value: 0x4515, lo: 0x80, hi: 0xbf}, 
+ // Block 0x80, offset 0x41c + {value: 0x0020, lo: 0x03}, + {value: 0x4d15, lo: 0x80, hi: 0x94}, + {value: 0x4ad5, lo: 0x95, hi: 0x95}, + {value: 0x4fb5, lo: 0x96, hi: 0xbf}, + // Block 0x81, offset 0x420 + {value: 0x0020, lo: 0x01}, + {value: 0x54f5, lo: 0x80, hi: 0xbf}, + // Block 0x82, offset 0x422 + {value: 0x0020, lo: 0x03}, + {value: 0x5cf5, lo: 0x80, hi: 0x84}, + {value: 0x5655, lo: 0x85, hi: 0x85}, + {value: 0x5d95, lo: 0x86, hi: 0xbf}, + // Block 0x83, offset 0x426 + {value: 0x0020, lo: 0x08}, + {value: 0x6b55, lo: 0x80, hi: 0x8f}, + {value: 0x6d15, lo: 0x90, hi: 0x90}, + {value: 0x6d55, lo: 0x91, hi: 0xab}, + {value: 0x6ea1, lo: 0xac, hi: 0xac}, + {value: 0x70b5, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x70d5, lo: 0xb0, hi: 0xbf}, + // Block 0x84, offset 0x42f + {value: 0x0020, lo: 0x05}, + {value: 0x72d5, lo: 0x80, hi: 0xad}, + {value: 0x6535, lo: 0xae, hi: 0xae}, + {value: 0x7895, lo: 0xaf, hi: 0xb5}, + {value: 0x6f55, lo: 0xb6, hi: 0xb6}, + {value: 0x7975, lo: 0xb7, hi: 0xbf}, + // Block 0x85, offset 0x435 + {value: 0x0028, lo: 0x03}, + {value: 0x7c21, lo: 0x80, hi: 0x82}, + {value: 0x7be1, lo: 0x83, hi: 0x83}, + {value: 0x7c99, lo: 0x84, hi: 0xbf}, + // Block 0x86, offset 0x439 + {value: 0x0038, lo: 0x0f}, + {value: 0x9db1, lo: 0x80, hi: 0x83}, + {value: 0x9e59, lo: 0x84, hi: 0x85}, + {value: 0x9e91, lo: 0x86, hi: 0x87}, + {value: 0x9ec9, lo: 0x88, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0xa089, lo: 0x92, hi: 0x97}, + {value: 0xa1a1, lo: 0x98, hi: 0x9c}, + {value: 0xa281, lo: 0x9d, hi: 0xb3}, + {value: 0x9d41, lo: 0xb4, hi: 0xb4}, + {value: 0x9db1, lo: 0xb5, hi: 0xb5}, + {value: 0xa789, lo: 0xb6, hi: 0xbb}, + {value: 0xa869, lo: 0xbc, hi: 0xbc}, + {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, + {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, + // Block 0x87, offset 0x449 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x8b}, + 
{value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x88, offset 0x453 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x89, offset 0x458 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x8a, offset 0x45b + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x8b, offset 0x461 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x8c, offset 0x468 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbc}, + {value: 0x1308, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x8d, offset 0x46d + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8e, offset 0x471 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x9f}, + {value: 0x1308, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x8f, offset 0x477 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa3}, + 
{value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x90, offset 0x47c + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb5}, + {value: 0x1308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x91, offset 0x485 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x92, offset 0x48a + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x93, offset 0x490 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x97}, + {value: 0x8ad5, lo: 0x98, hi: 0x9f}, + {value: 0x8aed, lo: 0xa0, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xbf}, + // Block 0x94, offset 0x497 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x8aed, lo: 0xb0, hi: 0xb7}, + {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, + // Block 0x95, offset 0x49e + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x96, offset 0x4a5 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x97, offset 
0x4a9 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xae}, + {value: 0x0018, lo: 0xaf, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x98, offset 0x4ae + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x99, offset 0x4b1 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xbf}, + // Block 0x9a, offset 0x4b6 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x9b, offset 0x4c2 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x9c, offset 0x4c8 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa6}, + {value: 0x0018, lo: 0xa7, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x9d, offset 0x4cd + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb3}, + {value: 0x0008, lo: 0xb4, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x9e, offset 0x4d4 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0018, lo: 0x96, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, 
hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x9f, offset 0x4dc + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0xa0, offset 0x4e1 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xbf}, + // Block 0xa1, offset 0x4e5 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x1308, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x1308, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8b}, + {value: 0x1308, lo: 0x8c, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x94}, + {value: 0x0008, lo: 0x95, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb7}, + {value: 0x1308, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x1b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa2, offset 0x4f5 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbc}, + {value: 0x0018, lo: 0xbd, hi: 0xbf}, + // Block 0xa3, offset 0x4fc + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xa4, offset 0x500 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb8}, + {value: 0x0018, lo: 0xb9, hi: 0xbf}, + // Block 0xa5, offset 0x504 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9f}, + {value: 
0x0008, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbf}, + // Block 0xa6, offset 0x50b + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xa7, offset 0x50e + {value: 0x0000, lo: 0x02}, + {value: 0x03dd, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xa8, offset 0x511 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xa9, offset 0x515 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xaa, offset 0x519 + {value: 0x0000, lo: 0x05}, + {value: 0x1008, lo: 0x80, hi: 0x80}, + {value: 0x1308, lo: 0x81, hi: 0x81}, + {value: 0x1008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb7}, + {value: 0x1308, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x51f + {value: 0x0000, lo: 0x08}, + {value: 0x1308, lo: 0x80, hi: 0x85}, + {value: 0x1b08, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbe}, + {value: 0x1b08, lo: 0xbf, hi: 0xbf}, + // Block 0xac, offset 0x528 + {value: 0x0000, lo: 0x0b}, + {value: 0x1308, lo: 0x80, hi: 0x81}, + {value: 0x1008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xaf}, + {value: 0x1008, lo: 0xb0, hi: 0xb2}, + {value: 0x1308, lo: 0xb3, hi: 0xb6}, + {value: 0x1008, lo: 0xb7, hi: 0xb8}, + {value: 0x1b08, lo: 0xb9, hi: 0xb9}, + {value: 0x1308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbc}, + {value: 0x0340, lo: 0xbd, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0xad, offset 0x534 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x8f}, + {value: 
0x0008, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xae, offset 0x53b + {value: 0x0000, lo: 0x08}, + {value: 0x1308, lo: 0x80, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa6}, + {value: 0x1308, lo: 0xa7, hi: 0xab}, + {value: 0x1008, lo: 0xac, hi: 0xac}, + {value: 0x1308, lo: 0xad, hi: 0xb2}, + {value: 0x1b08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xbf}, + // Block 0xaf, offset 0x544 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb2}, + {value: 0x1308, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb0, offset 0x54c + {value: 0x0000, lo: 0x06}, + {value: 0x1308, lo: 0x80, hi: 0x81}, + {value: 0x1008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb2}, + {value: 0x1008, lo: 0xb3, hi: 0xb5}, + {value: 0x1308, lo: 0xb6, hi: 0xbe}, + {value: 0x1008, lo: 0xbf, hi: 0xbf}, + // Block 0xb1, offset 0x553 + {value: 0x0000, lo: 0x0d}, + {value: 0x1808, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x89}, + {value: 0x1308, lo: 0x8a, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xb2, offset 0x561 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0xab}, + {value: 0x1008, lo: 0xac, hi: 0xae}, + {value: 0x1308, lo: 0xaf, hi: 0xb1}, + {value: 0x1008, lo: 0xb2, hi: 
0xb3}, + {value: 0x1308, lo: 0xb4, hi: 0xb4}, + {value: 0x1808, lo: 0xb5, hi: 0xb5}, + {value: 0x1308, lo: 0xb6, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbd}, + {value: 0x1308, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xb3, offset 0x56e + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xb4, offset 0x57b + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x1308, lo: 0x9f, hi: 0x9f}, + {value: 0x1008, lo: 0xa0, hi: 0xa2}, + {value: 0x1308, lo: 0xa3, hi: 0xa9}, + {value: 0x1b08, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb5, offset 0x584 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x1008, lo: 0xb5, hi: 0xb7}, + {value: 0x1308, lo: 0xb8, hi: 0xbf}, + // Block 0xb6, offset 0x588 + {value: 0x0000, lo: 0x0d}, + {value: 0x1008, lo: 0x80, hi: 0x81}, + {value: 0x1b08, lo: 0x82, hi: 0x82}, + {value: 0x1308, lo: 0x83, hi: 0x84}, + {value: 0x1008, lo: 0x85, hi: 0x85}, + {value: 0x1308, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xb7, offset 0x596 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x1008, 
lo: 0xb0, hi: 0xb2}, + {value: 0x1308, lo: 0xb3, hi: 0xb8}, + {value: 0x1008, lo: 0xb9, hi: 0xb9}, + {value: 0x1308, lo: 0xba, hi: 0xba}, + {value: 0x1008, lo: 0xbb, hi: 0xbe}, + {value: 0x1308, lo: 0xbf, hi: 0xbf}, + // Block 0xb8, offset 0x59e + {value: 0x0000, lo: 0x0a}, + {value: 0x1308, lo: 0x80, hi: 0x80}, + {value: 0x1008, lo: 0x81, hi: 0x81}, + {value: 0x1b08, lo: 0x82, hi: 0x82}, + {value: 0x1308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xb9, offset 0x5a9 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x1008, lo: 0xaf, hi: 0xb1}, + {value: 0x1308, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x1008, lo: 0xb8, hi: 0xbb}, + {value: 0x1308, lo: 0xbc, hi: 0xbd}, + {value: 0x1008, lo: 0xbe, hi: 0xbe}, + {value: 0x1b08, lo: 0xbf, hi: 0xbf}, + // Block 0xba, offset 0x5b2 + {value: 0x0000, lo: 0x05}, + {value: 0x1308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9b}, + {value: 0x1308, lo: 0x9c, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xbb, offset 0x5b8 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x1008, lo: 0xb0, hi: 0xb2}, + {value: 0x1308, lo: 0xb3, hi: 0xba}, + {value: 0x1008, lo: 0xbb, hi: 0xbc}, + {value: 0x1308, lo: 0xbd, hi: 0xbd}, + {value: 0x1008, lo: 0xbe, hi: 0xbe}, + {value: 0x1b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbc, offset 0x5c0 + {value: 0x0000, lo: 0x08}, + {value: 0x1308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // 
Block 0xbd, offset 0x5c9 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x1308, lo: 0xab, hi: 0xab}, + {value: 0x1008, lo: 0xac, hi: 0xac}, + {value: 0x1308, lo: 0xad, hi: 0xad}, + {value: 0x1008, lo: 0xae, hi: 0xaf}, + {value: 0x1308, lo: 0xb0, hi: 0xb5}, + {value: 0x1808, lo: 0xb6, hi: 0xb6}, + {value: 0x1308, lo: 0xb7, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0xbe, offset 0x5d3 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xbf, offset 0x5d6 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9c}, + {value: 0x1308, lo: 0x9d, hi: 0x9f}, + {value: 0x1008, lo: 0xa0, hi: 0xa1}, + {value: 0x1308, lo: 0xa2, hi: 0xa5}, + {value: 0x1008, lo: 0xa6, hi: 0xa6}, + {value: 0x1308, lo: 0xa7, hi: 0xaa}, + {value: 0x1b08, lo: 0xab, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xc0, offset 0x5e2 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xbf}, + // Block 0xc1, offset 0x5e5 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xc2, offset 0x5ea + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xc3, offset 0x5ed + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xae}, + {value: 0x1008, lo: 0xaf, hi: 0xaf}, + {value: 0x1308, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x1308, lo: 0xb8, hi: 0xbd}, + {value: 0x1008, lo: 0xbe, hi: 0xbe}, + {value: 0x1b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc4, offset 0x5f7 + {value: 0x0000, lo: 0x08}, + {value: 
0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xbf}, + // Block 0xc5, offset 0x600 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x1308, lo: 0x92, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa8}, + {value: 0x1008, lo: 0xa9, hi: 0xa9}, + {value: 0x1308, lo: 0xaa, hi: 0xb0}, + {value: 0x1008, lo: 0xb1, hi: 0xb1}, + {value: 0x1308, lo: 0xb2, hi: 0xb3}, + {value: 0x1008, lo: 0xb4, hi: 0xb4}, + {value: 0x1308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xc6, offset 0x60c + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xc7, offset 0x60f + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xc8, offset 0x614 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xc9, offset 0x617 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xbf}, + // Block 0xca, offset 0x61a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xcb, offset 0x61d + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xcc, offset 0x624 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 
0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x1308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xcd, offset 0x62b + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x1308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xce, offset 0x62f + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0xcf, offset 0x63a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xd0, offset 0x63d + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x1008, lo: 0x91, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd1, offset 0x643 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8e}, + {value: 0x1308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xd2, offset 0x648 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0xd3, offset 0x64c + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xd4, offset 0x64f + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xd5, offset 0x652 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0xbf}, + // Block 0xd6, offset 0x655 + 
{value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xd7, offset 0x65a + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x1308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xd8, offset 0x664 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xd9, offset 0x667 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xda, offset 0x66b + {value: 0x0000, lo: 0x0e}, + {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, + {value: 0xb601, lo: 0x9f, hi: 0x9f}, + {value: 0xb649, lo: 0xa0, hi: 0xa0}, + {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, + {value: 0xb719, lo: 0xa2, hi: 0xa2}, + {value: 0xb781, lo: 0xa3, hi: 0xa3}, + {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, + {value: 0x1018, lo: 0xa5, hi: 0xa6}, + {value: 0x1318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x1018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x1318, lo: 0xbb, hi: 0xbf}, + // Block 0xdb, offset 0x67a + {value: 0x0000, lo: 0x0b}, + {value: 0x1318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x1318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x1318, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0xb851, lo: 0xbb, hi: 0xbb}, + {value: 0xb899, lo: 0xbc, hi: 0xbc}, + {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, + {value: 0xb949, lo: 0xbe, hi: 0xbe}, + {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, 
+ // Block 0xdc, offset 0x686 + {value: 0x0000, lo: 0x03}, + {value: 0xba19, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xbf}, + // Block 0xdd, offset 0x68a + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x1318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0xde, offset 0x68f + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xdf, offset 0x694 + {value: 0x0000, lo: 0x03}, + {value: 0x1308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xba}, + {value: 0x1308, lo: 0xbb, hi: 0xbf}, + // Block 0xe0, offset 0x698 + {value: 0x0000, lo: 0x04}, + {value: 0x1308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x1308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0xe1, offset 0x69d + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x1308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9a}, + {value: 0x1308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x1308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xe2, offset 0x6a6 + {value: 0x0000, lo: 0x0a}, + {value: 0x1308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x1308, lo: 0x88, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9a}, + {value: 0x1308, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x1308, lo: 0xa3, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xa5}, + {value: 0x1308, lo: 0xa6, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xe3, offset 0x6b1 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8f}, + 
{value: 0x1308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xe4, offset 0x6b7 + {value: 0x0000, lo: 0x07}, + {value: 0x0208, lo: 0x80, hi: 0x83}, + {value: 0x1308, lo: 0x84, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xe5, offset 0x6bf + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe6, offset 0x6c3 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0xe7, offset 0x6c7 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0xe8, offset 0x6cd + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xe9, offset 0x6d3 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0xc1c1, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xea, offset 0x6d8 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xbf}, + // Block 0xeb, offset 0x6db + {value: 0x0000, lo: 0x0d}, + {value: 0xc7e9, lo: 0x80, hi: 0x80}, + {value: 0xc839, lo: 0x81, hi: 0x81}, + {value: 0xc889, lo: 0x82, hi: 0x82}, + {value: 0xc8d9, lo: 0x83, hi: 0x83}, + {value: 0xc929, lo: 0x84, hi: 0x84}, + {value: 0xc979, lo: 0x85, hi: 0x85}, + {value: 0xc9c9, lo: 0x86, hi: 0x86}, + {value: 0xca19, lo: 0x87, hi: 0x87}, + 
{value: 0xca69, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0xcab9, lo: 0x90, hi: 0x90}, + {value: 0xcad9, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0xbf}, + // Block 0xec, offset 0x6e9 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xed, offset 0x6f0 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0xee, offset 0x6f3 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0xbf}, + // Block 0xef, offset 0x6f6 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0xf0, offset 0x6fa + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0xf1, offset 0x700 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0xf2, offset 0x705 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb2}, + {value: 0x0018, lo: 0xb3, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xf3, offset 0x70f + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9e}, + 
{value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xf4, offset 0x714 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0xbf}, + // Block 0xf5, offset 0x717 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0xbf}, + // Block 0xf6, offset 0x71a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xf7, offset 0x71d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xf8, offset 0x720 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0xf9, offset 0x724 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xfa, offset 0x727 + {value: 0x0020, lo: 0x0f}, + {value: 0xdeb9, lo: 0x80, hi: 0x89}, + {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, + {value: 0xdff9, lo: 0x8b, hi: 0x9c}, + {value: 0x8e1d, lo: 0x9d, hi: 0x9d}, + {value: 0xe239, lo: 0x9e, hi: 0xa2}, + {value: 0x8e3d, lo: 0xa3, hi: 0xa3}, + {value: 0xe2d9, lo: 0xa4, hi: 0xab}, + {value: 0x7ed5, lo: 0xac, hi: 0xac}, + {value: 0xe3d9, lo: 0xad, hi: 0xaf}, + {value: 0x8e5d, lo: 0xb0, hi: 0xb0}, + {value: 0xe439, lo: 0xb1, hi: 0xb6}, + {value: 0x8e7d, lo: 0xb7, hi: 0xb9}, + {value: 0xe4f9, lo: 0xba, hi: 0xba}, + {value: 0x8edd, lo: 0xbb, hi: 0xbb}, + {value: 0xe519, lo: 0xbc, hi: 0xbf}, + // Block 0xfb, offset 0x737 + {value: 0x0020, lo: 0x10}, + {value: 0x937d, lo: 0x80, hi: 0x80}, + {value: 0xf099, lo: 0x81, hi: 0x86}, + {value: 0x939d, lo: 0x87, hi: 0x8a}, + {value: 0xd9f9, lo: 0x8b, hi: 0x8b}, + {value: 0xf159, lo: 0x8c, hi: 0x96}, + {value: 0x941d, lo: 0x97, hi: 0x97}, + {value: 0xf2b9, lo: 0x98, hi: 0xa3}, + {value: 0x943d, lo: 0xa4, hi: 0xa6}, + {value: 0xf439, lo: 0xa7, hi: 0xaa}, + {value: 0x949d, lo: 0xab, hi: 0xab}, + 
{value: 0xf4b9, lo: 0xac, hi: 0xac}, + {value: 0x94bd, lo: 0xad, hi: 0xad}, + {value: 0xf4d9, lo: 0xae, hi: 0xaf}, + {value: 0x94dd, lo: 0xb0, hi: 0xb1}, + {value: 0xf519, lo: 0xb2, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xfc, offset 0x748 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0340, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x9f}, + {value: 0x0340, lo: 0xa0, hi: 0xbf}, + // Block 0xfd, offset 0x74d + {value: 0x0000, lo: 0x01}, + {value: 0x0340, lo: 0x80, hi: 0xbf}, + // Block 0xfe, offset 0x74f + {value: 0x0000, lo: 0x01}, + {value: 0x13c0, lo: 0x80, hi: 0xbf}, + // Block 0xff, offset 0x751 + {value: 0x0000, lo: 0x02}, + {value: 0x13c0, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, +} + +// Total table size 41559 bytes (40KiB); checksum: F4A1FA4E diff --git a/fn/vendor/golang.org/x/net/idna/trie.go b/fn/vendor/golang.org/x/net/idna/trie.go new file mode 100644 index 000000000..c4ef847e7 --- /dev/null +++ b/fn/vendor/golang.org/x/net/idna/trie.go @@ -0,0 +1,72 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// appendMapping appends the mapping for the respective rune. isMapped must be +// true. A mapping is a categorization of a rune as defined in UTS #46. +func (c info) appendMapping(b []byte, s string) []byte { + index := int(c >> indexShift) + if c&xorBit == 0 { + s := mappings[index:] + return append(b, s[1:s[0]+1]...) + } + b = append(b, s...) + if c&inlineXOR == inlineXOR { + // TODO: support and handle two-byte inline masks + b[len(b)-1] ^= byte(index) + } else { + for p := len(b) - int(xorData[index]); p < len(b); p++ { + index++ + b[p] ^= xorData[index] + } + } + return b +} + +// Sparse block handling code. 
+ +type valueRange struct { + value uint16 // header: value:stride + lo, hi byte // header: lo:n +} + +type sparseBlocks struct { + values []valueRange + offset []uint16 +} + +var idnaSparse = sparseBlocks{ + values: idnaSparseValues[:], + offset: idnaSparseOffset[:], +} + +// Don't use newIdnaTrie to avoid unconditional linking in of the table. +var trie = &idnaTrie{} + +// lookup determines the type of block n and looks up the value for b. +// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block +// is a list of ranges with an accompanying value. Given a matching range r, +// the value for b is by r.value + (b - r.lo) * stride. +func (t *sparseBlocks) lookup(n uint32, b byte) uint16 { + offset := t.offset[n] + header := t.values[offset] + lo := offset + 1 + hi := lo + uint16(header.lo) + for lo < hi { + m := lo + (hi-lo)/2 + r := t.values[m] + if r.lo <= b && b <= r.hi { + return r.value + uint16(b-r.lo)*header.value + } + if b < r.lo { + hi = m + } else { + lo = m + 1 + } + } + return 0 +} diff --git a/fn/vendor/golang.org/x/net/idna/trieval.go b/fn/vendor/golang.org/x/net/idna/trieval.go new file mode 100644 index 000000000..63cb03b59 --- /dev/null +++ b/fn/vendor/golang.org/x/net/idna/trieval.go @@ -0,0 +1,114 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package idna + +// This file contains definitions for interpreting the trie value of the idna +// trie generated by "go run gen*.go". It is shared by both the generator +// program and the resultant package. Sharing is achieved by the generator +// copying gen_trieval.go to trieval.go and changing what's above this comment. + +// info holds information from the IDNA mapping table for a single rune. It is +// the value returned by a trie lookup. In most cases, all information fits in +// a 16-bit value. For mappings, this value may contain an index into a slice +// with the mapped string. 
Such mappings can consist of the actual mapped value +// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the +// input rune. This technique is used by the cases packages and reduces the +// table size significantly. +// +// The per-rune values have the following format: +// +// if mapped { +// if inlinedXOR { +// 15..13 inline XOR marker +// 12..11 unused +// 10..3 inline XOR mask +// } else { +// 15..3 index into xor or mapping table +// } +// } else { +// 15..13 unused +// 12 modifier (including virama) +// 11 virama modifier +// 10..8 joining type +// 7..3 category type +// } +// 2 use xor pattern +// 1..0 mapped category +// +// See the definitions below for a more detailed description of the various +// bits. +type info uint16 + +const ( + catSmallMask = 0x3 + catBigMask = 0xF8 + indexShift = 3 + xorBit = 0x4 // interpret the index as an xor pattern + inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined. + + joinShift = 8 + joinMask = 0x07 + + viramaModifier = 0x0800 + modifier = 0x1000 +) + +// A category corresponds to a category defined in the IDNA mapping table. +type category uint16 + +const ( + unknown category = 0 // not defined currently in unicode. 
+ mapped category = 1 + disallowedSTD3Mapped category = 2 + deviation category = 3 +) + +const ( + valid category = 0x08 + validNV8 category = 0x18 + validXV8 category = 0x28 + disallowed category = 0x40 + disallowedSTD3Valid category = 0x80 + ignored category = 0xC0 +) + +// join types and additional rune information +const ( + joiningL = (iota + 1) + joiningD + joiningT + joiningR + + //the following types are derived during processing + joinZWJ + joinZWNJ + joinVirama + numJoinTypes +) + +func (c info) isMapped() bool { + return c&0x3 != 0 +} + +func (c info) category() category { + small := c & catSmallMask + if small != 0 { + return category(small) + } + return category(c & catBigMask) +} + +func (c info) joinType() info { + if c.isMapped() { + return 0 + } + return (c >> joinShift) & joinMask +} + +func (c info) isModifier() bool { + return c&(modifier|catSmallMask) == modifier +} + +func (c info) isViramaModifier() bool { + return c&(viramaModifier|catSmallMask) == viramaModifier +} diff --git a/fn/vendor/golang.org/x/net/internal/iana/const.go b/fn/vendor/golang.org/x/net/internal/iana/const.go index 7fe88225c..c9df24d95 100644 --- a/fn/vendor/golang.org/x/net/internal/iana/const.go +++ b/fn/vendor/golang.org/x/net/internal/iana/const.go @@ -4,7 +4,7 @@ // Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA). 
package iana // import "golang.org/x/net/internal/iana" -// Differentiated Services Field Codepoints (DSCP), Updated: 2013-06-25 +// Differentiated Services Field Codepoints (DSCP), Updated: 2017-05-12 const ( DiffServCS0 = 0x0 // CS0 DiffServCS1 = 0x20 // CS1 @@ -26,7 +26,7 @@ const ( DiffServAF41 = 0x88 // AF41 DiffServAF42 = 0x90 // AF42 DiffServAF43 = 0x98 // AF43 - DiffServEFPHB = 0xb8 // EF PHB + DiffServEF = 0xb8 // EF DiffServVOICEADMIT = 0xb0 // VOICE-ADMIT ) @@ -38,7 +38,7 @@ const ( CongestionExperienced = 0x3 // CE (Congestion Experienced) ) -// Protocol Numbers, Updated: 2015-06-23 +// Protocol Numbers, Updated: 2016-06-22 const ( ProtocolIP = 0 // IPv4 encapsulation, pseudo protocol number ProtocolHOPOPT = 0 // IPv6 Hop-by-Hop Option @@ -54,7 +54,6 @@ const ( ProtocolBBNRCCMON = 10 // BBN RCC Monitoring ProtocolNVPII = 11 // Network Voice Protocol ProtocolPUP = 12 // PUP - ProtocolARGUS = 13 // ARGUS ProtocolEMCON = 14 // EMCON ProtocolXNET = 15 // Cross Net Debugger ProtocolCHAOS = 16 // Chaos diff --git a/fn/vendor/golang.org/x/net/internal/iana/gen.go b/fn/vendor/golang.org/x/net/internal/iana/gen.go index 2d8c07ca1..86c78b3bb 100644 --- a/fn/vendor/golang.org/x/net/internal/iana/gen.go +++ b/fn/vendor/golang.org/x/net/internal/iana/gen.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/fn/vendor/golang.org/x/net/internal/nettest/error_stub.go b/fn/vendor/golang.org/x/net/internal/nettest/error_stub.go deleted file mode 100644 index 3c74d812f..000000000 --- a/fn/vendor/golang.org/x/net/internal/nettest/error_stub.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build nacl plan9 - -package nettest - -func protocolNotSupported(err error) bool { - return false -} diff --git a/fn/vendor/golang.org/x/net/internal/nettest/helper_bsd.go b/fn/vendor/golang.org/x/net/internal/nettest/helper_bsd.go new file mode 100644 index 000000000..a6e433b58 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/nettest/helper_bsd.go @@ -0,0 +1,53 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package nettest + +import ( + "runtime" + "strconv" + "strings" + "syscall" +) + +var darwinVersion int + +func init() { + if runtime.GOOS == "darwin" { + // See http://support.apple.com/kb/HT1633. + s, err := syscall.Sysctl("kern.osrelease") + if err != nil { + return + } + ss := strings.Split(s, ".") + if len(ss) == 0 { + return + } + darwinVersion, _ = strconv.Atoi(ss[0]) + } +} + +func supportsIPv6MulticastDeliveryOnLoopback() bool { + switch runtime.GOOS { + case "freebsd": + // See http://www.freebsd.org/cgi/query-pr.cgi?pr=180065. + // Even after the fix, it looks like the latest + // kernels don't deliver link-local scoped multicast + // packets correctly. + return false + case "darwin": + return !causesIPv6Crash() + default: + return true + } +} + +func causesIPv6Crash() bool { + // We see some kernel crash when running IPv6 with IP-level + // options on Darwin kernel version 12 or below. + // See golang.org/issues/17015. + return darwinVersion < 13 +} diff --git a/fn/vendor/golang.org/x/net/internal/nettest/helper_nobsd.go b/fn/vendor/golang.org/x/net/internal/nettest/helper_nobsd.go new file mode 100644 index 000000000..bc7da5e0d --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/nettest/helper_nobsd.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux solaris + +package nettest + +func supportsIPv6MulticastDeliveryOnLoopback() bool { + return true +} + +func causesIPv6Crash() bool { + return false +} diff --git a/fn/vendor/golang.org/x/net/internal/nettest/error_posix.go b/fn/vendor/golang.org/x/net/internal/nettest/helper_posix.go similarity index 100% rename from fn/vendor/golang.org/x/net/internal/nettest/error_posix.go rename to fn/vendor/golang.org/x/net/internal/nettest/helper_posix.go diff --git a/fn/vendor/golang.org/x/net/internal/nettest/helper_stub.go b/fn/vendor/golang.org/x/net/internal/nettest/helper_stub.go new file mode 100644 index 000000000..ea61b6f39 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/nettest/helper_stub.go @@ -0,0 +1,32 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build nacl plan9 + +package nettest + +import ( + "fmt" + "runtime" +) + +func maxOpenFiles() int { + return defaultMaxOpenFiles +} + +func supportsRawIPSocket() (string, bool) { + return fmt.Sprintf("not supported on %s", runtime.GOOS), false +} + +func supportsIPv6MulticastDeliveryOnLoopback() bool { + return false +} + +func causesIPv6Crash() bool { + return false +} + +func protocolNotSupported(err error) bool { + return false +} diff --git a/fn/vendor/golang.org/x/net/internal/nettest/rlimit_unix.go b/fn/vendor/golang.org/x/net/internal/nettest/helper_unix.go similarity index 67% rename from fn/vendor/golang.org/x/net/internal/nettest/rlimit_unix.go rename to fn/vendor/golang.org/x/net/internal/nettest/helper_unix.go index eb4312ce3..ed13e448b 100644 --- a/fn/vendor/golang.org/x/net/internal/nettest/rlimit_unix.go +++ b/fn/vendor/golang.org/x/net/internal/nettest/helper_unix.go @@ -6,7 +6,12 @@ package nettest -import "syscall" +import ( + "fmt" + "os" + "runtime" + "syscall" +) func maxOpenFiles() int { var rlim syscall.Rlimit @@ -15,3 +20,10 @@ func maxOpenFiles() int { } return int(rlim.Cur) } + +func supportsRawIPSocket() (string, bool) { + if os.Getuid() != 0 { + return fmt.Sprintf("must be root on %s", runtime.GOOS), false + } + return "", true +} diff --git a/fn/vendor/golang.org/x/net/internal/nettest/stack_windows.go b/fn/vendor/golang.org/x/net/internal/nettest/helper_windows.go similarity index 80% rename from fn/vendor/golang.org/x/net/internal/nettest/stack_windows.go rename to fn/vendor/golang.org/x/net/internal/nettest/helper_windows.go index a21f4993e..3dcb727c9 100644 --- a/fn/vendor/golang.org/x/net/internal/nettest/stack_windows.go +++ b/fn/vendor/golang.org/x/net/internal/nettest/helper_windows.go @@ -10,9 +10,11 @@ import ( "syscall" ) -// SupportsRawIPSocket reports whether the platform supports raw IP -// sockets. 
-func SupportsRawIPSocket() (string, bool) { +func maxOpenFiles() int { + return 4 * defaultMaxOpenFiles /* actually it's 16581375 */ +} + +func supportsRawIPSocket() (string, bool) { // From http://msdn.microsoft.com/en-us/library/windows/desktop/ms740548.aspx: // Note: To use a socket of type SOCK_RAW requires administrative privileges. // Users running Winsock applications that use raw sockets must be a member of @@ -30,3 +32,11 @@ func SupportsRawIPSocket() (string, bool) { syscall.Closesocket(s) return "", true } + +func supportsIPv6MulticastDeliveryOnLoopback() bool { + return true +} + +func causesIPv6Crash() bool { + return false +} diff --git a/fn/vendor/golang.org/x/net/internal/nettest/interface.go b/fn/vendor/golang.org/x/net/internal/nettest/interface.go index 53ae13a98..8e6333afe 100644 --- a/fn/vendor/golang.org/x/net/internal/nettest/interface.go +++ b/fn/vendor/golang.org/x/net/internal/nettest/interface.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/fn/vendor/golang.org/x/net/internal/nettest/rlimit_stub.go b/fn/vendor/golang.org/x/net/internal/nettest/rlimit_stub.go deleted file mode 100644 index 102bef930..000000000 --- a/fn/vendor/golang.org/x/net/internal/nettest/rlimit_stub.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build nacl plan9 - -package nettest - -func maxOpenFiles() int { return defaultMaxOpenFiles } diff --git a/fn/vendor/golang.org/x/net/internal/nettest/rlimit_windows.go b/fn/vendor/golang.org/x/net/internal/nettest/rlimit_windows.go deleted file mode 100644 index de927b56e..000000000 --- a/fn/vendor/golang.org/x/net/internal/nettest/rlimit_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package nettest - -func maxOpenFiles() int { return 4 * defaultMaxOpenFiles /* actually it's 16581375 */ } diff --git a/fn/vendor/golang.org/x/net/internal/nettest/stack.go b/fn/vendor/golang.org/x/net/internal/nettest/stack.go index e07c015f3..cc92c035b 100644 --- a/fn/vendor/golang.org/x/net/internal/nettest/stack.go +++ b/fn/vendor/golang.org/x/net/internal/nettest/stack.go @@ -2,31 +2,52 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package nettest provides utilities for IP testing. +// Package nettest provides utilities for network testing. package nettest // import "golang.org/x/net/internal/nettest" -import "net" +import ( + "fmt" + "io/ioutil" + "net" + "os" + "runtime" +) + +var ( + supportsIPv4 bool + supportsIPv6 bool +) + +func init() { + if ln, err := net.Listen("tcp4", "127.0.0.1:0"); err == nil { + ln.Close() + supportsIPv4 = true + } + if ln, err := net.Listen("tcp6", "[::1]:0"); err == nil { + ln.Close() + supportsIPv6 = true + } +} // SupportsIPv4 reports whether the platform supports IPv4 networking // functionality. -func SupportsIPv4() bool { - ln, err := net.Listen("tcp4", "127.0.0.1:0") - if err != nil { - return false - } - ln.Close() - return true -} +func SupportsIPv4() bool { return supportsIPv4 } // SupportsIPv6 reports whether the platform supports IPv6 networking // functionality. 
-func SupportsIPv6() bool { - ln, err := net.Listen("tcp6", "[::1]:0") - if err != nil { - return false - } - ln.Close() - return true +func SupportsIPv6() bool { return supportsIPv6 } + +// SupportsRawIPSocket reports whether the platform supports raw IP +// sockets. +func SupportsRawIPSocket() (string, bool) { + return supportsRawIPSocket() +} + +// SupportsIPv6MulticastDeliveryOnLoopback reports whether the +// platform supports IPv6 multicast packet delivery on software +// loopback interface. +func SupportsIPv6MulticastDeliveryOnLoopback() bool { + return supportsIPv6MulticastDeliveryOnLoopback() } // ProtocolNotSupported reports whether err is a protocol not @@ -34,3 +55,93 @@ func SupportsIPv6() bool { func ProtocolNotSupported(err error) bool { return protocolNotSupported(err) } + +// TestableNetwork reports whether network is testable on the current +// platform configuration. +func TestableNetwork(network string) bool { + // This is based on logic from standard library's + // net/platform_test.go. + switch network { + case "unix", "unixgram": + switch runtime.GOOS { + case "android", "nacl", "plan9", "windows": + return false + } + if runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") { + return false + } + case "unixpacket": + switch runtime.GOOS { + case "android", "darwin", "freebsd", "nacl", "plan9", "windows": + return false + } + } + return true +} + +// NewLocalListener returns a listener which listens to a loopback IP +// address or local file system path. +// Network must be "tcp", "tcp4", "tcp6", "unix" or "unixpacket". 
+func NewLocalListener(network string) (net.Listener, error) { + switch network { + case "tcp": + if supportsIPv4 { + if ln, err := net.Listen("tcp4", "127.0.0.1:0"); err == nil { + return ln, nil + } + } + if supportsIPv6 { + return net.Listen("tcp6", "[::1]:0") + } + case "tcp4": + if supportsIPv4 { + return net.Listen("tcp4", "127.0.0.1:0") + } + case "tcp6": + if supportsIPv6 { + return net.Listen("tcp6", "[::1]:0") + } + case "unix", "unixpacket": + return net.Listen(network, localPath()) + } + return nil, fmt.Errorf("%s is not supported", network) +} + +// NewLocalPacketListener returns a packet listener which listens to a +// loopback IP address or local file system path. +// Network must be "udp", "udp4", "udp6" or "unixgram". +func NewLocalPacketListener(network string) (net.PacketConn, error) { + switch network { + case "udp": + if supportsIPv4 { + if c, err := net.ListenPacket("udp4", "127.0.0.1:0"); err == nil { + return c, nil + } + } + if supportsIPv6 { + return net.ListenPacket("udp6", "[::1]:0") + } + case "udp4": + if supportsIPv4 { + return net.ListenPacket("udp4", "127.0.0.1:0") + } + case "udp6": + if supportsIPv6 { + return net.ListenPacket("udp6", "[::1]:0") + } + case "unixgram": + return net.ListenPacket(network, localPath()) + } + return nil, fmt.Errorf("%s is not supported", network) +} + +func localPath() string { + f, err := ioutil.TempFile("", "nettest") + if err != nil { + panic(err) + } + path := f.Name() + f.Close() + os.Remove(path) + return path +} diff --git a/fn/vendor/golang.org/x/net/internal/nettest/stack_stub.go b/fn/vendor/golang.org/x/net/internal/nettest/stack_stub.go deleted file mode 100644 index 1b5fde1a3..000000000 --- a/fn/vendor/golang.org/x/net/internal/nettest/stack_stub.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build nacl plan9 - -package nettest - -import ( - "fmt" - "runtime" -) - -// SupportsRawIPSocket reports whether the platform supports raw IP -// sockets. -func SupportsRawIPSocket() (string, bool) { - return fmt.Sprintf("not supported on %s", runtime.GOOS), false -} diff --git a/fn/vendor/golang.org/x/net/internal/nettest/stack_unix.go b/fn/vendor/golang.org/x/net/internal/nettest/stack_unix.go deleted file mode 100644 index af89229f4..000000000 --- a/fn/vendor/golang.org/x/net/internal/nettest/stack_unix.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package nettest - -import ( - "fmt" - "os" - "runtime" -) - -// SupportsRawIPSocket reports whether the platform supports raw IP -// sockets. -func SupportsRawIPSocket() (string, bool) { - if os.Getuid() != 0 { - return fmt.Sprintf("must be root on %s", runtime.GOOS), false - } - return "", true -} diff --git a/fn/vendor/golang.org/x/net/internal/socket/cmsghdr.go b/fn/vendor/golang.org/x/net/internal/socket/cmsghdr.go new file mode 100644 index 000000000..1eb07d26d --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/cmsghdr.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket + +func (h *cmsghdr) len() int { return int(h.Len) } +func (h *cmsghdr) lvl() int { return int(h.Level) } +func (h *cmsghdr) typ() int { return int(h.Type) } diff --git a/fn/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go b/fn/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go new file mode 100644 index 000000000..d1d0c2de5 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go @@ -0,0 +1,13 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go b/fn/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go new file mode 100644 index 000000000..bac66811d --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build linux + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go b/fn/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go new file mode 100644 index 000000000..63f0534fa --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build linux + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint64(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go b/fn/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go new file mode 100644 index 000000000..7dedd430e --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 +// +build solaris + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go b/fn/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go new file mode 100644 index 000000000..a4e71226f --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type cmsghdr struct{} + +const sizeofCmsghdr = 0 + +func (h *cmsghdr) len() int { return 0 } +func (h *cmsghdr) lvl() int { return 0 } +func (h *cmsghdr) typ() int { return 0 } + +func (h *cmsghdr) set(l, lvl, typ int) {} diff --git a/fn/vendor/golang.org/x/net/internal/socket/defs_darwin.go b/fn/vendor/golang.org/x/net/internal/socket/defs_darwin.go new file mode 100644 index 000000000..14e28c0b4 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/defs_darwin.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go b/fn/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go new file mode 100644 index 000000000..14e28c0b4 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/defs_freebsd.go b/fn/vendor/golang.org/x/net/internal/socket/defs_freebsd.go new file mode 100644 index 000000000..14e28c0b4 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/defs_freebsd.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/defs_linux.go b/fn/vendor/golang.org/x/net/internal/socket/defs_linux.go new file mode 100644 index 000000000..ce9ec2f6d --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/defs_linux.go @@ -0,0 +1,49 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include +#include + +#define _GNU_SOURCE +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type mmsghdr C.struct_mmsghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofMmsghdr = C.sizeof_struct_mmsghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/defs_netbsd.go b/fn/vendor/golang.org/x/net/internal/socket/defs_netbsd.go new file mode 100644 index 000000000..3f8433569 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/defs_netbsd.go @@ -0,0 +1,47 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type mmsghdr C.struct_mmsghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofMmsghdr = C.sizeof_struct_mmsghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/defs_openbsd.go b/fn/vendor/golang.org/x/net/internal/socket/defs_openbsd.go new file mode 100644 index 000000000..14e28c0b4 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/defs_openbsd.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/defs_solaris.go b/fn/vendor/golang.org/x/net/internal/socket/defs_solaris.go new file mode 100644 index 000000000..14e28c0b4 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/defs_solaris.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/error_unix.go b/fn/vendor/golang.org/x/net/internal/socket/error_unix.go new file mode 100644 index 000000000..93dff9180 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/error_unix.go @@ -0,0 +1,31 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket + +import "syscall" + +var ( + errEAGAIN error = syscall.EAGAIN + errEINVAL error = syscall.EINVAL + errENOENT error = syscall.ENOENT +) + +// errnoErr returns common boxed Errno values, to prevent allocations +// at runtime. 
+func errnoErr(errno syscall.Errno) error { + switch errno { + case 0: + return nil + case syscall.EAGAIN: + return errEAGAIN + case syscall.EINVAL: + return errEINVAL + case syscall.ENOENT: + return errENOENT + } + return errno +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/error_windows.go b/fn/vendor/golang.org/x/net/internal/socket/error_windows.go new file mode 100644 index 000000000..6a6379a8b --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/error_windows.go @@ -0,0 +1,26 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import "syscall" + +var ( + errERROR_IO_PENDING error = syscall.ERROR_IO_PENDING + errEINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent allocations +// at runtime. +func errnoErr(errno syscall.Errno) error { + switch errno { + case 0: + return nil + case syscall.ERROR_IO_PENDING: + return errERROR_IO_PENDING + case syscall.EINVAL: + return errEINVAL + } + return errno +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/iovec_32bit.go b/fn/vendor/golang.org/x/net/internal/socket/iovec_32bit.go new file mode 100644 index 000000000..d6a570c90 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/iovec_32bit.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build arm mips mipsle 386 +// +build darwin dragonfly freebsd linux netbsd openbsd + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + v.Base = (*byte)(unsafe.Pointer(&b[0])) + v.Len = uint32(len(b)) +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/iovec_64bit.go b/fn/vendor/golang.org/x/net/internal/socket/iovec_64bit.go new file mode 100644 index 000000000..2ae435e64 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/iovec_64bit.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build darwin dragonfly freebsd linux netbsd openbsd + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + v.Base = (*byte)(unsafe.Pointer(&b[0])) + v.Len = uint64(len(b)) +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go b/fn/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go new file mode 100644 index 000000000..100a62820 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 +// +build solaris + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + v.Base = (*int8)(unsafe.Pointer(&b[0])) + v.Len = uint64(len(b)) +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/iovec_stub.go b/fn/vendor/golang.org/x/net/internal/socket/iovec_stub.go new file mode 100644 index 000000000..c87d2a933 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/iovec_stub.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type iovec struct{} + +func (v *iovec) set(b []byte) {} diff --git a/fn/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go b/fn/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go new file mode 100644 index 000000000..2e80a9cb7 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux,!netbsd + +package socket + +import "net" + +type mmsghdr struct{} + +type mmsghdrs []mmsghdr + +func (hs mmsghdrs) pack(ms []Message, parseFn func([]byte, string) (net.Addr, error), marshalFn func(net.Addr) []byte) error { + return nil +} + +func (hs mmsghdrs) unpack(ms []Message, parseFn func([]byte, string) (net.Addr, error), hint string) error { + return nil +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go b/fn/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go new file mode 100644 index 000000000..3c42ea7ad --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go @@ -0,0 +1,42 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux netbsd + +package socket + +import "net" + +type mmsghdrs []mmsghdr + +func (hs mmsghdrs) pack(ms []Message, parseFn func([]byte, string) (net.Addr, error), marshalFn func(net.Addr) []byte) error { + for i := range hs { + vs := make([]iovec, len(ms[i].Buffers)) + var sa []byte + if parseFn != nil { + sa = make([]byte, sizeofSockaddrInet6) + } + if marshalFn != nil { + sa = marshalFn(ms[i].Addr) + } + hs[i].Hdr.pack(vs, ms[i].Buffers, ms[i].OOB, sa) + } + return nil +} + +func (hs mmsghdrs) unpack(ms []Message, parseFn func([]byte, string) (net.Addr, error), hint string) error { + for i := range hs { + ms[i].N = int(hs[i].Len) + ms[i].NN = hs[i].Hdr.controllen() + ms[i].Flags = hs[i].Hdr.flags() + if parseFn != nil { + var err error + ms[i].Addr, err = parseFn(hs[i].Hdr.name(), hint) + if err != nil { + return err + } + } + } + return nil +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go b/fn/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go new file mode 100644 index 000000000..5567afc88 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go @@ -0,0 +1,39 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd netbsd openbsd + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + h.setIov(vs) + if len(oob) > 0 { + h.Control = (*byte)(unsafe.Pointer(&oob[0])) + h.Controllen = uint32(len(oob)) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) name() []byte { + if h.Name != nil && h.Namelen > 0 { + return (*[sizeofSockaddrInet6]byte)(unsafe.Pointer(h.Name))[:h.Namelen] + } + return nil +} + +func (h *msghdr) controllen() int { + return int(h.Controllen) +} + +func (h *msghdr) flags() int { + return int(h.Flags) +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go b/fn/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go new file mode 100644 index 000000000..3fcb04280 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go @@ -0,0 +1,12 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd + +package socket + +func (h *msghdr) setIov(vs []iovec) { + h.Iov = &vs[0] + h.Iovlen = int32(len(vs)) +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/msghdr_linux.go b/fn/vendor/golang.org/x/net/internal/socket/msghdr_linux.go new file mode 100644 index 000000000..5a38798cc --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/msghdr_linux.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + h.setIov(vs) + if len(oob) > 0 { + h.setControl(oob) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) name() []byte { + if h.Name != nil && h.Namelen > 0 { + return (*[sizeofSockaddrInet6]byte)(unsafe.Pointer(h.Name))[:h.Namelen] + } + return nil +} + +func (h *msghdr) controllen() int { + return int(h.Controllen) +} + +func (h *msghdr) flags() int { + return int(h.Flags) +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go b/fn/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go new file mode 100644 index 000000000..9f671aec0 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build linux + +package socket + +import "unsafe" + +func (h *msghdr) setIov(vs []iovec) { + h.Iov = &vs[0] + h.Iovlen = uint32(len(vs)) +} + +func (h *msghdr) setControl(b []byte) { + h.Control = (*byte)(unsafe.Pointer(&b[0])) + h.Controllen = uint32(len(b)) +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go b/fn/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go new file mode 100644 index 000000000..9f7870621 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build linux + +package socket + +import "unsafe" + +func (h *msghdr) setIov(vs []iovec) { + h.Iov = &vs[0] + h.Iovlen = uint64(len(vs)) +} + +func (h *msghdr) setControl(b []byte) { + h.Control = (*byte)(unsafe.Pointer(&b[0])) + h.Controllen = uint64(len(b)) +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go b/fn/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go new file mode 100644 index 000000000..be354ff84 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +func (h *msghdr) setIov(vs []iovec) { + h.Iov = &vs[0] + h.Iovlen = uint32(len(vs)) +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go b/fn/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go new file mode 100644 index 000000000..d1b059397 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go @@ -0,0 +1,34 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build amd64 +// +build solaris + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + h.Iov = &vs[0] + h.Iovlen = int32(len(vs)) + if len(oob) > 0 { + h.Accrights = (*int8)(unsafe.Pointer(&oob[0])) + h.Accrightslen = int32(len(oob)) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) controllen() int { + return int(h.Accrightslen) +} + +func (h *msghdr) flags() int { + return int(NativeEndian.Uint32(h.Pad_cgo_2[:])) +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/msghdr_stub.go b/fn/vendor/golang.org/x/net/internal/socket/msghdr_stub.go new file mode 100644 index 000000000..64e817335 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/msghdr_stub.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type msghdr struct{} + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) {} +func (h *msghdr) name() []byte { return nil } +func (h *msghdr) controllen() int { return 0 } +func (h *msghdr) flags() int { return 0 } diff --git a/fn/vendor/golang.org/x/net/internal/socket/rawconn.go b/fn/vendor/golang.org/x/net/internal/socket/rawconn.go new file mode 100644 index 000000000..d6871d55f --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/rawconn.go @@ -0,0 +1,66 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package socket + +import ( + "errors" + "net" + "os" + "syscall" +) + +// A Conn represents a raw connection. 
+type Conn struct { + network string + c syscall.RawConn +} + +// NewConn returns a new raw connection. +func NewConn(c net.Conn) (*Conn, error) { + var err error + var cc Conn + switch c := c.(type) { + case *net.TCPConn: + cc.network = "tcp" + cc.c, err = c.SyscallConn() + case *net.UDPConn: + cc.network = "udp" + cc.c, err = c.SyscallConn() + case *net.IPConn: + cc.network = "ip" + cc.c, err = c.SyscallConn() + default: + return nil, errors.New("unknown connection type") + } + if err != nil { + return nil, err + } + return &cc, nil +} + +func (o *Option) get(c *Conn, b []byte) (int, error) { + var operr error + var n int + fn := func(s uintptr) { + n, operr = getsockopt(s, o.Level, o.Name, b) + } + if err := c.c.Control(fn); err != nil { + return 0, err + } + return n, os.NewSyscallError("getsockopt", operr) +} + +func (o *Option) set(c *Conn, b []byte) error { + var operr error + fn := func(s uintptr) { + operr = setsockopt(s, o.Level, o.Name, b) + } + if err := c.c.Control(fn); err != nil { + return err + } + return os.NewSyscallError("setsockopt", operr) +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go b/fn/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go new file mode 100644 index 000000000..499164a3f --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go @@ -0,0 +1,74 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.9 +// +build linux + +package socket + +import ( + "net" + "os" + "syscall" +) + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + hs := make(mmsghdrs, len(ms)) + var parseFn func([]byte, string) (net.Addr, error) + if c.network != "tcp" { + parseFn = parseInetAddr + } + if err := hs.pack(ms, parseFn, nil); err != nil { + return 0, err + } + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = recvmmsg(s, hs, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Read(fn); err != nil { + return n, err + } + if operr != nil { + return n, os.NewSyscallError("recvmmsg", operr) + } + if err := hs[:n].unpack(ms[:n], parseFn, c.network); err != nil { + return n, err + } + return n, nil +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + hs := make(mmsghdrs, len(ms)) + var marshalFn func(net.Addr) []byte + if c.network != "tcp" { + marshalFn = marshalInetAddr + } + if err := hs.pack(ms, nil, marshalFn); err != nil { + return 0, err + } + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = sendmmsg(s, hs, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Write(fn); err != nil { + return n, err + } + if operr != nil { + return n, os.NewSyscallError("sendmmsg", operr) + } + if err := hs[:n].unpack(ms[:n], nil, ""); err != nil { + return n, err + } + return n, nil +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/rawconn_msg.go b/fn/vendor/golang.org/x/net/internal/socket/rawconn_msg.go new file mode 100644 index 000000000..b21d2e641 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/rawconn_msg.go @@ -0,0 +1,77 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.9 +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket + +import ( + "os" + "syscall" +) + +func (c *Conn) recvMsg(m *Message, flags int) error { + var h msghdr + vs := make([]iovec, len(m.Buffers)) + var sa []byte + if c.network != "tcp" { + sa = make([]byte, sizeofSockaddrInet6) + } + h.pack(vs, m.Buffers, m.OOB, sa) + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = recvmsg(s, &h, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Read(fn); err != nil { + return err + } + if operr != nil { + return os.NewSyscallError("recvmsg", operr) + } + if c.network != "tcp" { + var err error + m.Addr, err = parseInetAddr(sa[:], c.network) + if err != nil { + return err + } + } + m.N = n + m.NN = h.controllen() + m.Flags = h.flags() + return nil +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + var h msghdr + vs := make([]iovec, len(m.Buffers)) + var sa []byte + if m.Addr != nil { + sa = marshalInetAddr(m.Addr) + } + h.pack(vs, m.Buffers, m.OOB, sa) + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = sendmsg(s, &h, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Write(fn); err != nil { + return err + } + if operr != nil { + return os.NewSyscallError("sendmsg", operr) + } + m.N = n + m.NN = len(m.OOB) + return nil +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go b/fn/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go new file mode 100644 index 000000000..f78832aa4 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go @@ -0,0 +1,18 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.9 +// +build !linux + +package socket + +import "errors" + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go b/fn/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go new file mode 100644 index 000000000..96733cbe1 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go @@ -0,0 +1,18 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package socket + +import "errors" + +func (c *Conn) recvMsg(m *Message, flags int) error { + return errors.New("not implemented") +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + return errors.New("not implemented") +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/rawconn_stub.go b/fn/vendor/golang.org/x/net/internal/socket/rawconn_stub.go new file mode 100644 index 000000000..d2add1a0a --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/rawconn_stub.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.9 + +package socket + +import "errors" + +func (c *Conn) recvMsg(m *Message, flags int) error { + return errors.New("not implemented") +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + return errors.New("not implemented") +} + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/reflect.go b/fn/vendor/golang.org/x/net/internal/socket/reflect.go new file mode 100644 index 000000000..bb179f11d --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/reflect.go @@ -0,0 +1,62 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package socket + +import ( + "errors" + "net" + "os" + "reflect" + "runtime" +) + +// A Conn represents a raw connection. +type Conn struct { + c net.Conn +} + +// NewConn returns a new raw connection. 
+func NewConn(c net.Conn) (*Conn, error) { + return &Conn{c: c}, nil +} + +func (o *Option) get(c *Conn, b []byte) (int, error) { + s, err := socketOf(c.c) + if err != nil { + return 0, err + } + n, err := getsockopt(s, o.Level, o.Name, b) + return n, os.NewSyscallError("getsockopt", err) +} + +func (o *Option) set(c *Conn, b []byte) error { + s, err := socketOf(c.c) + if err != nil { + return err + } + return os.NewSyscallError("setsockopt", setsockopt(s, o.Level, o.Name, b)) +} + +func socketOf(c net.Conn) (uintptr, error) { + switch c.(type) { + case *net.TCPConn, *net.UDPConn, *net.IPConn: + v := reflect.ValueOf(c) + switch e := v.Elem(); e.Kind() { + case reflect.Struct: + fd := e.FieldByName("conn").FieldByName("fd") + switch e := fd.Elem(); e.Kind() { + case reflect.Struct: + sysfd := e.FieldByName("sysfd") + if runtime.GOOS == "windows" { + return uintptr(sysfd.Uint()), nil + } + return uintptr(sysfd.Int()), nil + } + } + } + return 0, errors.New("invalid type") +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/socket.go b/fn/vendor/golang.org/x/net/internal/socket/socket.go new file mode 100644 index 000000000..729dea14b --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/socket.go @@ -0,0 +1,285 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package socket provides a portable interface for socket system +// calls. +package socket // import "golang.org/x/net/internal/socket" + +import ( + "errors" + "net" + "unsafe" +) + +// An Option represents a sticky socket option. +type Option struct { + Level int // level + Name int // name; must be equal or greater than 1 + Len int // length of value in bytes; must be equal or greater than 1 +} + +// Get reads a value for the option from the kernel. +// It returns the number of bytes written into b. 
+func (o *Option) Get(c *Conn, b []byte) (int, error) { + if o.Name < 1 || o.Len < 1 { + return 0, errors.New("invalid option") + } + if len(b) < o.Len { + return 0, errors.New("short buffer") + } + return o.get(c, b) +} + +// GetInt returns an integer value for the option. +// +// The Len field of Option must be either 1 or 4. +func (o *Option) GetInt(c *Conn) (int, error) { + if o.Len != 1 && o.Len != 4 { + return 0, errors.New("invalid option") + } + var b []byte + var bb [4]byte + if o.Len == 1 { + b = bb[:1] + } else { + b = bb[:4] + } + n, err := o.get(c, b) + if err != nil { + return 0, err + } + if n != o.Len { + return 0, errors.New("invalid option length") + } + if o.Len == 1 { + return int(b[0]), nil + } + return int(NativeEndian.Uint32(b[:4])), nil +} + +// Set writes the option and value to the kernel. +func (o *Option) Set(c *Conn, b []byte) error { + if o.Name < 1 || o.Len < 1 { + return errors.New("invalid option") + } + if len(b) < o.Len { + return errors.New("short buffer") + } + return o.set(c, b) +} + +// SetInt writes the option and value to the kernel. +// +// The Len field of Option must be either 1 or 4. +func (o *Option) SetInt(c *Conn, v int) error { + if o.Len != 1 && o.Len != 4 { + return errors.New("invalid option") + } + var b []byte + if o.Len == 1 { + b = []byte{byte(v)} + } else { + var bb [4]byte + NativeEndian.PutUint32(bb[:o.Len], uint32(v)) + b = bb[:4] + } + return o.set(c, b) +} + +func controlHeaderLen() int { + return roundup(sizeofCmsghdr) +} + +func controlMessageLen(dataLen int) int { + return roundup(sizeofCmsghdr) + dataLen +} + +// ControlMessageSpace returns the whole length of control message. +func ControlMessageSpace(dataLen int) int { + return roundup(sizeofCmsghdr) + roundup(dataLen) +} + +// A ControlMessage represents the head message in a stream of control +// messages. +// +// A control message comprises of a header, data and a few padding +// fields to conform to the interface to the kernel. 
+// +// See RFC 3542 for further information. +type ControlMessage []byte + +// Data returns the data field of the control message at the head on +// w. +func (m ControlMessage) Data(dataLen int) []byte { + l := controlHeaderLen() + if len(m) < l || len(m) < l+dataLen { + return nil + } + return m[l : l+dataLen] +} + +// Next returns the control message at the next on w. +// +// Next works only for standard control messages. +func (m ControlMessage) Next(dataLen int) ControlMessage { + l := ControlMessageSpace(dataLen) + if len(m) < l { + return nil + } + return m[l:] +} + +// MarshalHeader marshals the header fields of the control message at +// the head on w. +func (m ControlMessage) MarshalHeader(lvl, typ, dataLen int) error { + if len(m) < controlHeaderLen() { + return errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + h.set(controlMessageLen(dataLen), lvl, typ) + return nil +} + +// ParseHeader parses and returns the header fields of the control +// message at the head on w. +func (m ControlMessage) ParseHeader() (lvl, typ, dataLen int, err error) { + l := controlHeaderLen() + if len(m) < l { + return 0, 0, 0, errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + return h.lvl(), h.typ(), int(uint64(h.len()) - uint64(l)), nil +} + +// Marshal marshals the control message at the head on w, and returns +// the next control message. +func (m ControlMessage) Marshal(lvl, typ int, data []byte) (ControlMessage, error) { + l := len(data) + if len(m) < ControlMessageSpace(l) { + return nil, errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + h.set(controlMessageLen(l), lvl, typ) + if l > 0 { + copy(m.Data(l), data) + } + return m.Next(l), nil +} + +// Parse parses w as a single or multiple control messages. +// +// Parse works for both standard and compatible messages. 
+func (m ControlMessage) Parse() ([]ControlMessage, error) { + var ms []ControlMessage + for len(m) >= controlHeaderLen() { + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + l := h.len() + if l <= 0 { + return nil, errors.New("invalid header length") + } + if uint64(l) < uint64(controlHeaderLen()) { + return nil, errors.New("invalid message length") + } + if uint64(l) > uint64(len(m)) { + return nil, errors.New("short buffer") + } + // On message reception: + // + // |<- ControlMessageSpace --------------->| + // |<- controlMessageLen ---------->| | + // |<- controlHeaderLen ->| | | + // +---------------+------+---------+------+ + // | Header | PadH | Data | PadD | + // +---------------+------+---------+------+ + // + // On compatible message reception: + // + // | ... |<- controlMessageLen ----------->| + // | ... |<- controlHeaderLen ->| | + // +-----+---------------+------+----------+ + // | ... | Header | PadH | Data | + // +-----+---------------+------+----------+ + ms = append(ms, ControlMessage(m[:l])) + ll := l - controlHeaderLen() + if len(m) >= ControlMessageSpace(ll) { + m = m[ControlMessageSpace(ll):] + } else { + m = m[controlMessageLen(ll):] + } + } + return ms, nil +} + +// NewControlMessage returns a new stream of control messages. +func NewControlMessage(dataLen []int) ControlMessage { + var l int + for i := range dataLen { + l += ControlMessageSpace(dataLen[i]) + } + return make([]byte, l) +} + +// A Message represents an IO message. +type Message struct { + // When writing, the Buffers field must contain at least one + // byte to write. + // When reading, the Buffers field will always contain a byte + // to read. + Buffers [][]byte + + // OOB contains protocol-specific control or miscellaneous + // ancillary data known as out-of-band data. + OOB []byte + + // Addr specifies a destination address when writing. + // It can be nil when the underlying protocol of the raw + // connection uses connection-oriented communication. 
+ // After a successful read, it may contain the source address + // on the received packet. + Addr net.Addr + + N int // # of bytes read or written from/to Buffers + NN int // # of bytes read or written from/to OOB + Flags int // protocol-specific information on the received message +} + +// RecvMsg wraps recvmsg system call. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +func (c *Conn) RecvMsg(m *Message, flags int) error { + return c.recvMsg(m, flags) +} + +// SendMsg wraps sendmsg system call. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +func (c *Conn) SendMsg(m *Message, flags int) error { + return c.sendMsg(m, flags) +} + +// RecvMsgs wraps recvmmsg system call. +// +// It returns the number of processed messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// Only Linux supports this. +func (c *Conn) RecvMsgs(ms []Message, flags int) (int, error) { + return c.recvMsgs(ms, flags) +} + +// SendMsgs wraps sendmmsg system call. +// +// It returns the number of processed messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// Only Linux supports this. +func (c *Conn) SendMsgs(ms []Message, flags int) (int, error) { + return c.sendMsgs(ms, flags) +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/socket_go1_9_test.go b/fn/vendor/golang.org/x/net/internal/socket/socket_go1_9_test.go new file mode 100644 index 000000000..109fed762 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/socket_go1_9_test.go @@ -0,0 +1,256 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.9 +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/internal/socket" +) + +type mockControl struct { + Level int + Type int + Data []byte +} + +func TestControlMessage(t *testing.T) { + for _, tt := range []struct { + cs []mockControl + }{ + { + []mockControl{ + {Level: 1, Type: 1}, + }, + }, + { + []mockControl{ + {Level: 2, Type: 2, Data: []byte{0xfe}}, + }, + }, + { + []mockControl{ + {Level: 3, Type: 3, Data: []byte{0xfe, 0xff, 0xff, 0xfe}}, + }, + }, + { + []mockControl{ + {Level: 4, Type: 4, Data: []byte{0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe}}, + }, + }, + { + []mockControl{ + {Level: 4, Type: 4, Data: []byte{0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe}}, + {Level: 2, Type: 2, Data: []byte{0xfe}}, + }, + }, + } { + var w []byte + var tailPadLen int + mm := socket.NewControlMessage([]int{0}) + for i, c := range tt.cs { + m := socket.NewControlMessage([]int{len(c.Data)}) + l := len(m) - len(mm) + if i == len(tt.cs)-1 && l > len(c.Data) { + tailPadLen = l - len(c.Data) + } + w = append(w, m...) + } + + var err error + ww := make([]byte, len(w)) + copy(ww, w) + m := socket.ControlMessage(ww) + for _, c := range tt.cs { + if err = m.MarshalHeader(c.Level, c.Type, len(c.Data)); err != nil { + t.Fatalf("(%v).MarshalHeader() = %v", tt.cs, err) + } + copy(m.Data(len(c.Data)), c.Data) + m = m.Next(len(c.Data)) + } + m = socket.ControlMessage(w) + for _, c := range tt.cs { + m, err = m.Marshal(c.Level, c.Type, c.Data) + if err != nil { + t.Fatalf("(%v).Marshal() = %v", tt.cs, err) + } + } + if !bytes.Equal(ww, w) { + t.Fatalf("got %#v; want %#v", ww, w) + } + + ws := [][]byte{w} + if tailPadLen > 0 { + // Test a message with no tail padding. + nopad := w[:len(w)-tailPadLen] + ws = append(ws, [][]byte{nopad}...) 
+ } + for _, w := range ws { + ms, err := socket.ControlMessage(w).Parse() + if err != nil { + t.Fatalf("(%v).Parse() = %v", tt.cs, err) + } + for i, m := range ms { + lvl, typ, dataLen, err := m.ParseHeader() + if err != nil { + t.Fatalf("(%v).ParseHeader() = %v", tt.cs, err) + } + if lvl != tt.cs[i].Level || typ != tt.cs[i].Type || dataLen != len(tt.cs[i].Data) { + t.Fatalf("%v: got %d, %d, %d; want %d, %d, %d", tt.cs[i], lvl, typ, dataLen, tt.cs[i].Level, tt.cs[i].Type, len(tt.cs[i].Data)) + } + } + } + } +} + +func TestUDP(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + + t.Run("Message", func(t *testing.T) { + testUDPMessage(t, c.(net.Conn)) + }) + switch runtime.GOOS { + case "linux": + t.Run("Messages", func(t *testing.T) { + testUDPMessages(t, c.(net.Conn)) + }) + } +} + +func testUDPMessage(t *testing.T, c net.Conn) { + cc, err := socket.NewConn(c) + if err != nil { + t.Fatal(err) + } + data := []byte("HELLO-R-U-THERE") + wm := socket.Message{ + Buffers: bytes.SplitAfter(data, []byte("-")), + Addr: c.LocalAddr(), + } + if err := cc.SendMsg(&wm, 0); err != nil { + t.Fatal(err) + } + b := make([]byte, 32) + rm := socket.Message{ + Buffers: [][]byte{b[:1], b[1:3], b[3:7], b[7:11], b[11:]}, + } + if err := cc.RecvMsg(&rm, 0); err != nil { + t.Fatal(err) + } + if !bytes.Equal(b[:rm.N], data) { + t.Fatalf("got %#v; want %#v", b[:rm.N], data) + } +} + +func testUDPMessages(t *testing.T, c net.Conn) { + cc, err := socket.NewConn(c) + if err != nil { + t.Fatal(err) + } + data := []byte("HELLO-R-U-THERE") + wmbs := bytes.SplitAfter(data, []byte("-")) + wms := []socket.Message{ + {Buffers: wmbs[:1], Addr: c.LocalAddr()}, + {Buffers: wmbs[1:], Addr: c.LocalAddr()}, + } + n, err := cc.SendMsgs(wms, 0) + if err != nil { + t.Fatal(err) + } + if n != len(wms) { + t.Fatalf("got %d; want %d", n, len(wms)) + } + b := make([]byte, 32) + rmbs 
:= [][][]byte{{b[:len(wmbs[0])]}, {b[len(wmbs[0]):]}} + rms := []socket.Message{ + {Buffers: rmbs[0]}, + {Buffers: rmbs[1]}, + } + n, err = cc.RecvMsgs(rms, 0) + if err != nil { + t.Fatal(err) + } + if n != len(rms) { + t.Fatalf("got %d; want %d", n, len(rms)) + } + nn := 0 + for i := 0; i < n; i++ { + nn += rms[i].N + } + if !bytes.Equal(b[:nn], data) { + t.Fatalf("got %#v; want %#v", b[:nn], data) + } +} + +func BenchmarkUDP(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + cc, err := socket.NewConn(c.(net.Conn)) + if err != nil { + b.Fatal(err) + } + data := []byte("HELLO-R-U-THERE") + wm := socket.Message{ + Buffers: [][]byte{data}, + Addr: c.LocalAddr(), + } + rm := socket.Message{ + Buffers: [][]byte{make([]byte, 128)}, + OOB: make([]byte, 128), + } + + for M := 1; M <= 1<<9; M = M << 1 { + b.Run(fmt.Sprintf("Iter-%d", M), func(b *testing.B) { + for i := 0; i < b.N; i++ { + for j := 0; j < M; j++ { + if err := cc.SendMsg(&wm, 0); err != nil { + b.Fatal(err) + } + if err := cc.RecvMsg(&rm, 0); err != nil { + b.Fatal(err) + } + } + } + }) + switch runtime.GOOS { + case "linux": + wms := make([]socket.Message, M) + for i := range wms { + wms[i].Buffers = [][]byte{data} + wms[i].Addr = c.LocalAddr() + } + rms := make([]socket.Message, M) + for i := range rms { + rms[i].Buffers = [][]byte{make([]byte, 128)} + rms[i].OOB = make([]byte, 128) + } + b.Run(fmt.Sprintf("Batch-%d", M), func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := cc.SendMsgs(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := cc.RecvMsgs(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + } + } +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/socket_test.go b/fn/vendor/golang.org/x/net/internal/socket/socket_test.go new file mode 100644 index 000000000..bf3751b5e --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/socket_test.go 
@@ -0,0 +1,46 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket_test + +import ( + "net" + "runtime" + "syscall" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/internal/socket" +) + +func TestSocket(t *testing.T) { + t.Run("Option", func(t *testing.T) { + testSocketOption(t, &socket.Option{Level: syscall.SOL_SOCKET, Name: syscall.SO_RCVBUF, Len: 4}) + }) +} + +func testSocketOption(t *testing.T, so *socket.Option) { + c, err := nettest.NewLocalPacketListener("udp") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + cc, err := socket.NewConn(c.(net.Conn)) + if err != nil { + t.Fatal(err) + } + const N = 2048 + if err := so.SetInt(cc, N); err != nil { + t.Fatal(err) + } + n, err := so.GetInt(cc) + if err != nil { + t.Fatal(err) + } + if n < N { + t.Fatalf("got %d; want greater than or equal to %d", n, N) + } +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys.go b/fn/vendor/golang.org/x/net/internal/socket/sys.go new file mode 100644 index 000000000..4f0eead13 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "encoding/binary" + "unsafe" +) + +var ( + // NativeEndian is the machine native endian implementation of + // ByteOrder. 
+ NativeEndian binary.ByteOrder + + kernelAlign int +) + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + NativeEndian = binary.LittleEndian + } else { + NativeEndian = binary.BigEndian + } + kernelAlign = probeProtocolStack() +} + +func roundup(l int) int { + return (l + kernelAlign - 1) & ^(kernelAlign - 1) +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_bsd.go b/fn/vendor/golang.org/x/net/internal/socket/sys_bsd.go new file mode 100644 index 000000000..f13e14ff3 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_bsd.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd openbsd + +package socket + +import "errors" + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go b/fn/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go new file mode 100644 index 000000000..f723fa36a --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd netbsd openbsd + +package socket + +import "unsafe" + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_darwin.go b/fn/vendor/golang.org/x/net/internal/socket/sys_darwin.go new file mode 100644 index 000000000..b17d223bf --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_darwin.go @@ -0,0 +1,7 @@ +// Copyright 2017 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +func probeProtocolStack() int { return 4 } diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go b/fn/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go new file mode 100644 index 000000000..b17d223bf --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go @@ -0,0 +1,7 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +func probeProtocolStack() int { return 4 } diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_linux.go b/fn/vendor/golang.org/x/net/internal/socket/sys_linux.go new file mode 100644 index 000000000..1559521e0 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_linux.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux,!s390x,!386 + +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_linux_386.go b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_386.go new file mode 100644 index 000000000..235b2cc08 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_386.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { return 4 } + +const ( + sysSETSOCKOPT = 0xe + sysGETSOCKOPT = 0xf + sysSENDMSG = 0x10 + sysRECVMSG = 0x11 + sysRECVMMSG = 0x13 + sysSENDMMSG = 0x14 +) + +func socketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) +func rawsocketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/fn/vendor/golang.org/x/net/ipv6/thunk_linux_386.s b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_386.s similarity index 59% rename from fn/vendor/golang.org/x/net/ipv6/thunk_linux_386.s rename to fn/vendor/golang.org/x/net/internal/socket/sys_linux_386.s index 
daa78bc02..93e7d75ec 100644 --- a/fn/vendor/golang.org/x/net/ipv6/thunk_linux_386.s +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_386.s @@ -2,7 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.2 +#include "textflag.h" -TEXT ·socketcall(SB),4,$0-36 +TEXT ·socketcall(SB),NOSPLIT,$0-36 JMP syscall·socketcall(SB) + +TEXT ·rawsocketcall(SB),NOSPLIT,$0-36 + JMP syscall·rawsocketcall(SB) diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go new file mode 100644 index 000000000..9decee2e5 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x12b + sysSENDMMSG = 0x133 +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go new file mode 100644 index 000000000..d753b436d --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x16d + sysSENDMMSG = 0x176 +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go new file mode 100644 index 000000000..b67089436 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +const ( + sysRECVMMSG = 0xf3 + sysSENDMMSG = 0x10d +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go new file mode 100644 index 000000000..9c0d74014 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x10ef + sysSENDMMSG = 0x10f7 +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go new file mode 100644 index 000000000..071a4aba8 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x14ae + sysSENDMMSG = 0x14b6 +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go new file mode 100644 index 000000000..071a4aba8 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x14ae + sysSENDMMSG = 0x14b6 +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go new file mode 100644 index 000000000..9c0d74014 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x10ef + sysSENDMMSG = 0x10f7 +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go new file mode 100644 index 000000000..21c1e3f00 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x157 + sysSENDMMSG = 0x15d +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go new file mode 100644 index 000000000..21c1e3f00 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x157 + sysSENDMMSG = 0x15d +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go new file mode 100644 index 000000000..327979efb --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { return 8 } + +const ( + sysSETSOCKOPT = 0xe + sysGETSOCKOPT = 0xf + sysSENDMSG = 0x10 + sysRECVMSG = 0x11 + sysRECVMMSG = 0x13 + sysSENDMMSG = 0x14 +) + +func socketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) +func rawsocketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s new file mode 100644 index 000000000..06d75628c --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s @@ -0,0 +1,11 @@ 
+// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·socketcall(SB),NOSPLIT,$0-72 + JMP syscall·socketcall(SB) + +TEXT ·rawsocketcall(SB),NOSPLIT,$0-72 + JMP syscall·rawsocketcall(SB) diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_netbsd.go b/fn/vendor/golang.org/x/net/internal/socket/sys_netbsd.go new file mode 100644 index 000000000..431851c12 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_netbsd.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "syscall" + "unsafe" +) + +const ( + sysRECVMMSG = 0x1db + sysSENDMMSG = 0x1dc +) + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_posix.go b/fn/vendor/golang.org/x/net/internal/socket/sys_posix.go new file mode 100644 index 000000000..9a0dbcfb9 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_posix.go @@ -0,0 +1,168 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.9 +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket + +import ( + "encoding/binary" + "errors" + "net" + "runtime" + "strconv" + "sync" + "time" +) + +func marshalInetAddr(a net.Addr) []byte { + switch a := a.(type) { + case *net.TCPAddr: + return marshalSockaddr(a.IP, a.Port, a.Zone) + case *net.UDPAddr: + return marshalSockaddr(a.IP, a.Port, a.Zone) + case *net.IPAddr: + return marshalSockaddr(a.IP, 0, a.Zone) + default: + return nil + } +} + +func marshalSockaddr(ip net.IP, port int, zone string) []byte { + if ip4 := ip.To4(); ip4 != nil { + b := make([]byte, sizeofSockaddrInet) + switch runtime.GOOS { + case "linux", "solaris", "windows": + NativeEndian.PutUint16(b[:2], uint16(sysAF_INET)) + default: + b[0] = sizeofSockaddrInet + b[1] = sysAF_INET + } + binary.BigEndian.PutUint16(b[2:4], uint16(port)) + copy(b[4:8], ip4) + return b + } + if ip6 := ip.To16(); ip6 != nil && ip.To4() == nil { + b := make([]byte, sizeofSockaddrInet6) + switch runtime.GOOS { + case "linux", "solaris", "windows": + NativeEndian.PutUint16(b[:2], uint16(sysAF_INET6)) + default: + b[0] = sizeofSockaddrInet6 + b[1] = sysAF_INET6 + } + binary.BigEndian.PutUint16(b[2:4], uint16(port)) + copy(b[8:24], ip6) + if zone != "" { + NativeEndian.PutUint32(b[24:28], uint32(zoneCache.index(zone))) + } + return b + } + return nil +} + +func parseInetAddr(b []byte, network string) (net.Addr, error) { + if len(b) < 2 { + return nil, errors.New("invalid address") + } + var af int + switch runtime.GOOS { + case "linux", "solaris", "windows": + af = int(NativeEndian.Uint16(b[:2])) + default: + af = int(b[1]) + } + var ip net.IP + var zone string + if af == sysAF_INET { + if len(b) < sizeofSockaddrInet { + return nil, errors.New("short address") + } + ip = make(net.IP, net.IPv4len) + copy(ip, b[4:8]) + } + if af == sysAF_INET6 { + if len(b) < sizeofSockaddrInet6 { + return nil, errors.New("short address") + } + ip = make(net.IP, net.IPv6len) + 
copy(ip, b[8:24]) + if id := int(NativeEndian.Uint32(b[24:28])); id > 0 { + zone = zoneCache.name(id) + } + } + switch network { + case "tcp", "tcp4", "tcp6": + return &net.TCPAddr{IP: ip, Port: int(binary.BigEndian.Uint16(b[2:4])), Zone: zone}, nil + case "udp", "udp4", "udp6": + return &net.UDPAddr{IP: ip, Port: int(binary.BigEndian.Uint16(b[2:4])), Zone: zone}, nil + default: + return &net.IPAddr{IP: ip, Zone: zone}, nil + } +} + +// An ipv6ZoneCache represents a cache holding partial network +// interface information. It is used for reducing the cost of IPv6 +// addressing scope zone resolution. +// +// Multiple names sharing the index are managed by first-come +// first-served basis for consistency. +type ipv6ZoneCache struct { + sync.RWMutex // guard the following + lastFetched time.Time // last time routing information was fetched + toIndex map[string]int // interface name to its index + toName map[int]string // interface index to its name +} + +var zoneCache = ipv6ZoneCache{ + toIndex: make(map[string]int), + toName: make(map[int]string), +} + +func (zc *ipv6ZoneCache) update(ift []net.Interface) { + zc.Lock() + defer zc.Unlock() + now := time.Now() + if zc.lastFetched.After(now.Add(-60 * time.Second)) { + return + } + zc.lastFetched = now + if len(ift) == 0 { + var err error + if ift, err = net.Interfaces(); err != nil { + return + } + } + zc.toIndex = make(map[string]int, len(ift)) + zc.toName = make(map[int]string, len(ift)) + for _, ifi := range ift { + zc.toIndex[ifi.Name] = ifi.Index + if _, ok := zc.toName[ifi.Index]; !ok { + zc.toName[ifi.Index] = ifi.Name + } + } +} + +func (zc *ipv6ZoneCache) name(zone int) string { + zoneCache.update(nil) + zoneCache.RLock() + defer zoneCache.RUnlock() + name, ok := zoneCache.toName[zone] + if !ok { + name = strconv.Itoa(zone) + } + return name +} + +func (zc *ipv6ZoneCache) index(zone string) int { + zoneCache.update(nil) + zoneCache.RLock() + defer zoneCache.RUnlock() + index, ok := zoneCache.toIndex[zone] + if 
!ok { + index, _ = strconv.Atoi(zone) + } + return index +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_solaris.go b/fn/vendor/golang.org/x/net/internal/socket/sys_solaris.go new file mode 100644 index 000000000..cced74e60 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_solaris.go @@ -0,0 +1,71 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "errors" + "runtime" + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + switch runtime.GOARCH { + case "amd64": + return 4 + default: + var p uintptr + return int(unsafe.Sizeof(p)) + } +} + +//go:cgo_import_dynamic libc___xnet_getsockopt __xnet_getsockopt "libsocket.so" +//go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" +//go:cgo_import_dynamic libc___xnet_recvmsg __xnet_recvmsg "libsocket.so" +//go:cgo_import_dynamic libc___xnet_sendmsg __xnet_sendmsg "libsocket.so" + +//go:linkname procGetsockopt libc___xnet_getsockopt +//go:linkname procSetsockopt libc_setsockopt +//go:linkname procRecvmsg libc___xnet_recvmsg +//go:linkname procSendmsg libc___xnet_sendmsg + +var ( + procGetsockopt uintptr + procSetsockopt uintptr + procRecvmsg uintptr + procSendmsg uintptr +) + +func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) +func rawSysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procGetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSetsockopt)), 5, s, uintptr(level), uintptr(name), 
uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procRecvmsg)), 3, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSendmsg)), 3, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s b/fn/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s new file mode 100644 index 000000000..a18ac5ed7 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·sysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·sysvicall6(SB) + +TEXT ·rawSysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSysvicall6(SB) diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_stub.go b/fn/vendor/golang.org/x/net/internal/socket/sys_stub.go new file mode 100644 index 000000000..d9f06d00e --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_stub.go @@ -0,0 +1,64 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package socket + +import ( + "errors" + "net" + "runtime" + "unsafe" +) + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +func probeProtocolStack() int { + switch runtime.GOARCH { + case "amd64p32", "mips64p32": + return 4 + default: + var p uintptr + return int(unsafe.Sizeof(p)) + } +} + +func marshalInetAddr(ip net.IP, port int, zone string) []byte { + return nil +} + +func parseInetAddr(b []byte, network string) (net.Addr, error) { + return nil, errors.New("not implemented") +} + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + return 0, errors.New("not implemented") +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + return errors.New("not implemented") +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_unix.go b/fn/vendor/golang.org/x/net/internal/socket/sys_unix.go new file mode 100644 index 000000000..18eba3085 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_unix.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux,!s390x,!386 netbsd openbsd + +package socket + +import ( + "syscall" + "unsafe" +) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_RECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_SENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/sys_windows.go b/fn/vendor/golang.org/x/net/internal/socket/sys_windows.go new file mode 100644 index 000000000..54a470ebe --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/sys_windows.go @@ -0,0 +1,70 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +import ( + "errors" + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x17 + + sysSOCK_RAW = 0x3 +) + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + err := syscall.Getsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), (*int32)(unsafe.Pointer(&l))) + return int(l), err +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + return syscall.Setsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), int32(len(b))) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go new file mode 100644 index 000000000..26f8feff3 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + 
Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go new file mode 100644 index 000000000..e2987f7db --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go new file mode 100644 index 000000000..26f8feff3 --- /dev/null +++ 
b/fn/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go new file mode 100644 index 000000000..c582abd57 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + 
sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go new file mode 100644 index 000000000..04a24886c --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go new file mode 100644 index 000000000..35c7cb9c9 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + 
Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go new file mode 100644 index 000000000..04a24886c --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go new file mode 100644 index 000000000..430206930 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW 
= 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go new file mode 100644 index 000000000..1502f6c55 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git 
a/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go new file mode 100644 index 000000000..430206930 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go new file mode 100644 index 000000000..1502f6c55 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + 
Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go new file mode 100644 index 000000000..430206930 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go new file mode 100644 index 000000000..1502f6c55 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + 
sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go new file mode 100644 index 000000000..1502f6c55 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 
0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go new file mode 100644 index 000000000..430206930 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go new file mode 100644 index 000000000..1502f6c55 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte 
+} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go new file mode 100644 index 000000000..1502f6c55 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go new file mode 100644 index 000000000..1502f6c55 --- 
/dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go new file mode 100644 index 000000000..db60491fe --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go @@ -0,0 +1,65 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + 
Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go new file mode 100644 index 000000000..2a1a79985 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go @@ -0,0 +1,68 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go new file mode 100644 index 000000000..206ea2d11 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) 
+ +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go new file mode 100644 index 000000000..1c836361e --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go new file mode 100644 index 000000000..a6c0bf464 --- /dev/null +++ 
b/fn/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go new file mode 100644 index 000000000..1c836361e --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + 
sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go b/fn/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go new file mode 100644 index 000000000..327c63290 --- /dev/null +++ b/fn/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go @@ -0,0 +1,60 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_solaris.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1a + + sysSOCK_RAW = 0x4 +) + +type iovec struct { + Base *int8 + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Accrights *int8 + Accrightslen int32 + Pad_cgo_2 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + X__sin6_src_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x20 +) diff --git a/fn/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/fn/vendor/golang.org/x/net/internal/timeseries/timeseries.go index 1119f3448..685f0e7ea 100644 --- a/fn/vendor/golang.org/x/net/internal/timeseries/timeseries.go +++ b/fn/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -371,7 +371,7 @@ func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observabl } } - // Failed to find a level that covers the desired range. So just + // Failed to find a level that covers the desired range. So just // extract from the last level, even if it doesn't cover the entire // desired range. 
ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) diff --git a/fn/vendor/golang.org/x/net/ipv4/batch.go b/fn/vendor/golang.org/x/net/ipv4/batch.go new file mode 100644 index 000000000..b44549928 --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv4/batch.go @@ -0,0 +1,191 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package ipv4 + +import ( + "net" + "runtime" + "syscall" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// PacketConn are not implemented. + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// RawConn are not implemented. + +// A Message represents an IO message. +// +// type Message struct { +// Buffers [][]byte +// OOB []byte +// Addr net.Addr +// N int +// NN int +// Flags int +// } +// +// The Buffers fields represents a list of contiguous buffers, which +// can be used for vectored IO, for example, putting a header and a +// payload in each slice. +// When writing, the Buffers field must contain at least one byte to +// write. +// When reading, the Buffers field will always contain a byte to read. +// +// The OOB field contains protocol-specific control or miscellaneous +// ancillary data known as out-of-band data. +// It can be nil when not required. +// +// The Addr field specifies a destination address when writing. +// It can be nil when the underlying protocol of the endpoint uses +// connection-oriented communication. +// After a successful read, it may contain the source address on the +// received packet. +// +// The N field indicates the number of bytes read or written from/to +// Buffers. +// +// The NN field indicates the number of bytes read or written from/to +// OOB. +// +// The Flags field contains protocol-specific information on the +// received message. 
+type Message = socket.Message + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +// +// Unlike the ReadFrom method, it doesn't strip the IPv4 header +// followed by option headers from the received IPv4 datagram when the +// underlying transport is net.IPConn. Each Buffers field of Message +// must be large enough to accommodate an IPv4 header and option +// headers. +func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. 
+func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +func (c *packetHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. 
+func (c *packetHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + } +} diff --git a/fn/vendor/golang.org/x/net/ipv4/bpf_test.go b/fn/vendor/golang.org/x/net/ipv4/bpf_test.go new file mode 100644 index 000000000..b44da9054 --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv4/bpf_test.go @@ -0,0 +1,93 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "net" + "runtime" + "testing" + "time" + + "golang.org/x/net/bpf" + "golang.org/x/net/ipv4" +) + +func TestBPF(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skipf("not supported on %s", runtime.GOOS) + } + + l, err := net.ListenPacket("udp4", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + + p := ipv4.NewPacketConn(l) + + // This filter accepts UDP packets whose first payload byte is + // even. + prog, err := bpf.Assemble([]bpf.Instruction{ + // Load the first byte of the payload (skipping UDP header). + bpf.LoadAbsolute{Off: 8, Size: 1}, + // Select LSB of the byte. + bpf.ALUOpConstant{Op: bpf.ALUOpAnd, Val: 1}, + // Byte is even? + bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0, SkipFalse: 1}, + // Accept. + bpf.RetConstant{Val: 4096}, + // Ignore. 
+ bpf.RetConstant{Val: 0}, + }) + if err != nil { + t.Fatalf("compiling BPF: %s", err) + } + + if err = p.SetBPF(prog); err != nil { + t.Fatalf("attaching filter to Conn: %s", err) + } + + s, err := net.Dial("udp4", l.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + defer s.Close() + go func() { + for i := byte(0); i < 10; i++ { + s.Write([]byte{i}) + } + }() + + l.SetDeadline(time.Now().Add(2 * time.Second)) + seen := make([]bool, 5) + for { + var b [512]byte + n, _, err := l.ReadFrom(b[:]) + if err != nil { + t.Fatalf("reading from listener: %s", err) + } + if n != 1 { + t.Fatalf("unexpected packet length, want 1, got %d", n) + } + if b[0] >= 10 { + t.Fatalf("unexpected byte, want 0-9, got %d", b[0]) + } + if b[0]%2 != 0 { + t.Fatalf("got odd byte %d, wanted only even bytes", b[0]) + } + seen[b[0]/2] = true + + seenAll := true + for _, v := range seen { + if !v { + seenAll = false + break + } + } + if seenAll { + break + } + } +} diff --git a/fn/vendor/golang.org/x/net/ipv4/control.go b/fn/vendor/golang.org/x/net/ipv4/control.go index 8cadfd7f3..a2b02ca95 100644 --- a/fn/vendor/golang.org/x/net/ipv4/control.go +++ b/fn/vendor/golang.org/x/net/ipv4/control.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -8,6 +8,9 @@ import ( "fmt" "net" "sync" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) type rawOpt struct { @@ -51,6 +54,77 @@ func (cm *ControlMessage) String() string { return fmt.Sprintf("ttl=%d src=%v dst=%v ifindex=%d", cm.TTL, cm.Src, cm.Dst, cm.IfIndex) } +// Marshal returns the binary encoding of cm. 
+func (cm *ControlMessage) Marshal() []byte { + if cm == nil { + return nil + } + var m socket.ControlMessage + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To4() != nil || cm.IfIndex > 0) { + m = socket.NewControlMessage([]int{ctlOpts[ctlPacketInfo].length}) + } + if len(m) > 0 { + ctlOpts[ctlPacketInfo].marshal(m, cm) + } + return m +} + +// Parse parses b as a control message and stores the result in cm. +func (cm *ControlMessage) Parse(b []byte) error { + ms, err := socket.ControlMessage(b).Parse() + if err != nil { + return err + } + for _, m := range ms { + lvl, typ, l, err := m.ParseHeader() + if err != nil { + return err + } + if lvl != iana.ProtocolIP { + continue + } + switch { + case typ == ctlOpts[ctlTTL].name && l >= ctlOpts[ctlTTL].length: + ctlOpts[ctlTTL].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlDst].name && l >= ctlOpts[ctlDst].length: + ctlOpts[ctlDst].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlInterface].name && l >= ctlOpts[ctlInterface].length: + ctlOpts[ctlInterface].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPacketInfo].name && l >= ctlOpts[ctlPacketInfo].length: + ctlOpts[ctlPacketInfo].parse(cm, m.Data(l)) + } + } + return nil +} + +// NewControlMessage returns a new control message. +// +// The returned message is large enough for options specified by cf. 
+func NewControlMessage(cf ControlFlags) []byte { + opt := rawOpt{cflags: cf} + var l int + if opt.isset(FlagTTL) && ctlOpts[ctlTTL].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlTTL].length) + } + if ctlOpts[ctlPacketInfo].name > 0 { + if opt.isset(FlagSrc | FlagDst | FlagInterface) { + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + } else { + if opt.isset(FlagDst) && ctlOpts[ctlDst].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlDst].length) + } + if opt.isset(FlagInterface) && ctlOpts[ctlInterface].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlInterface].length) + } + } + var b []byte + if l > 0 { + b = make([]byte, l) + } + return b +} + // Ancillary data socket options const ( ctlTTL = iota // header field diff --git a/fn/vendor/golang.org/x/net/ipv4/control_bsd.go b/fn/vendor/golang.org/x/net/ipv4/control_bsd.go index 33d8bc8b3..77e7ad5be 100644 --- a/fn/vendor/golang.org/x/net/ipv4/control_bsd.go +++ b/fn/vendor/golang.org/x/net/ipv4/control_bsd.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -12,26 +12,26 @@ import ( "unsafe" "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) func marshalDst(b []byte, cm *ControlMessage) []byte { - m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIP - m.Type = sysIP_RECVDSTADDR - m.SetLen(syscall.CmsgLen(net.IPv4len)) - return b[syscall.CmsgSpace(net.IPv4len):] + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVDSTADDR, net.IPv4len) + return m.Next(net.IPv4len) } func parseDst(cm *ControlMessage, b []byte) { - cm.Dst = b[:net.IPv4len] + if len(cm.Dst) < net.IPv4len { + cm.Dst = make(net.IP, net.IPv4len) + } + copy(cm.Dst, b[:net.IPv4len]) } func marshalInterface(b []byte, cm *ControlMessage) []byte { - m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIP - m.Type = sysIP_RECVIF - m.SetLen(syscall.CmsgLen(syscall.SizeofSockaddrDatalink)) - return b[syscall.CmsgSpace(syscall.SizeofSockaddrDatalink):] + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVIF, syscall.SizeofSockaddrDatalink) + return m.Next(syscall.SizeofSockaddrDatalink) } func parseInterface(cm *ControlMessage, b []byte) { diff --git a/fn/vendor/golang.org/x/net/ipv4/control_pktinfo.go b/fn/vendor/golang.org/x/net/ipv4/control_pktinfo.go index 444782f39..425338f35 100644 --- a/fn/vendor/golang.org/x/net/ipv4/control_pktinfo.go +++ b/fn/vendor/golang.org/x/net/ipv4/control_pktinfo.go @@ -2,24 +2,23 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin linux +// +build darwin linux solaris package ipv4 import ( - "syscall" + "net" "unsafe" "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { - m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIP - m.Type = sysIP_PKTINFO - m.SetLen(syscall.CmsgLen(sysSizeofInetPktinfo)) + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_PKTINFO, sizeofInetPktinfo) if cm != nil { - pi := (*sysInetPktinfo)(unsafe.Pointer(&b[syscall.CmsgLen(0)])) + pi := (*inetPktinfo)(unsafe.Pointer(&m.Data(sizeofInetPktinfo)[0])) if ip := cm.Src.To4(); ip != nil { copy(pi.Spec_dst[:], ip) } @@ -27,11 +26,14 @@ func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { pi.setIfindex(cm.IfIndex) } } - return b[syscall.CmsgSpace(sysSizeofInetPktinfo):] + return m.Next(sizeofInetPktinfo) } func parsePacketInfo(cm *ControlMessage, b []byte) { - pi := (*sysInetPktinfo)(unsafe.Pointer(&b[0])) + pi := (*inetPktinfo)(unsafe.Pointer(&b[0])) cm.IfIndex = int(pi.Ifindex) - cm.Dst = pi.Addr[:] + if len(cm.Dst) < net.IPv4len { + cm.Dst = make(net.IP, net.IPv4len) + } + copy(cm.Dst, pi.Addr[:]) } diff --git a/fn/vendor/golang.org/x/net/ipv4/control_stub.go b/fn/vendor/golang.org/x/net/ipv4/control_stub.go index 4d8507194..5a2f7d8d3 100644 --- a/fn/vendor/golang.org/x/net/ipv4/control_stub.go +++ b/fn/vendor/golang.org/x/net/ipv4/control_stub.go @@ -1,23 +1,13 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build nacl plan9 solaris +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv4 -func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { return errOpNoSupport } - -func newControlMessage(opt *rawOpt) []byte { - return nil -} - -func parseControlMessage(b []byte) (*ControlMessage, error) { - return nil, errOpNoSupport -} - -func marshalControlMessage(cm *ControlMessage) []byte { - return nil -} diff --git a/fn/vendor/golang.org/x/net/ipv4/control_test.go b/fn/vendor/golang.org/x/net/ipv4/control_test.go new file mode 100644 index 000000000..f87fe124b --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv4/control_test.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "testing" + + "golang.org/x/net/ipv4" +) + +func TestControlMessageParseWithFuzz(t *testing.T) { + var cm ipv4.ControlMessage + for _, fuzz := range []string{ + "\f\x00\x00\x00\x00\x00\x00\x00\x14\x00\x00\x00", + "\f\x00\x00\x00\x00\x00\x00\x00\x1a\x00\x00\x00", + } { + cm.Parse([]byte(fuzz)) + } +} diff --git a/fn/vendor/golang.org/x/net/ipv4/control_unix.go b/fn/vendor/golang.org/x/net/ipv4/control_unix.go index 3000c52e4..e1ae8167b 100644 --- a/fn/vendor/golang.org/x/net/ipv4/control_unix.go +++ b/fn/vendor/golang.org/x/net/ipv4/control_unix.go @@ -1,24 +1,23 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd +// +build darwin dragonfly freebsd linux netbsd openbsd solaris package ipv4 import ( - "os" - "syscall" "unsafe" "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) -func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { opt.Lock() defer opt.Unlock() - if cf&FlagTTL != 0 && sockOpts[ssoReceiveTTL].name > 0 { - if err := setInt(fd, &sockOpts[ssoReceiveTTL], boolint(on)); err != nil { + if so, ok := sockOpts[ssoReceiveTTL]; ok && cf&FlagTTL != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { return err } if on { @@ -27,9 +26,9 @@ func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { opt.clear(FlagTTL) } } - if sockOpts[ssoPacketInfo].name > 0 { + if so, ok := sockOpts[ssoPacketInfo]; ok { if cf&(FlagSrc|FlagDst|FlagInterface) != 0 { - if err := setInt(fd, &sockOpts[ssoPacketInfo], boolint(on)); err != nil { + if err := so.SetInt(c, boolint(on)); err != nil { return err } if on { @@ -39,8 +38,8 @@ func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { } } } else { - if cf&FlagDst != 0 && sockOpts[ssoReceiveDst].name > 0 { - if err := setInt(fd, &sockOpts[ssoReceiveDst], boolint(on)); err != nil { + if so, ok := sockOpts[ssoReceiveDst]; ok && cf&FlagDst != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { return err } if on { @@ -49,8 +48,8 @@ func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { opt.clear(FlagDst) } } - if cf&FlagInterface != 0 && sockOpts[ssoReceiveInterface].name > 0 { - if err := setInt(fd, &sockOpts[ssoReceiveInterface], boolint(on)); err != nil { + if so, ok := sockOpts[ssoReceiveInterface]; ok && cf&FlagInterface != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { return err } if on { @@ -63,100 +62,10 @@ func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on 
bool) error { return nil } -func newControlMessage(opt *rawOpt) (oob []byte) { - opt.RLock() - var l int - if opt.isset(FlagTTL) && ctlOpts[ctlTTL].name > 0 { - l += syscall.CmsgSpace(ctlOpts[ctlTTL].length) - } - if ctlOpts[ctlPacketInfo].name > 0 { - if opt.isset(FlagSrc | FlagDst | FlagInterface) { - l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length) - } - } else { - if opt.isset(FlagDst) && ctlOpts[ctlDst].name > 0 { - l += syscall.CmsgSpace(ctlOpts[ctlDst].length) - } - if opt.isset(FlagInterface) && ctlOpts[ctlInterface].name > 0 { - l += syscall.CmsgSpace(ctlOpts[ctlInterface].length) - } - } - if l > 0 { - oob = make([]byte, l) - b := oob - if opt.isset(FlagTTL) && ctlOpts[ctlTTL].name > 0 { - b = ctlOpts[ctlTTL].marshal(b, nil) - } - if ctlOpts[ctlPacketInfo].name > 0 { - if opt.isset(FlagSrc | FlagDst | FlagInterface) { - b = ctlOpts[ctlPacketInfo].marshal(b, nil) - } - } else { - if opt.isset(FlagDst) && ctlOpts[ctlDst].name > 0 { - b = ctlOpts[ctlDst].marshal(b, nil) - } - if opt.isset(FlagInterface) && ctlOpts[ctlInterface].name > 0 { - b = ctlOpts[ctlInterface].marshal(b, nil) - } - } - } - opt.RUnlock() - return -} - -func parseControlMessage(b []byte) (*ControlMessage, error) { - if len(b) == 0 { - return nil, nil - } - cmsgs, err := syscall.ParseSocketControlMessage(b) - if err != nil { - return nil, os.NewSyscallError("parse socket control message", err) - } - cm := &ControlMessage{} - for _, m := range cmsgs { - if m.Header.Level != iana.ProtocolIP { - continue - } - switch int(m.Header.Type) { - case ctlOpts[ctlTTL].name: - ctlOpts[ctlTTL].parse(cm, m.Data[:]) - case ctlOpts[ctlDst].name: - ctlOpts[ctlDst].parse(cm, m.Data[:]) - case ctlOpts[ctlInterface].name: - ctlOpts[ctlInterface].parse(cm, m.Data[:]) - case ctlOpts[ctlPacketInfo].name: - ctlOpts[ctlPacketInfo].parse(cm, m.Data[:]) - } - } - return cm, nil -} - -func marshalControlMessage(cm *ControlMessage) (oob []byte) { - if cm == nil { - return nil - } - var l int - pktinfo := false - 
if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To4() != nil || cm.IfIndex > 0) { - pktinfo = true - l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length) - } - if l > 0 { - oob = make([]byte, l) - b := oob - if pktinfo { - b = ctlOpts[ctlPacketInfo].marshal(b, cm) - } - } - return -} - func marshalTTL(b []byte, cm *ControlMessage) []byte { - m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIP - m.Type = sysIP_RECVTTL - m.SetLen(syscall.CmsgLen(1)) - return b[syscall.CmsgSpace(1):] + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVTTL, 1) + return m.Next(1) } func parseTTL(cm *ControlMessage, b []byte) { diff --git a/fn/vendor/golang.org/x/net/ipv4/control_windows.go b/fn/vendor/golang.org/x/net/ipv4/control_windows.go index 800f63779..ce55c6644 100644 --- a/fn/vendor/golang.org/x/net/ipv4/control_windows.go +++ b/fn/vendor/golang.org/x/net/ipv4/control_windows.go @@ -1,27 +1,16 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package ipv4 -import "syscall" +import ( + "syscall" -func setControlMessage(fd syscall.Handle, opt *rawOpt, cf ControlFlags, on bool) error { + "golang.org/x/net/internal/socket" +) + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { // TODO(mikio): implement this return syscall.EWINDOWS } - -func newControlMessage(opt *rawOpt) []byte { - // TODO(mikio): implement this - return nil -} - -func parseControlMessage(b []byte) (*ControlMessage, error) { - // TODO(mikio): implement this - return nil, syscall.EWINDOWS -} - -func marshalControlMessage(cm *ControlMessage) []byte { - // TODO(mikio): implement this - return nil -} diff --git a/fn/vendor/golang.org/x/net/ipv4/defs_darwin.go b/fn/vendor/golang.org/x/net/ipv4/defs_darwin.go index 731d56a71..c8f2e05b8 100644 --- a/fn/vendor/golang.org/x/net/ipv4/defs_darwin.go +++ b/fn/vendor/golang.org/x/net/ipv4/defs_darwin.go @@ -49,29 +49,29 @@ const ( sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - sysSizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sysSizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sysSizeofInetPktinfo = C.sizeof_struct_in_pktinfo + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofInetPktinfo = C.sizeof_struct_in_pktinfo - sysSizeofIPMreq = C.sizeof_struct_ip_mreq - sysSizeofIPMreqn = C.sizeof_struct_ip_mreqn - sysSizeofIPMreqSource = C.sizeof_struct_ip_mreq_source - sysSizeofGroupReq = C.sizeof_struct_group_req - sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqn = C.sizeof_struct_ip_mreqn + sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req ) -type sysSockaddrStorage C.struct_sockaddr_storage +type sockaddrStorage C.struct_sockaddr_storage -type sysSockaddrInet 
C.struct_sockaddr_in +type sockaddrInet C.struct_sockaddr_in -type sysInetPktinfo C.struct_in_pktinfo +type inetPktinfo C.struct_in_pktinfo -type sysIPMreq C.struct_ip_mreq +type ipMreq C.struct_ip_mreq -type sysIPMreqn C.struct_ip_mreqn +type ipMreqn C.struct_ip_mreqn -type sysIPMreqSource C.struct_ip_mreq_source +type ipMreqSource C.struct_ip_mreq_source -type sysGroupReq C.struct_group_req +type groupReq C.struct_group_req -type sysGroupSourceReq C.struct_group_source_req +type groupSourceReq C.struct_group_source_req diff --git a/fn/vendor/golang.org/x/net/ipv4/defs_dragonfly.go b/fn/vendor/golang.org/x/net/ipv4/defs_dragonfly.go index 08e3b855d..f30544ea2 100644 --- a/fn/vendor/golang.org/x/net/ipv4/defs_dragonfly.go +++ b/fn/vendor/golang.org/x/net/ipv4/defs_dragonfly.go @@ -32,7 +32,7 @@ const ( sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - sysSizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreq = C.sizeof_struct_ip_mreq ) -type sysIPMreq C.struct_ip_mreq +type ipMreq C.struct_ip_mreq diff --git a/fn/vendor/golang.org/x/net/ipv4/defs_freebsd.go b/fn/vendor/golang.org/x/net/ipv4/defs_freebsd.go index f12ca327b..4dd57d865 100644 --- a/fn/vendor/golang.org/x/net/ipv4/defs_freebsd.go +++ b/fn/vendor/golang.org/x/net/ipv4/defs_freebsd.go @@ -50,26 +50,26 @@ const ( sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - sysSizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sysSizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sysSizeofIPMreq = C.sizeof_struct_ip_mreq - sysSizeofIPMreqn = C.sizeof_struct_ip_mreqn - sysSizeofIPMreqSource = C.sizeof_struct_ip_mreq_source - sysSizeofGroupReq = C.sizeof_struct_group_req - sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqn = C.sizeof_struct_ip_mreqn + 
sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req ) -type sysSockaddrStorage C.struct_sockaddr_storage +type sockaddrStorage C.struct_sockaddr_storage -type sysSockaddrInet C.struct_sockaddr_in +type sockaddrInet C.struct_sockaddr_in -type sysIPMreq C.struct_ip_mreq +type ipMreq C.struct_ip_mreq -type sysIPMreqn C.struct_ip_mreqn +type ipMreqn C.struct_ip_mreqn -type sysIPMreqSource C.struct_ip_mreq_source +type ipMreqSource C.struct_ip_mreq_source -type sysGroupReq C.struct_group_req +type groupReq C.struct_group_req -type sysGroupSourceReq C.struct_group_source_req +type groupSourceReq C.struct_group_source_req diff --git a/fn/vendor/golang.org/x/net/ipv4/defs_linux.go b/fn/vendor/golang.org/x/net/ipv4/defs_linux.go index fdba148a2..beb11071a 100644 --- a/fn/vendor/golang.org/x/net/ipv4/defs_linux.go +++ b/fn/vendor/golang.org/x/net/ipv4/defs_linux.go @@ -14,6 +14,8 @@ package ipv4 #include #include #include +#include +#include */ import "C" @@ -76,36 +78,45 @@ const ( sysSO_EE_ORIGIN_TXSTATUS = C.SO_EE_ORIGIN_TXSTATUS sysSO_EE_ORIGIN_TIMESTAMPING = C.SO_EE_ORIGIN_TIMESTAMPING - sysSizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage - sysSizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sysSizeofInetPktinfo = C.sizeof_struct_in_pktinfo - sysSizeofSockExtendedErr = C.sizeof_struct_sock_extended_err + sysSOL_SOCKET = C.SOL_SOCKET + sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER - sysSizeofIPMreq = C.sizeof_struct_ip_mreq - sysSizeofIPMreqn = C.sizeof_struct_ip_mreqn - sysSizeofIPMreqSource = C.sizeof_struct_ip_mreq_source - sysSizeofGroupReq = C.sizeof_struct_group_req - sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req + sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofInetPktinfo = C.sizeof_struct_in_pktinfo + sizeofSockExtendedErr = 
C.sizeof_struct_sock_extended_err - sysSizeofICMPFilter = C.sizeof_struct_icmp_filter + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqn = C.sizeof_struct_ip_mreqn + sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPFilter = C.sizeof_struct_icmp_filter + + sizeofSockFprog = C.sizeof_struct_sock_fprog ) -type sysKernelSockaddrStorage C.struct___kernel_sockaddr_storage +type kernelSockaddrStorage C.struct___kernel_sockaddr_storage -type sysSockaddrInet C.struct_sockaddr_in +type sockaddrInet C.struct_sockaddr_in -type sysInetPktinfo C.struct_in_pktinfo +type inetPktinfo C.struct_in_pktinfo -type sysSockExtendedErr C.struct_sock_extended_err +type sockExtendedErr C.struct_sock_extended_err -type sysIPMreq C.struct_ip_mreq +type ipMreq C.struct_ip_mreq -type sysIPMreqn C.struct_ip_mreqn +type ipMreqn C.struct_ip_mreqn -type sysIPMreqSource C.struct_ip_mreq_source +type ipMreqSource C.struct_ip_mreq_source -type sysGroupReq C.struct_group_req +type groupReq C.struct_group_req -type sysGroupSourceReq C.struct_group_source_req +type groupSourceReq C.struct_group_source_req -type sysICMPFilter C.struct_icmp_filter +type icmpFilter C.struct_icmp_filter + +type sockFProg C.struct_sock_fprog + +type sockFilter C.struct_sock_filter diff --git a/fn/vendor/golang.org/x/net/ipv4/defs_netbsd.go b/fn/vendor/golang.org/x/net/ipv4/defs_netbsd.go index 8642354f4..8f8af1b89 100644 --- a/fn/vendor/golang.org/x/net/ipv4/defs_netbsd.go +++ b/fn/vendor/golang.org/x/net/ipv4/defs_netbsd.go @@ -31,7 +31,7 @@ const ( sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - sysSizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreq = C.sizeof_struct_ip_mreq ) -type sysIPMreq C.struct_ip_mreq +type ipMreq C.struct_ip_mreq diff --git a/fn/vendor/golang.org/x/net/ipv4/defs_openbsd.go b/fn/vendor/golang.org/x/net/ipv4/defs_openbsd.go index 
8642354f4..8f8af1b89 100644 --- a/fn/vendor/golang.org/x/net/ipv4/defs_openbsd.go +++ b/fn/vendor/golang.org/x/net/ipv4/defs_openbsd.go @@ -31,7 +31,7 @@ const ( sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - sysSizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreq = C.sizeof_struct_ip_mreq ) -type sysIPMreq C.struct_ip_mreq +type ipMreq C.struct_ip_mreq diff --git a/fn/vendor/golang.org/x/net/ipv4/defs_solaris.go b/fn/vendor/golang.org/x/net/ipv4/defs_solaris.go index bb74afa49..aeb33e9c8 100644 --- a/fn/vendor/golang.org/x/net/ipv4/defs_solaris.go +++ b/fn/vendor/golang.org/x/net/ipv4/defs_solaris.go @@ -9,30 +9,24 @@ package ipv4 /* +#include + #include */ import "C" const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_RECVSLLA = C.IP_RECVSLLA - sysIP_RECVTTL = C.IP_RECVTTL - sysIP_NEXTHOP = C.IP_NEXTHOP - sysIP_PKTINFO = C.IP_PKTINFO - sysIP_RECVPKTINFO = C.IP_RECVPKTINFO - sysIP_DONTFRAG = C.IP_DONTFRAG - sysIP_BOUND_IF = C.IP_BOUND_IF - sysIP_UNSPEC_SRC = C.IP_UNSPEC_SRC - sysIP_BROADCAST_TTL = C.IP_BROADCAST_TTL - sysIP_DHCPINIT_IF = C.IP_DHCPINIT_IF + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVSLLA = C.IP_RECVSLLA + sysIP_RECVTTL = C.IP_RECVTTL sysIP_MULTICAST_IF = C.IP_MULTICAST_IF sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL @@ -43,15 +37,48 @@ const ( sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_NEXTHOP = C.IP_NEXTHOP 
- sysSizeofInetPktinfo = C.sizeof_struct_in_pktinfo + sysIP_PKTINFO = C.IP_PKTINFO + sysIP_RECVPKTINFO = C.IP_RECVPKTINFO + sysIP_DONTFRAG = C.IP_DONTFRAG - sysSizeofIPMreq = C.sizeof_struct_ip_mreq - sysSizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sysIP_BOUND_IF = C.IP_BOUND_IF + sysIP_UNSPEC_SRC = C.IP_UNSPEC_SRC + sysIP_BROADCAST_TTL = C.IP_BROADCAST_TTL + sysIP_DHCPINIT_IF = C.IP_DHCPINIT_IF + + sysIP_REUSEADDR = C.IP_REUSEADDR + sysIP_DONTROUTE = C.IP_DONTROUTE + sysIP_BROADCAST = C.IP_BROADCAST + + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofInetPktinfo = C.sizeof_struct_in_pktinfo + + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req ) -type sysInetPktinfo C.struct_in_pktinfo +type sockaddrStorage C.struct_sockaddr_storage -type sysIPMreq C.struct_ip_mreq +type sockaddrInet C.struct_sockaddr_in -type sysIPMreqSource C.struct_ip_mreq_source +type inetPktinfo C.struct_in_pktinfo + +type ipMreq C.struct_ip_mreq + +type ipMreqSource C.struct_ip_mreq_source + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req diff --git a/fn/vendor/golang.org/x/net/ipv4/dgramopt_posix.go b/fn/vendor/golang.org/x/net/ipv4/dgramopt.go similarity index 70% rename from fn/vendor/golang.org/x/net/ipv4/dgramopt_posix.go rename to fn/vendor/golang.org/x/net/ipv4/dgramopt.go index 103c4f6da..54d77d5fe 100644 --- a/fn/vendor/golang.org/x/net/ipv4/dgramopt_posix.go +++ b/fn/vendor/golang.org/x/net/ipv4/dgramopt.go @@ -1,14 +1,14 @@ -// 
Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd windows - package ipv4 import ( "net" "syscall" + + "golang.org/x/net/bpf" ) // MulticastTTL returns the time-to-live field value for outgoing @@ -17,11 +17,11 @@ func (c *dgramOpt) MulticastTTL() (int, error) { if !c.ok() { return 0, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return 0, err + so, ok := sockOpts[ssoMulticastTTL] + if !ok { + return 0, errOpNoSupport } - return getInt(fd, &sockOpts[ssoMulticastTTL]) + return so.GetInt(c.Conn) } // SetMulticastTTL sets the time-to-live field value for future @@ -30,11 +30,11 @@ func (c *dgramOpt) SetMulticastTTL(ttl int) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoMulticastTTL] + if !ok { + return errOpNoSupport } - return setInt(fd, &sockOpts[ssoMulticastTTL], ttl) + return so.SetInt(c.Conn, ttl) } // MulticastInterface returns the default interface for multicast @@ -43,11 +43,11 @@ func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { if !c.ok() { return nil, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return nil, err + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return nil, errOpNoSupport } - return getInterface(fd, &sockOpts[ssoMulticastInterface]) + return so.getMulticastInterface(c.Conn) } // SetMulticastInterface sets the default interface for future @@ -56,11 +56,11 @@ func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return errOpNoSupport } - return setInterface(fd, &sockOpts[ssoMulticastInterface], ifi) + return so.setMulticastInterface(c.Conn, 
ifi) } // MulticastLoopback reports whether transmitted multicast packets @@ -69,11 +69,11 @@ func (c *dgramOpt) MulticastLoopback() (bool, error) { if !c.ok() { return false, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return false, err + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return false, errOpNoSupport } - on, err := getInt(fd, &sockOpts[ssoMulticastLoopback]) + on, err := so.GetInt(c.Conn) if err != nil { return false, err } @@ -86,11 +86,11 @@ func (c *dgramOpt) SetMulticastLoopback(on bool) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return errOpNoSupport } - return setInt(fd, &sockOpts[ssoMulticastLoopback], boolint(on)) + return so.SetInt(c.Conn, boolint(on)) } // JoinGroup joins the group address group on the interface ifi. @@ -106,15 +106,15 @@ func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoJoinGroup] + if !ok { + return errOpNoSupport } grp := netAddrToIP4(group) if grp == nil { return errMissingAddress } - return setGroup(fd, &sockOpts[ssoJoinGroup], ifi, grp) + return so.setGroup(c.Conn, ifi, grp) } // LeaveGroup leaves the group address group on the interface ifi @@ -124,15 +124,15 @@ func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoLeaveGroup] + if !ok { + return errOpNoSupport } grp := netAddrToIP4(group) if grp == nil { return errMissingAddress } - return setGroup(fd, &sockOpts[ssoLeaveGroup], ifi, grp) + return so.setGroup(c.Conn, ifi, grp) } // JoinSourceSpecificGroup joins the source-specific group comprising @@ -145,9 +145,9 @@ func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net if !c.ok() { return 
syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoJoinSourceGroup] + if !ok { + return errOpNoSupport } grp := netAddrToIP4(group) if grp == nil { @@ -157,7 +157,7 @@ func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net if src == nil { return errMissingAddress } - return setSourceGroup(fd, &sockOpts[ssoJoinSourceGroup], ifi, grp, src) + return so.setSourceGroup(c.Conn, ifi, grp, src) } // LeaveSourceSpecificGroup leaves the source-specific group on the @@ -166,9 +166,9 @@ func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source ne if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoLeaveSourceGroup] + if !ok { + return errOpNoSupport } grp := netAddrToIP4(group) if grp == nil { @@ -178,7 +178,7 @@ func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source ne if src == nil { return errMissingAddress } - return setSourceGroup(fd, &sockOpts[ssoLeaveSourceGroup], ifi, grp, src) + return so.setSourceGroup(c.Conn, ifi, grp, src) } // ExcludeSourceSpecificGroup excludes the source-specific group from @@ -188,9 +188,9 @@ func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoBlockSourceGroup] + if !ok { + return errOpNoSupport } grp := netAddrToIP4(group) if grp == nil { @@ -200,7 +200,7 @@ func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source if src == nil { return errMissingAddress } - return setSourceGroup(fd, &sockOpts[ssoBlockSourceGroup], ifi, grp, src) + return so.setSourceGroup(c.Conn, ifi, grp, src) } // IncludeSourceSpecificGroup includes the excluded source-specific @@ -209,9 +209,9 @@ func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - 
if err != nil { - return err + so, ok := sockOpts[ssoUnblockSourceGroup] + if !ok { + return errOpNoSupport } grp := netAddrToIP4(group) if grp == nil { @@ -221,7 +221,7 @@ func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source if src == nil { return errMissingAddress } - return setSourceGroup(fd, &sockOpts[ssoUnblockSourceGroup], ifi, grp, src) + return so.setSourceGroup(c.Conn, ifi, grp, src) } // ICMPFilter returns an ICMP filter. @@ -230,11 +230,11 @@ func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { if !c.ok() { return nil, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return nil, err + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return nil, errOpNoSupport } - return getICMPFilter(fd, &sockOpts[ssoICMPFilter]) + return so.getICMPFilter(c.Conn) } // SetICMPFilter deploys the ICMP filter. @@ -243,9 +243,23 @@ func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return errOpNoSupport } - return setICMPFilter(fd, &sockOpts[ssoICMPFilter], f) + return so.setICMPFilter(c.Conn, f) +} + +// SetBPF attaches a BPF program to the connection. +// +// Only supported on Linux. +func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoAttachFilter] + if !ok { + return errOpNoSupport + } + return so.setBPF(c.Conn, filter) } diff --git a/fn/vendor/golang.org/x/net/ipv4/dgramopt_stub.go b/fn/vendor/golang.org/x/net/ipv4/dgramopt_stub.go deleted file mode 100644 index b74df6931..000000000 --- a/fn/vendor/golang.org/x/net/ipv4/dgramopt_stub.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build nacl plan9 solaris - -package ipv4 - -import "net" - -// MulticastTTL returns the time-to-live field value for outgoing -// multicast packets. -func (c *dgramOpt) MulticastTTL() (int, error) { - return 0, errOpNoSupport -} - -// SetMulticastTTL sets the time-to-live field value for future -// outgoing multicast packets. -func (c *dgramOpt) SetMulticastTTL(ttl int) error { - return errOpNoSupport -} - -// MulticastInterface returns the default interface for multicast -// packet transmissions. -func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { - return nil, errOpNoSupport -} - -// SetMulticastInterface sets the default interface for future -// multicast packet transmissions. -func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { - return errOpNoSupport -} - -// MulticastLoopback reports whether transmitted multicast packets -// should be copied and send back to the originator. -func (c *dgramOpt) MulticastLoopback() (bool, error) { - return false, errOpNoSupport -} - -// SetMulticastLoopback sets whether transmitted multicast packets -// should be copied and send back to the originator. -func (c *dgramOpt) SetMulticastLoopback(on bool) error { - return errOpNoSupport -} - -// JoinGroup joins the group address group on the interface ifi. -// By default all sources that can cast data to group are accepted. -// It's possible to mute and unmute data transmission from a specific -// source by using ExcludeSourceSpecificGroup and -// IncludeSourceSpecificGroup. -// JoinGroup uses the system assigned multicast interface when ifi is -// nil, although this is not recommended because the assignment -// depends on platforms and sometimes it might require routing -// configuration. -func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { - return errOpNoSupport -} - -// LeaveGroup leaves the group address group on the interface ifi -// regardless of whether the group is any-source group or -// source-specific group. 
-func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { - return errOpNoSupport -} - -// JoinSourceSpecificGroup joins the source-specific group comprising -// group and source on the interface ifi. -// JoinSourceSpecificGroup uses the system assigned multicast -// interface when ifi is nil, although this is not recommended because -// the assignment depends on platforms and sometimes it might require -// routing configuration. -func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { - return errOpNoSupport -} - -// LeaveSourceSpecificGroup leaves the source-specific group on the -// interface ifi. -func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { - return errOpNoSupport -} - -// ExcludeSourceSpecificGroup excludes the source-specific group from -// the already joined any-source groups by JoinGroup on the interface -// ifi. -func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { - return errOpNoSupport -} - -// IncludeSourceSpecificGroup includes the excluded source-specific -// group by ExcludeSourceSpecificGroup again on the interface ifi. -func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { - return errOpNoSupport -} - -// ICMPFilter returns an ICMP filter. -// Currently only Linux supports this. -func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { - return nil, errOpNoSupport -} - -// SetICMPFilter deploys the ICMP filter. -// Currently only Linux supports this. -func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { - return errOpNoSupport -} diff --git a/fn/vendor/golang.org/x/net/ipv4/doc.go b/fn/vendor/golang.org/x/net/ipv4/doc.go index 9a79badfe..b43935a5a 100644 --- a/fn/vendor/golang.org/x/net/ipv4/doc.go +++ b/fn/vendor/golang.org/x/net/ipv4/doc.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. 
All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -21,10 +21,10 @@ // // The options for unicasting are available for net.TCPConn, // net.UDPConn and net.IPConn which are created as network connections -// that use the IPv4 transport. When a single TCP connection carrying +// that use the IPv4 transport. When a single TCP connection carrying // a data flow of multiple packets needs to indicate the flow is -// important, ipv4.Conn is used to set the type-of-service field on -// the IPv4 header for each packet. +// important, Conn is used to set the type-of-service field on the +// IPv4 header for each packet. // // ln, err := net.Listen("tcp4", "0.0.0.0:1024") // if err != nil { @@ -56,7 +56,7 @@ // // The options for multicasting are available for net.UDPConn and // net.IPconn which are created as network connections that use the -// IPv4 transport. A few network facilities must be prepared before +// IPv4 transport. A few network facilities must be prepared before // you begin multicasting, at a minimum joining network interfaces and // multicast groups. // @@ -80,7 +80,7 @@ // defer c.Close() // // Second, the application joins multicast groups, starts listening to -// the groups on the specified network interfaces. Note that the +// the groups on the specified network interfaces. Note that the // service port for transport layer protocol does not matter with this // operation as joining groups affects only network and link layer // protocols, such as IPv4 and Ethernet. @@ -94,10 +94,10 @@ // } // // The application might set per packet control message transmissions -// between the protocol stack within the kernel. When the application +// between the protocol stack within the kernel. When the application // needs a destination address on an incoming packet, -// SetControlMessage of ipv4.PacketConn is used to enable control -// message transmissons. 
+// SetControlMessage of PacketConn is used to enable control message +// transmissions. // // if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil { // // error handling @@ -145,7 +145,7 @@ // More multicasting // // An application that uses PacketConn or RawConn may join multiple -// multicast groups. For example, a UDP listener with port 1024 might +// multicast groups. For example, a UDP listener with port 1024 might // join two different groups across over two different network // interfaces by using: // @@ -166,7 +166,7 @@ // } // // It is possible for multiple UDP listeners that listen on the same -// UDP port to join the same multicast group. The net package will +// UDP port to join the same multicast group. The net package will // provide a socket that listens to a wildcard address with reusable // UDP port when an appropriate multicast address prefix is passed to // the net.ListenPacket or net.ListenUDP. @@ -240,3 +240,5 @@ // In the fallback case, ExcludeSourceSpecificGroup and // IncludeSourceSpecificGroup may return an error. package ipv4 // import "golang.org/x/net/ipv4" + +// BUG(mikio): This package is not implemented on NaCl and Plan 9. diff --git a/fn/vendor/golang.org/x/net/ipv4/endpoint.go b/fn/vendor/golang.org/x/net/ipv4/endpoint.go index bc45bf054..2ab877363 100644 --- a/fn/vendor/golang.org/x/net/ipv4/endpoint.go +++ b/fn/vendor/golang.org/x/net/ipv4/endpoint.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -8,8 +8,15 @@ import ( "net" "syscall" "time" + + "golang.org/x/net/internal/socket" ) +// BUG(mikio): On Windows, the JoinSourceSpecificGroup, +// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup methods of PacketConn and RawConn are +// not implemented. 
+ // A Conn represents a network endpoint that uses the IPv4 transport. // It is used to control basic IP-level socket options such as TOS and // TTL. @@ -18,21 +25,22 @@ type Conn struct { } type genericOpt struct { - net.Conn + *socket.Conn } func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } // NewConn returns a new Conn. func NewConn(c net.Conn) *Conn { + cc, _ := socket.NewConn(c) return &Conn{ - genericOpt: genericOpt{Conn: c}, + genericOpt: genericOpt{Conn: cc}, } } // A PacketConn represents a packet network endpoint that uses the -// IPv4 transport. It is used to control several IP-level socket -// options including multicasting. It also provides datagram based +// IPv4 transport. It is used to control several IP-level socket +// options including multicasting. It also provides datagram based // network I/O methods specific to the IPv4 and higher layer protocols // such as UDP. type PacketConn struct { @@ -42,21 +50,17 @@ type PacketConn struct { } type dgramOpt struct { - net.PacketConn + *socket.Conn } -func (c *dgramOpt) ok() bool { return c != nil && c.PacketConn != nil } +func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil } // SetControlMessage sets the per packet IP-level socket options. func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { if !c.payloadHandler.ok() { return syscall.EINVAL } - fd, err := c.payloadHandler.sysfd() - if err != nil { - return err - } - return setControlMessage(fd, &c.payloadHandler.rawOpt, cf, on) + return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on) } // SetDeadline sets the read and write deadlines associated with the @@ -97,22 +101,18 @@ func (c *PacketConn) Close() error { // NewPacketConn returns a new PacketConn using c as its underlying // transport. 
func NewPacketConn(c net.PacketConn) *PacketConn { + cc, _ := socket.NewConn(c.(net.Conn)) p := &PacketConn{ - genericOpt: genericOpt{Conn: c.(net.Conn)}, - dgramOpt: dgramOpt{PacketConn: c}, - payloadHandler: payloadHandler{PacketConn: c}, - } - if _, ok := c.(*net.IPConn); ok && sockOpts[ssoStripHeader].name > 0 { - if fd, err := p.payloadHandler.sysfd(); err == nil { - setInt(fd, &sockOpts[ssoStripHeader], boolint(true)) - } + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + payloadHandler: payloadHandler{PacketConn: c, Conn: cc}, } return p } // A RawConn represents a packet network endpoint that uses the IPv4 -// transport. It is used to control several IP-level socket options -// including IPv4 header manipulation. It also provides datagram +// transport. It is used to control several IP-level socket options +// including IPv4 header manipulation. It also provides datagram // based network I/O methods specific to the IPv4 and higher layer // protocols that handle IPv4 datagram directly such as OSPF, GRE. 
type RawConn struct { @@ -126,11 +126,7 @@ func (c *RawConn) SetControlMessage(cf ControlFlags, on bool) error { if !c.packetHandler.ok() { return syscall.EINVAL } - fd, err := c.packetHandler.sysfd() - if err != nil { - return err - } - return setControlMessage(fd, &c.packetHandler.rawOpt, cf, on) + return setControlMessage(c.dgramOpt.Conn, &c.packetHandler.rawOpt, cf, on) } // SetDeadline sets the read and write deadlines associated with the @@ -139,7 +135,7 @@ func (c *RawConn) SetDeadline(t time.Time) error { if !c.packetHandler.ok() { return syscall.EINVAL } - return c.packetHandler.c.SetDeadline(t) + return c.packetHandler.IPConn.SetDeadline(t) } // SetReadDeadline sets the read deadline associated with the @@ -148,7 +144,7 @@ func (c *RawConn) SetReadDeadline(t time.Time) error { if !c.packetHandler.ok() { return syscall.EINVAL } - return c.packetHandler.c.SetReadDeadline(t) + return c.packetHandler.IPConn.SetReadDeadline(t) } // SetWriteDeadline sets the write deadline associated with the @@ -157,7 +153,7 @@ func (c *RawConn) SetWriteDeadline(t time.Time) error { if !c.packetHandler.ok() { return syscall.EINVAL } - return c.packetHandler.c.SetWriteDeadline(t) + return c.packetHandler.IPConn.SetWriteDeadline(t) } // Close closes the endpoint. @@ -165,22 +161,26 @@ func (c *RawConn) Close() error { if !c.packetHandler.ok() { return syscall.EINVAL } - return c.packetHandler.c.Close() + return c.packetHandler.IPConn.Close() } // NewRawConn returns a new RawConn using c as its underlying // transport. 
func NewRawConn(c net.PacketConn) (*RawConn, error) { - r := &RawConn{ - genericOpt: genericOpt{Conn: c.(net.Conn)}, - dgramOpt: dgramOpt{PacketConn: c}, - packetHandler: packetHandler{c: c.(*net.IPConn)}, - } - fd, err := r.packetHandler.sysfd() + cc, err := socket.NewConn(c.(net.Conn)) if err != nil { return nil, err } - if err := setInt(fd, &sockOpts[ssoHeaderPrepend], boolint(true)); err != nil { + r := &RawConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + packetHandler: packetHandler{IPConn: c.(*net.IPConn), Conn: cc}, + } + so, ok := sockOpts[ssoHeaderPrepend] + if !ok { + return nil, errOpNoSupport + } + if err := so.SetInt(r.dgramOpt.Conn, boolint(true)); err != nil { return nil, err } return r, nil diff --git a/fn/vendor/golang.org/x/net/ipv4/example_test.go b/fn/vendor/golang.org/x/net/ipv4/example_test.go index 4f5e2f312..ddc7577e8 100644 --- a/fn/vendor/golang.org/x/net/ipv4/example_test.go +++ b/fn/vendor/golang.org/x/net/ipv4/example_test.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/fn/vendor/golang.org/x/net/ipv4/gen.go b/fn/vendor/golang.org/x/net/ipv4/gen.go index 4785212a7..ffb44fe68 100644 --- a/fn/vendor/golang.org/x/net/ipv4/gen.go +++ b/fn/vendor/golang.org/x/net/ipv4/gen.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -52,15 +52,6 @@ func genzsys() error { if err != nil { return err } - // The ipv4 pacakge still supports go1.2, and so we need to - // take care of additional platforms in go1.3 and above for - // working with go1.2. 
- switch { - case runtime.GOOS == "dragonfly" || runtime.GOOS == "solaris": - b = bytes.Replace(b, []byte("package ipv4\n"), []byte("// +build "+runtime.GOOS+"\n\npackage ipv4\n"), 1) - case runtime.GOOS == "linux" && (runtime.GOARCH == "arm64" || runtime.GOARCH == "mips64" || runtime.GOARCH == "mips64le" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le"): - b = bytes.Replace(b, []byte("package ipv4\n"), []byte("// +build "+runtime.GOOS+","+runtime.GOARCH+"\n\npackage ipv4\n"), 1) - } b, err = format.Source(b) if err != nil { return err diff --git a/fn/vendor/golang.org/x/net/ipv4/genericopt_posix.go b/fn/vendor/golang.org/x/net/ipv4/genericopt.go similarity index 61% rename from fn/vendor/golang.org/x/net/ipv4/genericopt_posix.go rename to fn/vendor/golang.org/x/net/ipv4/genericopt.go index fefa0be36..119bf841b 100644 --- a/fn/vendor/golang.org/x/net/ipv4/genericopt_posix.go +++ b/fn/vendor/golang.org/x/net/ipv4/genericopt.go @@ -1,9 +1,7 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd windows - package ipv4 import "syscall" @@ -13,11 +11,11 @@ func (c *genericOpt) TOS() (int, error) { if !c.ok() { return 0, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return 0, err + so, ok := sockOpts[ssoTOS] + if !ok { + return 0, errOpNoSupport } - return getInt(fd, &sockOpts[ssoTOS]) + return so.GetInt(c.Conn) } // SetTOS sets the type-of-service field value for future outgoing @@ -26,11 +24,11 @@ func (c *genericOpt) SetTOS(tos int) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoTOS] + if !ok { + return errOpNoSupport } - return setInt(fd, &sockOpts[ssoTOS], tos) + return so.SetInt(c.Conn, tos) } // TTL returns the time-to-live field value for outgoing packets. @@ -38,11 +36,11 @@ func (c *genericOpt) TTL() (int, error) { if !c.ok() { return 0, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return 0, err + so, ok := sockOpts[ssoTTL] + if !ok { + return 0, errOpNoSupport } - return getInt(fd, &sockOpts[ssoTTL]) + return so.GetInt(c.Conn) } // SetTTL sets the time-to-live field value for future outgoing @@ -51,9 +49,9 @@ func (c *genericOpt) SetTTL(ttl int) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoTTL] + if !ok { + return errOpNoSupport } - return setInt(fd, &sockOpts[ssoTTL], ttl) + return so.SetInt(c.Conn, ttl) } diff --git a/fn/vendor/golang.org/x/net/ipv4/genericopt_stub.go b/fn/vendor/golang.org/x/net/ipv4/genericopt_stub.go deleted file mode 100644 index 1817badb1..000000000 --- a/fn/vendor/golang.org/x/net/ipv4/genericopt_stub.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build nacl plan9 solaris - -package ipv4 - -// TOS returns the type-of-service field value for outgoing packets. -func (c *genericOpt) TOS() (int, error) { - return 0, errOpNoSupport -} - -// SetTOS sets the type-of-service field value for future outgoing -// packets. -func (c *genericOpt) SetTOS(tos int) error { - return errOpNoSupport -} - -// TTL returns the time-to-live field value for outgoing packets. -func (c *genericOpt) TTL() (int, error) { - return 0, errOpNoSupport -} - -// SetTTL sets the time-to-live field value for future outgoing -// packets. -func (c *genericOpt) SetTTL(ttl int) error { - return errOpNoSupport -} diff --git a/fn/vendor/golang.org/x/net/ipv4/header.go b/fn/vendor/golang.org/x/net/ipv4/header.go index d10817647..8bb0f0f4d 100644 --- a/fn/vendor/golang.org/x/net/ipv4/header.go +++ b/fn/vendor/golang.org/x/net/ipv4/header.go @@ -1,15 +1,17 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ipv4 import ( + "encoding/binary" "fmt" "net" "runtime" "syscall" - "unsafe" + + "golang.org/x/net/internal/socket" ) const ( @@ -49,7 +51,7 @@ func (h *Header) String() string { return fmt.Sprintf("ver=%d hdrlen=%d tos=%#x totallen=%d id=%#x flags=%#x fragoff=%#x ttl=%d proto=%d cksum=%#x src=%v dst=%v", h.Version, h.Len, h.TOS, h.TotalLen, h.ID, h.Flags, h.FragOff, h.TTL, h.Protocol, h.Checksum, h.Src, h.Dst) } -// Marshal returns the binary encoding of the IPv4 header h. +// Marshal returns the binary encoding of h. 
func (h *Header) Marshal() ([]byte, error) { if h == nil { return nil, syscall.EINVAL @@ -63,18 +65,25 @@ func (h *Header) Marshal() ([]byte, error) { b[1] = byte(h.TOS) flagsAndFragOff := (h.FragOff & 0x1fff) | int(h.Flags<<13) switch runtime.GOOS { - case "darwin", "dragonfly", "freebsd", "netbsd": - // TODO(mikio): fix potential misaligned memory access - *(*uint16)(unsafe.Pointer(&b[2:3][0])) = uint16(h.TotalLen) - *(*uint16)(unsafe.Pointer(&b[6:7][0])) = uint16(flagsAndFragOff) + case "darwin", "dragonfly", "netbsd": + socket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + socket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + case "freebsd": + if freebsdVersion < 1100000 { + socket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + socket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } else { + binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } default: - b[2], b[3] = byte(h.TotalLen>>8), byte(h.TotalLen) - b[6], b[7] = byte(flagsAndFragOff>>8), byte(flagsAndFragOff) + binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) } - b[4], b[5] = byte(h.ID>>8), byte(h.ID) + binary.BigEndian.PutUint16(b[4:6], uint16(h.ID)) b[8] = byte(h.TTL) b[9] = byte(h.Protocol) - b[10], b[11] = byte(h.Checksum>>8), byte(h.Checksum) + binary.BigEndian.PutUint16(b[10:12], uint16(h.Checksum)) if ip := h.Src.To4(); ip != nil { copy(b[12:16], ip[:net.IPv4len]) } @@ -89,52 +98,62 @@ func (h *Header) Marshal() ([]byte, error) { return b, nil } -// See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. -var freebsdVersion uint32 - -// ParseHeader parses b as an IPv4 header. -func ParseHeader(b []byte) (*Header, error) { - if len(b) < HeaderLen { - return nil, errHeaderTooShort +// Parse parses b as an IPv4 header and sotres the result in h. 
+func (h *Header) Parse(b []byte) error { + if h == nil || len(b) < HeaderLen { + return errHeaderTooShort } hdrlen := int(b[0]&0x0f) << 2 if hdrlen > len(b) { - return nil, errBufferTooShort - } - h := &Header{ - Version: int(b[0] >> 4), - Len: hdrlen, - TOS: int(b[1]), - ID: int(b[4])<<8 | int(b[5]), - TTL: int(b[8]), - Protocol: int(b[9]), - Checksum: int(b[10])<<8 | int(b[11]), - Src: net.IPv4(b[12], b[13], b[14], b[15]), - Dst: net.IPv4(b[16], b[17], b[18], b[19]), + return errBufferTooShort } + h.Version = int(b[0] >> 4) + h.Len = hdrlen + h.TOS = int(b[1]) + h.ID = int(binary.BigEndian.Uint16(b[4:6])) + h.TTL = int(b[8]) + h.Protocol = int(b[9]) + h.Checksum = int(binary.BigEndian.Uint16(b[10:12])) + h.Src = net.IPv4(b[12], b[13], b[14], b[15]) + h.Dst = net.IPv4(b[16], b[17], b[18], b[19]) switch runtime.GOOS { case "darwin", "dragonfly", "netbsd": - // TODO(mikio): fix potential misaligned memory access - h.TotalLen = int(*(*uint16)(unsafe.Pointer(&b[2:3][0]))) + hdrlen - // TODO(mikio): fix potential misaligned memory access - h.FragOff = int(*(*uint16)(unsafe.Pointer(&b[6:7][0]))) + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + hdrlen + h.FragOff = int(socket.NativeEndian.Uint16(b[6:8])) case "freebsd": - // TODO(mikio): fix potential misaligned memory access - h.TotalLen = int(*(*uint16)(unsafe.Pointer(&b[2:3][0]))) - if freebsdVersion < 1000000 { - h.TotalLen += hdrlen + if freebsdVersion < 1100000 { + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + if freebsdVersion < 1000000 { + h.TotalLen += hdrlen + } + h.FragOff = int(socket.NativeEndian.Uint16(b[6:8])) + } else { + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + h.FragOff = int(binary.BigEndian.Uint16(b[6:8])) } - // TODO(mikio): fix potential misaligned memory access - h.FragOff = int(*(*uint16)(unsafe.Pointer(&b[6:7][0]))) default: - h.TotalLen = int(b[2])<<8 | int(b[3]) - h.FragOff = int(b[6])<<8 | int(b[7]) + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + h.FragOff = 
int(binary.BigEndian.Uint16(b[6:8])) } h.Flags = HeaderFlags(h.FragOff&0xe000) >> 13 h.FragOff = h.FragOff & 0x1fff - if hdrlen-HeaderLen > 0 { - h.Options = make([]byte, hdrlen-HeaderLen) - copy(h.Options, b[HeaderLen:]) + optlen := hdrlen - HeaderLen + if optlen > 0 && len(b) >= hdrlen { + if cap(h.Options) < optlen { + h.Options = make([]byte, optlen) + } else { + h.Options = h.Options[:optlen] + } + copy(h.Options, b[HeaderLen:hdrlen]) + } + return nil +} + +// ParseHeader parses b as an IPv4 header. +func ParseHeader(b []byte) (*Header, error) { + h := new(Header) + if err := h.Parse(b); err != nil { + return nil, err } return h, nil } diff --git a/fn/vendor/golang.org/x/net/ipv4/header_test.go b/fn/vendor/golang.org/x/net/ipv4/header_test.go index ac89358cb..a246aeea1 100644 --- a/fn/vendor/golang.org/x/net/ipv4/header_test.go +++ b/fn/vendor/golang.org/x/net/ipv4/header_test.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -6,114 +6,223 @@ package ipv4 import ( "bytes" + "encoding/binary" "net" "reflect" "runtime" "strings" "testing" + + "golang.org/x/net/internal/socket" ) -var ( - wireHeaderFromKernel = [HeaderLen]byte{ - 0x45, 0x01, 0xbe, 0xef, - 0xca, 0xfe, 0x45, 0xdc, - 0xff, 0x01, 0xde, 0xad, - 172, 16, 254, 254, - 192, 168, 0, 1, - } - wireHeaderToKernel = [HeaderLen]byte{ - 0x45, 0x01, 0xbe, 0xef, - 0xca, 0xfe, 0x45, 0xdc, - 0xff, 0x01, 0xde, 0xad, - 172, 16, 254, 254, - 192, 168, 0, 1, - } - wireHeaderFromTradBSDKernel = [HeaderLen]byte{ - 0x45, 0x01, 0xdb, 0xbe, - 0xca, 0xfe, 0xdc, 0x45, - 0xff, 0x01, 0xde, 0xad, - 172, 16, 254, 254, - 192, 168, 0, 1, - } - wireHeaderFromFreeBSD10Kernel = [HeaderLen]byte{ - 0x45, 0x01, 0xef, 0xbe, - 0xca, 0xfe, 0xdc, 0x45, - 0xff, 0x01, 0xde, 0xad, - 172, 16, 254, 254, - 192, 168, 0, 1, - } - wireHeaderToTradBSDKernel = [HeaderLen]byte{ - 0x45, 0x01, 0xef, 0xbe, - 0xca, 0xfe, 0xdc, 0x45, - 0xff, 0x01, 0xde, 0xad, - 172, 16, 254, 254, - 192, 168, 0, 1, - } +type headerTest struct { + wireHeaderFromKernel []byte + wireHeaderToKernel []byte + wireHeaderFromTradBSDKernel []byte + wireHeaderToTradBSDKernel []byte + wireHeaderFromFreeBSD10Kernel []byte + wireHeaderToFreeBSD10Kernel []byte + *Header +} + +var headerLittleEndianTests = []headerTest{ // TODO(mikio): Add platform dependent wire header formats when // we support new platforms. 
+ { + wireHeaderFromKernel: []byte{ + 0x45, 0x01, 0xbe, 0xef, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderToKernel: []byte{ + 0x45, 0x01, 0xbe, 0xef, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderFromTradBSDKernel: []byte{ + 0x45, 0x01, 0xdb, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderToTradBSDKernel: []byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderFromFreeBSD10Kernel: []byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderToFreeBSD10Kernel: []byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + Header: &Header{ + Version: Version, + Len: HeaderLen, + TOS: 1, + TotalLen: 0xbeef, + ID: 0xcafe, + Flags: DontFragment, + FragOff: 1500, + TTL: 255, + Protocol: 1, + Checksum: 0xdead, + Src: net.IPv4(172, 16, 254, 254), + Dst: net.IPv4(192, 168, 0, 1), + }, + }, - testHeader = &Header{ - Version: Version, - Len: HeaderLen, - TOS: 1, - TotalLen: 0xbeef, - ID: 0xcafe, - Flags: DontFragment, - FragOff: 1500, - TTL: 255, - Protocol: 1, - Checksum: 0xdead, - Src: net.IPv4(172, 16, 254, 254), - Dst: net.IPv4(192, 168, 0, 1), - } -) + // with option headers + { + wireHeaderFromKernel: []byte{ + 0x46, 0x01, 0xbe, 0xf3, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderToKernel: []byte{ + 0x46, 0x01, 0xbe, 0xf3, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderFromTradBSDKernel: []byte{ + 0x46, 0x01, 0xdb, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 
172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderToTradBSDKernel: []byte{ + 0x46, 0x01, 0xf3, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderFromFreeBSD10Kernel: []byte{ + 0x46, 0x01, 0xf3, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderToFreeBSD10Kernel: []byte{ + 0x46, 0x01, 0xf3, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + Header: &Header{ + Version: Version, + Len: HeaderLen + 4, + TOS: 1, + TotalLen: 0xbef3, + ID: 0xcafe, + Flags: DontFragment, + FragOff: 1500, + TTL: 255, + Protocol: 1, + Checksum: 0xdead, + Src: net.IPv4(172, 16, 254, 254), + Dst: net.IPv4(192, 168, 0, 1), + Options: []byte{0xff, 0xfe, 0xfe, 0xff}, + }, + }, +} func TestMarshalHeader(t *testing.T) { - b, err := testHeader.Marshal() - if err != nil { - t.Fatal(err) + if socket.NativeEndian != binary.LittleEndian { + t.Skip("no test for non-little endian machine yet") } - var wh []byte - switch runtime.GOOS { - case "darwin", "dragonfly", "netbsd": - wh = wireHeaderToTradBSDKernel[:] - case "freebsd": - if freebsdVersion < 1000000 { - wh = wireHeaderToTradBSDKernel[:] - } else { - wh = wireHeaderFromFreeBSD10Kernel[:] + + for _, tt := range headerLittleEndianTests { + b, err := tt.Header.Marshal() + if err != nil { + t.Fatal(err) + } + var wh []byte + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + wh = tt.wireHeaderToTradBSDKernel + case "freebsd": + switch { + case freebsdVersion < 1000000: + wh = tt.wireHeaderToTradBSDKernel + case 1000000 <= freebsdVersion && freebsdVersion < 1100000: + wh = tt.wireHeaderToFreeBSD10Kernel + default: + wh = tt.wireHeaderToKernel + } + default: + wh = tt.wireHeaderToKernel + } + if !bytes.Equal(b, wh) { + t.Fatalf("got %#v; want %#v", b, wh) } - 
default: - wh = wireHeaderToKernel[:] - } - if !bytes.Equal(b, wh) { - t.Fatalf("got %#v; want %#v", b, wh) } } func TestParseHeader(t *testing.T) { - var wh []byte - switch runtime.GOOS { - case "darwin", "dragonfly", "netbsd": - wh = wireHeaderFromTradBSDKernel[:] - case "freebsd": - if freebsdVersion < 1000000 { - wh = wireHeaderFromTradBSDKernel[:] - } else { - wh = wireHeaderFromFreeBSD10Kernel[:] + if socket.NativeEndian != binary.LittleEndian { + t.Skip("no test for big endian machine yet") + } + + for _, tt := range headerLittleEndianTests { + var wh []byte + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + wh = tt.wireHeaderFromTradBSDKernel + case "freebsd": + switch { + case freebsdVersion < 1000000: + wh = tt.wireHeaderFromTradBSDKernel + case 1000000 <= freebsdVersion && freebsdVersion < 1100000: + wh = tt.wireHeaderFromFreeBSD10Kernel + default: + wh = tt.wireHeaderFromKernel + } + default: + wh = tt.wireHeaderFromKernel + } + h, err := ParseHeader(wh) + if err != nil { + t.Fatal(err) + } + if err := h.Parse(wh); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(h, tt.Header) { + t.Fatalf("got %#v; want %#v", h, tt.Header) + } + s := h.String() + if strings.Contains(s, ",") { + t.Fatalf("should be space-separated values: %s", s) } - default: - wh = wireHeaderFromKernel[:] - } - h, err := ParseHeader(wh) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(h, testHeader) { - t.Fatalf("got %#v; want %#v", h, testHeader) - } - s := h.String() - if strings.Contains(s, ",") { - t.Fatalf("should be space-separated values: %s", s) } } diff --git a/fn/vendor/golang.org/x/net/ipv4/helper.go b/fn/vendor/golang.org/x/net/ipv4/helper.go index 8a7ee9008..a5052e324 100644 --- a/fn/vendor/golang.org/x/net/ipv4/helper.go +++ b/fn/vendor/golang.org/x/net/ipv4/helper.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -18,6 +18,9 @@ var ( errOpNoSupport = errors.New("operation not supported") errNoSuchInterface = errors.New("no such interface") errNoSuchMulticastInterface = errors.New("no such multicast interface") + + // See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. + freebsdVersion uint32 ) func boolint(b bool) int { @@ -40,3 +43,21 @@ func netAddrToIP4(a net.Addr) net.IP { } return nil } + +func opAddr(a net.Addr) net.Addr { + switch a.(type) { + case *net.TCPAddr: + if a == nil { + return nil + } + case *net.UDPAddr: + if a == nil { + return nil + } + case *net.IPAddr: + if a == nil { + return nil + } + } + return a +} diff --git a/fn/vendor/golang.org/x/net/ipv4/helper_stub.go b/fn/vendor/golang.org/x/net/ipv4/helper_stub.go deleted file mode 100644 index dc2120cf2..000000000 --- a/fn/vendor/golang.org/x/net/ipv4/helper_stub.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build nacl plan9 solaris - -package ipv4 - -func (c *genericOpt) sysfd() (int, error) { - return 0, errOpNoSupport -} - -func (c *dgramOpt) sysfd() (int, error) { - return 0, errOpNoSupport -} - -func (c *payloadHandler) sysfd() (int, error) { - return 0, errOpNoSupport -} - -func (c *packetHandler) sysfd() (int, error) { - return 0, errOpNoSupport -} diff --git a/fn/vendor/golang.org/x/net/ipv4/helper_unix.go b/fn/vendor/golang.org/x/net/ipv4/helper_unix.go deleted file mode 100644 index 345ca7dc7..000000000 --- a/fn/vendor/golang.org/x/net/ipv4/helper_unix.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin dragonfly freebsd linux netbsd openbsd - -package ipv4 - -import ( - "net" - "reflect" -) - -func (c *genericOpt) sysfd() (int, error) { - switch p := c.Conn.(type) { - case *net.TCPConn, *net.UDPConn, *net.IPConn: - return sysfd(p) - } - return 0, errInvalidConnType -} - -func (c *dgramOpt) sysfd() (int, error) { - switch p := c.PacketConn.(type) { - case *net.UDPConn, *net.IPConn: - return sysfd(p.(net.Conn)) - } - return 0, errInvalidConnType -} - -func (c *payloadHandler) sysfd() (int, error) { - return sysfd(c.PacketConn.(net.Conn)) -} - -func (c *packetHandler) sysfd() (int, error) { - return sysfd(c.c) -} - -func sysfd(c net.Conn) (int, error) { - cv := reflect.ValueOf(c) - switch ce := cv.Elem(); ce.Kind() { - case reflect.Struct: - netfd := ce.FieldByName("conn").FieldByName("fd") - switch fe := netfd.Elem(); fe.Kind() { - case reflect.Struct: - fd := fe.FieldByName("sysfd") - return int(fd.Int()), nil - } - } - return 0, errInvalidConnType -} diff --git a/fn/vendor/golang.org/x/net/ipv4/helper_windows.go b/fn/vendor/golang.org/x/net/ipv4/helper_windows.go deleted file mode 100644 index 322b2a5e4..000000000 --- a/fn/vendor/golang.org/x/net/ipv4/helper_windows.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ipv4 - -import ( - "net" - "reflect" - "syscall" -) - -func (c *genericOpt) sysfd() (syscall.Handle, error) { - switch p := c.Conn.(type) { - case *net.TCPConn, *net.UDPConn, *net.IPConn: - return sysfd(p) - } - return syscall.InvalidHandle, errInvalidConnType -} - -func (c *dgramOpt) sysfd() (syscall.Handle, error) { - switch p := c.PacketConn.(type) { - case *net.UDPConn, *net.IPConn: - return sysfd(p.(net.Conn)) - } - return syscall.InvalidHandle, errInvalidConnType -} - -func (c *payloadHandler) sysfd() (syscall.Handle, error) { - return sysfd(c.PacketConn.(net.Conn)) -} - -func (c *packetHandler) sysfd() (syscall.Handle, error) { - return sysfd(c.c) -} - -func sysfd(c net.Conn) (syscall.Handle, error) { - cv := reflect.ValueOf(c) - switch ce := cv.Elem(); ce.Kind() { - case reflect.Struct: - netfd := ce.FieldByName("conn").FieldByName("fd") - switch fe := netfd.Elem(); fe.Kind() { - case reflect.Struct: - fd := fe.FieldByName("sysfd") - return syscall.Handle(fd.Uint()), nil - } - } - return syscall.InvalidHandle, errInvalidConnType -} diff --git a/fn/vendor/golang.org/x/net/ipv4/icmp.go b/fn/vendor/golang.org/x/net/ipv4/icmp.go index dbd05cff2..097bea846 100644 --- a/fn/vendor/golang.org/x/net/ipv4/icmp.go +++ b/fn/vendor/golang.org/x/net/ipv4/icmp.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -31,7 +31,7 @@ func (typ ICMPType) Protocol() int { // A router means a node that forwards IP packets not explicitly // addressed to itself, and a host means a node that is not a router. 
type ICMPFilter struct { - sysICMPFilter + icmpFilter } // Accept accepts incoming ICMP packets including the type field value diff --git a/fn/vendor/golang.org/x/net/ipv4/icmp_linux.go b/fn/vendor/golang.org/x/net/ipv4/icmp_linux.go index c91225335..6e1c5c80a 100644 --- a/fn/vendor/golang.org/x/net/ipv4/icmp_linux.go +++ b/fn/vendor/golang.org/x/net/ipv4/icmp_linux.go @@ -4,15 +4,15 @@ package ipv4 -func (f *sysICMPFilter) accept(typ ICMPType) { +func (f *icmpFilter) accept(typ ICMPType) { f.Data &^= 1 << (uint32(typ) & 31) } -func (f *sysICMPFilter) block(typ ICMPType) { +func (f *icmpFilter) block(typ ICMPType) { f.Data |= 1 << (uint32(typ) & 31) } -func (f *sysICMPFilter) setAll(block bool) { +func (f *icmpFilter) setAll(block bool) { if block { f.Data = 1<<32 - 1 } else { @@ -20,6 +20,6 @@ func (f *sysICMPFilter) setAll(block bool) { } } -func (f *sysICMPFilter) willBlock(typ ICMPType) bool { +func (f *icmpFilter) willBlock(typ ICMPType) bool { return f.Data&(1<<(uint32(typ)&31)) != 0 } diff --git a/fn/vendor/golang.org/x/net/ipv4/icmp_stub.go b/fn/vendor/golang.org/x/net/ipv4/icmp_stub.go index 9ee9b6a32..21bb29ab3 100644 --- a/fn/vendor/golang.org/x/net/ipv4/icmp_stub.go +++ b/fn/vendor/golang.org/x/net/ipv4/icmp_stub.go @@ -6,20 +6,20 @@ package ipv4 -const sysSizeofICMPFilter = 0x0 +const sizeofICMPFilter = 0x0 -type sysICMPFilter struct { +type icmpFilter struct { } -func (f *sysICMPFilter) accept(typ ICMPType) { +func (f *icmpFilter) accept(typ ICMPType) { } -func (f *sysICMPFilter) block(typ ICMPType) { +func (f *icmpFilter) block(typ ICMPType) { } -func (f *sysICMPFilter) setAll(block bool) { +func (f *icmpFilter) setAll(block bool) { } -func (f *sysICMPFilter) willBlock(typ ICMPType) bool { +func (f *icmpFilter) willBlock(typ ICMPType) bool { return false } diff --git a/fn/vendor/golang.org/x/net/ipv4/mocktransponder_test.go b/fn/vendor/golang.org/x/net/ipv4/mocktransponder_test.go deleted file mode 100644 index e55aaee91..000000000 --- 
a/fn/vendor/golang.org/x/net/ipv4/mocktransponder_test.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ipv4_test - -import ( - "net" - "testing" -) - -func acceptor(t *testing.T, ln net.Listener, done chan<- bool) { - defer func() { done <- true }() - - c, err := ln.Accept() - if err != nil { - t.Error(err) - return - } - c.Close() -} diff --git a/fn/vendor/golang.org/x/net/ipv4/multicast_test.go b/fn/vendor/golang.org/x/net/ipv4/multicast_test.go index d2bcf8533..bcf49736b 100644 --- a/fn/vendor/golang.org/x/net/ipv4/multicast_test.go +++ b/fn/vendor/golang.org/x/net/ipv4/multicast_test.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -166,7 +166,11 @@ func TestPacketConnReadWriteMulticastICMP(t *testing.T) { if _, err := p.MulticastLoopback(); err != nil { t.Fatal(err) } - cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + cf := ipv4.FlagDst | ipv4.FlagInterface + if runtime.GOOS != "solaris" { + // Solaris never allows to modify ICMP properties. + cf |= ipv4.FlagTTL + } for i, toggle := range []bool{true, false, true} { wb, err := (&icmp.Message{ diff --git a/fn/vendor/golang.org/x/net/ipv4/multicastlistener_test.go b/fn/vendor/golang.org/x/net/ipv4/multicastlistener_test.go index e342bf1d9..e43fbbe08 100644 --- a/fn/vendor/golang.org/x/net/ipv4/multicastlistener_test.go +++ b/fn/vendor/golang.org/x/net/ipv4/multicastlistener_test.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -21,7 +21,7 @@ var udpMultipleGroupListenerTests = []net.Addr{ func TestUDPSinglePacketConnWithMultipleGroupListeners(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if testing.Short() { @@ -61,7 +61,7 @@ func TestUDPSinglePacketConnWithMultipleGroupListeners(t *testing.T) { func TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if testing.Short() { @@ -69,13 +69,16 @@ func TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) { } for _, gaddr := range udpMultipleGroupListenerTests { - c1, err := net.ListenPacket("udp4", "224.0.0.0:1024") // wildcard address with reusable port + c1, err := net.ListenPacket("udp4", "224.0.0.0:0") // wildcard address with reusable port if err != nil { t.Fatal(err) } defer c1.Close() - - c2, err := net.ListenPacket("udp4", "224.0.0.0:1024") // wildcard address with reusable port + _, port, err := net.SplitHostPort(c1.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + c2, err := net.ListenPacket("udp4", net.JoinHostPort("224.0.0.0", port)) // wildcard address with reusable port if err != nil { t.Fatal(err) } @@ -113,7 +116,7 @@ func TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) { func TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if testing.Short() { @@ -131,16 +134,29 @@ func TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { if err != nil { t.Fatal(err) } + port := "0" for i, ifi := range ift { ip, ok := nettest.IsMulticastCapable("ip4", &ifi) if !ok { continue } - c, err := net.ListenPacket("udp4", 
ip.String()+":"+"1024") // unicast address with non-reusable port + c, err := net.ListenPacket("udp4", net.JoinHostPort(ip.String(), port)) // unicast address with non-reusable port if err != nil { - t.Fatal(err) + // The listen may fail when the serivce is + // already in use, but it's fine because the + // purpose of this is not to test the + // bookkeeping of IP control block inside the + // kernel. + t.Log(err) + continue } defer c.Close() + if port == "0" { + _, port, err = net.SplitHostPort(c.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + } p := ipv4.NewPacketConn(c) if err := p.JoinGroup(&ifi, &gaddr); err != nil { t.Fatal(err) @@ -156,7 +172,7 @@ func TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { func TestIPSingleRawConnWithSingleGroupListener(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if testing.Short() { @@ -201,7 +217,7 @@ func TestIPSingleRawConnWithSingleGroupListener(t *testing.T) { func TestIPPerInterfaceSingleRawConnWithSingleGroupListener(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if testing.Short() { diff --git a/fn/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go b/fn/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go index c76dbe4de..f7efac24c 100644 --- a/fn/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go +++ b/fn/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -26,7 +26,7 @@ var packetConnMulticastSocketOptionTests = []struct { func TestPacketConnMulticastSocketOptions(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris": + case "nacl", "plan9": t.Skipf("not supported on %s", runtime.GOOS) } ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) @@ -66,7 +66,7 @@ var rawConnMulticastSocketOptionTests = []struct { func TestRawConnMulticastSocketOptions(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris": + case "nacl", "plan9": t.Skipf("not supported on %s", runtime.GOOS) } if m, ok := nettest.SupportsRawIPSocket(); !ok { diff --git a/fn/vendor/golang.org/x/net/ipv4/packet.go b/fn/vendor/golang.org/x/net/ipv4/packet.go index 09864314e..f00f5b052 100644 --- a/fn/vendor/golang.org/x/net/ipv4/packet.go +++ b/fn/vendor/golang.org/x/net/ipv4/packet.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -7,42 +7,30 @@ package ipv4 import ( "net" "syscall" + + "golang.org/x/net/internal/socket" ) +// BUG(mikio): On Windows, the ReadFrom and WriteTo methods of RawConn +// are not implemented. + // A packetHandler represents the IPv4 datagram handler. type packetHandler struct { - c *net.IPConn + *net.IPConn + *socket.Conn rawOpt } -func (c *packetHandler) ok() bool { return c != nil && c.c != nil } +func (c *packetHandler) ok() bool { return c != nil && c.IPConn != nil && c.Conn != nil } // ReadFrom reads an IPv4 datagram from the endpoint c, copying the -// datagram into b. It returns the received datagram as the IPv4 +// datagram into b. It returns the received datagram as the IPv4 // header h, the payload p and the control message cm. 
func (c *packetHandler) ReadFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { if !c.ok() { return nil, nil, nil, syscall.EINVAL } - oob := newControlMessage(&c.rawOpt) - n, oobn, _, src, err := c.c.ReadMsgIP(b, oob) - if err != nil { - return nil, nil, nil, err - } - var hs []byte - if hs, p, err = slicePacket(b[:n]); err != nil { - return nil, nil, nil, err - } - if h, err = ParseHeader(hs); err != nil { - return nil, nil, nil, err - } - if cm, err = parseControlMessage(oob[:oobn]); err != nil { - return nil, nil, nil, err - } - if src != nil && cm != nil { - cm.Src = src.IP - } - return + return c.readFrom(b) } func slicePacket(b []byte) (h, p []byte, err error) { @@ -54,14 +42,14 @@ func slicePacket(b []byte) (h, p []byte, err error) { } // WriteTo writes an IPv4 datagram through the endpoint c, copying the -// datagram from the IPv4 header h and the payload p. The control +// datagram from the IPv4 header h and the payload p. The control // message cm allows the datagram path and the outgoing interface to be -// specified. Currently only Darwin and Linux support this. The cm +// specified. Currently only Darwin and Linux support this. The cm // may be nil if control of the outgoing datagram is not required. // // The IPv4 header h must contain appropriate fields that include: // -// Version = ipv4.Version +// Version = // Len = // TOS = // TotalLen = @@ -77,21 +65,5 @@ func (c *packetHandler) WriteTo(h *Header, p []byte, cm *ControlMessage) error { if !c.ok() { return syscall.EINVAL } - oob := marshalControlMessage(cm) - wh, err := h.Marshal() - if err != nil { - return err - } - dst := &net.IPAddr{} - if cm != nil { - if ip := cm.Dst.To4(); ip != nil { - dst.IP = ip - } - } - if dst.IP == nil { - dst.IP = h.Dst - } - wh = append(wh, p...) 
- _, _, err = c.c.WriteMsgIP(wh, oob, dst) - return err + return c.writeTo(h, p, cm) } diff --git a/fn/vendor/golang.org/x/net/ipv4/packet_go1_8.go b/fn/vendor/golang.org/x/net/ipv4/packet_go1_8.go new file mode 100644 index 000000000..b47d18683 --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv4/packet_go1_8.go @@ -0,0 +1,56 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package ipv4 + +import "net" + +func (c *packetHandler) readFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { + c.rawOpt.RLock() + oob := NewControlMessage(c.rawOpt.cflags) + c.rawOpt.RUnlock() + n, nn, _, src, err := c.ReadMsgIP(b, oob) + if err != nil { + return nil, nil, nil, err + } + var hs []byte + if hs, p, err = slicePacket(b[:n]); err != nil { + return nil, nil, nil, err + } + if h, err = ParseHeader(hs); err != nil { + return nil, nil, nil, err + } + if nn > 0 { + cm = new(ControlMessage) + if err := cm.Parse(oob[:nn]); err != nil { + return nil, nil, nil, err + } + } + if src != nil && cm != nil { + cm.Src = src.IP + } + return +} + +func (c *packetHandler) writeTo(h *Header, p []byte, cm *ControlMessage) error { + oob := cm.Marshal() + wh, err := h.Marshal() + if err != nil { + return err + } + dst := new(net.IPAddr) + if cm != nil { + if ip := cm.Dst.To4(); ip != nil { + dst.IP = ip + } + } + if dst.IP == nil { + dst.IP = h.Dst + } + wh = append(wh, p...) + _, _, err = c.WriteMsgIP(wh, oob, dst) + return err +} diff --git a/fn/vendor/golang.org/x/net/ipv4/packet_go1_9.go b/fn/vendor/golang.org/x/net/ipv4/packet_go1_9.go new file mode 100644 index 000000000..082c36d73 --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv4/packet_go1_9.go @@ -0,0 +1,67 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.9 + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (c *packetHandler) readFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + if err := c.RecvMsg(&m, 0); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + var hs []byte + if hs, p, err = slicePacket(b[:m.N]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if h, err = ParseHeader(hs); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + } + if src, ok := m.Addr.(*net.IPAddr); ok && cm != nil { + cm.Src = src.IP + } + return +} + +func (c *packetHandler) writeTo(h *Header, p []byte, cm *ControlMessage) error { + m := socket.Message{ + OOB: cm.Marshal(), + } + wh, err := h.Marshal() + if err != nil { + return err + } + m.Buffers = [][]byte{wh, p} + dst := new(net.IPAddr) + if cm != nil { + if ip := cm.Dst.To4(); ip != nil { + dst.IP = ip + } + } + if dst.IP == nil { + dst.IP = h.Dst + } + m.Addr = dst + if err := c.SendMsg(&m, 0); err != nil { + return &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return nil +} diff --git a/fn/vendor/golang.org/x/net/ipv4/payload.go b/fn/vendor/golang.org/x/net/ipv4/payload.go index d7698cbd3..f95f811ac 100644 --- a/fn/vendor/golang.org/x/net/ipv4/payload.go +++ 
b/fn/vendor/golang.org/x/net/ipv4/payload.go @@ -1,15 +1,23 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ipv4 -import "net" +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo +// methods of PacketConn is not implemented. // A payloadHandler represents the IPv4 datagram payload handler. type payloadHandler struct { net.PacketConn + *socket.Conn rawOpt } -func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil } +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil && c.Conn != nil } diff --git a/fn/vendor/golang.org/x/net/ipv4/payload_cmsg.go b/fn/vendor/golang.org/x/net/ipv4/payload_cmsg.go index d358fc3ac..3f06d7606 100644 --- a/fn/vendor/golang.org/x/net/ipv4/payload_cmsg.go +++ b/fn/vendor/golang.org/x/net/ipv4/payload_cmsg.go @@ -1,8 +1,8 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !plan9,!solaris,!windows +// +build !nacl,!plan9,!windows package ipv4 @@ -12,70 +12,25 @@ import ( ) // ReadFrom reads a payload of the received IPv4 datagram, from the -// endpoint c, copying the payload into b. It returns the number of +// endpoint c, copying the payload into b. It returns the number of // bytes copied into b, the control message cm and the source address // src of the received datagram. 
func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { if !c.ok() { return 0, nil, nil, syscall.EINVAL } - oob := newControlMessage(&c.rawOpt) - var oobn int - switch c := c.PacketConn.(type) { - case *net.UDPConn: - if n, oobn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { - return 0, nil, nil, err - } - case *net.IPConn: - if sockOpts[ssoStripHeader].name > 0 { - if n, oobn, _, src, err = c.ReadMsgIP(b, oob); err != nil { - return 0, nil, nil, err - } - } else { - nb := make([]byte, maxHeaderLen+len(b)) - if n, oobn, _, src, err = c.ReadMsgIP(nb, oob); err != nil { - return 0, nil, nil, err - } - hdrlen := int(nb[0]&0x0f) << 2 - copy(b, nb[hdrlen:]) - n -= hdrlen - } - default: - return 0, nil, nil, errInvalidConnType - } - if cm, err = parseControlMessage(oob[:oobn]); err != nil { - return 0, nil, nil, err - } - if cm != nil { - cm.Src = netAddrToIP4(src) - } - return + return c.readFrom(b) } // WriteTo writes a payload of the IPv4 datagram, to the destination -// address dst through the endpoint c, copying the payload from b. It -// returns the number of bytes written. The control message cm allows +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows // the datagram path and the outgoing interface to be specified. -// Currently only Darwin and Linux support this. The cm may be nil if +// Currently only Darwin and Linux support this. The cm may be nil if // control of the outgoing datagram is not required. 
func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { if !c.ok() { return 0, syscall.EINVAL } - oob := marshalControlMessage(cm) - if dst == nil { - return 0, errMissingAddress - } - switch c := c.PacketConn.(type) { - case *net.UDPConn: - n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) - case *net.IPConn: - n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) - default: - return 0, errInvalidConnType - } - if err != nil { - return 0, err - } - return + return c.writeTo(b, cm, dst) } diff --git a/fn/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go b/fn/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go new file mode 100644 index 000000000..d26ccd90c --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go @@ -0,0 +1,59 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 +// +build !nacl,!plan9,!windows + +package ipv4 + +import "net" + +func (c *payloadHandler) readFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + c.rawOpt.RLock() + oob := NewControlMessage(c.rawOpt.cflags) + c.rawOpt.RUnlock() + var nn int + switch c := c.PacketConn.(type) { + case *net.UDPConn: + if n, nn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { + return 0, nil, nil, err + } + case *net.IPConn: + nb := make([]byte, maxHeaderLen+len(b)) + if n, nn, _, src, err = c.ReadMsgIP(nb, oob); err != nil { + return 0, nil, nil, err + } + hdrlen := int(nb[0]&0x0f) << 2 + copy(b, nb[hdrlen:]) + n -= hdrlen + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Err: errInvalidConnType} + } + if nn > 0 { + cm = new(ControlMessage) + if err = cm.Parse(oob[:nn]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + } + if cm != nil { 
+ cm.Src = netAddrToIP4(src) + } + return +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + oob := cm.Marshal() + if dst == nil { + return 0, &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errMissingAddress} + } + switch c := c.PacketConn.(type) { + case *net.UDPConn: + n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) + case *net.IPConn: + n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) + default: + return 0, &net.OpError{Op: "write", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: opAddr(dst), Err: errInvalidConnType} + } + return +} diff --git a/fn/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go b/fn/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go new file mode 100644 index 000000000..2f1931183 --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go @@ -0,0 +1,67 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.9 +// +build !nacl,!plan9,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (c *payloadHandler) readFrom(b []byte) (int, *ControlMessage, net.Addr, error) { + c.rawOpt.RLock() + m := socket.Message{ + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + m.Buffers = [][]byte{b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + h := make([]byte, HeaderLen) + m.Buffers = [][]byte{h, b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + hdrlen := int(h[0]&0x0f) << 2 + if hdrlen > len(h) { + d := hdrlen - len(h) + copy(b, b[d:]) + m.N -= d + } else { + m.N -= hdrlen + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + var cm *ControlMessage + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP4(m.Addr) + } + return m.N, cm, m.Addr, nil +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (int, error) { + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err := c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err +} diff --git a/fn/vendor/golang.org/x/net/ipv4/payload_nocmsg.go b/fn/vendor/golang.org/x/net/ipv4/payload_nocmsg.go index d128c9c2e..3926de70b 
100644 --- a/fn/vendor/golang.org/x/net/ipv4/payload_nocmsg.go +++ b/fn/vendor/golang.org/x/net/ipv4/payload_nocmsg.go @@ -1,8 +1,8 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build plan9 solaris windows +// +build nacl plan9 windows package ipv4 @@ -12,7 +12,7 @@ import ( ) // ReadFrom reads a payload of the received IPv4 datagram, from the -// endpoint c, copying the payload into b. It returns the number of +// endpoint c, copying the payload into b. It returns the number of // bytes copied into b, the control message cm and the source address // src of the received datagram. func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { @@ -26,10 +26,10 @@ func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net. } // WriteTo writes a payload of the IPv4 datagram, to the destination -// address dst through the endpoint c, copying the payload from b. It -// returns the number of bytes written. The control message cm allows +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows // the datagram path and the outgoing interface to be specified. -// Currently only Darwin and Linux support this. The cm may be nil if +// Currently only Darwin and Linux support this. The cm may be nil if // control of the outgoing datagram is not required. func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { if !c.ok() { diff --git a/fn/vendor/golang.org/x/net/ipv4/readwrite_go1_8_test.go b/fn/vendor/golang.org/x/net/ipv4/readwrite_go1_8_test.go new file mode 100644 index 000000000..1cd926e7f --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv4/readwrite_go1_8_test.go @@ -0,0 +1,248 @@ +// Copyright 2012 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package ipv4_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + b.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph, err := (&ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TotalLen: ipv4.HeaderLen + len(payload), + TTL: 1, + Protocol: iana.ProtocolReserved, + Src: net.IPv4(192, 0, 2, 1), + Dst: net.IPv4(192, 0, 2, 254), + }).Marshal() + if err != nil { + b.Fatal(err) + } + greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + bb := make([]byte, 128) + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback); ifi != nil { + cm.IfIndex = ifi.Index + } + + b.Run("UDP", func(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(payload, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(payload, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + }) + b.Run("IP", 
func(b *testing.B) { + switch runtime.GOOS { + case "netbsd": + b.Skip("need to configure gre on netbsd") + case "openbsd": + b.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(datagram, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(datagram, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + }) +} + +func TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph, err := (&ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TotalLen: ipv4.HeaderLen + len(payload), + TTL: 1, + Protocol: iana.ProtocolReserved, + Src: net.IPv4(192, 0, 2, 1), + Dst: net.IPv4(192, 0, 2, 254), + }).Marshal() + if err != nil { + t.Fatal(err) + } + greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) 
+ + t.Run("UDP", func(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr()) + }) + }) + t.Run("IP", func(t *testing.T) { + switch runtime.GOOS { + case "netbsd": + t.Skip("need to configure gre on netbsd") + case "openbsd": + t.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr()) + }) + }) +} + +func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv4.PacketConn, data []byte, dst net.Addr) { + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + cf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + b := make([]byte, 128) + n, cm, _, err := p.ReadFrom(b) + if err != nil { + t.Error(err) + return + } + if !bytes.Equal(b[:n], data) { + t.Errorf("got %#v; want %#v", b[:n], data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + n, err := 
p.WriteTo(data, &cm, dst) + if err != nil { + t.Error(err) + return + } + if n != len(data) { + t.Errorf("got %d; want %d", n, len(data)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + go writer(i%2 != 0) + + } + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Wait() +} diff --git a/fn/vendor/golang.org/x/net/ipv4/readwrite_go1_9_test.go b/fn/vendor/golang.org/x/net/ipv4/readwrite_go1_9_test.go new file mode 100644 index 000000000..365de022a --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv4/readwrite_go1_9_test.go @@ -0,0 +1,388 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package ipv4_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + b.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph, err := (&ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TotalLen: ipv4.HeaderLen + len(payload), + TTL: 1, + Protocol: iana.ProtocolReserved, + Src: net.IPv4(192, 0, 2, 1), + Dst: net.IPv4(192, 0, 2, 254), + }).Marshal() + if err != nil { + b.Fatal(err) + } + greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) 
+ bb := make([]byte, 128) + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback); ifi != nil { + cm.IfIndex = ifi.Index + } + + b.Run("UDP", func(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + wms := []ipv4.Message{ + { + Buffers: [][]byte{payload}, + Addr: dst, + OOB: cm.Marshal(), + }, + } + rms := []ipv4.Message{ + { + Buffers: [][]byte{bb}, + OOB: ipv4.NewControlMessage(cf), + }, + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(payload, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(payload, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("Batch", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteBatch(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := p.ReadBatch(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + }) + b.Run("IP", func(b *testing.B) { + switch runtime.GOOS { + case "netbsd": + b.Skip("need to configure gre on netbsd") + case "openbsd": + b.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + 
wms := []ipv4.Message{ + { + Buffers: [][]byte{datagram}, + Addr: dst, + OOB: cm.Marshal(), + }, + } + rms := []ipv4.Message{ + { + Buffers: [][]byte{bb}, + OOB: ipv4.NewControlMessage(cf), + }, + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(datagram, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(datagram, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("Batch", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteBatch(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := p.ReadBatch(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + }) +} + +func TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph, err := (&ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TotalLen: ipv4.HeaderLen + len(payload), + TTL: 1, + Protocol: iana.ProtocolReserved, + Src: net.IPv4(192, 0, 2, 1), + Dst: net.IPv4(192, 0, 2, 254), + }).Marshal() + if err != nil { + t.Fatal(err) + } + greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) 
+ + t.Run("UDP", func(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), false) + }) + t.Run("Batch", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), true) + }) + }) + t.Run("IP", func(t *testing.T) { + switch runtime.GOOS { + case "netbsd": + t.Skip("need to configure gre on netbsd") + case "openbsd": + t.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), false) + }) + t.Run("Batch", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), true) + }) + }) +} + +func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv4.PacketConn, data []byte, dst net.Addr, batch bool) { + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + cf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + b := make([]byte, 128) + n, cm, _, err := p.ReadFrom(b) + if err != nil { + t.Error(err) + return + } + if !bytes.Equal(b[:n], data) { + t.Errorf("got %#v; want %#v", b[:n], data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } 
+ } + batchReader := func() { + defer wg.Done() + ms := []ipv4.Message{ + { + Buffers: [][]byte{make([]byte, 128)}, + OOB: ipv4.NewControlMessage(cf), + }, + } + n, err := p.ReadBatch(ms, 0) + if err != nil { + t.Error(err) + return + } + if n != len(ms) { + t.Errorf("got %d; want %d", n, len(ms)) + return + } + var cm ipv4.ControlMessage + if err := cm.Parse(ms[0].OOB[:ms[0].NN]); err != nil { + t.Error(err) + return + } + var b []byte + if _, ok := dst.(*net.IPAddr); ok { + var h ipv4.Header + if err := h.Parse(ms[0].Buffers[0][:ms[0].N]); err != nil { + t.Error(err) + return + } + b = ms[0].Buffers[0][h.Len:ms[0].N] + } else { + b = ms[0].Buffers[0][:ms[0].N] + } + if !bytes.Equal(b, data) { + t.Errorf("got %#v; want %#v", b, data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + n, err := p.WriteTo(data, &cm, dst) + if err != nil { + t.Error(err) + return + } + if n != len(data) { + t.Errorf("got %d; want %d", n, len(data)) + return + } + } + batchWriter := func(toggle bool) { + defer wg.Done() + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + ms := []ipv4.Message{ + { + Buffers: [][]byte{data}, + OOB: cm.Marshal(), + Addr: dst, + }, + } + n, err := p.WriteBatch(ms, 0) + if err != nil { + t.Error(err) + return + } + if n != len(ms) { + t.Errorf("got %d; want %d", n, len(ms)) + return + } + if ms[0].N != len(data) { + t.Errorf("got %d; want %d", ms[0].N, len(data)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + if batch { + go batchReader() + } else { + go 
reader() + } + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + if batch { + go batchWriter(i%2 != 0) + } else { + go writer(i%2 != 0) + } + + } + wg.Add(N) + for i := 0; i < N; i++ { + if batch { + go batchReader() + } else { + go reader() + } + } + wg.Wait() +} diff --git a/fn/vendor/golang.org/x/net/ipv4/readwrite_test.go b/fn/vendor/golang.org/x/net/ipv4/readwrite_test.go index 247d06c1a..3896a8ae4 100644 --- a/fn/vendor/golang.org/x/net/ipv4/readwrite_test.go +++ b/fn/vendor/golang.org/x/net/ipv4/readwrite_test.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -16,86 +16,56 @@ import ( "golang.org/x/net/ipv4" ) -func benchmarkUDPListener() (net.PacketConn, net.Addr, error) { - c, err := net.ListenPacket("udp4", "127.0.0.1:0") +func BenchmarkReadWriteUnicast(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp4") if err != nil { - return nil, nil, err - } - dst, err := net.ResolveUDPAddr("udp4", c.LocalAddr().String()) - if err != nil { - c.Close() - return nil, nil, err - } - return c, dst, nil -} - -func BenchmarkReadWriteNetUDP(b *testing.B) { - c, dst, err := benchmarkUDPListener() - if err != nil { - b.Fatal(err) + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) } defer c.Close() + dst := c.LocalAddr() wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128) - b.ResetTimer() - for i := 0; i < b.N; i++ { - benchmarkReadWriteNetUDP(b, c, wb, rb, dst) - } -} -func benchmarkReadWriteNetUDP(b *testing.B, c net.PacketConn, wb, rb []byte, dst net.Addr) { - if _, err := c.WriteTo(wb, dst); err != nil { - b.Fatal(err) - } - if _, _, err := c.ReadFrom(rb); err != nil { - b.Fatal(err) - } -} + b.Run("NetUDP", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(wb, dst); err != nil { + b.Fatal(err) + } + if _, _, err := 
c.ReadFrom(rb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("IPv4UDP", func(b *testing.B) { + p := ipv4.NewPacketConn(c) + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + cm := ipv4.ControlMessage{TTL: 1} + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi != nil { + cm.IfIndex = ifi.Index + } -func BenchmarkReadWriteIPv4UDP(b *testing.B) { - c, dst, err := benchmarkUDPListener() - if err != nil { - b.Fatal(err) - } - defer c.Close() - - p := ipv4.NewPacketConn(c) - defer p.Close() - cf := ipv4.FlagTTL | ipv4.FlagInterface - if err := p.SetControlMessage(cf, true); err != nil { - b.Fatal(err) - } - ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) - - wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128) - b.ResetTimer() - for i := 0; i < b.N; i++ { - benchmarkReadWriteIPv4UDP(b, p, wb, rb, dst, ifi) - } -} - -func benchmarkReadWriteIPv4UDP(b *testing.B, p *ipv4.PacketConn, wb, rb []byte, dst net.Addr, ifi *net.Interface) { - cm := ipv4.ControlMessage{TTL: 1} - if ifi != nil { - cm.IfIndex = ifi.Index - } - if n, err := p.WriteTo(wb, &cm, dst); err != nil { - b.Fatal(err) - } else if n != len(wb) { - b.Fatalf("got %v; want %v", n, len(wb)) - } - if _, _, _, err := p.ReadFrom(rb); err != nil { - b.Fatal(err) - } + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(wb, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(rb); err != nil { + b.Fatal(err) + } + } + }) } func TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } - c, err := net.ListenPacket("udp4", "127.0.0.1:0") + c, err := nettest.NewLocalPacketListener("udp4") if err != nil { t.Fatal(err) } @@ -103,11 +73,7 @@ func TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) { p := ipv4.NewPacketConn(c) defer 
p.Close() - dst, err := net.ResolveUDPAddr("udp4", c.LocalAddr().String()) - if err != nil { - t.Fatal(err) - } - + dst := c.LocalAddr() ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) cf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface wb := []byte("HELLO-R-U-THERE") @@ -152,7 +118,7 @@ func TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) { t.Error(err) return } else if n != len(wb) { - t.Errorf("short write: %v", n) + t.Errorf("got %d; want %d", n, len(wb)) return } } diff --git a/fn/vendor/golang.org/x/net/ipv4/sockopt.go b/fn/vendor/golang.org/x/net/ipv4/sockopt.go index ace37d30f..22e90c039 100644 --- a/fn/vendor/golang.org/x/net/ipv4/sockopt.go +++ b/fn/vendor/golang.org/x/net/ipv4/sockopt.go @@ -4,6 +4,8 @@ package ipv4 +import "golang.org/x/net/internal/socket" + // Sticky socket options const ( ssoTOS = iota // header field for unicast packet @@ -24,16 +26,12 @@ const ( ssoLeaveSourceGroup // source-specific multicast ssoBlockSourceGroup // any-source or source-specific multicast ssoUnblockSourceGroup // any-source or source-specific multicast - ssoMax + ssoAttachFilter // attach BPF for filtering inbound traffic ) // Sticky socket option value types const ( - ssoTypeByte = iota + 1 - ssoTypeInt - ssoTypeInterface - ssoTypeICMPFilter - ssoTypeIPMreq + ssoTypeIPMreq = iota + 1 ssoTypeIPMreqn ssoTypeGroupReq ssoTypeGroupSourceReq @@ -41,6 +39,6 @@ const ( // A sockOpt represents a binding for sticky socket option. 
type sockOpt struct { - name int // option name, must be equal or greater than 1 - typ int // option value type, must be equal or greater than 1 + socket.Option + typ int // hint for option value type; optional } diff --git a/fn/vendor/golang.org/x/net/ipv4/sockopt_asmreq_stub.go b/fn/vendor/golang.org/x/net/ipv4/sockopt_asmreq_stub.go deleted file mode 100644 index 45551528b..000000000 --- a/fn/vendor/golang.org/x/net/ipv4/sockopt_asmreq_stub.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!windows - -package ipv4 - -import "net" - -func setsockoptIPMreq(fd, name int, ifi *net.Interface, grp net.IP) error { - return errOpNoSupport -} - -func getsockoptInterface(fd, name int) (*net.Interface, error) { - return nil, errOpNoSupport -} - -func setsockoptInterface(fd, name int, ifi *net.Interface) error { - return errOpNoSupport -} diff --git a/fn/vendor/golang.org/x/net/ipv4/sockopt_asmreq_unix.go b/fn/vendor/golang.org/x/net/ipv4/sockopt_asmreq_unix.go deleted file mode 100644 index fefa901e6..000000000 --- a/fn/vendor/golang.org/x/net/ipv4/sockopt_asmreq_unix.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin dragonfly freebsd netbsd openbsd - -package ipv4 - -import ( - "net" - "os" - "unsafe" - - "golang.org/x/net/internal/iana" -) - -func setsockoptIPMreq(fd, name int, ifi *net.Interface, grp net.IP) error { - mreq := sysIPMreq{Multiaddr: [4]byte{grp[0], grp[1], grp[2], grp[3]}} - if err := setIPMreqInterface(&mreq, ifi); err != nil { - return err - } - return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&mreq), sysSizeofIPMreq)) -} - -func getsockoptInterface(fd, name int) (*net.Interface, error) { - var b [4]byte - l := sysSockoptLen(4) - if err := getsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&b[0]), &l); err != nil { - return nil, os.NewSyscallError("getsockopt", err) - } - ifi, err := netIP4ToInterface(net.IPv4(b[0], b[1], b[2], b[3])) - if err != nil { - return nil, err - } - return ifi, nil -} - -func setsockoptInterface(fd, name int, ifi *net.Interface) error { - ip, err := netInterfaceToIP4(ifi) - if err != nil { - return err - } - var b [4]byte - copy(b[:], ip) - return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&b[0]), sysSockoptLen(4))) -} diff --git a/fn/vendor/golang.org/x/net/ipv4/sockopt_asmreq_windows.go b/fn/vendor/golang.org/x/net/ipv4/sockopt_asmreq_windows.go deleted file mode 100644 index 431930df7..000000000 --- a/fn/vendor/golang.org/x/net/ipv4/sockopt_asmreq_windows.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ipv4 - -import ( - "net" - "os" - "syscall" - "unsafe" - - "golang.org/x/net/internal/iana" -) - -func setsockoptIPMreq(fd syscall.Handle, name int, ifi *net.Interface, grp net.IP) error { - mreq := sysIPMreq{Multiaddr: [4]byte{grp[0], grp[1], grp[2], grp[3]}} - if err := setIPMreqInterface(&mreq, ifi); err != nil { - return err - } - return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, iana.ProtocolIP, int32(name), (*byte)(unsafe.Pointer(&mreq)), int32(sysSizeofIPMreq))) -} - -func getsockoptInterface(fd syscall.Handle, name int) (*net.Interface, error) { - var b [4]byte - l := int32(4) - if err := syscall.Getsockopt(fd, iana.ProtocolIP, int32(name), (*byte)(unsafe.Pointer(&b[0])), &l); err != nil { - return nil, os.NewSyscallError("getsockopt", err) - } - ifi, err := netIP4ToInterface(net.IPv4(b[0], b[1], b[2], b[3])) - if err != nil { - return nil, err - } - return ifi, nil -} - -func setsockoptInterface(fd syscall.Handle, name int, ifi *net.Interface) error { - ip, err := netInterfaceToIP4(ifi) - if err != nil { - return err - } - var b [4]byte - copy(b[:], ip) - return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, iana.ProtocolIP, int32(name), (*byte)(unsafe.Pointer(&b[0])), 4)) -} diff --git a/fn/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_stub.go b/fn/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_stub.go deleted file mode 100644 index 332f403e8..000000000 --- a/fn/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_stub.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !darwin,!freebsd,!linux,!windows - -package ipv4 - -import "net" - -func getsockoptIPMreqn(fd, name int) (*net.Interface, error) { - return nil, errOpNoSupport -} - -func setsockoptIPMreqn(fd, name int, ifi *net.Interface, grp net.IP) error { - return errOpNoSupport -} diff --git a/fn/vendor/golang.org/x/net/ipv4/sockopt_posix.go b/fn/vendor/golang.org/x/net/ipv4/sockopt_posix.go new file mode 100644 index 000000000..e96955bc1 --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv4/sockopt_posix.go @@ -0,0 +1,71 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + switch so.typ { + case ssoTypeIPMreqn: + return so.getIPMreqn(c) + default: + return so.getMulticastIf(c) + } +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + switch so.typ { + case ssoTypeIPMreqn: + return so.setIPMreqn(c, ifi, nil) + default: + return so.setMulticastIf(c, ifi) + } +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, err + } + if n != sizeofICMPFilter { + return nil, errOpNoSupport + } + return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + b := (*[sizeofICMPFilter]byte)(unsafe.Pointer(f))[:sizeofICMPFilter] + return so.Set(c, b) +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + switch so.typ { + case ssoTypeIPMreq: + return so.setIPMreq(c, ifi, grp) + case ssoTypeIPMreqn: + return so.setIPMreqn(c, ifi, grp) + case ssoTypeGroupReq: 
+ return so.setGroupReq(c, ifi, grp) + default: + return errOpNoSupport + } +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return so.setGroupSourceReq(c, ifi, grp, src) +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return so.setAttachFilter(c, f) +} diff --git a/fn/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_stub.go b/fn/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_stub.go deleted file mode 100644 index 854652447..000000000 --- a/fn/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_stub.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !darwin,!freebsd,!linux - -package ipv4 - -import "net" - -func setsockoptGroupReq(fd, name int, ifi *net.Interface, grp net.IP) error { - return errOpNoSupport -} - -func setsockoptGroupSourceReq(fd, name int, ifi *net.Interface, grp, src net.IP) error { - return errOpNoSupport -} diff --git a/fn/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go b/fn/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go deleted file mode 100644 index 6f647bc58..000000000 --- a/fn/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin freebsd linux - -package ipv4 - -import ( - "net" - "os" - "unsafe" - - "golang.org/x/net/internal/iana" -) - -var freebsd32o64 bool - -func setsockoptGroupReq(fd, name int, ifi *net.Interface, grp net.IP) error { - var gr sysGroupReq - if ifi != nil { - gr.Interface = uint32(ifi.Index) - } - gr.setGroup(grp) - var p unsafe.Pointer - var l sysSockoptLen - if freebsd32o64 { - var d [sysSizeofGroupReq + 4]byte - s := (*[sysSizeofGroupReq]byte)(unsafe.Pointer(&gr)) - copy(d[:4], s[:4]) - copy(d[8:], s[4:]) - p = unsafe.Pointer(&d[0]) - l = sysSizeofGroupReq + 4 - } else { - p = unsafe.Pointer(&gr) - l = sysSizeofGroupReq - } - return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, p, l)) -} - -func setsockoptGroupSourceReq(fd, name int, ifi *net.Interface, grp, src net.IP) error { - var gsr sysGroupSourceReq - if ifi != nil { - gsr.Interface = uint32(ifi.Index) - } - gsr.setSourceGroup(grp, src) - var p unsafe.Pointer - var l sysSockoptLen - if freebsd32o64 { - var d [sysSizeofGroupSourceReq + 4]byte - s := (*[sysSizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) - copy(d[:4], s[:4]) - copy(d[8:], s[4:]) - p = unsafe.Pointer(&d[0]) - l = sysSizeofGroupSourceReq + 4 - } else { - p = unsafe.Pointer(&gsr) - l = sysSizeofGroupSourceReq - } - return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, p, l)) -} diff --git a/fn/vendor/golang.org/x/net/ipv4/sockopt_stub.go b/fn/vendor/golang.org/x/net/ipv4/sockopt_stub.go index 9d19f5dfe..23249b782 100644 --- a/fn/vendor/golang.org/x/net/ipv4/sockopt_stub.go +++ b/fn/vendor/golang.org/x/net/ipv4/sockopt_stub.go @@ -1,11 +1,42 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build nacl plan9 solaris +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv4 -func setInt(fd int, opt *sockOpt, v int) error { +import ( + "net" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + return errOpNoSupport +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + return errOpNoSupport +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { return errOpNoSupport } diff --git a/fn/vendor/golang.org/x/net/ipv4/sockopt_unix.go b/fn/vendor/golang.org/x/net/ipv4/sockopt_unix.go deleted file mode 100644 index 50cdbd81e..000000000 --- a/fn/vendor/golang.org/x/net/ipv4/sockopt_unix.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin dragonfly freebsd linux netbsd openbsd - -package ipv4 - -import ( - "net" - "os" - "unsafe" - - "golang.org/x/net/internal/iana" -) - -func getInt(fd int, opt *sockOpt) (int, error) { - if opt.name < 1 || (opt.typ != ssoTypeByte && opt.typ != ssoTypeInt) { - return 0, errOpNoSupport - } - var i int32 - var b byte - p := unsafe.Pointer(&i) - l := sysSockoptLen(4) - if opt.typ == ssoTypeByte { - p = unsafe.Pointer(&b) - l = sysSockoptLen(1) - } - if err := getsockopt(fd, iana.ProtocolIP, opt.name, p, &l); err != nil { - return 0, os.NewSyscallError("getsockopt", err) - } - if opt.typ == ssoTypeByte { - return int(b), nil - } - return int(i), nil -} - -func setInt(fd int, opt *sockOpt, v int) error { - if opt.name < 1 || (opt.typ != ssoTypeByte && opt.typ != ssoTypeInt) { - return errOpNoSupport - } - i := int32(v) - var b byte - p := unsafe.Pointer(&i) - l := sysSockoptLen(4) - if opt.typ == ssoTypeByte { - b = byte(v) - p = unsafe.Pointer(&b) - l = sysSockoptLen(1) - } - return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, opt.name, p, l)) -} - -func getInterface(fd int, opt *sockOpt) (*net.Interface, error) { - if opt.name < 1 { - return nil, errOpNoSupport - } - switch opt.typ { - case ssoTypeInterface: - return getsockoptInterface(fd, opt.name) - case ssoTypeIPMreqn: - return getsockoptIPMreqn(fd, opt.name) - default: - return nil, errOpNoSupport - } -} - -func setInterface(fd int, opt *sockOpt, ifi *net.Interface) error { - if opt.name < 1 { - return errOpNoSupport - } - switch opt.typ { - case ssoTypeInterface: - return setsockoptInterface(fd, opt.name, ifi) - case ssoTypeIPMreqn: - return setsockoptIPMreqn(fd, opt.name, ifi, nil) - default: - return errOpNoSupport - } -} - -func getICMPFilter(fd int, opt *sockOpt) (*ICMPFilter, error) { - if opt.name < 1 || opt.typ != ssoTypeICMPFilter { - return nil, errOpNoSupport - } - var f ICMPFilter - l := sysSockoptLen(sysSizeofICMPFilter) - if err := getsockopt(fd, 
iana.ProtocolReserved, opt.name, unsafe.Pointer(&f.sysICMPFilter), &l); err != nil { - return nil, os.NewSyscallError("getsockopt", err) - } - return &f, nil -} - -func setICMPFilter(fd int, opt *sockOpt, f *ICMPFilter) error { - if opt.name < 1 || opt.typ != ssoTypeICMPFilter { - return errOpNoSupport - } - return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolReserved, opt.name, unsafe.Pointer(&f.sysICMPFilter), sysSizeofICMPFilter)) -} - -func setGroup(fd int, opt *sockOpt, ifi *net.Interface, grp net.IP) error { - if opt.name < 1 { - return errOpNoSupport - } - switch opt.typ { - case ssoTypeIPMreq: - return setsockoptIPMreq(fd, opt.name, ifi, grp) - case ssoTypeIPMreqn: - return setsockoptIPMreqn(fd, opt.name, ifi, grp) - case ssoTypeGroupReq: - return setsockoptGroupReq(fd, opt.name, ifi, grp) - default: - return errOpNoSupport - } -} - -func setSourceGroup(fd int, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { - if opt.name < 1 || opt.typ != ssoTypeGroupSourceReq { - return errOpNoSupport - } - return setsockoptGroupSourceReq(fd, opt.name, ifi, grp, src) -} diff --git a/fn/vendor/golang.org/x/net/ipv4/sockopt_windows.go b/fn/vendor/golang.org/x/net/ipv4/sockopt_windows.go deleted file mode 100644 index c4c2441ec..000000000 --- a/fn/vendor/golang.org/x/net/ipv4/sockopt_windows.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ipv4 - -import ( - "net" - "os" - "syscall" - "unsafe" - - "golang.org/x/net/internal/iana" -) - -func getInt(fd syscall.Handle, opt *sockOpt) (int, error) { - if opt.name < 1 || opt.typ != ssoTypeInt { - return 0, errOpNoSupport - } - var i int32 - l := int32(4) - if err := syscall.Getsockopt(fd, iana.ProtocolIP, int32(opt.name), (*byte)(unsafe.Pointer(&i)), &l); err != nil { - return 0, os.NewSyscallError("getsockopt", err) - } - return int(i), nil -} - -func setInt(fd syscall.Handle, opt *sockOpt, v int) error { - if opt.name < 1 || opt.typ != ssoTypeInt { - return errOpNoSupport - } - i := int32(v) - return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, iana.ProtocolIP, int32(opt.name), (*byte)(unsafe.Pointer(&i)), 4)) -} - -func getInterface(fd syscall.Handle, opt *sockOpt) (*net.Interface, error) { - if opt.name < 1 || opt.typ != ssoTypeInterface { - return nil, errOpNoSupport - } - return getsockoptInterface(fd, opt.name) -} - -func setInterface(fd syscall.Handle, opt *sockOpt, ifi *net.Interface) error { - if opt.name < 1 || opt.typ != ssoTypeInterface { - return errOpNoSupport - } - return setsockoptInterface(fd, opt.name, ifi) -} - -func getICMPFilter(fd syscall.Handle, opt *sockOpt) (*ICMPFilter, error) { - return nil, errOpNoSupport -} - -func setICMPFilter(fd syscall.Handle, opt *sockOpt, f *ICMPFilter) error { - return errOpNoSupport -} - -func setGroup(fd syscall.Handle, opt *sockOpt, ifi *net.Interface, grp net.IP) error { - if opt.name < 1 || opt.typ != ssoTypeIPMreq { - return errOpNoSupport - } - return setsockoptIPMreq(fd, opt.name, ifi, grp) -} - -func setSourceGroup(fd syscall.Handle, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { - // TODO(mikio): implement this - return errOpNoSupport -} diff --git a/fn/vendor/golang.org/x/net/ipv4/sockopt_asmreq.go b/fn/vendor/golang.org/x/net/ipv4/sys_asmreq.go similarity index 57% rename from fn/vendor/golang.org/x/net/ipv4/sockopt_asmreq.go rename to 
fn/vendor/golang.org/x/net/ipv4/sys_asmreq.go index 4a6aa78ef..0388cba00 100644 --- a/fn/vendor/golang.org/x/net/ipv4/sockopt_asmreq.go +++ b/fn/vendor/golang.org/x/net/ipv4/sys_asmreq.go @@ -1,14 +1,50 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd netbsd openbsd windows +// +build darwin dragonfly freebsd netbsd openbsd solaris windows package ipv4 -import "net" +import ( + "net" + "unsafe" -func setIPMreqInterface(mreq *sysIPMreq, ifi *net.Interface) error { + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + mreq := ipMreq{Multiaddr: [4]byte{grp[0], grp[1], grp[2], grp[3]}} + if err := setIPMreqInterface(&mreq, ifi); err != nil { + return err + } + b := (*[sizeofIPMreq]byte)(unsafe.Pointer(&mreq))[:sizeofIPMreq] + return so.Set(c, b) +} + +func (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) { + var b [4]byte + if _, err := so.Get(c, b[:]); err != nil { + return nil, err + } + ifi, err := netIP4ToInterface(net.IPv4(b[0], b[1], b[2], b[3])) + if err != nil { + return nil, err + } + return ifi, nil +} + +func (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error { + ip, err := netInterfaceToIP4(ifi) + if err != nil { + return err + } + var b [4]byte + copy(b[:], ip) + return so.Set(c, b[:]) +} + +func setIPMreqInterface(mreq *ipMreq, ifi *net.Interface) error { if ifi == nil { return nil } diff --git a/fn/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go b/fn/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go new file mode 100644 index 000000000..f3919208b --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error { + return errOpNoSupport +} diff --git a/fn/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go b/fn/vendor/golang.org/x/net/ipv4/sys_asmreqn.go similarity index 50% rename from fn/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go rename to fn/vendor/golang.org/x/net/ipv4/sys_asmreqn.go index 92c8e34cf..1f24f69f3 100644 --- a/fn/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go +++ b/fn/vendor/golang.org/x/net/ipv4/sys_asmreqn.go @@ -8,18 +8,17 @@ package ipv4 import ( "net" - "os" "unsafe" - "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) -func getsockoptIPMreqn(fd, name int) (*net.Interface, error) { - var mreqn sysIPMreqn - l := sysSockoptLen(sysSizeofIPMreqn) - if err := getsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&mreqn), &l); err != nil { - return nil, os.NewSyscallError("getsockopt", err) +func (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) { + b := make([]byte, so.Len) + if _, err := so.Get(c, b); err != nil { + return nil, err } + mreqn := (*ipMreqn)(unsafe.Pointer(&b[0])) if mreqn.Ifindex == 0 { return nil, nil } @@ -30,13 +29,14 @@ func getsockoptIPMreqn(fd, name int) (*net.Interface, error) { return ifi, nil } -func setsockoptIPMreqn(fd, name int, ifi *net.Interface, grp net.IP) error { - var mreqn sysIPMreqn +func (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var mreqn ipMreqn if ifi != nil { 
mreqn.Ifindex = int32(ifi.Index) } if grp != nil { mreqn.Multiaddr = [4]byte{grp[0], grp[1], grp[2], grp[3]} } - return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&mreqn), sysSizeofIPMreqn)) + b := (*[sizeofIPMreqn]byte)(unsafe.Pointer(&mreqn))[:sizeofIPMreqn] + return so.Set(c, b) } diff --git a/fn/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_stub.go b/fn/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go similarity index 51% rename from fn/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_stub.go rename to fn/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go index 7732e49f8..0711d3d78 100644 --- a/fn/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_stub.go +++ b/fn/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go @@ -4,14 +4,18 @@ // +build !darwin,!freebsd,!linux -package ipv6 +package ipv4 -import "net" +import ( + "net" -func setsockoptGroupReq(fd int, opt *sockOpt, ifi *net.Interface, grp net.IP) error { - return errOpNoSupport + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport } -func setsockoptGroupSourceReq(fd int, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { +func (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error { return errOpNoSupport } diff --git a/fn/vendor/golang.org/x/net/ipv4/sys_bpf.go b/fn/vendor/golang.org/x/net/ipv4/sys_bpf.go new file mode 100644 index 000000000..9f30b7308 --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv4/sys_bpf.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux + +package ipv4 + +import ( + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + prog := sockFProg{ + Len: uint16(len(f)), + Filter: (*sockFilter)(unsafe.Pointer(&f[0])), + } + b := (*[sizeofSockFprog]byte)(unsafe.Pointer(&prog))[:sizeofSockFprog] + return so.Set(c, b) +} diff --git a/fn/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go b/fn/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go new file mode 100644 index 000000000..9a2132093 --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv4 + +import ( + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/fn/vendor/golang.org/x/net/ipv4/sys_bsd.go b/fn/vendor/golang.org/x/net/ipv4/sys_bsd.go index a669a4403..58256dd9d 100644 --- a/fn/vendor/golang.org/x/net/ipv4/sys_bsd.go +++ b/fn/vendor/golang.org/x/net/ipv4/sys_bsd.go @@ -2,16 +2,17 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build dragonfly netbsd +// +build netbsd openbsd package ipv4 import ( "net" "syscall" -) -type sysSockoptLen int32 + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) var ( ctlOpts = [ctlMax]ctlOpt{ @@ -20,17 +21,17 @@ var ( ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, } - sockOpts = [ssoMax]sockOpt{ - ssoTOS: {sysIP_TOS, ssoTypeInt}, - ssoTTL: {sysIP_TTL, ssoTypeInt}, - ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeByte}, - ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface}, - ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt}, - ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt}, - ssoReceiveDst: {sysIP_RECVDSTADDR, ssoTypeInt}, - ssoReceiveInterface: {sysIP_RECVIF, ssoTypeInt}, - ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt}, - ssoJoinGroup: {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq}, - ssoLeaveGroup: {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq}, + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: 
{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, } ) diff --git a/fn/vendor/golang.org/x/net/ipv4/sys_darwin.go b/fn/vendor/golang.org/x/net/ipv4/sys_darwin.go index 3f3473481..e8fb19169 100644 --- a/fn/vendor/golang.org/x/net/ipv4/sys_darwin.go +++ b/fn/vendor/golang.org/x/net/ipv4/sys_darwin.go @@ -6,11 +6,14 @@ package ipv4 import ( "net" + "strconv" + "strings" "syscall" "unsafe" -) -type sysSockoptLen int32 + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) var ( ctlOpts = [ctlMax]ctlOpt{ @@ -19,80 +22,72 @@ var ( ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, } - sockOpts = [ssoMax]sockOpt{ - ssoTOS: {sysIP_TOS, ssoTypeInt}, - ssoTTL: {sysIP_TTL, ssoTypeInt}, - ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeByte}, - ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface}, - ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt}, - ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt}, - ssoReceiveDst: {sysIP_RECVDSTADDR, ssoTypeInt}, - ssoReceiveInterface: {sysIP_RECVIF, ssoTypeInt}, - ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt}, - ssoStripHeader: {sysIP_STRIPHDR, ssoTypeInt}, - ssoJoinGroup: {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq}, - ssoLeaveGroup: {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq}, + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: 
socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoStripHeader: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_STRIPHDR, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, } ) func init() { // Seems like kern.osreldate is veiled on latest OS X. We use // kern.osrelease instead. - osver, err := syscall.Sysctl("kern.osrelease") + s, err := syscall.Sysctl("kern.osrelease") if err != nil { return } - var i int - for i = range osver { - if osver[i] == '.' { - break - } + ss := strings.Split(s, ".") + if len(ss) == 0 { + return } // The IP_PKTINFO and protocol-independent multicast API were - // introduced in OS X 10.7 (Darwin 11.0.0). But it looks like - // those features require OS X 10.8 (Darwin 12.0.0) and above. + // introduced in OS X 10.7 (Darwin 11). But it looks like + // those features require OS X 10.8 (Darwin 12) or above. // See http://support.apple.com/kb/HT1633. 
- if i > 2 || i == 2 && osver[0] >= '1' && osver[1] >= '2' { - ctlOpts[ctlPacketInfo].name = sysIP_PKTINFO - ctlOpts[ctlPacketInfo].length = sysSizeofInetPktinfo - ctlOpts[ctlPacketInfo].marshal = marshalPacketInfo - ctlOpts[ctlPacketInfo].parse = parsePacketInfo - sockOpts[ssoPacketInfo].name = sysIP_RECVPKTINFO - sockOpts[ssoPacketInfo].typ = ssoTypeInt - sockOpts[ssoMulticastInterface].typ = ssoTypeIPMreqn - sockOpts[ssoJoinGroup].name = sysMCAST_JOIN_GROUP - sockOpts[ssoJoinGroup].typ = ssoTypeGroupReq - sockOpts[ssoLeaveGroup].name = sysMCAST_LEAVE_GROUP - sockOpts[ssoLeaveGroup].typ = ssoTypeGroupReq - sockOpts[ssoJoinSourceGroup].name = sysMCAST_JOIN_SOURCE_GROUP - sockOpts[ssoJoinSourceGroup].typ = ssoTypeGroupSourceReq - sockOpts[ssoLeaveSourceGroup].name = sysMCAST_LEAVE_SOURCE_GROUP - sockOpts[ssoLeaveSourceGroup].typ = ssoTypeGroupSourceReq - sockOpts[ssoBlockSourceGroup].name = sysMCAST_BLOCK_SOURCE - sockOpts[ssoBlockSourceGroup].typ = ssoTypeGroupSourceReq - sockOpts[ssoUnblockSourceGroup].name = sysMCAST_UNBLOCK_SOURCE - sockOpts[ssoUnblockSourceGroup].typ = ssoTypeGroupSourceReq + if mjver, err := strconv.Atoi(ss[0]); err != nil || mjver < 12 { + return } + ctlOpts[ctlPacketInfo].name = sysIP_PKTINFO + ctlOpts[ctlPacketInfo].length = sizeofInetPktinfo + ctlOpts[ctlPacketInfo].marshal = marshalPacketInfo + ctlOpts[ctlPacketInfo].parse = parsePacketInfo + sockOpts[ssoPacketInfo] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVPKTINFO, Len: 4}} + sockOpts[ssoMulticastInterface] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn} + sockOpts[ssoJoinGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoLeaveGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + 
sockOpts[ssoJoinSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoLeaveSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoBlockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoUnblockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} } -func (pi *sysInetPktinfo) setIfindex(i int) { +func (pi *inetPktinfo) setIfindex(i int) { pi.Ifindex = uint32(i) } -func (gr *sysGroupReq) setGroup(grp net.IP) { - sa := (*sysSockaddrInet)(unsafe.Pointer(&gr.Pad_cgo_0[0])) - sa.Len = sysSizeofSockaddrInet +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet sa.Family = syscall.AF_INET copy(sa.Addr[:], grp) } -func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) { - sa := (*sysSockaddrInet)(unsafe.Pointer(&gsr.Pad_cgo_0[0])) - sa.Len = sysSizeofSockaddrInet +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = sizeofSockaddrInet sa.Family = syscall.AF_INET copy(sa.Addr[:], grp) - sa = (*sysSockaddrInet)(unsafe.Pointer(&gsr.Pad_cgo_1[0])) - sa.Len = sysSizeofSockaddrInet + sa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132)) + sa.Len = sizeofSockaddrInet sa.Family = syscall.AF_INET copy(sa.Addr[:], src) } diff --git a/fn/vendor/golang.org/x/net/ipv4/sys_dragonfly.go b/fn/vendor/golang.org/x/net/ipv4/sys_dragonfly.go new file mode 100644 index 000000000..859764f33 --- /dev/null +++ 
b/fn/vendor/golang.org/x/net/ipv4/sys_dragonfly.go @@ -0,0 +1,35 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) diff --git a/fn/vendor/golang.org/x/net/ipv4/sys_freebsd.go b/fn/vendor/golang.org/x/net/ipv4/sys_freebsd.go index 09ef49107..b80032454 100644 --- a/fn/vendor/golang.org/x/net/ipv4/sys_freebsd.go +++ 
b/fn/vendor/golang.org/x/net/ipv4/sys_freebsd.go @@ -10,9 +10,10 @@ import ( "strings" "syscall" "unsafe" -) -type sysSockoptLen int32 + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) var ( ctlOpts = [ctlMax]ctlOpt{ @@ -21,29 +22,29 @@ var ( ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, } - sockOpts = [ssoMax]sockOpt{ - ssoTOS: {sysIP_TOS, ssoTypeInt}, - ssoTTL: {sysIP_TTL, ssoTypeInt}, - ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeByte}, - ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface}, - ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt}, - ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt}, - ssoReceiveDst: {sysIP_RECVDSTADDR, ssoTypeInt}, - ssoReceiveInterface: {sysIP_RECVIF, ssoTypeInt}, - ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt}, - ssoJoinGroup: {sysMCAST_JOIN_GROUP, ssoTypeGroupReq}, - ssoLeaveGroup: {sysMCAST_LEAVE_GROUP, ssoTypeGroupReq}, - ssoJoinSourceGroup: {sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq}, - ssoLeaveSourceGroup: {sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq}, - ssoBlockSourceGroup: {sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq}, - ssoUnblockSourceGroup: {sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq}, + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: 
socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, } ) func init() { freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate") if freebsdVersion >= 1000000 { - sockOpts[ssoMulticastInterface].typ = ssoTypeIPMreqn + sockOpts[ssoMulticastInterface] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn} } if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" { archs, _ := syscall.Sysctl("kern.supported_archs") @@ -56,20 +57,20 @@ func init() { } } -func (gr *sysGroupReq) setGroup(grp net.IP) { - sa := (*sysSockaddrInet)(unsafe.Pointer(&gr.Group)) - sa.Len = sysSizeofSockaddrInet +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gr.Group)) + sa.Len = sizeofSockaddrInet sa.Family = syscall.AF_INET copy(sa.Addr[:], grp) } -func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) { - sa := (*sysSockaddrInet)(unsafe.Pointer(&gsr.Group)) - sa.Len = 
sysSizeofSockaddrInet +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group)) + sa.Len = sizeofSockaddrInet sa.Family = syscall.AF_INET copy(sa.Addr[:], grp) - sa = (*sysSockaddrInet)(unsafe.Pointer(&gsr.Source)) - sa.Len = sysSizeofSockaddrInet + sa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa.Len = sizeofSockaddrInet sa.Family = syscall.AF_INET copy(sa.Addr[:], src) } diff --git a/fn/vendor/golang.org/x/net/ipv4/sys_linux.go b/fn/vendor/golang.org/x/net/ipv4/sys_linux.go index b1f387890..60defe132 100644 --- a/fn/vendor/golang.org/x/net/ipv4/sys_linux.go +++ b/fn/vendor/golang.org/x/net/ipv4/sys_linux.go @@ -8,50 +8,52 @@ import ( "net" "syscall" "unsafe" -) -type sysSockoptLen int32 + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) var ( ctlOpts = [ctlMax]ctlOpt{ ctlTTL: {sysIP_TTL, 1, marshalTTL, parseTTL}, - ctlPacketInfo: {sysIP_PKTINFO, sysSizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, + ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, } - sockOpts = [ssoMax]sockOpt{ - ssoTOS: {sysIP_TOS, ssoTypeInt}, - ssoTTL: {sysIP_TTL, ssoTypeInt}, - ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeInt}, - ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeIPMreqn}, - ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt}, - ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt}, - ssoPacketInfo: {sysIP_PKTINFO, ssoTypeInt}, - ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt}, - ssoICMPFilter: {sysICMP_FILTER, ssoTypeICMPFilter}, - ssoJoinGroup: {sysMCAST_JOIN_GROUP, ssoTypeGroupReq}, - ssoLeaveGroup: {sysMCAST_LEAVE_GROUP, ssoTypeGroupReq}, - ssoJoinSourceGroup: {sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq}, - ssoLeaveSourceGroup: {sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq}, - ssoBlockSourceGroup: {sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq}, - ssoUnblockSourceGroup: {sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq}, + sockOpts = 
map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoPacketInfo: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_PKTINFO, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolReserved, Name: sysICMP_FILTER, Len: sizeofICMPFilter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoAttachFilter: {Option: socket.Option{Level: sysSOL_SOCKET, Name: sysSO_ATTACH_FILTER, Len: sizeofSockFprog}}, } ) -func (pi *sysInetPktinfo) 
setIfindex(i int) { +func (pi *inetPktinfo) setIfindex(i int) { pi.Ifindex = int32(i) } -func (gr *sysGroupReq) setGroup(grp net.IP) { - sa := (*sysSockaddrInet)(unsafe.Pointer(&gr.Group)) +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gr.Group)) sa.Family = syscall.AF_INET copy(sa.Addr[:], grp) } -func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) { - sa := (*sysSockaddrInet)(unsafe.Pointer(&gsr.Group)) +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group)) sa.Family = syscall.AF_INET copy(sa.Addr[:], grp) - sa = (*sysSockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source)) sa.Family = syscall.AF_INET copy(sa.Addr[:], src) } diff --git a/fn/vendor/golang.org/x/net/ipv4/sys_openbsd.go b/fn/vendor/golang.org/x/net/ipv4/sys_openbsd.go deleted file mode 100644 index 550f20816..000000000 --- a/fn/vendor/golang.org/x/net/ipv4/sys_openbsd.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ipv4 - -import ( - "net" - "syscall" -) - -type sysSockoptLen int32 - -var ( - ctlOpts = [ctlMax]ctlOpt{ - ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, - ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, - ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, - } - - sockOpts = [ssoMax]sockOpt{ - ssoTOS: {sysIP_TOS, ssoTypeInt}, - ssoTTL: {sysIP_TTL, ssoTypeInt}, - ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeByte}, - ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface}, - ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeByte}, - ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt}, - ssoReceiveDst: {sysIP_RECVDSTADDR, ssoTypeInt}, - ssoReceiveInterface: {sysIP_RECVIF, ssoTypeInt}, - ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt}, - ssoJoinGroup: {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq}, - ssoLeaveGroup: {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq}, - } -) diff --git a/fn/vendor/golang.org/x/net/ipv4/sys_solaris.go b/fn/vendor/golang.org/x/net/ipv4/sys_solaris.go new file mode 100644 index 000000000..832fef1e2 --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv4/sys_solaris.go @@ -0,0 +1,57 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 4, marshalTTL, parseTTL}, + ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoPacketInfo: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVPKTINFO, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: 
sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 260)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/fn/vendor/golang.org/x/net/ipv4/sys_ssmreq.go b/fn/vendor/golang.org/x/net/ipv4/sys_ssmreq.go new file mode 100644 index 000000000..ae5704e77 --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv4/sys_ssmreq.go @@ -0,0 +1,54 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin freebsd linux solaris + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +var freebsd32o64 bool + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var gr groupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var b []byte + if freebsd32o64 { + var d [sizeofGroupReq + 4]byte + s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))[:sizeofGroupReq] + } + return so.Set(c, b) +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + var gsr groupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var b []byte + if freebsd32o64 { + var d [sizeofGroupSourceReq + 4]byte + s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))[:sizeofGroupSourceReq] + } + return so.Set(c, b) +} diff --git a/fn/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go b/fn/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go new file mode 100644 index 000000000..e6b7623d0 --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !darwin,!freebsd,!linux,!solaris + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} diff --git a/fn/vendor/golang.org/x/net/ipv4/sys_stub.go b/fn/vendor/golang.org/x/net/ipv4/sys_stub.go index efbcc479a..4f076473b 100644 --- a/fn/vendor/golang.org/x/net/ipv4/sys_stub.go +++ b/fn/vendor/golang.org/x/net/ipv4/sys_stub.go @@ -2,14 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build nacl plan9 solaris +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv4 -type sysSockoptLen int32 - var ( ctlOpts = [ctlMax]ctlOpt{} - sockOpts = [ssoMax]sockOpt{} + sockOpts = map[int]*sockOpt{} ) diff --git a/fn/vendor/golang.org/x/net/ipv4/sys_windows.go b/fn/vendor/golang.org/x/net/ipv4/sys_windows.go index 466489fe0..b0913d539 100644 --- a/fn/vendor/golang.org/x/net/ipv4/sys_windows.go +++ b/fn/vendor/golang.org/x/net/ipv4/sys_windows.go @@ -4,6 +4,11 @@ package ipv4 +import ( + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + const ( // See ws2tcpip.h. 
sysIP_OPTIONS = 0x1 @@ -20,22 +25,22 @@ const ( sysIP_DROP_SOURCE_MEMBERSHIP = 0x10 sysIP_PKTINFO = 0x13 - sysSizeofInetPktinfo = 0x8 - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqSource = 0xc + sizeofInetPktinfo = 0x8 + sizeofIPMreq = 0x8 + sizeofIPMreqSource = 0xc ) -type sysInetPktinfo struct { +type inetPktinfo struct { Addr [4]byte Ifindex int32 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte Interface [4]byte } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr [4]byte Sourceaddr [4]byte Interface [4]byte @@ -45,17 +50,18 @@ type sysIPMreqSource struct { var ( ctlOpts = [ctlMax]ctlOpt{} - sockOpts = [ssoMax]sockOpt{ - ssoTOS: {sysIP_TOS, ssoTypeInt}, - ssoTTL: {sysIP_TTL, ssoTypeInt}, - ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeInt}, - ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface}, - ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt}, - ssoJoinGroup: {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq}, - ssoLeaveGroup: {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq}, + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, } ) -func (pi *sysInetPktinfo) setIfindex(i int) { +func (pi *inetPktinfo) setIfindex(i int) 
{ pi.Ifindex = int32(i) } diff --git a/fn/vendor/golang.org/x/net/ipv4/syscall_linux_386.go b/fn/vendor/golang.org/x/net/ipv4/syscall_linux_386.go deleted file mode 100644 index ab4ad0454..000000000 --- a/fn/vendor/golang.org/x/net/ipv4/syscall_linux_386.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ipv4 - -import ( - "syscall" - "unsafe" -) - -const ( - sysGETSOCKOPT = 0xf - sysSETSOCKOPT = 0xe -) - -func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (int, syscall.Errno) - -func getsockopt(fd, level, name int, v unsafe.Pointer, l *sysSockoptLen) error { - if _, errno := socketcall(sysGETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 { - return error(errno) - } - return nil -} - -func setsockopt(fd, level, name int, v unsafe.Pointer, l sysSockoptLen) error { - if _, errno := socketcall(sysSETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 { - return error(errno) - } - return nil -} diff --git a/fn/vendor/golang.org/x/net/ipv4/syscall_unix.go b/fn/vendor/golang.org/x/net/ipv4/syscall_unix.go deleted file mode 100644 index 5fe8e83bc..000000000 --- a/fn/vendor/golang.org/x/net/ipv4/syscall_unix.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin dragonfly freebsd linux,!386 netbsd openbsd - -package ipv4 - -import ( - "syscall" - "unsafe" -) - -func getsockopt(fd, level, name int, v unsafe.Pointer, l *sysSockoptLen) error { - if _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 { - return error(errno) - } - return nil -} - -func setsockopt(fd, level, name int, v unsafe.Pointer, l sysSockoptLen) error { - if _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 { - return error(errno) - } - return nil -} diff --git a/fn/vendor/golang.org/x/net/ipv4/thunk_linux_386.s b/fn/vendor/golang.org/x/net/ipv4/thunk_linux_386.s deleted file mode 100644 index daa78bc02..000000000 --- a/fn/vendor/golang.org/x/net/ipv4/thunk_linux_386.s +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.2 - -TEXT ·socketcall(SB),4,$0-36 - JMP syscall·socketcall(SB) diff --git a/fn/vendor/golang.org/x/net/ipv4/unicast_test.go b/fn/vendor/golang.org/x/net/ipv4/unicast_test.go index 9c632cd89..02c089f00 100644 --- a/fn/vendor/golang.org/x/net/ipv4/unicast_test.go +++ b/fn/vendor/golang.org/x/net/ipv4/unicast_test.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -20,7 +20,7 @@ import ( func TestPacketConnReadWriteUnicastUDP(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) @@ -28,18 +28,15 @@ func TestPacketConnReadWriteUnicastUDP(t *testing.T) { t.Skipf("not available on %s", runtime.GOOS) } - c, err := net.ListenPacket("udp4", "127.0.0.1:0") + c, err := nettest.NewLocalPacketListener("udp4") if err != nil { t.Fatal(err) } defer c.Close() - - dst, err := net.ResolveUDPAddr("udp4", c.LocalAddr().String()) - if err != nil { - t.Fatal(err) - } p := ipv4.NewPacketConn(c) defer p.Close() + + dst := c.LocalAddr() cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface wb := []byte("HELLO-R-U-THERE") @@ -74,7 +71,7 @@ func TestPacketConnReadWriteUnicastUDP(t *testing.T) { func TestPacketConnReadWriteUnicastICMP(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if m, ok := nettest.SupportsRawIPSocket(); !ok { @@ -97,7 +94,11 @@ func TestPacketConnReadWriteUnicastICMP(t *testing.T) { } p := ipv4.NewPacketConn(c) defer p.Close() - cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + cf := ipv4.FlagDst | ipv4.FlagInterface + if runtime.GOOS != "solaris" { + // Solaris never allows to modify ICMP properties. 
+ cf |= ipv4.FlagTTL + } for i, toggle := range []bool{true, false, true} { wb, err := (&icmp.Message{ @@ -156,7 +157,7 @@ func TestPacketConnReadWriteUnicastICMP(t *testing.T) { func TestRawConnReadWriteUnicastICMP(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if m, ok := nettest.SupportsRawIPSocket(); !ok { diff --git a/fn/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go b/fn/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go index 25606f21d..db5213b91 100644 --- a/fn/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go +++ b/fn/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -16,7 +16,7 @@ import ( func TestConnUnicastSocketOptions(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) @@ -30,8 +30,15 @@ func TestConnUnicastSocketOptions(t *testing.T) { } defer ln.Close() - done := make(chan bool) - go acceptor(t, ln, done) + errc := make(chan error, 1) + go func() { + c, err := ln.Accept() + if err != nil { + errc <- err + return + } + errc <- c.Close() + }() c, err := net.Dial("tcp4", ln.Addr().String()) if err != nil { @@ -41,7 +48,9 @@ func TestConnUnicastSocketOptions(t *testing.T) { testUnicastSocketOptions(t, ipv4.NewConn(c)) - <-done + if err := <-errc; err != nil { + t.Errorf("server: %v", err) + } } var packetConnUnicastSocketOptionTests = []struct { @@ -53,7 +62,7 @@ var packetConnUnicastSocketOptionTests = []struct { func TestPacketConnUnicastSocketOptions(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris": + 
case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) @@ -79,7 +88,7 @@ func TestPacketConnUnicastSocketOptions(t *testing.T) { func TestRawConnUnicastSocketOptions(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if m, ok := nettest.SupportsRawIPSocket(); !ok { diff --git a/fn/vendor/golang.org/x/net/ipv4/zsys_darwin.go b/fn/vendor/golang.org/x/net/ipv4/zsys_darwin.go index 087c63906..c07cc883f 100644 --- a/fn/vendor/golang.org/x/net/ipv4/zsys_darwin.go +++ b/fn/vendor/golang.org/x/net/ipv4/zsys_darwin.go @@ -37,18 +37,18 @@ const ( sysMCAST_BLOCK_SOURCE = 0x54 sysMCAST_UNBLOCK_SOURCE = 0x55 - sysSizeofSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 - sysSizeofInetPktinfo = 0xc + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x84 - sysSizeofGroupSourceReq = 0x104 + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 ) -type sysSockaddrStorage struct { +type sockaddrStorage struct { Len uint8 Family uint8 X__ss_pad1 [6]int8 @@ -56,7 +56,7 @@ type sysSockaddrStorage struct { X__ss_pad2 [112]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Len uint8 Family uint8 Port uint16 @@ -64,35 +64,35 @@ type sysSockaddrInet struct { Zero [8]int8 } -type sysInetPktinfo struct { +type inetPktinfo struct { Ifindex uint32 Spec_dst [4]byte /* in_addr */ Addr [4]byte /* in_addr */ } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type 
ipMreqSource struct { Multiaddr [4]byte /* in_addr */ Sourceaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [128]byte } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [128]byte Pad_cgo_1 [128]byte diff --git a/fn/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go b/fn/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go index f5c9ccec4..c4365e9e7 100644 --- a/fn/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go +++ b/fn/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_dragonfly.go -// +build dragonfly - package ipv4 const ( @@ -24,10 +22,10 @@ const ( sysIP_ADD_MEMBERSHIP = 0xc sysIP_DROP_MEMBERSHIP = 0xd - sysSizeofIPMreq = 0x8 + sizeofIPMreq = 0x8 ) -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } diff --git a/fn/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go b/fn/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go index 6fd67e1e9..8c4aec94c 100644 --- a/fn/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go +++ b/fn/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go @@ -38,17 +38,17 @@ const ( sysMCAST_BLOCK_SOURCE = 0x54 sysMCAST_UNBLOCK_SOURCE = 0x55 - sysSizeofSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x84 - sysSizeofGroupSourceReq = 0x104 + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 ) -type sysSockaddrStorage struct { +type sockaddrStorage struct { Len uint8 Family uint8 X__ss_pad1 [6]int8 @@ -56,7 +56,7 @@ type sysSockaddrStorage struct { X__ss_pad2 [112]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Len uint8 Family uint8 Port uint16 @@ 
-64,30 +64,30 @@ type sysSockaddrInet struct { Zero [8]int8 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr [4]byte /* in_addr */ Sourceaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysGroupReq struct { +type groupReq struct { Interface uint32 - Group sysSockaddrStorage + Group sockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 - Group sysSockaddrStorage - Source sysSockaddrStorage + Group sockaddrStorage + Source sockaddrStorage } diff --git a/fn/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go b/fn/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go index ebac6d792..4b10b7c57 100644 --- a/fn/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go +++ b/fn/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go @@ -38,17 +38,17 @@ const ( sysMCAST_BLOCK_SOURCE = 0x54 sysMCAST_UNBLOCK_SOURCE = 0x55 - sysSizeofSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 ) -type sysSockaddrStorage struct { +type sockaddrStorage struct { Len uint8 Family uint8 X__ss_pad1 [6]int8 @@ -56,7 +56,7 @@ type sysSockaddrStorage struct { X__ss_pad2 [112]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Len uint8 Family uint8 Port uint16 @@ -64,32 +64,32 @@ type sysSockaddrInet struct { Zero [8]int8 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct 
{ +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr [4]byte /* in_addr */ Sourceaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysSockaddrStorage + Group sockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysSockaddrStorage - Source sysSockaddrStorage + Group sockaddrStorage + Source sockaddrStorage } diff --git a/fn/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go b/fn/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go index ebac6d792..4b10b7c57 100644 --- a/fn/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go +++ b/fn/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go @@ -38,17 +38,17 @@ const ( sysMCAST_BLOCK_SOURCE = 0x54 sysMCAST_UNBLOCK_SOURCE = 0x55 - sysSizeofSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 ) -type sysSockaddrStorage struct { +type sockaddrStorage struct { Len uint8 Family uint8 X__ss_pad1 [6]int8 @@ -56,7 +56,7 @@ type sysSockaddrStorage struct { X__ss_pad2 [112]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Len uint8 Family uint8 Port uint16 @@ -64,32 +64,32 @@ type sysSockaddrInet struct { Zero [8]int8 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { 
Multiaddr [4]byte /* in_addr */ Sourceaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysSockaddrStorage + Group sockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysSockaddrStorage - Source sysSockaddrStorage + Group sockaddrStorage + Source sockaddrStorage } diff --git a/fn/vendor/golang.org/x/net/ipv4/zsys_linux_386.go b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_386.go index fc7a9ebfb..c0260f0ce 100644 --- a/fn/vendor/golang.org/x/net/ipv4/zsys_linux_386.go +++ b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_386.go @@ -55,39 +55,44 @@ const ( sysSO_EE_ORIGIN_TXSTATUS = 0x4 sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 - sysSizeofInetPktinfo = 0xc - sysSizeofSockExtendedErr = 0x10 + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x84 - sysSizeofGroupSourceReq = 0x104 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 - sysSizeofICMPFilter = 0x4 + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Family uint16 Port uint16 Addr [4]byte /* in_addr */ X__pad [8]uint8 } -type sysInetPktinfo struct { +type inetPktinfo struct { Ifindex int32 Spec_dst [4]byte /* in_addr */ Addr [4]byte /* in_addr */ } -type sysSockExtendedErr struct { +type sockExtendedErr struct { Errno uint32 Origin uint8 Type uint8 @@ -97,34 +102,47 @@ type sysSockExtendedErr struct { Data uint32 } -type 
sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr uint32 Interface uint32 Sourceaddr uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPFilter struct { +type icmpFilter struct { Data uint32 } + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/fn/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go index e324b81b6..9c967eaa6 100644 --- a/fn/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go +++ b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go @@ -55,39 +55,44 @@ const ( sysSO_EE_ORIGIN_TXSTATUS = 0x4 sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 - sysSizeofInetPktinfo = 0xc - sysSizeofSockExtendedErr = 0x10 + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 - sysSizeofICMPFilter = 0x4 + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type 
kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Family uint16 Port uint16 Addr [4]byte /* in_addr */ X__pad [8]uint8 } -type sysInetPktinfo struct { +type inetPktinfo struct { Ifindex int32 Spec_dst [4]byte /* in_addr */ Addr [4]byte /* in_addr */ } -type sysSockExtendedErr struct { +type sockExtendedErr struct { Errno uint32 Origin uint8 Type uint8 @@ -97,36 +102,49 @@ type sysSockExtendedErr struct { Data uint32 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr uint32 Interface uint32 Sourceaddr uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPFilter struct { +type icmpFilter struct { Data uint32 } + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/fn/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go index fc7a9ebfb..c0260f0ce 100644 --- a/fn/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go +++ b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go @@ -55,39 +55,44 @@ const ( sysSO_EE_ORIGIN_TXSTATUS = 0x4 sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 - sysSizeofInetPktinfo = 0xc - sysSizeofSockExtendedErr = 0x10 + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a 
- sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x84 - sysSizeofGroupSourceReq = 0x104 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 - sysSizeofICMPFilter = 0x4 + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Family uint16 Port uint16 Addr [4]byte /* in_addr */ X__pad [8]uint8 } -type sysInetPktinfo struct { +type inetPktinfo struct { Ifindex int32 Spec_dst [4]byte /* in_addr */ Addr [4]byte /* in_addr */ } -type sysSockExtendedErr struct { +type sockExtendedErr struct { Errno uint32 Origin uint8 Type uint8 @@ -97,34 +102,47 @@ type sysSockExtendedErr struct { Data uint32 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr uint32 Interface uint32 Sourceaddr uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPFilter struct { +type icmpFilter struct { Data uint32 } + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git 
a/fn/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go index ce4194a64..9c967eaa6 100644 --- a/fn/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go +++ b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,arm64 - package ipv4 const ( @@ -57,39 +55,44 @@ const ( sysSO_EE_ORIGIN_TXSTATUS = 0x4 sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 - sysSizeofInetPktinfo = 0xc - sysSizeofSockExtendedErr = 0x10 + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 - sysSizeofICMPFilter = 0x4 + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Family uint16 Port uint16 Addr [4]byte /* in_addr */ X__pad [8]uint8 } -type sysInetPktinfo struct { +type inetPktinfo struct { Ifindex int32 Spec_dst [4]byte /* in_addr */ Addr [4]byte /* in_addr */ } -type sysSockExtendedErr struct { +type sockExtendedErr struct { Errno uint32 Origin uint8 Type uint8 @@ -99,36 +102,49 @@ type sysSockExtendedErr struct { Data uint32 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr uint32 
Interface uint32 Sourceaddr uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPFilter struct { +type icmpFilter struct { Data uint32 } + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/fn/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go new file mode 100644 index 000000000..c0260f0ce --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + 
sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/fn/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go index 94116bfa6..9c967eaa6 100644 --- 
a/fn/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go +++ b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,mips64 - package ipv4 const ( @@ -57,39 +55,44 @@ const ( sysSO_EE_ORIGIN_TXSTATUS = 0x4 sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 - sysSizeofInetPktinfo = 0xc - sysSizeofSockExtendedErr = 0x10 + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 - sysSizeofICMPFilter = 0x4 + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Family uint16 Port uint16 Addr [4]byte /* in_addr */ X__pad [8]uint8 } -type sysInetPktinfo struct { +type inetPktinfo struct { Ifindex int32 Spec_dst [4]byte /* in_addr */ Addr [4]byte /* in_addr */ } -type sysSockExtendedErr struct { +type sockExtendedErr struct { Errno uint32 Origin uint8 Type uint8 @@ -99,36 +102,49 @@ type sysSockExtendedErr struct { Data uint32 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr uint32 Interface uint32 Sourceaddr uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group 
sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPFilter struct { +type icmpFilter struct { Data uint32 } + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/fn/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go index 698d7db32..9c967eaa6 100644 --- a/fn/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go +++ b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,mips64le - package ipv4 const ( @@ -57,39 +55,44 @@ const ( sysSO_EE_ORIGIN_TXSTATUS = 0x4 sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 - sysSizeofInetPktinfo = 0xc - sysSizeofSockExtendedErr = 0x10 + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 - sysSizeofICMPFilter = 0x4 + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Family uint16 Port uint16 Addr [4]byte /* in_addr */ X__pad [8]uint8 } -type sysInetPktinfo struct { +type inetPktinfo struct { Ifindex int32 
Spec_dst [4]byte /* in_addr */ Addr [4]byte /* in_addr */ } -type sysSockExtendedErr struct { +type sockExtendedErr struct { Errno uint32 Origin uint8 Type uint8 @@ -99,36 +102,49 @@ type sysSockExtendedErr struct { Data uint32 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr uint32 Interface uint32 Sourceaddr uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPFilter struct { +type icmpFilter struct { Data uint32 } + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/fn/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go new file mode 100644 index 000000000..c0260f0ce --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + 
sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group 
kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/fn/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go new file mode 100644 index 000000000..f65bd9a7a --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + 
sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]uint8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/fn/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go index 9fe5ee2b6..9c967eaa6 100644 --- a/fn/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go +++ b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,ppc64 - package ipv4 const ( @@ -57,39 +55,44 @@ const ( sysSO_EE_ORIGIN_TXSTATUS = 0x4 sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 - 
sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 - sysSizeofInetPktinfo = 0xc - sysSizeofSockExtendedErr = 0x10 + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 - sysSizeofICMPFilter = 0x4 + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Family uint16 Port uint16 Addr [4]byte /* in_addr */ X__pad [8]uint8 } -type sysInetPktinfo struct { +type inetPktinfo struct { Ifindex int32 Spec_dst [4]byte /* in_addr */ Addr [4]byte /* in_addr */ } -type sysSockExtendedErr struct { +type sockExtendedErr struct { Errno uint32 Origin uint8 Type uint8 @@ -99,36 +102,49 @@ type sysSockExtendedErr struct { Data uint32 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct { +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr uint32 Interface uint32 Sourceaddr uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPFilter struct { +type icmpFilter struct { Data uint32 } + 
+type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/fn/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go index 3891f54ef..9c967eaa6 100644 --- a/fn/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go +++ b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,ppc64le - package ipv4 const ( @@ -57,39 +55,44 @@ const ( sysSO_EE_ORIGIN_TXSTATUS = 0x4 sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet = 0x10 - sysSizeofInetPktinfo = 0xc - sysSizeofSockExtendedErr = 0x10 + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqn = 0xc - sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 - sysSizeofICMPFilter = 0x4 + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet struct { +type sockaddrInet struct { Family uint16 Port uint16 Addr [4]byte /* in_addr */ X__pad [8]uint8 } -type sysInetPktinfo struct { +type inetPktinfo struct { Ifindex int32 Spec_dst [4]byte /* in_addr */ Addr [4]byte /* in_addr */ } -type sysSockExtendedErr struct { +type sockExtendedErr struct { Errno uint32 Origin uint8 Type uint8 @@ -99,36 +102,49 @@ type sysSockExtendedErr struct { Data uint32 } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqn struct 
{ +type ipMreqn struct { Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr uint32 Interface uint32 Sourceaddr uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPFilter struct { +type icmpFilter struct { Data uint32 } + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/fn/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go new file mode 100644 index 000000000..9c967eaa6 --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 
0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} 
diff --git a/fn/vendor/golang.org/x/net/ipv4/zsys_netbsd.go b/fn/vendor/golang.org/x/net/ipv4/zsys_netbsd.go index 8a440eb65..fd3624d93 100644 --- a/fn/vendor/golang.org/x/net/ipv4/zsys_netbsd.go +++ b/fn/vendor/golang.org/x/net/ipv4/zsys_netbsd.go @@ -21,10 +21,10 @@ const ( sysIP_ADD_MEMBERSHIP = 0xc sysIP_DROP_MEMBERSHIP = 0xd - sysSizeofIPMreq = 0x8 + sizeofIPMreq = 0x8 ) -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } diff --git a/fn/vendor/golang.org/x/net/ipv4/zsys_openbsd.go b/fn/vendor/golang.org/x/net/ipv4/zsys_openbsd.go index fd522b573..12f36be75 100644 --- a/fn/vendor/golang.org/x/net/ipv4/zsys_openbsd.go +++ b/fn/vendor/golang.org/x/net/ipv4/zsys_openbsd.go @@ -21,10 +21,10 @@ const ( sysIP_ADD_MEMBERSHIP = 0xc sysIP_DROP_MEMBERSHIP = 0xd - sysSizeofIPMreq = 0x8 + sizeofIPMreq = 0x8 ) -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } diff --git a/fn/vendor/golang.org/x/net/ipv4/zsys_solaris.go b/fn/vendor/golang.org/x/net/ipv4/zsys_solaris.go index d7c23349a..0a3875cc4 100644 --- a/fn/vendor/golang.org/x/net/ipv4/zsys_solaris.go +++ b/fn/vendor/golang.org/x/net/ipv4/zsys_solaris.go @@ -1,30 +1,20 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_solaris.go -// +build solaris - package ipv4 const ( - sysIP_OPTIONS = 0x1 - sysIP_HDRINCL = 0x2 - sysIP_TOS = 0x3 - sysIP_TTL = 0x4 - sysIP_RECVOPTS = 0x5 - sysIP_RECVRETOPTS = 0x6 - sysIP_RECVDSTADDR = 0x7 - sysIP_RETOPTS = 0x8 - sysIP_RECVIF = 0x9 - sysIP_RECVSLLA = 0xa - sysIP_RECVTTL = 0xb - sysIP_NEXTHOP = 0x19 - sysIP_PKTINFO = 0x1a - sysIP_RECVPKTINFO = 0x1a - sysIP_DONTFRAG = 0x1b - sysIP_BOUND_IF = 0x41 - sysIP_UNSPEC_SRC = 0x42 - sysIP_BROADCAST_TTL = 0x43 - sysIP_DHCPINIT_IF = 0x45 + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + 
sysIP_RECVIF = 0x9 + sysIP_RECVSLLA = 0xa + sysIP_RECVTTL = 0xb sysIP_MULTICAST_IF = 0x10 sysIP_MULTICAST_TTL = 0x11 @@ -35,26 +25,76 @@ const ( sysIP_UNBLOCK_SOURCE = 0x16 sysIP_ADD_SOURCE_MEMBERSHIP = 0x17 sysIP_DROP_SOURCE_MEMBERSHIP = 0x18 + sysIP_NEXTHOP = 0x19 - sysSizeofInetPktinfo = 0xc + sysIP_PKTINFO = 0x1a + sysIP_RECVPKTINFO = 0x1a + sysIP_DONTFRAG = 0x1b - sysSizeofIPMreq = 0x8 - sysSizeofIPMreqSource = 0xc + sysIP_BOUND_IF = 0x41 + sysIP_UNSPEC_SRC = 0x42 + sysIP_BROADCAST_TTL = 0x43 + sysIP_DHCPINIT_IF = 0x45 + + sysIP_REUSEADDR = 0x104 + sysIP_DONTROUTE = 0x105 + sysIP_BROADCAST = 0x106 + + sysMCAST_JOIN_GROUP = 0x29 + sysMCAST_LEAVE_GROUP = 0x2a + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_JOIN_SOURCE_GROUP = 0x2d + sysMCAST_LEAVE_SOURCE_GROUP = 0x2e + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + + sizeofIPMreq = 0x8 + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x104 + sizeofGroupSourceReq = 0x204 ) -type sysInetPktinfo struct { +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type inetPktinfo struct { Ifindex uint32 Spec_dst [4]byte /* in_addr */ Addr [4]byte /* in_addr */ } -type sysIPMreq struct { +type ipMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } -type sysIPMreqSource struct { +type ipMreqSource struct { Multiaddr [4]byte /* in_addr */ Sourceaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [256]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [256]byte + Pad_cgo_1 [256]byte +} diff --git a/fn/vendor/golang.org/x/net/ipv6/batch.go b/fn/vendor/golang.org/x/net/ipv6/batch.go new file mode 100644 index 000000000..4f5fe683d --- /dev/null +++ 
b/fn/vendor/golang.org/x/net/ipv6/batch.go @@ -0,0 +1,119 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package ipv6 + +import ( + "net" + "runtime" + "syscall" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// PacketConn are not implemented. + +// A Message represents an IO message. +// +// type Message struct { +// Buffers [][]byte +// OOB []byte +// Addr net.Addr +// N int +// NN int +// Flags int +// } +// +// The Buffers fields represents a list of contiguous buffers, which +// can be used for vectored IO, for example, putting a header and a +// payload in each slice. +// When writing, the Buffers field must contain at least one byte to +// write. +// When reading, the Buffers field will always contain a byte to read. +// +// The OOB field contains protocol-specific control or miscellaneous +// ancillary data known as out-of-band data. +// It can be nil when not required. +// +// The Addr field specifies a destination address when writing. +// It can be nil when the underlying protocol of the endpoint uses +// connection-oriented communication. +// After a successful read, it may contain the source address on the +// received packet. +// +// The N field indicates the number of bytes read or written from/to +// Buffers. +// +// The NN field indicates the number of bytes read or written from/to +// OOB. +// +// The Flags field contains protocol-specific information on the +// received message. +type Message = socket.Message + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. 
+// On other platforms, this method will read only a single message. +func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. +func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} diff --git a/fn/vendor/golang.org/x/net/ipv6/bpf_test.go b/fn/vendor/golang.org/x/net/ipv6/bpf_test.go new file mode 100644 index 000000000..8253e1f42 --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv6/bpf_test.go @@ -0,0 +1,96 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "net" + "runtime" + "testing" + "time" + + "golang.org/x/net/bpf" + "golang.org/x/net/ipv6" +) + +func TestBPF(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + l, err := net.ListenPacket("udp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + + p := ipv6.NewPacketConn(l) + + // This filter accepts UDP packets whose first payload byte is + // even. + prog, err := bpf.Assemble([]bpf.Instruction{ + // Load the first byte of the payload (skipping UDP header). + bpf.LoadAbsolute{Off: 8, Size: 1}, + // Select LSB of the byte. + bpf.ALUOpConstant{Op: bpf.ALUOpAnd, Val: 1}, + // Byte is even? + bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0, SkipFalse: 1}, + // Accept. + bpf.RetConstant{Val: 4096}, + // Ignore. + bpf.RetConstant{Val: 0}, + }) + if err != nil { + t.Fatalf("compiling BPF: %s", err) + } + + if err = p.SetBPF(prog); err != nil { + t.Fatalf("attaching filter to Conn: %s", err) + } + + s, err := net.Dial("udp6", l.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + defer s.Close() + go func() { + for i := byte(0); i < 10; i++ { + s.Write([]byte{i}) + } + }() + + l.SetDeadline(time.Now().Add(2 * time.Second)) + seen := make([]bool, 5) + for { + var b [512]byte + n, _, err := l.ReadFrom(b[:]) + if err != nil { + t.Fatalf("reading from listener: %s", err) + } + if n != 1 { + t.Fatalf("unexpected packet length, want 1, got %d", n) + } + if b[0] >= 10 { + t.Fatalf("unexpected byte, want 0-9, got %d", b[0]) + } + if b[0]%2 != 0 { + t.Fatalf("got odd byte %d, wanted only even bytes", b[0]) + } + seen[b[0]/2] = true + + seenAll := true + for _, v := range seen { + if !v { + seenAll = false + break + } + } + if seenAll { + break + } + } +} diff --git a/fn/vendor/golang.org/x/net/ipv6/control.go b/fn/vendor/golang.org/x/net/ipv6/control.go index b7362aae7..2da644413 100644 --- 
a/fn/vendor/golang.org/x/net/ipv6/control.go +++ b/fn/vendor/golang.org/x/net/ipv6/control.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -8,10 +8,13 @@ import ( "fmt" "net" "sync" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) // Note that RFC 3542 obsoletes RFC 2292 but OS X Snow Leopard and the -// former still support RFC 2292 only. Please be aware that almost +// former still support RFC 2292 only. Please be aware that almost // all protocol implementations prohibit using a combination of RFC // 2292 and RFC 3542 for some practical reasons. @@ -66,6 +69,105 @@ func (cm *ControlMessage) String() string { return fmt.Sprintf("tclass=%#x hoplim=%d src=%v dst=%v ifindex=%d nexthop=%v mtu=%d", cm.TrafficClass, cm.HopLimit, cm.Src, cm.Dst, cm.IfIndex, cm.NextHop, cm.MTU) } +// Marshal returns the binary encoding of cm. 
+func (cm *ControlMessage) Marshal() []byte { + if cm == nil { + return nil + } + var l int + tclass := false + if ctlOpts[ctlTrafficClass].name > 0 && cm.TrafficClass > 0 { + tclass = true + l += socket.ControlMessageSpace(ctlOpts[ctlTrafficClass].length) + } + hoplimit := false + if ctlOpts[ctlHopLimit].name > 0 && cm.HopLimit > 0 { + hoplimit = true + l += socket.ControlMessageSpace(ctlOpts[ctlHopLimit].length) + } + pktinfo := false + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To16() != nil && cm.Src.To4() == nil || cm.IfIndex > 0) { + pktinfo = true + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + nexthop := false + if ctlOpts[ctlNextHop].name > 0 && cm.NextHop.To16() != nil && cm.NextHop.To4() == nil { + nexthop = true + l += socket.ControlMessageSpace(ctlOpts[ctlNextHop].length) + } + var b []byte + if l > 0 { + b = make([]byte, l) + bb := b + if tclass { + bb = ctlOpts[ctlTrafficClass].marshal(bb, cm) + } + if hoplimit { + bb = ctlOpts[ctlHopLimit].marshal(bb, cm) + } + if pktinfo { + bb = ctlOpts[ctlPacketInfo].marshal(bb, cm) + } + if nexthop { + bb = ctlOpts[ctlNextHop].marshal(bb, cm) + } + } + return b +} + +// Parse parses b as a control message and stores the result in cm. 
+func (cm *ControlMessage) Parse(b []byte) error { + ms, err := socket.ControlMessage(b).Parse() + if err != nil { + return err + } + for _, m := range ms { + lvl, typ, l, err := m.ParseHeader() + if err != nil { + return err + } + if lvl != iana.ProtocolIPv6 { + continue + } + switch { + case typ == ctlOpts[ctlTrafficClass].name && l >= ctlOpts[ctlTrafficClass].length: + ctlOpts[ctlTrafficClass].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlHopLimit].name && l >= ctlOpts[ctlHopLimit].length: + ctlOpts[ctlHopLimit].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPacketInfo].name && l >= ctlOpts[ctlPacketInfo].length: + ctlOpts[ctlPacketInfo].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPathMTU].name && l >= ctlOpts[ctlPathMTU].length: + ctlOpts[ctlPathMTU].parse(cm, m.Data(l)) + } + } + return nil +} + +// NewControlMessage returns a new control message. +// +// The returned message is large enough for options specified by cf. +func NewControlMessage(cf ControlFlags) []byte { + opt := rawOpt{cflags: cf} + var l int + if opt.isset(FlagTrafficClass) && ctlOpts[ctlTrafficClass].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlTrafficClass].length) + } + if opt.isset(FlagHopLimit) && ctlOpts[ctlHopLimit].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlHopLimit].length) + } + if opt.isset(flagPacketInfo) && ctlOpts[ctlPacketInfo].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + if opt.isset(FlagPathMTU) && ctlOpts[ctlPathMTU].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlPathMTU].length) + } + var b []byte + if l > 0 { + b = make([]byte, l) + } + return b +} + // Ancillary data socket options const ( ctlTrafficClass = iota // header field diff --git a/fn/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go b/fn/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go index ce201ce36..9fd9eb15e 100644 --- a/fn/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go +++ 
b/fn/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -7,32 +7,26 @@ package ipv6 import ( - "syscall" "unsafe" "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) func marshal2292HopLimit(b []byte, cm *ControlMessage) []byte { - m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIPv6 - m.Type = sysIPV6_2292HOPLIMIT - m.SetLen(syscall.CmsgLen(4)) + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292HOPLIMIT, 4) if cm != nil { - data := b[syscall.CmsgLen(0):] - // TODO(mikio): fix potential misaligned memory access - *(*int32)(unsafe.Pointer(&data[:4][0])) = int32(cm.HopLimit) + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.HopLimit)) } - return b[syscall.CmsgSpace(4):] + return m.Next(4) } func marshal2292PacketInfo(b []byte, cm *ControlMessage) []byte { - m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIPv6 - m.Type = sysIPV6_2292PKTINFO - m.SetLen(syscall.CmsgLen(sysSizeofInet6Pktinfo)) + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292PKTINFO, sizeofInet6Pktinfo) if cm != nil { - pi := (*sysInet6Pktinfo)(unsafe.Pointer(&b[syscall.CmsgLen(0)])) + pi := (*inet6Pktinfo)(unsafe.Pointer(&m.Data(sizeofInet6Pktinfo)[0])) if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { copy(pi.Addr[:], ip) } @@ -40,17 +34,15 @@ func marshal2292PacketInfo(b []byte, cm *ControlMessage) []byte { pi.setIfindex(cm.IfIndex) } } - return b[syscall.CmsgSpace(sysSizeofInet6Pktinfo):] + return m.Next(sizeofInet6Pktinfo) } func marshal2292NextHop(b []byte, cm *ControlMessage) []byte { - m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIPv6 - m.Type = sysIPV6_2292NEXTHOP - 
m.SetLen(syscall.CmsgLen(sysSizeofSockaddrInet6)) + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292NEXTHOP, sizeofSockaddrInet6) if cm != nil { - sa := (*sysSockaddrInet6)(unsafe.Pointer(&b[syscall.CmsgLen(0)])) + sa := (*sockaddrInet6)(unsafe.Pointer(&m.Data(sizeofSockaddrInet6)[0])) sa.setSockaddr(cm.NextHop, cm.IfIndex) } - return b[syscall.CmsgSpace(sysSizeofSockaddrInet6):] + return m.Next(sizeofSockaddrInet6) } diff --git a/fn/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go b/fn/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go index e55c4aa97..eec529c20 100644 --- a/fn/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go +++ b/fn/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go @@ -1,61 +1,50 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd +// +build darwin dragonfly freebsd linux netbsd openbsd solaris package ipv6 import ( - "syscall" + "net" "unsafe" "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) func marshalTrafficClass(b []byte, cm *ControlMessage) []byte { - m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIPv6 - m.Type = sysIPV6_TCLASS - m.SetLen(syscall.CmsgLen(4)) + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_TCLASS, 4) if cm != nil { - data := b[syscall.CmsgLen(0):] - // TODO(mikio): fix potential misaligned memory access - *(*int32)(unsafe.Pointer(&data[:4][0])) = int32(cm.TrafficClass) + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.TrafficClass)) } - return b[syscall.CmsgSpace(4):] + return m.Next(4) } func parseTrafficClass(cm *ControlMessage, b []byte) { - // TODO(mikio): fix potential misaligned memory access - cm.TrafficClass = int(*(*int32)(unsafe.Pointer(&b[:4][0]))) + cm.TrafficClass = 
int(socket.NativeEndian.Uint32(b[:4])) } func marshalHopLimit(b []byte, cm *ControlMessage) []byte { - m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIPv6 - m.Type = sysIPV6_HOPLIMIT - m.SetLen(syscall.CmsgLen(4)) + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_HOPLIMIT, 4) if cm != nil { - data := b[syscall.CmsgLen(0):] - // TODO(mikio): fix potential misaligned memory access - *(*int32)(unsafe.Pointer(&data[:4][0])) = int32(cm.HopLimit) + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.HopLimit)) } - return b[syscall.CmsgSpace(4):] + return m.Next(4) } func parseHopLimit(cm *ControlMessage, b []byte) { - // TODO(mikio): fix potential misaligned memory access - cm.HopLimit = int(*(*int32)(unsafe.Pointer(&b[:4][0]))) + cm.HopLimit = int(socket.NativeEndian.Uint32(b[:4])) } func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { - m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIPv6 - m.Type = sysIPV6_PKTINFO - m.SetLen(syscall.CmsgLen(sysSizeofInet6Pktinfo)) + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_PKTINFO, sizeofInet6Pktinfo) if cm != nil { - pi := (*sysInet6Pktinfo)(unsafe.Pointer(&b[syscall.CmsgLen(0)])) + pi := (*inet6Pktinfo)(unsafe.Pointer(&m.Data(sizeofInet6Pktinfo)[0])) if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { copy(pi.Addr[:], ip) } @@ -63,41 +52,43 @@ func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { pi.setIfindex(cm.IfIndex) } } - return b[syscall.CmsgSpace(sysSizeofInet6Pktinfo):] + return m.Next(sizeofInet6Pktinfo) } func parsePacketInfo(cm *ControlMessage, b []byte) { - pi := (*sysInet6Pktinfo)(unsafe.Pointer(&b[0])) - cm.Dst = pi.Addr[:] + pi := (*inet6Pktinfo)(unsafe.Pointer(&b[0])) + if len(cm.Dst) < net.IPv6len { + cm.Dst = make(net.IP, net.IPv6len) + } + copy(cm.Dst, pi.Addr[:]) cm.IfIndex = int(pi.Ifindex) } func marshalNextHop(b []byte, cm *ControlMessage) []byte { - m := 
(*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIPv6 - m.Type = sysIPV6_NEXTHOP - m.SetLen(syscall.CmsgLen(sysSizeofSockaddrInet6)) + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_NEXTHOP, sizeofSockaddrInet6) if cm != nil { - sa := (*sysSockaddrInet6)(unsafe.Pointer(&b[syscall.CmsgLen(0)])) + sa := (*sockaddrInet6)(unsafe.Pointer(&m.Data(sizeofSockaddrInet6)[0])) sa.setSockaddr(cm.NextHop, cm.IfIndex) } - return b[syscall.CmsgSpace(sysSizeofSockaddrInet6):] + return m.Next(sizeofSockaddrInet6) } func parseNextHop(cm *ControlMessage, b []byte) { } func marshalPathMTU(b []byte, cm *ControlMessage) []byte { - m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - m.Level = iana.ProtocolIPv6 - m.Type = sysIPV6_PATHMTU - m.SetLen(syscall.CmsgLen(sysSizeofIPv6Mtuinfo)) - return b[syscall.CmsgSpace(sysSizeofIPv6Mtuinfo):] + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_PATHMTU, sizeofIPv6Mtuinfo) + return m.Next(sizeofIPv6Mtuinfo) } func parsePathMTU(cm *ControlMessage, b []byte) { - mi := (*sysIPv6Mtuinfo)(unsafe.Pointer(&b[0])) - cm.Dst = mi.Addr.Addr[:] + mi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0])) + if len(cm.Dst) < net.IPv6len { + cm.Dst = make(net.IP, net.IPv6len) + } + copy(cm.Dst, mi.Addr.Addr[:]) cm.IfIndex = int(mi.Addr.Scope_id) cm.MTU = int(mi.Mtu) } diff --git a/fn/vendor/golang.org/x/net/ipv6/control_stub.go b/fn/vendor/golang.org/x/net/ipv6/control_stub.go index 2fecf7e5c..a045f28f7 100644 --- a/fn/vendor/golang.org/x/net/ipv6/control_stub.go +++ b/fn/vendor/golang.org/x/net/ipv6/control_stub.go @@ -1,23 +1,13 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build nacl plan9 solaris +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv6 -func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { return errOpNoSupport } - -func newControlMessage(opt *rawOpt) (oob []byte) { - return nil -} - -func parseControlMessage(b []byte) (*ControlMessage, error) { - return nil, errOpNoSupport -} - -func marshalControlMessage(cm *ControlMessage) (oob []byte) { - return nil -} diff --git a/fn/vendor/golang.org/x/net/ipv6/control_test.go b/fn/vendor/golang.org/x/net/ipv6/control_test.go new file mode 100644 index 000000000..c186ca99f --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv6/control_test.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "testing" + + "golang.org/x/net/ipv6" +) + +func TestControlMessageParseWithFuzz(t *testing.T) { + var cm ipv6.ControlMessage + for _, fuzz := range []string{ + "\f\x00\x00\x00)\x00\x00\x00.\x00\x00\x00", + "\f\x00\x00\x00)\x00\x00\x00,\x00\x00\x00", + } { + cm.Parse([]byte(fuzz)) + } +} diff --git a/fn/vendor/golang.org/x/net/ipv6/control_unix.go b/fn/vendor/golang.org/x/net/ipv6/control_unix.go index 2af5beb43..66515060a 100644 --- a/fn/vendor/golang.org/x/net/ipv6/control_unix.go +++ b/fn/vendor/golang.org/x/net/ipv6/control_unix.go @@ -1,23 +1,18 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd +// +build darwin dragonfly freebsd linux netbsd openbsd solaris package ipv6 -import ( - "os" - "syscall" +import "golang.org/x/net/internal/socket" - "golang.org/x/net/internal/iana" -) - -func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { opt.Lock() defer opt.Unlock() - if cf&FlagTrafficClass != 0 && sockOpts[ssoReceiveTrafficClass].name > 0 { - if err := setInt(fd, &sockOpts[ssoReceiveTrafficClass], boolint(on)); err != nil { + if so, ok := sockOpts[ssoReceiveTrafficClass]; ok && cf&FlagTrafficClass != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { return err } if on { @@ -26,8 +21,8 @@ func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { opt.clear(FlagTrafficClass) } } - if cf&FlagHopLimit != 0 && sockOpts[ssoReceiveHopLimit].name > 0 { - if err := setInt(fd, &sockOpts[ssoReceiveHopLimit], boolint(on)); err != nil { + if so, ok := sockOpts[ssoReceiveHopLimit]; ok && cf&FlagHopLimit != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { return err } if on { @@ -36,8 +31,8 @@ func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { opt.clear(FlagHopLimit) } } - if cf&flagPacketInfo != 0 && sockOpts[ssoReceivePacketInfo].name > 0 { - if err := setInt(fd, &sockOpts[ssoReceivePacketInfo], boolint(on)); err != nil { + if so, ok := sockOpts[ssoReceivePacketInfo]; ok && cf&flagPacketInfo != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { return err } if on { @@ -46,8 +41,8 @@ func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { opt.clear(cf & flagPacketInfo) } } - if cf&FlagPathMTU != 0 && sockOpts[ssoReceivePathMTU].name > 0 { - if err := setInt(fd, &sockOpts[ssoReceivePathMTU], boolint(on)); err != nil { + if so, ok := sockOpts[ssoReceivePathMTU]; ok && cf&FlagPathMTU != 0 { + if err := so.SetInt(c, 
boolint(on)); err != nil { return err } if on { @@ -58,109 +53,3 @@ func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { } return nil } - -func newControlMessage(opt *rawOpt) (oob []byte) { - opt.RLock() - var l int - if opt.isset(FlagTrafficClass) && ctlOpts[ctlTrafficClass].name > 0 { - l += syscall.CmsgSpace(ctlOpts[ctlTrafficClass].length) - } - if opt.isset(FlagHopLimit) && ctlOpts[ctlHopLimit].name > 0 { - l += syscall.CmsgSpace(ctlOpts[ctlHopLimit].length) - } - if opt.isset(flagPacketInfo) && ctlOpts[ctlPacketInfo].name > 0 { - l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length) - } - if opt.isset(FlagPathMTU) && ctlOpts[ctlPathMTU].name > 0 { - l += syscall.CmsgSpace(ctlOpts[ctlPathMTU].length) - } - if l > 0 { - oob = make([]byte, l) - b := oob - if opt.isset(FlagTrafficClass) && ctlOpts[ctlTrafficClass].name > 0 { - b = ctlOpts[ctlTrafficClass].marshal(b, nil) - } - if opt.isset(FlagHopLimit) && ctlOpts[ctlHopLimit].name > 0 { - b = ctlOpts[ctlHopLimit].marshal(b, nil) - } - if opt.isset(flagPacketInfo) && ctlOpts[ctlPacketInfo].name > 0 { - b = ctlOpts[ctlPacketInfo].marshal(b, nil) - } - if opt.isset(FlagPathMTU) && ctlOpts[ctlPathMTU].name > 0 { - b = ctlOpts[ctlPathMTU].marshal(b, nil) - } - } - opt.RUnlock() - return -} - -func parseControlMessage(b []byte) (*ControlMessage, error) { - if len(b) == 0 { - return nil, nil - } - cmsgs, err := syscall.ParseSocketControlMessage(b) - if err != nil { - return nil, os.NewSyscallError("parse socket control message", err) - } - cm := &ControlMessage{} - for _, m := range cmsgs { - if m.Header.Level != iana.ProtocolIPv6 { - continue - } - switch int(m.Header.Type) { - case ctlOpts[ctlTrafficClass].name: - ctlOpts[ctlTrafficClass].parse(cm, m.Data[:]) - case ctlOpts[ctlHopLimit].name: - ctlOpts[ctlHopLimit].parse(cm, m.Data[:]) - case ctlOpts[ctlPacketInfo].name: - ctlOpts[ctlPacketInfo].parse(cm, m.Data[:]) - case ctlOpts[ctlPathMTU].name: - ctlOpts[ctlPathMTU].parse(cm, m.Data[:]) - 
} - } - return cm, nil -} - -func marshalControlMessage(cm *ControlMessage) (oob []byte) { - if cm == nil { - return - } - var l int - tclass := false - if ctlOpts[ctlTrafficClass].name > 0 && cm.TrafficClass > 0 { - tclass = true - l += syscall.CmsgSpace(ctlOpts[ctlTrafficClass].length) - } - hoplimit := false - if ctlOpts[ctlHopLimit].name > 0 && cm.HopLimit > 0 { - hoplimit = true - l += syscall.CmsgSpace(ctlOpts[ctlHopLimit].length) - } - pktinfo := false - if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To16() != nil && cm.Src.To4() == nil || cm.IfIndex > 0) { - pktinfo = true - l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length) - } - nexthop := false - if ctlOpts[ctlNextHop].name > 0 && cm.NextHop.To16() != nil && cm.NextHop.To4() == nil { - nexthop = true - l += syscall.CmsgSpace(ctlOpts[ctlNextHop].length) - } - if l > 0 { - oob = make([]byte, l) - b := oob - if tclass { - b = ctlOpts[ctlTrafficClass].marshal(b, cm) - } - if hoplimit { - b = ctlOpts[ctlHopLimit].marshal(b, cm) - } - if pktinfo { - b = ctlOpts[ctlPacketInfo].marshal(b, cm) - } - if nexthop { - b = ctlOpts[ctlNextHop].marshal(b, cm) - } - } - return -} diff --git a/fn/vendor/golang.org/x/net/ipv6/control_windows.go b/fn/vendor/golang.org/x/net/ipv6/control_windows.go index 72fdc1b03..ef2563b3f 100644 --- a/fn/vendor/golang.org/x/net/ipv6/control_windows.go +++ b/fn/vendor/golang.org/x/net/ipv6/control_windows.go @@ -1,27 +1,16 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package ipv6 -import "syscall" +import ( + "syscall" -func setControlMessage(fd syscall.Handle, opt *rawOpt, cf ControlFlags, on bool) error { + "golang.org/x/net/internal/socket" +) + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { // TODO(mikio): implement this return syscall.EWINDOWS } - -func newControlMessage(opt *rawOpt) (oob []byte) { - // TODO(mikio): implement this - return nil -} - -func parseControlMessage(b []byte) (*ControlMessage, error) { - // TODO(mikio): implement this - return nil, syscall.EWINDOWS -} - -func marshalControlMessage(cm *ControlMessage) (oob []byte) { - // TODO(mikio): implement this - return nil -} diff --git a/fn/vendor/golang.org/x/net/ipv6/defs_darwin.go b/fn/vendor/golang.org/x/net/ipv6/defs_darwin.go index 4c7f476a8..55ddc116f 100644 --- a/fn/vendor/golang.org/x/net/ipv6/defs_darwin.go +++ b/fn/vendor/golang.org/x/net/ipv6/defs_darwin.go @@ -83,30 +83,30 @@ const ( sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - sysSizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - sysSizeofGroupReq = C.sizeof_struct_group_req - sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req - sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter ) -type sysSockaddrStorage C.struct_sockaddr_storage +type sockaddrStorage C.struct_sockaddr_storage 
-type sysSockaddrInet6 C.struct_sockaddr_in6 +type sockaddrInet6 C.struct_sockaddr_in6 -type sysInet6Pktinfo C.struct_in6_pktinfo +type inet6Pktinfo C.struct_in6_pktinfo -type sysIPv6Mtuinfo C.struct_ip6_mtuinfo +type ipv6Mtuinfo C.struct_ip6_mtuinfo -type sysIPv6Mreq C.struct_ipv6_mreq +type ipv6Mreq C.struct_ipv6_mreq -type sysICMPv6Filter C.struct_icmp6_filter +type icmpv6Filter C.struct_icmp6_filter -type sysGroupReq C.struct_group_req +type groupReq C.struct_group_req -type sysGroupSourceReq C.struct_group_source_req +type groupSourceReq C.struct_group_source_req diff --git a/fn/vendor/golang.org/x/net/ipv6/defs_dragonfly.go b/fn/vendor/golang.org/x/net/ipv6/defs_dragonfly.go index c72487ceb..a4c383a51 100644 --- a/fn/vendor/golang.org/x/net/ipv6/defs_dragonfly.go +++ b/fn/vendor/golang.org/x/net/ipv6/defs_dragonfly.go @@ -64,21 +64,21 @@ const ( sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter ) -type sysSockaddrInet6 C.struct_sockaddr_in6 +type sockaddrInet6 C.struct_sockaddr_in6 -type sysInet6Pktinfo C.struct_in6_pktinfo +type inet6Pktinfo C.struct_in6_pktinfo -type sysIPv6Mtuinfo C.struct_ip6_mtuinfo +type ipv6Mtuinfo C.struct_ip6_mtuinfo -type sysIPv6Mreq C.struct_ipv6_mreq +type ipv6Mreq C.struct_ipv6_mreq -type sysICMPv6Filter C.struct_icmp6_filter +type icmpv6Filter C.struct_icmp6_filter diff --git a/fn/vendor/golang.org/x/net/ipv6/defs_freebsd.go b/fn/vendor/golang.org/x/net/ipv6/defs_freebsd.go index 
de199ec6a..53e625389 100644 --- a/fn/vendor/golang.org/x/net/ipv6/defs_freebsd.go +++ b/fn/vendor/golang.org/x/net/ipv6/defs_freebsd.go @@ -76,30 +76,30 @@ const ( sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - sysSizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - sysSizeofGroupReq = C.sizeof_struct_group_req - sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req - sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter ) -type sysSockaddrStorage C.struct_sockaddr_storage +type sockaddrStorage C.struct_sockaddr_storage -type sysSockaddrInet6 C.struct_sockaddr_in6 +type sockaddrInet6 C.struct_sockaddr_in6 -type sysInet6Pktinfo C.struct_in6_pktinfo +type inet6Pktinfo C.struct_in6_pktinfo -type sysIPv6Mtuinfo C.struct_ip6_mtuinfo +type ipv6Mtuinfo C.struct_ip6_mtuinfo -type sysIPv6Mreq C.struct_ipv6_mreq +type ipv6Mreq C.struct_ipv6_mreq -type sysGroupReq C.struct_group_req +type groupReq C.struct_group_req -type sysGroupSourceReq C.struct_group_source_req +type groupSourceReq C.struct_group_source_req -type sysICMPv6Filter C.struct_icmp6_filter +type icmpv6Filter C.struct_icmp6_filter diff --git a/fn/vendor/golang.org/x/net/ipv6/defs_linux.go b/fn/vendor/golang.org/x/net/ipv6/defs_linux.go index d83abce35..3308cb2c3 100644 --- a/fn/vendor/golang.org/x/net/ipv6/defs_linux.go +++ 
b/fn/vendor/golang.org/x/net/ipv6/defs_linux.go @@ -13,6 +13,8 @@ package ipv6 #include #include #include +#include +#include */ import "C" @@ -104,33 +106,42 @@ const ( sysICMPV6_FILTER_BLOCKOTHERS = C.ICMPV6_FILTER_BLOCKOTHERS sysICMPV6_FILTER_PASSONLY = C.ICMPV6_FILTER_PASSONLY - sysSizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage - sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - sysSizeofIPv6FlowlabelReq = C.sizeof_struct_in6_flowlabel_req + sysSOL_SOCKET = C.SOL_SOCKET + sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER - sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - sysSizeofGroupReq = C.sizeof_struct_group_req - sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req + sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + sizeofIPv6FlowlabelReq = C.sizeof_struct_in6_flowlabel_req - sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter + + sizeofSockFprog = C.sizeof_struct_sock_fprog ) -type sysKernelSockaddrStorage C.struct___kernel_sockaddr_storage +type kernelSockaddrStorage C.struct___kernel_sockaddr_storage -type sysSockaddrInet6 C.struct_sockaddr_in6 +type sockaddrInet6 C.struct_sockaddr_in6 -type sysInet6Pktinfo C.struct_in6_pktinfo +type inet6Pktinfo C.struct_in6_pktinfo -type sysIPv6Mtuinfo C.struct_ip6_mtuinfo +type ipv6Mtuinfo C.struct_ip6_mtuinfo -type sysIPv6FlowlabelReq C.struct_in6_flowlabel_req +type ipv6FlowlabelReq C.struct_in6_flowlabel_req -type sysIPv6Mreq C.struct_ipv6_mreq +type ipv6Mreq C.struct_ipv6_mreq -type sysGroupReq 
C.struct_group_req +type groupReq C.struct_group_req -type sysGroupSourceReq C.struct_group_source_req +type groupSourceReq C.struct_group_source_req -type sysICMPv6Filter C.struct_icmp6_filter +type icmpv6Filter C.struct_icmp6_filter + +type sockFProg C.struct_sock_fprog + +type sockFilter C.struct_sock_filter diff --git a/fn/vendor/golang.org/x/net/ipv6/defs_netbsd.go b/fn/vendor/golang.org/x/net/ipv6/defs_netbsd.go index 7bd09e8e8..be9ceb9cc 100644 --- a/fn/vendor/golang.org/x/net/ipv6/defs_netbsd.go +++ b/fn/vendor/golang.org/x/net/ipv6/defs_netbsd.go @@ -60,21 +60,21 @@ const ( sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter ) -type sysSockaddrInet6 C.struct_sockaddr_in6 +type sockaddrInet6 C.struct_sockaddr_in6 -type sysInet6Pktinfo C.struct_in6_pktinfo +type inet6Pktinfo C.struct_in6_pktinfo -type sysIPv6Mtuinfo C.struct_ip6_mtuinfo +type ipv6Mtuinfo C.struct_ip6_mtuinfo -type sysIPv6Mreq C.struct_ipv6_mreq +type ipv6Mreq C.struct_ipv6_mreq -type sysICMPv6Filter C.struct_icmp6_filter +type icmpv6Filter C.struct_icmp6_filter diff --git a/fn/vendor/golang.org/x/net/ipv6/defs_openbsd.go b/fn/vendor/golang.org/x/net/ipv6/defs_openbsd.go index 6796d9b2f..177ddf87d 100644 --- a/fn/vendor/golang.org/x/net/ipv6/defs_openbsd.go +++ b/fn/vendor/golang.org/x/net/ipv6/defs_openbsd.go @@ -69,21 +69,21 @@ const ( sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - 
sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter ) -type sysSockaddrInet6 C.struct_sockaddr_in6 +type sockaddrInet6 C.struct_sockaddr_in6 -type sysInet6Pktinfo C.struct_in6_pktinfo +type inet6Pktinfo C.struct_in6_pktinfo -type sysIPv6Mtuinfo C.struct_ip6_mtuinfo +type ipv6Mtuinfo C.struct_ip6_mtuinfo -type sysIPv6Mreq C.struct_ipv6_mreq +type ipv6Mreq C.struct_ipv6_mreq -type sysICMPv6Filter C.struct_icmp6_filter +type icmpv6Filter C.struct_icmp6_filter diff --git a/fn/vendor/golang.org/x/net/ipv6/defs_solaris.go b/fn/vendor/golang.org/x/net/ipv6/defs_solaris.go index 972b17126..0f8ce2b46 100644 --- a/fn/vendor/golang.org/x/net/ipv6/defs_solaris.go +++ b/fn/vendor/golang.org/x/net/ipv6/defs_solaris.go @@ -9,6 +9,8 @@ package ipv6 /* +#include + #include #include */ @@ -53,6 +55,13 @@ const ( sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC @@ -76,21 +85,30 @@ const ( sysICMP6_FILTER = C.ICMP6_FILTER - sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + 
sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req - sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter ) -type sysSockaddrInet6 C.struct_sockaddr_in6 +type sockaddrStorage C.struct_sockaddr_storage -type sysInet6Pktinfo C.struct_in6_pktinfo +type sockaddrInet6 C.struct_sockaddr_in6 -type sysIPv6Mtuinfo C.struct_ip6_mtuinfo +type inet6Pktinfo C.struct_in6_pktinfo -type sysIPv6Mreq C.struct_ipv6_mreq +type ipv6Mtuinfo C.struct_ip6_mtuinfo -type sysICMPv6Filter C.struct_icmp6_filter +type ipv6Mreq C.struct_ipv6_mreq + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req + +type icmpv6Filter C.struct_icmp6_filter diff --git a/fn/vendor/golang.org/x/net/ipv6/dgramopt_posix.go b/fn/vendor/golang.org/x/net/ipv6/dgramopt.go similarity index 69% rename from fn/vendor/golang.org/x/net/ipv6/dgramopt_posix.go rename to fn/vendor/golang.org/x/net/ipv6/dgramopt.go index 93ff2f1af..703dafe84 100644 --- a/fn/vendor/golang.org/x/net/ipv6/dgramopt_posix.go +++ b/fn/vendor/golang.org/x/net/ipv6/dgramopt.go @@ -1,14 +1,14 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd windows - package ipv6 import ( "net" "syscall" + + "golang.org/x/net/bpf" ) // MulticastHopLimit returns the hop limit field value for outgoing @@ -17,11 +17,11 @@ func (c *dgramOpt) MulticastHopLimit() (int, error) { if !c.ok() { return 0, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return 0, err + so, ok := sockOpts[ssoMulticastHopLimit] + if !ok { + return 0, errOpNoSupport } - return getInt(fd, &sockOpts[ssoMulticastHopLimit]) + return so.GetInt(c.Conn) } // SetMulticastHopLimit sets the hop limit field value for future @@ -30,11 +30,11 @@ func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoMulticastHopLimit] + if !ok { + return errOpNoSupport } - return setInt(fd, &sockOpts[ssoMulticastHopLimit], hoplim) + return so.SetInt(c.Conn, hoplim) } // MulticastInterface returns the default interface for multicast @@ -43,11 +43,11 @@ func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { if !c.ok() { return nil, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return nil, err + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return nil, errOpNoSupport } - return getInterface(fd, &sockOpts[ssoMulticastInterface]) + return so.getMulticastInterface(c.Conn) } // SetMulticastInterface sets the default interface for future @@ -56,11 +56,11 @@ func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return errOpNoSupport } - return setInterface(fd, &sockOpts[ssoMulticastInterface], ifi) + return so.setMulticastInterface(c.Conn, ifi) } // MulticastLoopback reports whether transmitted multicast packets @@ -69,11 +69,11 @@ func (c *dgramOpt) MulticastLoopback() (bool, error) { if !c.ok() { return 
false, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return false, err + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return false, errOpNoSupport } - on, err := getInt(fd, &sockOpts[ssoMulticastLoopback]) + on, err := so.GetInt(c.Conn) if err != nil { return false, err } @@ -86,11 +86,11 @@ func (c *dgramOpt) SetMulticastLoopback(on bool) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return errOpNoSupport } - return setInt(fd, &sockOpts[ssoMulticastLoopback], boolint(on)) + return so.SetInt(c.Conn, boolint(on)) } // JoinGroup joins the group address group on the interface ifi. @@ -106,15 +106,15 @@ func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoJoinGroup] + if !ok { + return errOpNoSupport } grp := netAddrToIP16(group) if grp == nil { return errMissingAddress } - return setGroup(fd, &sockOpts[ssoJoinGroup], ifi, grp) + return so.setGroup(c.Conn, ifi, grp) } // LeaveGroup leaves the group address group on the interface ifi @@ -124,15 +124,15 @@ func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoLeaveGroup] + if !ok { + return errOpNoSupport } grp := netAddrToIP16(group) if grp == nil { return errMissingAddress } - return setGroup(fd, &sockOpts[ssoLeaveGroup], ifi, grp) + return so.setGroup(c.Conn, ifi, grp) } // JoinSourceSpecificGroup joins the source-specific group comprising @@ -145,9 +145,9 @@ func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoJoinSourceGroup] + if !ok { + return errOpNoSupport } grp := 
netAddrToIP16(group) if grp == nil { @@ -157,7 +157,7 @@ func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net if src == nil { return errMissingAddress } - return setSourceGroup(fd, &sockOpts[ssoJoinSourceGroup], ifi, grp, src) + return so.setSourceGroup(c.Conn, ifi, grp, src) } // LeaveSourceSpecificGroup leaves the source-specific group on the @@ -166,9 +166,9 @@ func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source ne if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoLeaveSourceGroup] + if !ok { + return errOpNoSupport } grp := netAddrToIP16(group) if grp == nil { @@ -178,7 +178,7 @@ func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source ne if src == nil { return errMissingAddress } - return setSourceGroup(fd, &sockOpts[ssoLeaveSourceGroup], ifi, grp, src) + return so.setSourceGroup(c.Conn, ifi, grp, src) } // ExcludeSourceSpecificGroup excludes the source-specific group from @@ -188,9 +188,9 @@ func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoBlockSourceGroup] + if !ok { + return errOpNoSupport } grp := netAddrToIP16(group) if grp == nil { @@ -200,7 +200,7 @@ func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source if src == nil { return errMissingAddress } - return setSourceGroup(fd, &sockOpts[ssoBlockSourceGroup], ifi, grp, src) + return so.setSourceGroup(c.Conn, ifi, grp, src) } // IncludeSourceSpecificGroup includes the excluded source-specific @@ -209,9 +209,9 @@ func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoUnblockSourceGroup] + if !ok { + return errOpNoSupport } grp := netAddrToIP16(group) if grp == nil { 
@@ -221,22 +221,22 @@ func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source if src == nil { return errMissingAddress } - return setSourceGroup(fd, &sockOpts[ssoUnblockSourceGroup], ifi, grp, src) + return so.setSourceGroup(c.Conn, ifi, grp, src) } // Checksum reports whether the kernel will compute, store or verify a -// checksum for both incoming and outgoing packets. If on is true, it +// checksum for both incoming and outgoing packets. If on is true, it // returns an offset in bytes into the data of where the checksum // field is located. func (c *dgramOpt) Checksum() (on bool, offset int, err error) { if !c.ok() { return false, 0, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return false, 0, err + so, ok := sockOpts[ssoChecksum] + if !ok { + return false, 0, errOpNoSupport } - offset, err = getInt(fd, &sockOpts[ssoChecksum]) + offset, err = so.GetInt(c.Conn) if err != nil { return false, 0, err } @@ -246,21 +246,21 @@ func (c *dgramOpt) Checksum() (on bool, offset int, err error) { return true, offset, nil } -// SetChecksum enables the kernel checksum processing. If on is ture, +// SetChecksum enables the kernel checksum processing. If on is ture, // the offset should be an offset in bytes into the data of where the // checksum field is located. func (c *dgramOpt) SetChecksum(on bool, offset int) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoChecksum] + if !ok { + return errOpNoSupport } if !on { offset = -1 } - return setInt(fd, &sockOpts[ssoChecksum], offset) + return so.SetInt(c.Conn, offset) } // ICMPFilter returns an ICMP filter. 
@@ -268,11 +268,11 @@ func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { if !c.ok() { return nil, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return nil, err + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return nil, errOpNoSupport } - return getICMPFilter(fd, &sockOpts[ssoICMPFilter]) + return so.getICMPFilter(c.Conn) } // SetICMPFilter deploys the ICMP filter. @@ -280,9 +280,23 @@ func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return errOpNoSupport } - return setICMPFilter(fd, &sockOpts[ssoICMPFilter], f) + return so.setICMPFilter(c.Conn, f) +} + +// SetBPF attaches a BPF program to the connection. +// +// Only supported on Linux. +func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoAttachFilter] + if !ok { + return errOpNoSupport + } + return so.setBPF(c.Conn, filter) } diff --git a/fn/vendor/golang.org/x/net/ipv6/dgramopt_stub.go b/fn/vendor/golang.org/x/net/ipv6/dgramopt_stub.go deleted file mode 100644 index fb067fb2f..000000000 --- a/fn/vendor/golang.org/x/net/ipv6/dgramopt_stub.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build nacl plan9 solaris - -package ipv6 - -import "net" - -// MulticastHopLimit returns the hop limit field value for outgoing -// multicast packets. -func (c *dgramOpt) MulticastHopLimit() (int, error) { - return 0, errOpNoSupport -} - -// SetMulticastHopLimit sets the hop limit field value for future -// outgoing multicast packets. -func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error { - return errOpNoSupport -} - -// MulticastInterface returns the default interface for multicast -// packet transmissions. 
-func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { - return nil, errOpNoSupport -} - -// SetMulticastInterface sets the default interface for future -// multicast packet transmissions. -func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { - return errOpNoSupport -} - -// MulticastLoopback reports whether transmitted multicast packets -// should be copied and send back to the originator. -func (c *dgramOpt) MulticastLoopback() (bool, error) { - return false, errOpNoSupport -} - -// SetMulticastLoopback sets whether transmitted multicast packets -// should be copied and send back to the originator. -func (c *dgramOpt) SetMulticastLoopback(on bool) error { - return errOpNoSupport -} - -// JoinGroup joins the group address group on the interface ifi. -// By default all sources that can cast data to group are accepted. -// It's possible to mute and unmute data transmission from a specific -// source by using ExcludeSourceSpecificGroup and -// IncludeSourceSpecificGroup. -// JoinGroup uses the system assigned multicast interface when ifi is -// nil, although this is not recommended because the assignment -// depends on platforms and sometimes it might require routing -// configuration. -func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { - return errOpNoSupport -} - -// LeaveGroup leaves the group address group on the interface ifi -// regardless of whether the group is any-source group or -// source-specific group. -func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { - return errOpNoSupport -} - -// JoinSourceSpecificGroup joins the source-specific group comprising -// group and source on the interface ifi. -// JoinSourceSpecificGroup uses the system assigned multicast -// interface when ifi is nil, although this is not recommended because -// the assignment depends on platforms and sometimes it might require -// routing configuration. 
-func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { - return errOpNoSupport -} - -// LeaveSourceSpecificGroup leaves the source-specific group on the -// interface ifi. -func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { - return errOpNoSupport -} - -// ExcludeSourceSpecificGroup excludes the source-specific group from -// the already joined any-source groups by JoinGroup on the interface -// ifi. -func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { - return errOpNoSupport -} - -// IncludeSourceSpecificGroup includes the excluded source-specific -// group by ExcludeSourceSpecificGroup again on the interface ifi. -func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { - return errOpNoSupport -} - -// Checksum reports whether the kernel will compute, store or verify a -// checksum for both incoming and outgoing packets. If on is true, it -// returns an offset in bytes into the data of where the checksum -// field is located. -func (c *dgramOpt) Checksum() (on bool, offset int, err error) { - return false, 0, errOpNoSupport -} - -// SetChecksum enables the kernel checksum processing. If on is ture, -// the offset should be an offset in bytes into the data of where the -// checksum field is located. -func (c *dgramOpt) SetChecksum(on bool, offset int) error { - return errOpNoSupport -} - -// ICMPFilter returns an ICMP filter. -func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { - return nil, errOpNoSupport -} - -// SetICMPFilter deploys the ICMP filter. 
-func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { - return errOpNoSupport -} diff --git a/fn/vendor/golang.org/x/net/ipv6/doc.go b/fn/vendor/golang.org/x/net/ipv6/doc.go index dd13aa21f..eaa24c580 100644 --- a/fn/vendor/golang.org/x/net/ipv6/doc.go +++ b/fn/vendor/golang.org/x/net/ipv6/doc.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -9,22 +9,23 @@ // manipulation of IPv6 facilities. // // The IPv6 protocol is defined in RFC 2460. -// Basic and advanced socket interface extensions are defined in RFC -// 3493 and RFC 3542. -// Socket interface extensions for multicast source filters are -// defined in RFC 3678. +// Socket interface extensions are defined in RFC 3493, RFC 3542 and +// RFC 3678. // MLDv1 and MLDv2 are defined in RFC 2710 and RFC 3810. // Source-specific multicast is defined in RFC 4607. // +// On Darwin, this package requires OS X Mavericks version 10.9 or +// above, or equivalent. +// // // Unicasting // // The options for unicasting are available for net.TCPConn, // net.UDPConn and net.IPConn which are created as network connections -// that use the IPv6 transport. When a single TCP connection carrying +// that use the IPv6 transport. When a single TCP connection carrying // a data flow of multiple packets needs to indicate the flow is -// important, ipv6.Conn is used to set the traffic class field on the -// IPv6 header for each packet. +// important, Conn is used to set the traffic class field on the IPv6 +// header for each packet. // // ln, err := net.Listen("tcp6", "[::]:1024") // if err != nil { @@ -56,7 +57,7 @@ // // The options for multicasting are available for net.UDPConn and // net.IPconn which are created as network connections that use the -// IPv6 transport. A few network facilities must be prepared before +// IPv6 transport. 
A few network facilities must be prepared before // you begin multicasting, at a minimum joining network interfaces and // multicast groups. // @@ -80,7 +81,7 @@ // defer c.Close() // // Second, the application joins multicast groups, starts listening to -// the groups on the specified network interfaces. Note that the +// the groups on the specified network interfaces. Note that the // service port for transport layer protocol does not matter with this // operation as joining groups affects only network and link layer // protocols, such as IPv6 and Ethernet. @@ -94,10 +95,10 @@ // } // // The application might set per packet control message transmissions -// between the protocol stack within the kernel. When the application +// between the protocol stack within the kernel. When the application // needs a destination address on an incoming packet, -// SetControlMessage of ipv6.PacketConn is used to enable control -// message transmissons. +// SetControlMessage of PacketConn is used to enable control message +// transmissions. // // if err := p.SetControlMessage(ipv6.FlagDst, true); err != nil { // // error handling @@ -143,7 +144,7 @@ // More multicasting // // An application that uses PacketConn may join multiple multicast -// groups. For example, a UDP listener with port 1024 might join two +// groups. For example, a UDP listener with port 1024 might join two // different groups across over two different network interfaces by // using: // @@ -164,7 +165,7 @@ // } // // It is possible for multiple UDP listeners that listen on the same -// UDP port to join the same multicast group. The net package will +// UDP port to join the same multicast group. The net package will // provide a socket that listens to a wildcard address with reusable // UDP port when an appropriate multicast address prefix is passed to // the net.ListenPacket or net.ListenUDP. 
@@ -238,3 +239,5 @@ // In the fallback case, ExcludeSourceSpecificGroup and // IncludeSourceSpecificGroup may return an error. package ipv6 // import "golang.org/x/net/ipv6" + +// BUG(mikio): This package is not implemented on NaCl and Plan 9. diff --git a/fn/vendor/golang.org/x/net/ipv6/endpoint.go b/fn/vendor/golang.org/x/net/ipv6/endpoint.go index 966eaa892..0624c1740 100644 --- a/fn/vendor/golang.org/x/net/ipv6/endpoint.go +++ b/fn/vendor/golang.org/x/net/ipv6/endpoint.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -8,8 +8,15 @@ import ( "net" "syscall" "time" + + "golang.org/x/net/internal/socket" ) +// BUG(mikio): On Windows, the JoinSourceSpecificGroup, +// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup methods of PacketConn are not +// implemented. + // A Conn represents a network endpoint that uses IPv6 transport. // It allows to set basic IP-level socket options such as traffic // class and hop limit. @@ -18,7 +25,7 @@ type Conn struct { } type genericOpt struct { - net.Conn + *socket.Conn } func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } @@ -26,14 +33,14 @@ func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } // PathMTU returns a path MTU value for the destination associated // with the endpoint. func (c *Conn) PathMTU() (int, error) { - if !c.genericOpt.ok() { + if !c.ok() { return 0, syscall.EINVAL } - fd, err := c.genericOpt.sysfd() - if err != nil { - return 0, err + so, ok := sockOpts[ssoPathMTU] + if !ok { + return 0, errOpNoSupport } - _, mtu, err := getMTUInfo(fd, &sockOpts[ssoPathMTU]) + _, mtu, err := so.getMTUInfo(c.Conn) if err != nil { return 0, err } @@ -42,14 +49,15 @@ func (c *Conn) PathMTU() (int, error) { // NewConn returns a new Conn. 
func NewConn(c net.Conn) *Conn { + cc, _ := socket.NewConn(c) return &Conn{ - genericOpt: genericOpt{Conn: c}, + genericOpt: genericOpt{Conn: cc}, } } // A PacketConn represents a packet network endpoint that uses IPv6 -// transport. It is used to control several IP-level socket options -// including IPv6 header manipulation. It also provides datagram +// transport. It is used to control several IP-level socket options +// including IPv6 header manipulation. It also provides datagram // based network I/O methods specific to the IPv6 and higher layer // protocols such as OSPF, GRE, and UDP. type PacketConn struct { @@ -59,10 +67,10 @@ type PacketConn struct { } type dgramOpt struct { - net.PacketConn + *socket.Conn } -func (c *dgramOpt) ok() bool { return c != nil && c.PacketConn != nil } +func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil } // SetControlMessage allows to receive the per packet basis IP-level // socket options. @@ -70,11 +78,7 @@ func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { if !c.payloadHandler.ok() { return syscall.EINVAL } - fd, err := c.payloadHandler.sysfd() - if err != nil { - return err - } - return setControlMessage(fd, &c.payloadHandler.rawOpt, cf, on) + return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on) } // SetDeadline sets the read and write deadlines associated with the @@ -115,9 +119,10 @@ func (c *PacketConn) Close() error { // NewPacketConn returns a new PacketConn using c as its underlying // transport. 
func NewPacketConn(c net.PacketConn) *PacketConn { + cc, _ := socket.NewConn(c.(net.Conn)) return &PacketConn{ - genericOpt: genericOpt{Conn: c.(net.Conn)}, - dgramOpt: dgramOpt{PacketConn: c}, - payloadHandler: payloadHandler{PacketConn: c}, + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + payloadHandler: payloadHandler{PacketConn: c, Conn: cc}, } } diff --git a/fn/vendor/golang.org/x/net/ipv6/gen.go b/fn/vendor/golang.org/x/net/ipv6/gen.go index b1e0ea378..41886ec72 100644 --- a/fn/vendor/golang.org/x/net/ipv6/gen.go +++ b/fn/vendor/golang.org/x/net/ipv6/gen.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -52,15 +52,6 @@ func genzsys() error { if err != nil { return err } - // The ipv6 pacakge still supports go1.2, and so we need to - // take care of additional platforms in go1.3 and above for - // working with go1.2. 
- switch { - case runtime.GOOS == "dragonfly" || runtime.GOOS == "solaris": - b = bytes.Replace(b, []byte("package ipv6\n"), []byte("// +build "+runtime.GOOS+"\n\npackage ipv6\n"), 1) - case runtime.GOOS == "linux" && (runtime.GOARCH == "arm64" || runtime.GOARCH == "mips64" || runtime.GOARCH == "mips64le" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le"): - b = bytes.Replace(b, []byte("package ipv6\n"), []byte("// +build "+runtime.GOOS+","+runtime.GOARCH+"\n\npackage ipv6\n"), 1) - } b, err = format.Source(b) if err != nil { return err diff --git a/fn/vendor/golang.org/x/net/ipv6/genericopt_posix.go b/fn/vendor/golang.org/x/net/ipv6/genericopt.go similarity index 61% rename from fn/vendor/golang.org/x/net/ipv6/genericopt_posix.go rename to fn/vendor/golang.org/x/net/ipv6/genericopt.go index dd77a0167..e9dbc2e18 100644 --- a/fn/vendor/golang.org/x/net/ipv6/genericopt_posix.go +++ b/fn/vendor/golang.org/x/net/ipv6/genericopt.go @@ -1,9 +1,7 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd windows - package ipv6 import "syscall" @@ -14,11 +12,11 @@ func (c *genericOpt) TrafficClass() (int, error) { if !c.ok() { return 0, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return 0, err + so, ok := sockOpts[ssoTrafficClass] + if !ok { + return 0, errOpNoSupport } - return getInt(fd, &sockOpts[ssoTrafficClass]) + return so.GetInt(c.Conn) } // SetTrafficClass sets the traffic class field value for future @@ -27,11 +25,11 @@ func (c *genericOpt) SetTrafficClass(tclass int) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoTrafficClass] + if !ok { + return errOpNoSupport } - return setInt(fd, &sockOpts[ssoTrafficClass], tclass) + return so.SetInt(c.Conn, tclass) } // HopLimit returns the hop limit field value for outgoing packets. @@ -39,11 +37,11 @@ func (c *genericOpt) HopLimit() (int, error) { if !c.ok() { return 0, syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return 0, err + so, ok := sockOpts[ssoHopLimit] + if !ok { + return 0, errOpNoSupport } - return getInt(fd, &sockOpts[ssoHopLimit]) + return so.GetInt(c.Conn) } // SetHopLimit sets the hop limit field value for future outgoing @@ -52,9 +50,9 @@ func (c *genericOpt) SetHopLimit(hoplim int) error { if !c.ok() { return syscall.EINVAL } - fd, err := c.sysfd() - if err != nil { - return err + so, ok := sockOpts[ssoHopLimit] + if !ok { + return errOpNoSupport } - return setInt(fd, &sockOpts[ssoHopLimit], hoplim) + return so.SetInt(c.Conn, hoplim) } diff --git a/fn/vendor/golang.org/x/net/ipv6/genericopt_stub.go b/fn/vendor/golang.org/x/net/ipv6/genericopt_stub.go deleted file mode 100644 index f5c372242..000000000 --- a/fn/vendor/golang.org/x/net/ipv6/genericopt_stub.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build nacl plan9 solaris - -package ipv6 - -// TrafficClass returns the traffic class field value for outgoing -// packets. -func (c *genericOpt) TrafficClass() (int, error) { - return 0, errOpNoSupport -} - -// SetTrafficClass sets the traffic class field value for future -// outgoing packets. -func (c *genericOpt) SetTrafficClass(tclass int) error { - return errOpNoSupport -} - -// HopLimit returns the hop limit field value for outgoing packets. -func (c *genericOpt) HopLimit() (int, error) { - return 0, errOpNoSupport -} - -// SetHopLimit sets the hop limit field value for future outgoing -// packets. -func (c *genericOpt) SetHopLimit(hoplim int) error { - return errOpNoSupport -} diff --git a/fn/vendor/golang.org/x/net/ipv6/header.go b/fn/vendor/golang.org/x/net/ipv6/header.go index ad73776be..e05cb08b2 100644 --- a/fn/vendor/golang.org/x/net/ipv6/header.go +++ b/fn/vendor/golang.org/x/net/ipv6/header.go @@ -5,6 +5,7 @@ package ipv6 import ( + "encoding/binary" "fmt" "net" ) @@ -42,7 +43,7 @@ func ParseHeader(b []byte) (*Header, error) { Version: int(b[0]) >> 4, TrafficClass: int(b[0]&0x0f)<<4 | int(b[1])>>4, FlowLabel: int(b[1]&0x0f)<<16 | int(b[2])<<8 | int(b[3]), - PayloadLen: int(b[4])<<8 | int(b[5]), + PayloadLen: int(binary.BigEndian.Uint16(b[4:6])), NextHeader: int(b[6]), HopLimit: int(b[7]), } diff --git a/fn/vendor/golang.org/x/net/ipv6/helper.go b/fn/vendor/golang.org/x/net/ipv6/helper.go index 4a6f1069b..259740132 100644 --- a/fn/vendor/golang.org/x/net/ipv6/helper.go +++ b/fn/vendor/golang.org/x/net/ipv6/helper.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -37,3 +37,21 @@ func netAddrToIP16(a net.Addr) net.IP { } return nil } + +func opAddr(a net.Addr) net.Addr { + switch a.(type) { + case *net.TCPAddr: + if a == nil { + return nil + } + case *net.UDPAddr: + if a == nil { + return nil + } + case *net.IPAddr: + if a == nil { + return nil + } + } + return a +} diff --git a/fn/vendor/golang.org/x/net/ipv6/helper_stub.go b/fn/vendor/golang.org/x/net/ipv6/helper_stub.go deleted file mode 100644 index 20354ab2f..000000000 --- a/fn/vendor/golang.org/x/net/ipv6/helper_stub.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build nacl plan9 solaris - -package ipv6 - -func (c *genericOpt) sysfd() (int, error) { - return 0, errOpNoSupport -} - -func (c *dgramOpt) sysfd() (int, error) { - return 0, errOpNoSupport -} - -func (c *payloadHandler) sysfd() (int, error) { - return 0, errOpNoSupport -} diff --git a/fn/vendor/golang.org/x/net/ipv6/helper_unix.go b/fn/vendor/golang.org/x/net/ipv6/helper_unix.go deleted file mode 100644 index 92868ed29..000000000 --- a/fn/vendor/golang.org/x/net/ipv6/helper_unix.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin dragonfly freebsd linux netbsd openbsd - -package ipv6 - -import ( - "net" - "reflect" -) - -func (c *genericOpt) sysfd() (int, error) { - switch p := c.Conn.(type) { - case *net.TCPConn, *net.UDPConn, *net.IPConn: - return sysfd(p) - } - return 0, errInvalidConnType -} - -func (c *dgramOpt) sysfd() (int, error) { - switch p := c.PacketConn.(type) { - case *net.UDPConn, *net.IPConn: - return sysfd(p.(net.Conn)) - } - return 0, errInvalidConnType -} - -func (c *payloadHandler) sysfd() (int, error) { - return sysfd(c.PacketConn.(net.Conn)) -} - -func sysfd(c net.Conn) (int, error) { - cv := reflect.ValueOf(c) - switch ce := cv.Elem(); ce.Kind() { - case reflect.Struct: - nfd := ce.FieldByName("conn").FieldByName("fd") - switch fe := nfd.Elem(); fe.Kind() { - case reflect.Struct: - fd := fe.FieldByName("sysfd") - return int(fd.Int()), nil - } - } - return 0, errInvalidConnType -} diff --git a/fn/vendor/golang.org/x/net/ipv6/helper_windows.go b/fn/vendor/golang.org/x/net/ipv6/helper_windows.go deleted file mode 100644 index 28c401b53..000000000 --- a/fn/vendor/golang.org/x/net/ipv6/helper_windows.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ipv6 - -import ( - "net" - "reflect" - "syscall" -) - -func (c *genericOpt) sysfd() (syscall.Handle, error) { - switch p := c.Conn.(type) { - case *net.TCPConn, *net.UDPConn, *net.IPConn: - return sysfd(p) - } - return syscall.InvalidHandle, errInvalidConnType -} - -func (c *dgramOpt) sysfd() (syscall.Handle, error) { - switch p := c.PacketConn.(type) { - case *net.UDPConn, *net.IPConn: - return sysfd(p.(net.Conn)) - } - return syscall.InvalidHandle, errInvalidConnType -} - -func (c *payloadHandler) sysfd() (syscall.Handle, error) { - return sysfd(c.PacketConn.(net.Conn)) -} - -func sysfd(c net.Conn) (syscall.Handle, error) { - cv := reflect.ValueOf(c) - switch ce := cv.Elem(); ce.Kind() { - case reflect.Struct: - netfd := ce.FieldByName("conn").FieldByName("fd") - switch fe := netfd.Elem(); fe.Kind() { - case reflect.Struct: - fd := fe.FieldByName("sysfd") - return syscall.Handle(fd.Uint()), nil - } - } - return syscall.InvalidHandle, errInvalidConnType -} diff --git a/fn/vendor/golang.org/x/net/ipv6/icmp.go b/fn/vendor/golang.org/x/net/ipv6/icmp.go index a2de65a08..ff21d1071 100644 --- a/fn/vendor/golang.org/x/net/ipv6/icmp.go +++ b/fn/vendor/golang.org/x/net/ipv6/icmp.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -6,6 +6,9 @@ package ipv6 import "golang.org/x/net/internal/iana" +// BUG(mikio): On Windows, methods related to ICMPFilter are not +// implemented. + // An ICMPType represents a type of ICMP message. type ICMPType int @@ -31,7 +34,7 @@ func (typ ICMPType) Protocol() int { // packets not explicitly addressed to itself, and a host means a node // that is not a router. 
type ICMPFilter struct { - sysICMPv6Filter + icmpv6Filter } // Accept accepts incoming ICMP packets including the type field value diff --git a/fn/vendor/golang.org/x/net/ipv6/icmp_bsd.go b/fn/vendor/golang.org/x/net/ipv6/icmp_bsd.go index 30e3ce424..e1a791de4 100644 --- a/fn/vendor/golang.org/x/net/ipv6/icmp_bsd.go +++ b/fn/vendor/golang.org/x/net/ipv6/icmp_bsd.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -6,15 +6,15 @@ package ipv6 -func (f *sysICMPv6Filter) accept(typ ICMPType) { +func (f *icmpv6Filter) accept(typ ICMPType) { f.Filt[typ>>5] |= 1 << (uint32(typ) & 31) } -func (f *sysICMPv6Filter) block(typ ICMPType) { +func (f *icmpv6Filter) block(typ ICMPType) { f.Filt[typ>>5] &^= 1 << (uint32(typ) & 31) } -func (f *sysICMPv6Filter) setAll(block bool) { +func (f *icmpv6Filter) setAll(block bool) { for i := range f.Filt { if block { f.Filt[i] = 0 @@ -24,6 +24,6 @@ func (f *sysICMPv6Filter) setAll(block bool) { } } -func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool { +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { return f.Filt[typ>>5]&(1<<(uint32(typ)&31)) == 0 } diff --git a/fn/vendor/golang.org/x/net/ipv6/icmp_linux.go b/fn/vendor/golang.org/x/net/ipv6/icmp_linux.go index a67ecf690..647f6b44f 100644 --- a/fn/vendor/golang.org/x/net/ipv6/icmp_linux.go +++ b/fn/vendor/golang.org/x/net/ipv6/icmp_linux.go @@ -1,18 +1,18 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package ipv6 -func (f *sysICMPv6Filter) accept(typ ICMPType) { +func (f *icmpv6Filter) accept(typ ICMPType) { f.Data[typ>>5] &^= 1 << (uint32(typ) & 31) } -func (f *sysICMPv6Filter) block(typ ICMPType) { +func (f *icmpv6Filter) block(typ ICMPType) { f.Data[typ>>5] |= 1 << (uint32(typ) & 31) } -func (f *sysICMPv6Filter) setAll(block bool) { +func (f *icmpv6Filter) setAll(block bool) { for i := range f.Data { if block { f.Data[i] = 1<<32 - 1 @@ -22,6 +22,6 @@ func (f *sysICMPv6Filter) setAll(block bool) { } } -func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool { +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { return f.Data[typ>>5]&(1<<(uint32(typ)&31)) != 0 } diff --git a/fn/vendor/golang.org/x/net/ipv6/icmp_solaris.go b/fn/vendor/golang.org/x/net/ipv6/icmp_solaris.go index a942f354c..7c23bb1cf 100644 --- a/fn/vendor/golang.org/x/net/ipv6/icmp_solaris.go +++ b/fn/vendor/golang.org/x/net/ipv6/icmp_solaris.go @@ -1,24 +1,27 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build solaris - package ipv6 -func (f *sysICMPv6Filter) accept(typ ICMPType) { - // TODO(mikio): implement this +func (f *icmpv6Filter) accept(typ ICMPType) { + f.X__icmp6_filt[typ>>5] |= 1 << (uint32(typ) & 31) } -func (f *sysICMPv6Filter) block(typ ICMPType) { - // TODO(mikio): implement this +func (f *icmpv6Filter) block(typ ICMPType) { + f.X__icmp6_filt[typ>>5] &^= 1 << (uint32(typ) & 31) } -func (f *sysICMPv6Filter) setAll(block bool) { - // TODO(mikio): implement this +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.X__icmp6_filt { + if block { + f.X__icmp6_filt[i] = 0 + } else { + f.X__icmp6_filt[i] = 1<<32 - 1 + } + } } -func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool { - // TODO(mikio): implement this - return false +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.X__icmp6_filt[typ>>5]&(1<<(uint32(typ)&31)) == 0 } diff --git a/fn/vendor/golang.org/x/net/ipv6/icmp_stub.go b/fn/vendor/golang.org/x/net/ipv6/icmp_stub.go index c1263ecac..c4b9be6db 100644 --- a/fn/vendor/golang.org/x/net/ipv6/icmp_stub.go +++ b/fn/vendor/golang.org/x/net/ipv6/icmp_stub.go @@ -1,23 +1,23 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build nacl plan9 +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv6 -type sysICMPv6Filter struct { +type icmpv6Filter struct { } -func (f *sysICMPv6Filter) accept(typ ICMPType) { +func (f *icmpv6Filter) accept(typ ICMPType) { } -func (f *sysICMPv6Filter) block(typ ICMPType) { +func (f *icmpv6Filter) block(typ ICMPType) { } -func (f *sysICMPv6Filter) setAll(block bool) { +func (f *icmpv6Filter) setAll(block bool) { } -func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool { +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { return false } diff --git a/fn/vendor/golang.org/x/net/ipv6/icmp_test.go b/fn/vendor/golang.org/x/net/ipv6/icmp_test.go index e192d6d8c..d8e9675dc 100644 --- a/fn/vendor/golang.org/x/net/ipv6/icmp_test.go +++ b/fn/vendor/golang.org/x/net/ipv6/icmp_test.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -34,7 +34,7 @@ func TestICMPString(t *testing.T) { func TestICMPFilter(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } @@ -61,7 +61,7 @@ func TestICMPFilter(t *testing.T) { func TestSetICMPFilter(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if !supportsIPv6 { diff --git a/fn/vendor/golang.org/x/net/ipv6/icmp_windows.go b/fn/vendor/golang.org/x/net/ipv6/icmp_windows.go index 9dcfb8106..443cd0736 100644 --- a/fn/vendor/golang.org/x/net/ipv6/icmp_windows.go +++ b/fn/vendor/golang.org/x/net/ipv6/icmp_windows.go @@ -1,26 +1,22 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ipv6 -type sysICMPv6Filter struct { +func (f *icmpv6Filter) accept(typ ICMPType) { // TODO(mikio): implement this } -func (f *sysICMPv6Filter) accept(typ ICMPType) { +func (f *icmpv6Filter) block(typ ICMPType) { // TODO(mikio): implement this } -func (f *sysICMPv6Filter) block(typ ICMPType) { +func (f *icmpv6Filter) setAll(block bool) { // TODO(mikio): implement this } -func (f *sysICMPv6Filter) setAll(block bool) { - // TODO(mikio): implement this -} - -func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool { +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { // TODO(mikio): implement this return false } diff --git a/fn/vendor/golang.org/x/net/ipv6/mocktransponder_test.go b/fn/vendor/golang.org/x/net/ipv6/mocktransponder_test.go index d587922a1..6efe56c68 100644 --- a/fn/vendor/golang.org/x/net/ipv6/mocktransponder_test.go +++ b/fn/vendor/golang.org/x/net/ipv6/mocktransponder_test.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/fn/vendor/golang.org/x/net/ipv6/multicast_test.go b/fn/vendor/golang.org/x/net/ipv6/multicast_test.go index a3a8979d2..69a21cd38 100644 --- a/fn/vendor/golang.org/x/net/ipv6/multicast_test.go +++ b/fn/vendor/golang.org/x/net/ipv6/multicast_test.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -29,15 +29,15 @@ var packetConnReadWriteMulticastUDPTests = []struct { func TestPacketConnReadWriteMulticastUDP(t *testing.T) { switch runtime.GOOS { - case "freebsd": // due to a bug on loopback marking - // See http://www.freebsd.org/cgi/query-pr.cgi?pr=180065. - t.Skipf("not supported on %s", runtime.GOOS) - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if !supportsIPv6 { t.Skip("ipv6 is not supported") } + if !nettest.SupportsIPv6MulticastDeliveryOnLoopback() { + t.Skipf("multicast delivery doesn't work correctly on %s", runtime.GOOS) + } ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback) if ifi == nil { t.Skipf("not available on %s", runtime.GOOS) @@ -129,15 +129,15 @@ var packetConnReadWriteMulticastICMPTests = []struct { func TestPacketConnReadWriteMulticastICMP(t *testing.T) { switch runtime.GOOS { - case "freebsd": // due to a bug on loopback marking - // See http://www.freebsd.org/cgi/query-pr.cgi?pr=180065. - t.Skipf("not supported on %s", runtime.GOOS) - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if !supportsIPv6 { t.Skip("ipv6 is not supported") } + if !nettest.SupportsIPv6MulticastDeliveryOnLoopback() { + t.Skipf("multicast delivery doesn't work correctly on %s", runtime.GOOS) + } if m, ok := nettest.SupportsRawIPSocket(); !ok { t.Skip(m) } @@ -205,7 +205,11 @@ func TestPacketConnReadWriteMulticastICMP(t *testing.T) { if toggle { psh = nil if err := p.SetChecksum(true, 2); err != nil { - t.Fatal(err) + // Solaris never allows to + // modify ICMP properties. 
+ if runtime.GOOS != "solaris" { + t.Fatal(err) + } } } else { psh = pshicmp diff --git a/fn/vendor/golang.org/x/net/ipv6/multicastlistener_test.go b/fn/vendor/golang.org/x/net/ipv6/multicastlistener_test.go index 9711f7513..b27713e2f 100644 --- a/fn/vendor/golang.org/x/net/ipv6/multicastlistener_test.go +++ b/fn/vendor/golang.org/x/net/ipv6/multicastlistener_test.go @@ -1,11 +1,10 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ipv6_test import ( - "fmt" "net" "runtime" "testing" @@ -22,7 +21,7 @@ var udpMultipleGroupListenerTests = []net.Addr{ func TestUDPSinglePacketConnWithMultipleGroupListeners(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if !supportsIPv6 { @@ -62,7 +61,7 @@ func TestUDPSinglePacketConnWithMultipleGroupListeners(t *testing.T) { func TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if !supportsIPv6 { @@ -70,13 +69,16 @@ func TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) { } for _, gaddr := range udpMultipleGroupListenerTests { - c1, err := net.ListenPacket("udp6", "[ff02::]:1024") // wildcard address with reusable port + c1, err := net.ListenPacket("udp6", "[ff02::]:0") // wildcard address with reusable port if err != nil { t.Fatal(err) } defer c1.Close() - - c2, err := net.ListenPacket("udp6", "[ff02::]:1024") // wildcard address with reusable port + _, port, err := net.SplitHostPort(c1.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + c2, err := net.ListenPacket("udp6", net.JoinHostPort("ff02::", port)) // wildcard address with reusable port if err != 
nil { t.Fatal(err) } @@ -114,7 +116,7 @@ func TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) { func TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if !supportsIPv6 { @@ -132,16 +134,29 @@ func TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { if err != nil { t.Fatal(err) } + port := "0" for i, ifi := range ift { ip, ok := nettest.IsMulticastCapable("ip6", &ifi) if !ok { continue } - c, err := net.ListenPacket("udp6", fmt.Sprintf("[%s%%%s]:1024", ip.String(), ifi.Name)) // unicast address with non-reusable port + c, err := net.ListenPacket("udp6", net.JoinHostPort(ip.String()+"%"+ifi.Name, port)) // unicast address with non-reusable port if err != nil { - t.Fatal(err) + // The listen may fail when the serivce is + // already in use, but it's fine because the + // purpose of this is not to test the + // bookkeeping of IP control block inside the + // kernel. 
+ t.Log(err) + continue } defer c.Close() + if port == "0" { + _, port, err = net.SplitHostPort(c.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + } p := ipv6.NewPacketConn(c) if err := p.JoinGroup(&ifi, &gaddr); err != nil { t.Fatal(err) @@ -157,7 +172,7 @@ func TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { func TestIPSinglePacketConnWithSingleGroupListener(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if !supportsIPv6 { @@ -201,7 +216,7 @@ func TestIPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { switch runtime.GOOS { case "darwin", "dragonfly", "openbsd": // platforms that return fe80::1%lo0: bind: can't assign requested address t.Skipf("not supported on %s", runtime.GOOS) - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if !supportsIPv6 { @@ -227,7 +242,7 @@ func TestIPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { if !ok { continue } - c, err := net.ListenPacket("ip6:ipv6-icmp", fmt.Sprintf("%s%%%s", ip.String(), ifi.Name)) // unicast address + c, err := net.ListenPacket("ip6:ipv6-icmp", ip.String()+"%"+ifi.Name) // unicast address if err != nil { t.Fatal(err) } diff --git a/fn/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go b/fn/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go index fe0e6e1b1..9e6b902d7 100644 --- a/fn/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go +++ b/fn/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -26,7 +26,7 @@ var packetConnMulticastSocketOptionTests = []struct { func TestPacketConnMulticastSocketOptions(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if !supportsIPv6 { diff --git a/fn/vendor/golang.org/x/net/ipv6/payload.go b/fn/vendor/golang.org/x/net/ipv6/payload.go index 529b20bca..a8197f169 100644 --- a/fn/vendor/golang.org/x/net/ipv6/payload.go +++ b/fn/vendor/golang.org/x/net/ipv6/payload.go @@ -1,15 +1,23 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ipv6 -import "net" +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo +// methods of PacketConn is not implemented. // A payloadHandler represents the IPv6 datagram payload handler. type payloadHandler struct { net.PacketConn + *socket.Conn rawOpt } -func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil } +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil && c.Conn != nil } diff --git a/fn/vendor/golang.org/x/net/ipv6/payload_cmsg.go b/fn/vendor/golang.org/x/net/ipv6/payload_cmsg.go index 8e90d324d..4ee4b062c 100644 --- a/fn/vendor/golang.org/x/net/ipv6/payload_cmsg.go +++ b/fn/vendor/golang.org/x/net/ipv6/payload_cmsg.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -12,59 +12,24 @@ import ( ) // ReadFrom reads a payload of the received IPv6 datagram, from the -// endpoint c, copying the payload into b. It returns the number of +// endpoint c, copying the payload into b. 
It returns the number of // bytes copied into b, the control message cm and the source address // src of the received datagram. func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { if !c.ok() { return 0, nil, nil, syscall.EINVAL } - oob := newControlMessage(&c.rawOpt) - var oobn int - switch c := c.PacketConn.(type) { - case *net.UDPConn: - if n, oobn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { - return 0, nil, nil, err - } - case *net.IPConn: - if n, oobn, _, src, err = c.ReadMsgIP(b, oob); err != nil { - return 0, nil, nil, err - } - default: - return 0, nil, nil, errInvalidConnType - } - if cm, err = parseControlMessage(oob[:oobn]); err != nil { - return 0, nil, nil, err - } - if cm != nil { - cm.Src = netAddrToIP16(src) - } - return + return c.readFrom(b) } // WriteTo writes a payload of the IPv6 datagram, to the destination -// address dst through the endpoint c, copying the payload from b. It -// returns the number of bytes written. The control message cm allows -// the IPv6 header fields and the datagram path to be specified. The +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The // cm may be nil if control of the outgoing datagram is not required. 
func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { if !c.ok() { return 0, syscall.EINVAL } - oob := marshalControlMessage(cm) - if dst == nil { - return 0, errMissingAddress - } - switch c := c.PacketConn.(type) { - case *net.UDPConn: - n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) - case *net.IPConn: - n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) - default: - return 0, errInvalidConnType - } - if err != nil { - return 0, err - } - return + return c.writeTo(b, cm, dst) } diff --git a/fn/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go b/fn/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go new file mode 100644 index 000000000..fdc6c3994 --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go @@ -0,0 +1,55 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 +// +build !nacl,!plan9,!windows + +package ipv6 + +import "net" + +func (c *payloadHandler) readFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + c.rawOpt.RLock() + oob := NewControlMessage(c.rawOpt.cflags) + c.rawOpt.RUnlock() + var nn int + switch c := c.PacketConn.(type) { + case *net.UDPConn: + if n, nn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { + return 0, nil, nil, err + } + case *net.IPConn: + if n, nn, _, src, err = c.ReadMsgIP(b, oob); err != nil { + return 0, nil, nil, err + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Err: errInvalidConnType} + } + if nn > 0 { + cm = new(ControlMessage) + if err = cm.Parse(oob[:nn]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + } + if cm != nil { + cm.Src = netAddrToIP16(src) + } + return +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, 
dst net.Addr) (n int, err error) { + oob := cm.Marshal() + if dst == nil { + return 0, &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errMissingAddress} + } + switch c := c.PacketConn.(type) { + case *net.UDPConn: + n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) + case *net.IPConn: + n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) + default: + return 0, &net.OpError{Op: "write", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: opAddr(dst), Err: errInvalidConnType} + } + return +} diff --git a/fn/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go b/fn/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go new file mode 100644 index 000000000..8f6d02e2f --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go @@ -0,0 +1,57 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build !nacl,!plan9,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (c *payloadHandler) readFrom(b []byte) (int, *ControlMessage, net.Addr, error) { + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + var cm *ControlMessage + if m.NN > 0 { + cm = 
new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP16(m.Addr) + } + return m.N, cm, m.Addr, nil +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (int, error) { + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err := c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err +} diff --git a/fn/vendor/golang.org/x/net/ipv6/payload_nocmsg.go b/fn/vendor/golang.org/x/net/ipv6/payload_nocmsg.go index 499204d0c..99a43542b 100644 --- a/fn/vendor/golang.org/x/net/ipv6/payload_nocmsg.go +++ b/fn/vendor/golang.org/x/net/ipv6/payload_nocmsg.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -12,7 +12,7 @@ import ( ) // ReadFrom reads a payload of the received IPv6 datagram, from the -// endpoint c, copying the payload into b. It returns the number of +// endpoint c, copying the payload into b. It returns the number of // bytes copied into b, the control message cm and the source address // src of the received datagram. func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { @@ -26,9 +26,9 @@ func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net. } // WriteTo writes a payload of the IPv6 datagram, to the destination -// address dst through the endpoint c, copying the payload from b. It -// returns the number of bytes written. The control message cm allows -// the IPv6 header fields and the datagram path to be specified. 
The +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The // cm may be nil if control of the outgoing datagram is not required. func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { if !c.ok() { diff --git a/fn/vendor/golang.org/x/net/ipv6/readwrite_go1_8_test.go b/fn/vendor/golang.org/x/net/ipv6/readwrite_go1_8_test.go new file mode 100644 index 000000000..c11d92ae9 --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv6/readwrite_go1_8_test.go @@ -0,0 +1,242 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package ipv6_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + b.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph := []byte{ + 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + } + greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) 
+ bb := make([]byte, 128) + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback); ifi != nil { + cm.IfIndex = ifi.Index + } + + b.Run("UDP", func(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv6.FlagHopLimit | ipv6.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(payload, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(payload, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + }) + b.Run("IP", func(b *testing.B) { + switch runtime.GOOS { + case "netbsd": + b.Skip("need to configure gre on netbsd") + case "openbsd": + b.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(datagram, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; 
i < b.N; i++ { + if _, err := p.WriteTo(datagram, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + }) +} + +func TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph := []byte{ + 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + } + greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + + t.Run("UDP", func(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr()) + }) + }) + t.Run("IP", func(t *testing.T) { + switch runtime.GOOS { + case "netbsd": + t.Skip("need to configure gre on netbsd") + case "openbsd": + t.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr()) + }) + }) +} + +func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv6.PacketConn, data []byte, dst net.Addr) { + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + + if err 
:= p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + b := make([]byte, 128) + n, cm, _, err := p.ReadFrom(b) + if err != nil { + t.Error(err) + return + } + if !bytes.Equal(b[:n], data) { + t.Errorf("got %#v; want %#v", b[:n], data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + n, err := p.WriteTo(data, &cm, dst) + if err != nil { + t.Error(err) + return + } + if n != len(data) { + t.Errorf("got %d; want %d", n, len(data)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + go writer(i%2 != 0) + + } + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Wait() +} diff --git a/fn/vendor/golang.org/x/net/ipv6/readwrite_go1_9_test.go b/fn/vendor/golang.org/x/net/ipv6/readwrite_go1_9_test.go new file mode 100644 index 000000000..e2fd73370 --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv6/readwrite_go1_9_test.go @@ -0,0 +1,373 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.9 + +package ipv6_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + b.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph := []byte{ + 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + } + greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + bb := make([]byte, 128) + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback); ifi != nil { + cm.IfIndex = ifi.Index + } + + b.Run("UDP", func(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv6.FlagHopLimit | ipv6.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + wms := []ipv6.Message{ + { + Buffers: [][]byte{payload}, + Addr: dst, + OOB: cm.Marshal(), + }, + } + rms := []ipv6.Message{ + { + Buffers: [][]byte{bb}, + OOB: ipv6.NewControlMessage(cf), + }, + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(payload, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(payload, 
&cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("Batch", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteBatch(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := p.ReadBatch(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + }) + b.Run("IP", func(b *testing.B) { + switch runtime.GOOS { + case "netbsd": + b.Skip("need to configure gre on netbsd") + case "openbsd": + b.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + wms := []ipv6.Message{ + { + Buffers: [][]byte{datagram}, + Addr: dst, + OOB: cm.Marshal(), + }, + } + rms := []ipv6.Message{ + { + Buffers: [][]byte{bb}, + OOB: ipv6.NewControlMessage(cf), + }, + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(datagram, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(datagram, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("Batch", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteBatch(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := p.ReadBatch(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + }) +} + +func TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", 
runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph := []byte{ + 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + } + greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + + t.Run("UDP", func(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), false) + }) + t.Run("Batch", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), true) + }) + }) + t.Run("IP", func(t *testing.T) { + switch runtime.GOOS { + case "netbsd": + t.Skip("need to configure gre on netbsd") + case "openbsd": + t.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), false) + }) + t.Run("Batch", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), true) + }) + }) +} + +func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv6.PacketConn, data []byte, dst net.Addr, batch bool) { + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + + if err := p.SetControlMessage(cf, true); err != nil { // probe before 
test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + b := make([]byte, 128) + n, cm, _, err := p.ReadFrom(b) + if err != nil { + t.Error(err) + return + } + if !bytes.Equal(b[:n], data) { + t.Errorf("got %#v; want %#v", b[:n], data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + batchReader := func() { + defer wg.Done() + ms := []ipv6.Message{ + { + Buffers: [][]byte{make([]byte, 128)}, + OOB: ipv6.NewControlMessage(cf), + }, + } + n, err := p.ReadBatch(ms, 0) + if err != nil { + t.Error(err) + return + } + if n != len(ms) { + t.Errorf("got %d; want %d", n, len(ms)) + return + } + var cm ipv6.ControlMessage + if err := cm.Parse(ms[0].OOB[:ms[0].NN]); err != nil { + t.Error(err) + return + } + b := ms[0].Buffers[0][:ms[0].N] + if !bytes.Equal(b, data) { + t.Errorf("got %#v; want %#v", b, data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + n, err := p.WriteTo(data, &cm, dst) + if err != nil { + t.Error(err) + return + } + if n != len(data) { + t.Errorf("got %d; want %d", n, len(data)) + return + } + } + batchWriter := func(toggle bool) { + defer wg.Done() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + ms := []ipv6.Message{ + { 
+ Buffers: [][]byte{data}, + OOB: cm.Marshal(), + Addr: dst, + }, + } + n, err := p.WriteBatch(ms, 0) + if err != nil { + t.Error(err) + return + } + if n != len(ms) { + t.Errorf("got %d; want %d", n, len(ms)) + return + } + if ms[0].N != len(data) { + t.Errorf("got %d; want %d", ms[0].N, len(data)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + if batch { + go batchReader() + } else { + go reader() + } + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + if batch { + go batchWriter(i%2 != 0) + } else { + go writer(i%2 != 0) + } + } + wg.Add(N) + for i := 0; i < N; i++ { + if batch { + go batchReader() + } else { + go reader() + } + } + wg.Wait() +} diff --git a/fn/vendor/golang.org/x/net/ipv6/readwrite_test.go b/fn/vendor/golang.org/x/net/ipv6/readwrite_test.go index 8c8c6fde0..206b915ce 100644 --- a/fn/vendor/golang.org/x/net/ipv6/readwrite_test.go +++ b/fn/vendor/golang.org/x/net/ipv6/readwrite_test.go @@ -17,99 +17,62 @@ import ( "golang.org/x/net/ipv6" ) -func benchmarkUDPListener() (net.PacketConn, net.Addr, error) { - c, err := net.ListenPacket("udp6", "[::1]:0") +func BenchmarkReadWriteUnicast(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp6") if err != nil { - return nil, nil, err - } - dst, err := net.ResolveUDPAddr("udp6", c.LocalAddr().String()) - if err != nil { - c.Close() - return nil, nil, err - } - return c, dst, nil -} - -func BenchmarkReadWriteNetUDP(b *testing.B) { - if !supportsIPv6 { - b.Skip("ipv6 is not supported") - } - - c, dst, err := benchmarkUDPListener() - if err != nil { - b.Fatal(err) + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) } defer c.Close() + dst := c.LocalAddr() wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128) - b.ResetTimer() - for i := 0; i < b.N; i++ { - benchmarkReadWriteNetUDP(b, c, wb, rb, dst) - } -} -func benchmarkReadWriteNetUDP(b *testing.B, c net.PacketConn, wb, rb []byte, dst net.Addr) { - if _, err := c.WriteTo(wb, dst); err != nil { - 
b.Fatal(err) - } - if _, _, err := c.ReadFrom(rb); err != nil { - b.Fatal(err) - } -} + b.Run("NetUDP", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(wb, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(rb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("IPv6UDP", func(b *testing.B) { + p := ipv6.NewPacketConn(c) + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + if ifi != nil { + cm.IfIndex = ifi.Index + } -func BenchmarkReadWriteIPv6UDP(b *testing.B) { - if !supportsIPv6 { - b.Skip("ipv6 is not supported") - } - - c, dst, err := benchmarkUDPListener() - if err != nil { - b.Fatal(err) - } - defer c.Close() - - p := ipv6.NewPacketConn(c) - cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU - if err := p.SetControlMessage(cf, true); err != nil { - b.Fatal(err) - } - ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) - - wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128) - b.ResetTimer() - for i := 0; i < b.N; i++ { - benchmarkReadWriteIPv6UDP(b, p, wb, rb, dst, ifi) - } -} - -func benchmarkReadWriteIPv6UDP(b *testing.B, p *ipv6.PacketConn, wb, rb []byte, dst net.Addr, ifi *net.Interface) { - cm := ipv6.ControlMessage{ - TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, - HopLimit: 1, - } - if ifi != nil { - cm.IfIndex = ifi.Index - } - if n, err := p.WriteTo(wb, &cm, dst); err != nil { - b.Fatal(err) - } else if n != len(wb) { - b.Fatalf("got %v; want %v", n, len(wb)) - } - if _, _, _, err := p.ReadFrom(rb); err != nil { - b.Fatal(err) - } + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(wb, 
&cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(rb); err != nil { + b.Fatal(err) + } + } + }) } func TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if !supportsIPv6 { t.Skip("ipv6 is not supported") } - c, err := net.ListenPacket("udp6", "[::1]:0") + c, err := nettest.NewLocalPacketListener("udp6") if err != nil { t.Fatal(err) } @@ -117,11 +80,7 @@ func TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) { p := ipv6.NewPacketConn(c) defer p.Close() - dst, err := net.ResolveUDPAddr("udp6", c.LocalAddr().String()) - if err != nil { - t.Fatal(err) - } - + dst := c.LocalAddr() ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU wb := []byte("HELLO-R-U-THERE") @@ -167,7 +126,7 @@ func TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) { t.Error(err) return } else if n != len(wb) { - t.Errorf("got %v; want %v", n, len(wb)) + t.Errorf("got %d; want %d", n, len(wb)) return } } diff --git a/fn/vendor/golang.org/x/net/ipv6/sockopt.go b/fn/vendor/golang.org/x/net/ipv6/sockopt.go index f0cfc2f94..cc3907df3 100644 --- a/fn/vendor/golang.org/x/net/ipv6/sockopt.go +++ b/fn/vendor/golang.org/x/net/ipv6/sockopt.go @@ -4,6 +4,8 @@ package ipv6 +import "golang.org/x/net/internal/socket" + // Sticky socket options const ( ssoTrafficClass = iota // header field for unicast packet, RFC 3542 @@ -24,23 +26,18 @@ const ( ssoLeaveSourceGroup // source-specific multicast ssoBlockSourceGroup // any-source or source-specific multicast ssoUnblockSourceGroup // any-source or source-specific multicast - ssoMax + ssoAttachFilter // attach BPF for filtering inbound traffic ) // Sticky socket option value types const ( - ssoTypeInt = iota + 1 - ssoTypeInterface - 
ssoTypeICMPFilter - ssoTypeMTUInfo - ssoTypeIPMreq + ssoTypeIPMreq = iota + 1 ssoTypeGroupReq ssoTypeGroupSourceReq ) // A sockOpt represents a binding for sticky socket option. type sockOpt struct { - level int // option level - name int // option name, must be equal or greater than 1 - typ int // option value type, must be equal or greater than 1 + socket.Option + typ int // hint for option value type; optional } diff --git a/fn/vendor/golang.org/x/net/ipv6/sockopt_asmreq_unix.go b/fn/vendor/golang.org/x/net/ipv6/sockopt_asmreq_unix.go deleted file mode 100644 index b7fd4fe67..000000000 --- a/fn/vendor/golang.org/x/net/ipv6/sockopt_asmreq_unix.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux netbsd openbsd - -package ipv6 - -import ( - "net" - "os" - "unsafe" -) - -func setsockoptIPMreq(fd int, opt *sockOpt, ifi *net.Interface, grp net.IP) error { - var mreq sysIPv6Mreq - copy(mreq.Multiaddr[:], grp) - if ifi != nil { - mreq.setIfindex(ifi.Index) - } - return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, unsafe.Pointer(&mreq), sysSizeofIPv6Mreq)) -} diff --git a/fn/vendor/golang.org/x/net/ipv6/sockopt_asmreq_windows.go b/fn/vendor/golang.org/x/net/ipv6/sockopt_asmreq_windows.go deleted file mode 100644 index c03c73134..000000000 --- a/fn/vendor/golang.org/x/net/ipv6/sockopt_asmreq_windows.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ipv6 - -import ( - "net" - "os" - "syscall" - "unsafe" -) - -func setsockoptIPMreq(fd syscall.Handle, opt *sockOpt, ifi *net.Interface, grp net.IP) error { - var mreq sysIPv6Mreq - copy(mreq.Multiaddr[:], grp) - if ifi != nil { - mreq.setIfindex(ifi.Index) - } - return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, int32(opt.level), int32(opt.name), (*byte)(unsafe.Pointer(&mreq)), sysSizeofIPv6Mreq)) -} diff --git a/fn/vendor/golang.org/x/net/ipv6/sockopt_posix.go b/fn/vendor/golang.org/x/net/ipv6/sockopt_posix.go new file mode 100644 index 000000000..0eac86eb8 --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv6/sockopt_posix.go @@ -0,0 +1,87 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + n, err := so.GetInt(c) + if err != nil { + return nil, err + } + return net.InterfaceByIndex(n) +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + var n int + if ifi != nil { + n = ifi.Index + } + return so.SetInt(c, n) +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, err + } + if n != sizeofICMPv6Filter { + return nil, errOpNoSupport + } + return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + b := (*[sizeofICMPv6Filter]byte)(unsafe.Pointer(f))[:sizeofICMPv6Filter] + return so.Set(c, b) +} + +func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return 
nil, 0, err + } + if n != sizeofIPv6Mtuinfo { + return nil, 0, errOpNoSupport + } + mi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0])) + if mi.Addr.Scope_id == 0 { + return nil, int(mi.Mtu), nil + } + ifi, err := net.InterfaceByIndex(int(mi.Addr.Scope_id)) + if err != nil { + return nil, 0, err + } + return ifi, int(mi.Mtu), nil +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + switch so.typ { + case ssoTypeIPMreq: + return so.setIPMreq(c, ifi, grp) + case ssoTypeGroupReq: + return so.setGroupReq(c, ifi, grp) + default: + return errOpNoSupport + } +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return so.setGroupSourceReq(c, ifi, grp, src) +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return so.setAttachFilter(c, f) +} diff --git a/fn/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go b/fn/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go deleted file mode 100644 index c64d6d584..000000000 --- a/fn/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin freebsd linux - -package ipv6 - -import ( - "net" - "os" - "unsafe" -) - -var freebsd32o64 bool - -func setsockoptGroupReq(fd int, opt *sockOpt, ifi *net.Interface, grp net.IP) error { - var gr sysGroupReq - if ifi != nil { - gr.Interface = uint32(ifi.Index) - } - gr.setGroup(grp) - var p unsafe.Pointer - var l sysSockoptLen - if freebsd32o64 { - var d [sysSizeofGroupReq + 4]byte - s := (*[sysSizeofGroupReq]byte)(unsafe.Pointer(&gr)) - copy(d[:4], s[:4]) - copy(d[8:], s[4:]) - p = unsafe.Pointer(&d[0]) - l = sysSizeofGroupReq + 4 - } else { - p = unsafe.Pointer(&gr) - l = sysSizeofGroupReq - } - return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, p, l)) -} - -func setsockoptGroupSourceReq(fd int, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { - var gsr sysGroupSourceReq - if ifi != nil { - gsr.Interface = uint32(ifi.Index) - } - gsr.setSourceGroup(grp, src) - var p unsafe.Pointer - var l sysSockoptLen - if freebsd32o64 { - var d [sysSizeofGroupSourceReq + 4]byte - s := (*[sysSizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) - copy(d[:4], s[:4]) - copy(d[8:], s[4:]) - p = unsafe.Pointer(&d[0]) - l = sysSizeofGroupSourceReq + 4 - } else { - p = unsafe.Pointer(&gsr) - l = sysSizeofGroupSourceReq - } - return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, p, l)) -} diff --git a/fn/vendor/golang.org/x/net/ipv6/sockopt_stub.go b/fn/vendor/golang.org/x/net/ipv6/sockopt_stub.go index b8dacfde9..1f4a273e4 100644 --- a/fn/vendor/golang.org/x/net/ipv6/sockopt_stub.go +++ b/fn/vendor/golang.org/x/net/ipv6/sockopt_stub.go @@ -1,13 +1,46 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build nacl plan9 solaris +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv6 -import "net" +import ( + "net" -func getMTUInfo(fd int, opt *sockOpt) (*net.Interface, int, error) { + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + return errOpNoSupport +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + return errOpNoSupport +} + +func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) { return nil, 0, errOpNoSupport } + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/fn/vendor/golang.org/x/net/ipv6/sockopt_test.go b/fn/vendor/golang.org/x/net/ipv6/sockopt_test.go index 9c2190316..774338dbf 100644 --- a/fn/vendor/golang.org/x/net/ipv6/sockopt_test.go +++ b/fn/vendor/golang.org/x/net/ipv6/sockopt_test.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -19,7 +19,7 @@ var supportsIPv6 bool = nettest.SupportsIPv6() func TestConnInitiatorPathMTU(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if !supportsIPv6 { @@ -57,7 +57,7 @@ func TestConnInitiatorPathMTU(t *testing.T) { func TestConnResponderPathMTU(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if !supportsIPv6 { @@ -95,7 +95,7 @@ func TestConnResponderPathMTU(t *testing.T) { func TestPacketConnChecksum(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if !supportsIPv6 { diff --git a/fn/vendor/golang.org/x/net/ipv6/sockopt_unix.go b/fn/vendor/golang.org/x/net/ipv6/sockopt_unix.go deleted file mode 100644 index 25ea545f5..000000000 --- a/fn/vendor/golang.org/x/net/ipv6/sockopt_unix.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin dragonfly freebsd linux netbsd openbsd - -package ipv6 - -import ( - "net" - "os" - "unsafe" -) - -func getInt(fd int, opt *sockOpt) (int, error) { - if opt.name < 1 || opt.typ != ssoTypeInt { - return 0, errOpNoSupport - } - var i int32 - l := sysSockoptLen(4) - if err := getsockopt(fd, opt.level, opt.name, unsafe.Pointer(&i), &l); err != nil { - return 0, os.NewSyscallError("getsockopt", err) - } - return int(i), nil -} - -func setInt(fd int, opt *sockOpt, v int) error { - if opt.name < 1 || opt.typ != ssoTypeInt { - return errOpNoSupport - } - i := int32(v) - return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, unsafe.Pointer(&i), sysSockoptLen(4))) -} - -func getInterface(fd int, opt *sockOpt) (*net.Interface, error) { - if opt.name < 1 || opt.typ != ssoTypeInterface { - return nil, errOpNoSupport - } - var i int32 - l := sysSockoptLen(4) - if err := getsockopt(fd, opt.level, opt.name, unsafe.Pointer(&i), &l); err != nil { - return nil, os.NewSyscallError("getsockopt", err) - } - if i == 0 { - return nil, nil - } - ifi, err := net.InterfaceByIndex(int(i)) - if err != nil { - return nil, err - } - return ifi, nil -} - -func setInterface(fd int, opt *sockOpt, ifi *net.Interface) error { - if opt.name < 1 || opt.typ != ssoTypeInterface { - return errOpNoSupport - } - var i int32 - if ifi != nil { - i = int32(ifi.Index) - } - return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, unsafe.Pointer(&i), sysSockoptLen(4))) -} - -func getICMPFilter(fd int, opt *sockOpt) (*ICMPFilter, error) { - if opt.name < 1 || opt.typ != ssoTypeICMPFilter { - return nil, errOpNoSupport - } - var f ICMPFilter - l := sysSockoptLen(sysSizeofICMPv6Filter) - if err := getsockopt(fd, opt.level, opt.name, unsafe.Pointer(&f.sysICMPv6Filter), &l); err != nil { - return nil, os.NewSyscallError("getsockopt", err) - } - return &f, nil -} - -func setICMPFilter(fd int, opt *sockOpt, f *ICMPFilter) error { - if opt.name < 1 || opt.typ 
!= ssoTypeICMPFilter { - return errOpNoSupport - } - return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, unsafe.Pointer(&f.sysICMPv6Filter), sysSizeofICMPv6Filter)) -} - -func getMTUInfo(fd int, opt *sockOpt) (*net.Interface, int, error) { - if opt.name < 1 || opt.typ != ssoTypeMTUInfo { - return nil, 0, errOpNoSupport - } - var mi sysIPv6Mtuinfo - l := sysSockoptLen(sysSizeofIPv6Mtuinfo) - if err := getsockopt(fd, opt.level, opt.name, unsafe.Pointer(&mi), &l); err != nil { - return nil, 0, os.NewSyscallError("getsockopt", err) - } - if mi.Addr.Scope_id == 0 { - return nil, int(mi.Mtu), nil - } - ifi, err := net.InterfaceByIndex(int(mi.Addr.Scope_id)) - if err != nil { - return nil, 0, err - } - return ifi, int(mi.Mtu), nil -} - -func setGroup(fd int, opt *sockOpt, ifi *net.Interface, grp net.IP) error { - if opt.name < 1 { - return errOpNoSupport - } - switch opt.typ { - case ssoTypeIPMreq: - return setsockoptIPMreq(fd, opt, ifi, grp) - case ssoTypeGroupReq: - return setsockoptGroupReq(fd, opt, ifi, grp) - default: - return errOpNoSupport - } -} - -func setSourceGroup(fd int, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { - if opt.name < 1 || opt.typ != ssoTypeGroupSourceReq { - return errOpNoSupport - } - return setsockoptGroupSourceReq(fd, opt, ifi, grp, src) -} diff --git a/fn/vendor/golang.org/x/net/ipv6/sockopt_windows.go b/fn/vendor/golang.org/x/net/ipv6/sockopt_windows.go deleted file mode 100644 index 32c73b722..000000000 --- a/fn/vendor/golang.org/x/net/ipv6/sockopt_windows.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ipv6 - -import ( - "net" - "os" - "syscall" - "unsafe" -) - -func getInt(fd syscall.Handle, opt *sockOpt) (int, error) { - if opt.name < 1 || opt.typ != ssoTypeInt { - return 0, errOpNoSupport - } - var i int32 - l := int32(4) - if err := syscall.Getsockopt(fd, int32(opt.level), int32(opt.name), (*byte)(unsafe.Pointer(&i)), &l); err != nil { - return 0, os.NewSyscallError("getsockopt", err) - } - return int(i), nil -} - -func setInt(fd syscall.Handle, opt *sockOpt, v int) error { - if opt.name < 1 || opt.typ != ssoTypeInt { - return errOpNoSupport - } - i := int32(v) - return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, int32(opt.level), int32(opt.name), (*byte)(unsafe.Pointer(&i)), 4)) -} - -func getInterface(fd syscall.Handle, opt *sockOpt) (*net.Interface, error) { - if opt.name < 1 || opt.typ != ssoTypeInterface { - return nil, errOpNoSupport - } - var i int32 - l := int32(4) - if err := syscall.Getsockopt(fd, int32(opt.level), int32(opt.name), (*byte)(unsafe.Pointer(&i)), &l); err != nil { - return nil, os.NewSyscallError("getsockopt", err) - } - if i == 0 { - return nil, nil - } - ifi, err := net.InterfaceByIndex(int(i)) - if err != nil { - return nil, err - } - return ifi, nil -} - -func setInterface(fd syscall.Handle, opt *sockOpt, ifi *net.Interface) error { - if opt.name < 1 || opt.typ != ssoTypeInterface { - return errOpNoSupport - } - var i int32 - if ifi != nil { - i = int32(ifi.Index) - } - return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, int32(opt.level), int32(opt.name), (*byte)(unsafe.Pointer(&i)), 4)) -} - -func getICMPFilter(fd syscall.Handle, opt *sockOpt) (*ICMPFilter, error) { - return nil, errOpNoSupport -} - -func setICMPFilter(fd syscall.Handle, opt *sockOpt, f *ICMPFilter) error { - return errOpNoSupport -} - -func getMTUInfo(fd syscall.Handle, opt *sockOpt) (*net.Interface, int, error) { - return nil, 0, errOpNoSupport -} - -func setGroup(fd syscall.Handle, opt *sockOpt, ifi *net.Interface, grp net.IP) 
error { - if opt.name < 1 || opt.typ != ssoTypeIPMreq { - return errOpNoSupport - } - return setsockoptIPMreq(fd, opt, ifi, grp) -} - -func setSourceGroup(fd syscall.Handle, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { - // TODO(mikio): implement this - return errOpNoSupport -} diff --git a/fn/vendor/golang.org/x/net/ipv6/sys_asmreq.go b/fn/vendor/golang.org/x/net/ipv6/sys_asmreq.go new file mode 100644 index 000000000..b0510c0b5 --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv6/sys_asmreq.go @@ -0,0 +1,24 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var mreq ipv6Mreq + copy(mreq.Multiaddr[:], grp) + if ifi != nil { + mreq.setIfindex(ifi.Index) + } + b := (*[sizeofIPv6Mreq]byte)(unsafe.Pointer(&mreq))[:sizeofIPv6Mreq] + return so.Set(c, b) +} diff --git a/fn/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go b/fn/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go new file mode 100644 index 000000000..eece96187 --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go @@ -0,0 +1,17 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} diff --git a/fn/vendor/golang.org/x/net/ipv6/sys_bpf.go b/fn/vendor/golang.org/x/net/ipv6/sys_bpf.go new file mode 100644 index 000000000..b2dbcb2f2 --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv6/sys_bpf.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package ipv6 + +import ( + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + prog := sockFProg{ + Len: uint16(len(f)), + Filter: (*sockFilter)(unsafe.Pointer(&f[0])), + } + b := (*[sizeofSockFprog]byte)(unsafe.Pointer(&prog))[:sizeofSockFprog] + return so.Set(c, b) +} diff --git a/fn/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go b/fn/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go new file mode 100644 index 000000000..676bea555 --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv6 + +import ( + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/fn/vendor/golang.org/x/net/ipv6/sys_bsd.go b/fn/vendor/golang.org/x/net/ipv6/sys_bsd.go index 75a8863b3..e416eaa1f 100644 --- a/fn/vendor/golang.org/x/net/ipv6/sys_bsd.go +++ b/fn/vendor/golang.org/x/net/ipv6/sys_bsd.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. 
All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -11,48 +11,47 @@ import ( "syscall" "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) -type sysSockoptLen int32 - var ( ctlOpts = [ctlMax]ctlOpt{ ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, - ctlPacketInfo: {sysIPV6_PKTINFO, sysSizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, - ctlNextHop: {sysIPV6_NEXTHOP, sysSizeofSockaddrInet6, marshalNextHop, parseNextHop}, - ctlPathMTU: {sysIPV6_PATHMTU, sysSizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, } - sockOpts = [ssoMax]sockOpt{ - ssoTrafficClass: {iana.ProtocolIPv6, sysIPV6_TCLASS, ssoTypeInt}, - ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt}, - ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface}, - ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt}, - ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt}, - ssoReceiveTrafficClass: {iana.ProtocolIPv6, sysIPV6_RECVTCLASS, ssoTypeInt}, - ssoReceiveHopLimit: {iana.ProtocolIPv6, sysIPV6_RECVHOPLIMIT, ssoTypeInt}, - ssoReceivePacketInfo: {iana.ProtocolIPv6, sysIPV6_RECVPKTINFO, ssoTypeInt}, - ssoReceivePathMTU: {iana.ProtocolIPv6, sysIPV6_RECVPATHMTU, ssoTypeInt}, - ssoPathMTU: {iana.ProtocolIPv6, sysIPV6_PATHMTU, ssoTypeMTUInfo}, - ssoChecksum: {iana.ProtocolIPv6, sysIPV6_CHECKSUM, ssoTypeInt}, - ssoICMPFilter: {iana.ProtocolIPv6ICMP, sysICMP6_FILTER, ssoTypeICMPFilter}, - ssoJoinGroup: {iana.ProtocolIPv6, 
sysIPV6_JOIN_GROUP, ssoTypeIPMreq}, - ssoLeaveGroup: {iana.ProtocolIPv6, sysIPV6_LEAVE_GROUP, ssoTypeIPMreq}, + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, } ) -func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) { - sa.Len = sysSizeofSockaddrInet6 +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 sa.Family = syscall.AF_INET6 copy(sa.Addr[:], ip) sa.Scope_id = uint32(i) } -func (pi 
*sysInet6Pktinfo) setIfindex(i int) { +func (pi *inet6Pktinfo) setIfindex(i int) { pi.Ifindex = uint32(i) } -func (mreq *sysIPv6Mreq) setIfindex(i int) { +func (mreq *ipv6Mreq) setIfindex(i int) { mreq.Interface = uint32(i) } diff --git a/fn/vendor/golang.org/x/net/ipv6/sys_darwin.go b/fn/vendor/golang.org/x/net/ipv6/sys_darwin.go index 411fb498c..e3d044392 100644 --- a/fn/vendor/golang.org/x/net/ipv6/sys_darwin.go +++ b/fn/vendor/golang.org/x/net/ipv6/sys_darwin.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -6,130 +6,101 @@ package ipv6 import ( "net" + "strconv" + "strings" "syscall" "unsafe" "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) -type sysSockoptLen int32 - var ( ctlOpts = [ctlMax]ctlOpt{ ctlHopLimit: {sysIPV6_2292HOPLIMIT, 4, marshal2292HopLimit, parseHopLimit}, - ctlPacketInfo: {sysIPV6_2292PKTINFO, sysSizeofInet6Pktinfo, marshal2292PacketInfo, parsePacketInfo}, + ctlPacketInfo: {sysIPV6_2292PKTINFO, sizeofInet6Pktinfo, marshal2292PacketInfo, parsePacketInfo}, } - sockOpts = [ssoMax]sockOpt{ - ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt}, - ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface}, - ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt}, - ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt}, - ssoReceiveHopLimit: {iana.ProtocolIPv6, sysIPV6_2292HOPLIMIT, ssoTypeInt}, - ssoReceivePacketInfo: {iana.ProtocolIPv6, sysIPV6_2292PKTINFO, ssoTypeInt}, - ssoChecksum: {iana.ProtocolIPv6, sysIPV6_CHECKSUM, ssoTypeInt}, - ssoICMPFilter: {iana.ProtocolIPv6ICMP, sysICMP6_FILTER, ssoTypeICMPFilter}, - ssoJoinGroup: {iana.ProtocolIPv6, sysIPV6_JOIN_GROUP, ssoTypeIPMreq}, - ssoLeaveGroup: {iana.ProtocolIPv6, sysIPV6_LEAVE_GROUP, ssoTypeIPMreq}, 
+ sockOpts = map[int]*sockOpt{ + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_2292HOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_2292PKTINFO, Len: 4}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, } ) func init() { // Seems like kern.osreldate is veiled on latest OS X. We use // kern.osrelease instead. - osver, err := syscall.Sysctl("kern.osrelease") + s, err := syscall.Sysctl("kern.osrelease") if err != nil { return } - var i int - for i = range osver { - if osver[i] == '.' { - break - } + ss := strings.Split(s, ".") + if len(ss) == 0 { + return } // The IP_PKTINFO and protocol-independent multicast API were - // introduced in OS X 10.7 (Darwin 11.0.0). But it looks like - // those features require OS X 10.8 (Darwin 12.0.0) and above. + // introduced in OS X 10.7 (Darwin 11). But it looks like + // those features require OS X 10.8 (Darwin 12) or above. // See http://support.apple.com/kb/HT1633. 
- if i > 2 || i == 2 && osver[0] >= '1' && osver[1] >= '2' { - ctlOpts[ctlTrafficClass].name = sysIPV6_TCLASS - ctlOpts[ctlTrafficClass].length = 4 - ctlOpts[ctlTrafficClass].marshal = marshalTrafficClass - ctlOpts[ctlTrafficClass].parse = parseTrafficClass - ctlOpts[ctlHopLimit].name = sysIPV6_HOPLIMIT - ctlOpts[ctlHopLimit].marshal = marshalHopLimit - ctlOpts[ctlPacketInfo].name = sysIPV6_PKTINFO - ctlOpts[ctlPacketInfo].marshal = marshalPacketInfo - ctlOpts[ctlNextHop].name = sysIPV6_NEXTHOP - ctlOpts[ctlNextHop].length = sysSizeofSockaddrInet6 - ctlOpts[ctlNextHop].marshal = marshalNextHop - ctlOpts[ctlNextHop].parse = parseNextHop - ctlOpts[ctlPathMTU].name = sysIPV6_PATHMTU - ctlOpts[ctlPathMTU].length = sysSizeofIPv6Mtuinfo - ctlOpts[ctlPathMTU].marshal = marshalPathMTU - ctlOpts[ctlPathMTU].parse = parsePathMTU - sockOpts[ssoTrafficClass].level = iana.ProtocolIPv6 - sockOpts[ssoTrafficClass].name = sysIPV6_TCLASS - sockOpts[ssoTrafficClass].typ = ssoTypeInt - sockOpts[ssoReceiveTrafficClass].level = iana.ProtocolIPv6 - sockOpts[ssoReceiveTrafficClass].name = sysIPV6_RECVTCLASS - sockOpts[ssoReceiveTrafficClass].typ = ssoTypeInt - sockOpts[ssoReceiveHopLimit].name = sysIPV6_RECVHOPLIMIT - sockOpts[ssoReceivePacketInfo].name = sysIPV6_RECVPKTINFO - sockOpts[ssoReceivePathMTU].level = iana.ProtocolIPv6 - sockOpts[ssoReceivePathMTU].name = sysIPV6_RECVPATHMTU - sockOpts[ssoReceivePathMTU].typ = ssoTypeInt - sockOpts[ssoPathMTU].level = iana.ProtocolIPv6 - sockOpts[ssoPathMTU].name = sysIPV6_PATHMTU - sockOpts[ssoPathMTU].typ = ssoTypeMTUInfo - sockOpts[ssoJoinGroup].name = sysMCAST_JOIN_GROUP - sockOpts[ssoJoinGroup].typ = ssoTypeGroupReq - sockOpts[ssoLeaveGroup].name = sysMCAST_LEAVE_GROUP - sockOpts[ssoLeaveGroup].typ = ssoTypeGroupReq - sockOpts[ssoJoinSourceGroup].level = iana.ProtocolIPv6 - sockOpts[ssoJoinSourceGroup].name = sysMCAST_JOIN_SOURCE_GROUP - sockOpts[ssoJoinSourceGroup].typ = ssoTypeGroupSourceReq - sockOpts[ssoLeaveSourceGroup].level = 
iana.ProtocolIPv6 - sockOpts[ssoLeaveSourceGroup].name = sysMCAST_LEAVE_SOURCE_GROUP - sockOpts[ssoLeaveSourceGroup].typ = ssoTypeGroupSourceReq - sockOpts[ssoBlockSourceGroup].level = iana.ProtocolIPv6 - sockOpts[ssoBlockSourceGroup].name = sysMCAST_BLOCK_SOURCE - sockOpts[ssoBlockSourceGroup].typ = ssoTypeGroupSourceReq - sockOpts[ssoUnblockSourceGroup].level = iana.ProtocolIPv6 - sockOpts[ssoUnblockSourceGroup].name = sysMCAST_UNBLOCK_SOURCE - sockOpts[ssoUnblockSourceGroup].typ = ssoTypeGroupSourceReq + if mjver, err := strconv.Atoi(ss[0]); err != nil || mjver < 12 { + return } + ctlOpts[ctlTrafficClass] = ctlOpt{sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass} + ctlOpts[ctlHopLimit] = ctlOpt{sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit} + ctlOpts[ctlPacketInfo] = ctlOpt{sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo} + ctlOpts[ctlNextHop] = ctlOpt{sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop} + ctlOpts[ctlPathMTU] = ctlOpt{sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU} + sockOpts[ssoTrafficClass] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}} + sockOpts[ssoReceiveTrafficClass] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}} + sockOpts[ssoReceiveHopLimit] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}} + sockOpts[ssoReceivePacketInfo] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}} + sockOpts[ssoReceivePathMTU] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}} + sockOpts[ssoPathMTU] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}} + sockOpts[ssoJoinGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + 
sockOpts[ssoLeaveGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoJoinSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoLeaveSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoBlockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoUnblockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} } -func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) { - sa.Len = sysSizeofSockaddrInet6 +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 sa.Family = syscall.AF_INET6 copy(sa.Addr[:], ip) sa.Scope_id = uint32(i) } -func (pi *sysInet6Pktinfo) setIfindex(i int) { +func (pi *inet6Pktinfo) setIfindex(i int) { pi.Ifindex = uint32(i) } -func (mreq *sysIPv6Mreq) setIfindex(i int) { +func (mreq *ipv6Mreq) setIfindex(i int) { mreq.Interface = uint32(i) } -func (gr *sysGroupReq) setGroup(grp net.IP) { - sa := (*sysSockaddrInet6)(unsafe.Pointer(&gr.Pad_cgo_0[0])) - sa.Len = sysSizeofSockaddrInet6 +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet6 sa.Family = syscall.AF_INET6 copy(sa.Addr[:], grp) } -func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) { - sa := (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Pad_cgo_0[0])) - sa.Len = sysSizeofSockaddrInet6 +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := 
(*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = sizeofSockaddrInet6 sa.Family = syscall.AF_INET6 copy(sa.Addr[:], grp) - sa = (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Pad_cgo_1[0])) - sa.Len = sysSizeofSockaddrInet6 + sa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132)) + sa.Len = sizeofSockaddrInet6 sa.Family = syscall.AF_INET6 copy(sa.Addr[:], src) } diff --git a/fn/vendor/golang.org/x/net/ipv6/sys_freebsd.go b/fn/vendor/golang.org/x/net/ipv6/sys_freebsd.go index b68725cba..e9349dc2c 100644 --- a/fn/vendor/golang.org/x/net/ipv6/sys_freebsd.go +++ b/fn/vendor/golang.org/x/net/ipv6/sys_freebsd.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -12,38 +12,37 @@ import ( "unsafe" "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) -type sysSockoptLen int32 - var ( ctlOpts = [ctlMax]ctlOpt{ ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, - ctlPacketInfo: {sysIPV6_PKTINFO, sysSizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, - ctlNextHop: {sysIPV6_NEXTHOP, sysSizeofSockaddrInet6, marshalNextHop, parseNextHop}, - ctlPathMTU: {sysIPV6_PATHMTU, sysSizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, } - sockOpts = [ssoMax]sockOpt{ - ssoTrafficClass: {iana.ProtocolIPv6, sysIPV6_TCLASS, ssoTypeInt}, - ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt}, - ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface}, - 
ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt}, - ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt}, - ssoReceiveTrafficClass: {iana.ProtocolIPv6, sysIPV6_RECVTCLASS, ssoTypeInt}, - ssoReceiveHopLimit: {iana.ProtocolIPv6, sysIPV6_RECVHOPLIMIT, ssoTypeInt}, - ssoReceivePacketInfo: {iana.ProtocolIPv6, sysIPV6_RECVPKTINFO, ssoTypeInt}, - ssoReceivePathMTU: {iana.ProtocolIPv6, sysIPV6_RECVPATHMTU, ssoTypeInt}, - ssoPathMTU: {iana.ProtocolIPv6, sysIPV6_PATHMTU, ssoTypeMTUInfo}, - ssoChecksum: {iana.ProtocolIPv6, sysIPV6_CHECKSUM, ssoTypeInt}, - ssoICMPFilter: {iana.ProtocolIPv6ICMP, sysICMP6_FILTER, ssoTypeICMPFilter}, - ssoJoinGroup: {iana.ProtocolIPv6, sysMCAST_JOIN_GROUP, ssoTypeGroupReq}, - ssoLeaveGroup: {iana.ProtocolIPv6, sysMCAST_LEAVE_GROUP, ssoTypeGroupReq}, - ssoJoinSourceGroup: {iana.ProtocolIPv6, sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq}, - ssoLeaveSourceGroup: {iana.ProtocolIPv6, sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq}, - ssoBlockSourceGroup: {iana.ProtocolIPv6, sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq}, - ssoUnblockSourceGroup: {iana.ProtocolIPv6, sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq}, + sockOpts = map[int]sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + 
ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, } ) @@ -59,35 +58,35 @@ func init() { } } -func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) { - sa.Len = sysSizeofSockaddrInet6 +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 sa.Family = syscall.AF_INET6 copy(sa.Addr[:], ip) sa.Scope_id = uint32(i) } -func (pi *sysInet6Pktinfo) setIfindex(i int) { +func (pi *inet6Pktinfo) setIfindex(i int) { pi.Ifindex = uint32(i) } -func (mreq *sysIPv6Mreq) setIfindex(i int) { +func (mreq *ipv6Mreq) setIfindex(i int) { mreq.Interface = 
uint32(i) } -func (gr *sysGroupReq) setGroup(grp net.IP) { - sa := (*sysSockaddrInet6)(unsafe.Pointer(&gr.Group)) - sa.Len = sysSizeofSockaddrInet6 +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group)) + sa.Len = sizeofSockaddrInet6 sa.Family = syscall.AF_INET6 copy(sa.Addr[:], grp) } -func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) { - sa := (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Group)) - sa.Len = sysSizeofSockaddrInet6 +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group)) + sa.Len = sizeofSockaddrInet6 sa.Family = syscall.AF_INET6 copy(sa.Addr[:], grp) - sa = (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Source)) - sa.Len = sysSizeofSockaddrInet6 + sa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa.Len = sizeofSockaddrInet6 sa.Family = syscall.AF_INET6 copy(sa.Addr[:], src) } diff --git a/fn/vendor/golang.org/x/net/ipv6/sys_linux.go b/fn/vendor/golang.org/x/net/ipv6/sys_linux.go index 2fa6088d0..bc218103c 100644 --- a/fn/vendor/golang.org/x/net/ipv6/sys_linux.go +++ b/fn/vendor/golang.org/x/net/ipv6/sys_linux.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -10,65 +10,65 @@ import ( "unsafe" "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) -type sysSockoptLen int32 - var ( ctlOpts = [ctlMax]ctlOpt{ ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, - ctlPacketInfo: {sysIPV6_PKTINFO, sysSizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, - ctlPathMTU: {sysIPV6_PATHMTU, sysSizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, } - sockOpts = [ssoMax]sockOpt{ - ssoTrafficClass: {iana.ProtocolIPv6, sysIPV6_TCLASS, ssoTypeInt}, - ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt}, - ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface}, - ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt}, - ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt}, - ssoReceiveTrafficClass: {iana.ProtocolIPv6, sysIPV6_RECVTCLASS, ssoTypeInt}, - ssoReceiveHopLimit: {iana.ProtocolIPv6, sysIPV6_RECVHOPLIMIT, ssoTypeInt}, - ssoReceivePacketInfo: {iana.ProtocolIPv6, sysIPV6_RECVPKTINFO, ssoTypeInt}, - ssoReceivePathMTU: {iana.ProtocolIPv6, sysIPV6_RECVPATHMTU, ssoTypeInt}, - ssoPathMTU: {iana.ProtocolIPv6, sysIPV6_PATHMTU, ssoTypeMTUInfo}, - ssoChecksum: {iana.ProtocolReserved, sysIPV6_CHECKSUM, ssoTypeInt}, - ssoICMPFilter: {iana.ProtocolIPv6ICMP, sysICMPV6_FILTER, ssoTypeICMPFilter}, - ssoJoinGroup: {iana.ProtocolIPv6, sysMCAST_JOIN_GROUP, ssoTypeGroupReq}, - ssoLeaveGroup: {iana.ProtocolIPv6, sysMCAST_LEAVE_GROUP, ssoTypeGroupReq}, - ssoJoinSourceGroup: {iana.ProtocolIPv6, sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq}, - ssoLeaveSourceGroup: {iana.ProtocolIPv6, sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq}, - ssoBlockSourceGroup: {iana.ProtocolIPv6, 
sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq}, - ssoUnblockSourceGroup: {iana.ProtocolIPv6, sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq}, + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolReserved, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMPV6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: 
sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoAttachFilter: {Option: socket.Option{Level: sysSOL_SOCKET, Name: sysSO_ATTACH_FILTER, Len: sizeofSockFprog}}, } ) -func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) { +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { sa.Family = syscall.AF_INET6 copy(sa.Addr[:], ip) sa.Scope_id = uint32(i) } -func (pi *sysInet6Pktinfo) setIfindex(i int) { +func (pi *inet6Pktinfo) setIfindex(i int) { pi.Ifindex = int32(i) } -func (mreq *sysIPv6Mreq) setIfindex(i int) { +func (mreq *ipv6Mreq) setIfindex(i int) { mreq.Ifindex = int32(i) } -func (gr *sysGroupReq) setGroup(grp net.IP) { - sa := (*sysSockaddrInet6)(unsafe.Pointer(&gr.Group)) +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group)) sa.Family = syscall.AF_INET6 copy(sa.Addr[:], grp) } -func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) { - sa := (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Group)) +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group)) sa.Family = syscall.AF_INET6 copy(sa.Addr[:], grp) - sa = (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source)) sa.Family = syscall.AF_INET6 copy(sa.Addr[:], src) } diff --git a/fn/vendor/golang.org/x/net/ipv6/sys_solaris.go b/fn/vendor/golang.org/x/net/ipv6/sys_solaris.go new file mode 100644 index 000000000..d348b5f6e --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv6/sys_solaris.go @@ -0,0 +1,74 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: 
sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 260)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/fn/vendor/golang.org/x/net/ipv6/sys_ssmreq.go b/fn/vendor/golang.org/x/net/ipv6/sys_ssmreq.go new file mode 100644 index 000000000..add8ccc0b --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv6/sys_ssmreq.go @@ 
-0,0 +1,54 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd linux solaris + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +var freebsd32o64 bool + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var gr groupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var b []byte + if freebsd32o64 { + var d [sizeofGroupReq + 4]byte + s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))[:sizeofGroupReq] + } + return so.Set(c, b) +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + var gsr groupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var b []byte + if freebsd32o64 { + var d [sizeofGroupSourceReq + 4]byte + s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))[:sizeofGroupSourceReq] + } + return so.Set(c, b) +} diff --git a/fn/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go b/fn/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go new file mode 100644 index 000000000..581ee490f --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !darwin,!freebsd,!linux,!solaris + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} diff --git a/fn/vendor/golang.org/x/net/ipv6/sys_stub.go b/fn/vendor/golang.org/x/net/ipv6/sys_stub.go index 6c9a14304..b845388ea 100644 --- a/fn/vendor/golang.org/x/net/ipv6/sys_stub.go +++ b/fn/vendor/golang.org/x/net/ipv6/sys_stub.go @@ -2,14 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build nacl plan9 solaris +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv6 -type sysSockoptLen int32 - var ( ctlOpts = [ctlMax]ctlOpt{} - sockOpts = [ssoMax]sockOpt{} + sockOpts = map[int]*sockOpt{} ) diff --git a/fn/vendor/golang.org/x/net/ipv6/sys_windows.go b/fn/vendor/golang.org/x/net/ipv6/sys_windows.go index fda875736..fc36b018b 100644 --- a/fn/vendor/golang.org/x/net/ipv6/sys_windows.go +++ b/fn/vendor/golang.org/x/net/ipv6/sys_windows.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -9,6 +9,7 @@ import ( "syscall" "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" ) const ( @@ -21,12 +22,14 @@ const ( sysIPV6_LEAVE_GROUP = 0xd sysIPV6_PKTINFO = 0x13 - sysSizeofSockaddrInet6 = 0x1c + sizeofSockaddrInet6 = 0x1c - sysSizeofIPv6Mreq = 0x14 + sizeofIPv6Mreq = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofICMPv6Filter = 0 ) -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Family uint16 Port uint16 Flowinfo uint32 @@ -34,30 +37,39 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Interface uint32 } +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type icmpv6Filter struct { + // TODO(mikio): implement this +} + var ( ctlOpts = [ctlMax]ctlOpt{} - sockOpts = [ssoMax]sockOpt{ - ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt}, - ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface}, - ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt}, - ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt}, - ssoJoinGroup: {iana.ProtocolIPv6, sysIPV6_JOIN_GROUP, ssoTypeIPMreq}, - ssoLeaveGroup: {iana.ProtocolIPv6, sysIPV6_LEAVE_GROUP, ssoTypeIPMreq}, + sockOpts = map[int]*sockOpt{ + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: 
sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, } ) -func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) { +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { sa.Family = syscall.AF_INET6 copy(sa.Addr[:], ip) sa.Scope_id = uint32(i) } -func (mreq *sysIPv6Mreq) setIfindex(i int) { +func (mreq *ipv6Mreq) setIfindex(i int) { mreq.Interface = uint32(i) } diff --git a/fn/vendor/golang.org/x/net/ipv6/syscall_linux_386.go b/fn/vendor/golang.org/x/net/ipv6/syscall_linux_386.go deleted file mode 100644 index 82633a564..000000000 --- a/fn/vendor/golang.org/x/net/ipv6/syscall_linux_386.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ipv6 - -import ( - "syscall" - "unsafe" -) - -const ( - sysGETSOCKOPT = 0xf - sysSETSOCKOPT = 0xe -) - -func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (int, syscall.Errno) - -func getsockopt(fd, level, name int, v unsafe.Pointer, l *sysSockoptLen) error { - if _, errno := socketcall(sysGETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 { - return error(errno) - } - return nil -} - -func setsockopt(fd, level, name int, v unsafe.Pointer, l sysSockoptLen) error { - if _, errno := socketcall(sysSETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 { - return error(errno) - } - return nil -} diff --git a/fn/vendor/golang.org/x/net/ipv6/syscall_unix.go b/fn/vendor/golang.org/x/net/ipv6/syscall_unix.go deleted file mode 100644 index a2bd8363a..000000000 --- a/fn/vendor/golang.org/x/net/ipv6/syscall_unix.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin dragonfly freebsd linux,!386 netbsd openbsd - -package ipv6 - -import ( - "syscall" - "unsafe" -) - -func getsockopt(fd, level, name int, v unsafe.Pointer, l *sysSockoptLen) error { - if _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 { - return error(errno) - } - return nil -} - -func setsockopt(fd, level, name int, v unsafe.Pointer, l sysSockoptLen) error { - if _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 { - return error(errno) - } - return nil -} diff --git a/fn/vendor/golang.org/x/net/ipv6/unicast_test.go b/fn/vendor/golang.org/x/net/ipv6/unicast_test.go index db5b08a28..a0b7d9550 100644 --- a/fn/vendor/golang.org/x/net/ipv6/unicast_test.go +++ b/fn/vendor/golang.org/x/net/ipv6/unicast_test.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -20,14 +20,14 @@ import ( func TestPacketConnReadWriteUnicastUDP(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if !supportsIPv6 { t.Skip("ipv6 is not supported") } - c, err := net.ListenPacket("udp6", "[::1]:0") + c, err := nettest.NewLocalPacketListener("udp6") if err != nil { t.Fatal(err) } @@ -35,11 +35,7 @@ func TestPacketConnReadWriteUnicastUDP(t *testing.T) { p := ipv6.NewPacketConn(c) defer p.Close() - dst, err := net.ResolveUDPAddr("udp6", c.LocalAddr().String()) - if err != nil { - t.Fatal(err) - } - + dst := c.LocalAddr() cm := ipv6.ControlMessage{ TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, Src: net.IPv6loopback, @@ -54,7 +50,8 @@ func TestPacketConnReadWriteUnicastUDP(t *testing.T) { for i, toggle := range []bool{true, false, true} { if err := p.SetControlMessage(cf, toggle); err != nil { if nettest.ProtocolNotSupported(err) { - t.Skipf("not supported on %s", runtime.GOOS) + t.Logf("not supported on %s", runtime.GOOS) + continue } t.Fatal(err) } @@ -81,7 +78,7 @@ func TestPacketConnReadWriteUnicastUDP(t *testing.T) { func TestPacketConnReadWriteUnicastICMP(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if !supportsIPv6 { @@ -127,7 +124,11 @@ func TestPacketConnReadWriteUnicastICMP(t *testing.T) { if toggle { psh = nil if err := p.SetChecksum(true, 2); err != nil { - t.Fatal(err) + // Solaris never allows to modify + // ICMP properties. 
+ if runtime.GOOS != "solaris" { + t.Fatal(err) + } } } else { psh = pshicmp @@ -147,7 +148,8 @@ func TestPacketConnReadWriteUnicastICMP(t *testing.T) { } if err := p.SetControlMessage(cf, toggle); err != nil { if nettest.ProtocolNotSupported(err) { - t.Skipf("not supported on %s", runtime.GOOS) + t.Logf("not supported on %s", runtime.GOOS) + continue } t.Fatal(err) } diff --git a/fn/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go b/fn/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go index 7bb2e440a..e175dccf5 100644 --- a/fn/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go +++ b/fn/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -16,7 +16,7 @@ import ( func TestConnUnicastSocketOptions(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if !supportsIPv6 { @@ -29,8 +29,15 @@ func TestConnUnicastSocketOptions(t *testing.T) { } defer ln.Close() - done := make(chan bool) - go acceptor(t, ln, done) + errc := make(chan error, 1) + go func() { + c, err := ln.Accept() + if err != nil { + errc <- err + return + } + errc <- c.Close() + }() c, err := net.Dial("tcp6", ln.Addr().String()) if err != nil { @@ -40,7 +47,9 @@ func TestConnUnicastSocketOptions(t *testing.T) { testUnicastSocketOptions(t, ipv6.NewConn(c)) - <-done + if err := <-errc; err != nil { + t.Errorf("server: %v", err) + } } var packetConnUnicastSocketOptionTests = []struct { @@ -52,7 +61,7 @@ var packetConnUnicastSocketOptionTests = []struct { func TestPacketConnUnicastSocketOptions(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": + case "nacl", "plan9", "windows": t.Skipf("not supported on %s", runtime.GOOS) } if 
!supportsIPv6 { diff --git a/fn/vendor/golang.org/x/net/ipv6/zsys_darwin.go b/fn/vendor/golang.org/x/net/ipv6/zsys_darwin.go index cb044b033..6aab1dfab 100644 --- a/fn/vendor/golang.org/x/net/ipv6/zsys_darwin.go +++ b/fn/vendor/golang.org/x/net/ipv6/zsys_darwin.go @@ -71,19 +71,19 @@ const ( sysIPV6_PORTRANGE_HIGH = 0x1 sysIPV6_PORTRANGE_LOW = 0x2 - sysSizeofSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x84 - sysSizeofGroupSourceReq = 0x104 + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 - sysSizeofICMPv6Filter = 0x20 + sizeofICMPv6Filter = 0x20 ) -type sysSockaddrStorage struct { +type sockaddrStorage struct { Len uint8 Family uint8 X__ss_pad1 [6]int8 @@ -91,7 +91,7 @@ type sysSockaddrStorage struct { X__ss_pad2 [112]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Len uint8 Family uint8 Port uint16 @@ -100,31 +100,31 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex uint32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Interface uint32 } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Filt [8]uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [128]byte } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [128]byte Pad_cgo_1 [128]byte diff --git a/fn/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go b/fn/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go index 5a03ab734..d2de804d8 100644 --- a/fn/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go +++ 
b/fn/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_dragonfly.go -// +build dragonfly - package ipv6 const ( @@ -52,16 +50,16 @@ const ( sysIPV6_PORTRANGE_HIGH = 0x1 sysIPV6_PORTRANGE_LOW = 0x2 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6Mreq = 0x14 + sizeofIPv6Mreq = 0x14 - sysSizeofICMPv6Filter = 0x20 + sizeofICMPv6Filter = 0x20 ) -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Len uint8 Family uint8 Port uint16 @@ -70,21 +68,21 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex uint32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Interface uint32 } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Filt [8]uint32 } diff --git a/fn/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go b/fn/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go index 4ace96f0c..919e572d4 100644 --- a/fn/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go +++ b/fn/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go @@ -62,19 +62,19 @@ const ( sysIPV6_PORTRANGE_HIGH = 0x1 sysIPV6_PORTRANGE_LOW = 0x2 - sysSizeofSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x84 - sysSizeofGroupSourceReq = 0x104 + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 - sysSizeofICMPv6Filter = 0x20 + sizeofICMPv6Filter = 0x20 ) -type sysSockaddrStorage struct { +type 
sockaddrStorage struct { Len uint8 Family uint8 X__ss_pad1 [6]int8 @@ -82,7 +82,7 @@ type sysSockaddrStorage struct { X__ss_pad2 [112]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Len uint8 Family uint8 Port uint16 @@ -91,32 +91,32 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex uint32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Interface uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 - Group sysSockaddrStorage + Group sockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 - Group sysSockaddrStorage - Source sysSockaddrStorage + Group sockaddrStorage + Source sockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Filt [8]uint32 } diff --git a/fn/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go b/fn/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go index 4a62c2d5c..cb8141f9c 100644 --- a/fn/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go +++ b/fn/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go @@ -62,19 +62,19 @@ const ( sysIPV6_PORTRANGE_HIGH = 0x1 sysIPV6_PORTRANGE_LOW = 0x2 - sysSizeofSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 - sysSizeofICMPv6Filter = 0x20 + sizeofICMPv6Filter = 0x20 ) -type sysSockaddrStorage struct { +type sockaddrStorage struct { Len uint8 Family uint8 X__ss_pad1 [6]int8 @@ -82,7 +82,7 @@ type sysSockaddrStorage 
struct { X__ss_pad2 [112]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Len uint8 Family uint8 Port uint16 @@ -91,34 +91,34 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex uint32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Interface uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysSockaddrStorage + Group sockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysSockaddrStorage - Source sysSockaddrStorage + Group sockaddrStorage + Source sockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Filt [8]uint32 } diff --git a/fn/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go b/fn/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go index 4a62c2d5c..cb8141f9c 100644 --- a/fn/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go +++ b/fn/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go @@ -62,19 +62,19 @@ const ( sysIPV6_PORTRANGE_HIGH = 0x1 sysIPV6_PORTRANGE_LOW = 0x2 - sysSizeofSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 - sysSizeofICMPv6Filter = 0x20 + sizeofICMPv6Filter = 0x20 ) -type sysSockaddrStorage struct { +type sockaddrStorage struct { Len uint8 Family uint8 X__ss_pad1 [6]int8 @@ -82,7 +82,7 @@ type sysSockaddrStorage struct { X__ss_pad2 [112]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 
struct { Len uint8 Family uint8 Port uint16 @@ -91,34 +91,34 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex uint32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Interface uint32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysSockaddrStorage + Group sockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysSockaddrStorage - Source sysSockaddrStorage + Group sockaddrStorage + Source sockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Filt [8]uint32 } diff --git a/fn/vendor/golang.org/x/net/ipv6/zsys_linux_386.go b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_386.go index 272792929..73aa8c6df 100644 --- a/fn/vendor/golang.org/x/net/ipv6/zsys_linux_386.go +++ b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_386.go @@ -84,25 +84,30 @@ const ( sysICMPV6_FILTER_BLOCKOTHERS = 0x3 sysICMPV6_FILTER_PASSONLY = 0x4 - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6FlowlabelReq = 0x20 + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x84 - sysSizeofGroupSourceReq = 0x104 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 - sysSizeofICMPv6Filter = 0x20 + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet6 struct { +type 
sockaddrInet6 struct { Family uint16 Port uint16 Flowinfo uint32 @@ -110,17 +115,17 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex int32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6FlowlabelReq struct { +type ipv6FlowlabelReq struct { Dst [16]byte /* in6_addr */ Label uint32 Action uint8 @@ -131,22 +136,35 @@ type sysIPv6FlowlabelReq struct { X__flr_pad uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Ifindex int32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Data [8]uint32 } + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/fn/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go index 2f742e956..b64f0157d 100644 --- a/fn/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go +++ b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go @@ -84,25 +84,30 @@ const ( sysICMPV6_FILTER_BLOCKOTHERS = 0x3 sysICMPV6_FILTER_PASSONLY = 0x4 - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6FlowlabelReq = 0x20 + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 
+ sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 - sysSizeofICMPv6Filter = 0x20 + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Family uint16 Port uint16 Flowinfo uint32 @@ -110,17 +115,17 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex int32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6FlowlabelReq struct { +type ipv6FlowlabelReq struct { Dst [16]byte /* in6_addr */ Label uint32 Action uint8 @@ -131,24 +136,37 @@ type sysIPv6FlowlabelReq struct { X__flr_pad uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Ifindex int32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Data [8]uint32 } + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/fn/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go index 272792929..73aa8c6df 100644 --- a/fn/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go +++ b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go @@ -84,25 +84,30 @@ const ( sysICMPV6_FILTER_BLOCKOTHERS = 0x3 sysICMPV6_FILTER_PASSONLY = 
0x4 - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6FlowlabelReq = 0x20 + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x84 - sysSizeofGroupSourceReq = 0x104 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 - sysSizeofICMPv6Filter = 0x20 + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Family uint16 Port uint16 Flowinfo uint32 @@ -110,17 +115,17 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex int32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6FlowlabelReq struct { +type ipv6FlowlabelReq struct { Dst [16]byte /* in6_addr */ Label uint32 Action uint8 @@ -131,22 +136,35 @@ type sysIPv6FlowlabelReq struct { X__flr_pad uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Ifindex int32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Data [8]uint32 } + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf 
uint8 + K uint32 +} diff --git a/fn/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go index ab1046453..b64f0157d 100644 --- a/fn/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go +++ b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,arm64 - package ipv6 const ( @@ -86,25 +84,30 @@ const ( sysICMPV6_FILTER_BLOCKOTHERS = 0x3 sysICMPV6_FILTER_PASSONLY = 0x4 - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6FlowlabelReq = 0x20 + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 - sysSizeofICMPv6Filter = 0x20 + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Family uint16 Port uint16 Flowinfo uint32 @@ -112,17 +115,17 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex int32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6FlowlabelReq struct { +type ipv6FlowlabelReq struct { Dst [16]byte /* in6_addr */ Label uint32 Action uint8 @@ -133,24 +136,37 @@ type sysIPv6FlowlabelReq struct { X__flr_pad uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Ifindex int32 } -type sysGroupReq struct { +type groupReq struct { 
Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Data [8]uint32 } + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/fn/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go new file mode 100644 index 000000000..73aa8c6df --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 
0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type 
groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/fn/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go index ec8ce1579..b64f0157d 100644 --- a/fn/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go +++ b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,mips64 - package ipv6 const ( @@ -86,25 +84,30 @@ const ( sysICMPV6_FILTER_BLOCKOTHERS = 0x3 sysICMPV6_FILTER_PASSONLY = 0x4 - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6FlowlabelReq = 0x20 + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 - sysSizeofICMPv6Filter = 0x20 + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Family uint16 Port uint16 Flowinfo uint32 @@ -112,17 +115,17 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex int32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6FlowlabelReq struct { +type 
ipv6FlowlabelReq struct { Dst [16]byte /* in6_addr */ Label uint32 Action uint8 @@ -133,24 +136,37 @@ type sysIPv6FlowlabelReq struct { X__flr_pad uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Ifindex int32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Data [8]uint32 } + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/fn/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go index 2341ae677..b64f0157d 100644 --- a/fn/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go +++ b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,mips64le - package ipv6 const ( @@ -86,25 +84,30 @@ const ( sysICMPV6_FILTER_BLOCKOTHERS = 0x3 sysICMPV6_FILTER_PASSONLY = 0x4 - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6FlowlabelReq = 0x20 + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 - sysSizeofICMPv6Filter = 0x20 + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + 
sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Family uint16 Port uint16 Flowinfo uint32 @@ -112,17 +115,17 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex int32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6FlowlabelReq struct { +type ipv6FlowlabelReq struct { Dst [16]byte /* in6_addr */ Label uint32 Action uint8 @@ -133,24 +136,37 @@ type sysIPv6FlowlabelReq struct { X__flr_pad uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Ifindex int32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Data [8]uint32 } + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/fn/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go new file mode 100644 index 000000000..73aa8c6df --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + 
sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + 
sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/fn/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go new file mode 100644 index 000000000..c9bf6a87e --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + 
sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 
+ X__data [126]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/fn/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go index b99b8a515..b64f0157d 100644 --- a/fn/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go +++ b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,ppc64 - package ipv6 const ( @@ -86,25 +84,30 @@ const ( sysICMPV6_FILTER_BLOCKOTHERS = 0x3 sysICMPV6_FILTER_PASSONLY = 0x4 - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6FlowlabelReq = 0x20 + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 - sysSizeofICMPv6Filter = 0x20 + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 
+ sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Family uint16 Port uint16 Flowinfo uint32 @@ -112,17 +115,17 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex int32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6FlowlabelReq struct { +type ipv6FlowlabelReq struct { Dst [16]byte /* in6_addr */ Label uint32 Action uint8 @@ -133,24 +136,37 @@ type sysIPv6FlowlabelReq struct { X__flr_pad uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Ifindex int32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Data [8]uint32 } + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/fn/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go index 992b56e2e..b64f0157d 100644 --- a/fn/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go +++ b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_linux.go -// +build linux,ppc64le - package ipv6 const ( @@ -86,25 +84,30 @@ const ( sysICMPV6_FILTER_BLOCKOTHERS = 0x3 
sysICMPV6_FILTER_PASSONLY = 0x4 - sysSizeofKernelSockaddrStorage = 0x80 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6FlowlabelReq = 0x20 + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a - sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x88 - sysSizeofGroupSourceReq = 0x108 + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 - sysSizeofICMPv6Filter = 0x20 + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 ) -type sysKernelSockaddrStorage struct { +type kernelSockaddrStorage struct { Family uint16 X__data [126]int8 } -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Family uint16 Port uint16 Flowinfo uint32 @@ -112,17 +115,17 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex int32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6FlowlabelReq struct { +type ipv6FlowlabelReq struct { Dst [16]byte /* in6_addr */ Label uint32 Action uint8 @@ -133,24 +136,37 @@ type sysIPv6FlowlabelReq struct { X__flr_pad uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Ifindex int32 } -type sysGroupReq struct { +type groupReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage + Group kernelSockaddrStorage } -type sysGroupSourceReq struct { +type groupSourceReq struct { Interface uint32 Pad_cgo_0 [4]byte - Group sysKernelSockaddrStorage - Source sysKernelSockaddrStorage + Group kernelSockaddrStorage + Source kernelSockaddrStorage } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Data [8]uint32 } + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter 
+} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/fn/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go new file mode 100644 index 000000000..b64f0157d --- /dev/null +++ b/fn/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + 
sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/fn/vendor/golang.org/x/net/ipv6/zsys_netbsd.go 
b/fn/vendor/golang.org/x/net/ipv6/zsys_netbsd.go index d6ec88e39..bcada13b7 100644 --- a/fn/vendor/golang.org/x/net/ipv6/zsys_netbsd.go +++ b/fn/vendor/golang.org/x/net/ipv6/zsys_netbsd.go @@ -46,16 +46,16 @@ const ( sysIPV6_PORTRANGE_HIGH = 0x1 sysIPV6_PORTRANGE_LOW = 0x2 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6Mreq = 0x14 + sizeofIPv6Mreq = 0x14 - sysSizeofICMPv6Filter = 0x20 + sizeofICMPv6Filter = 0x20 ) -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Len uint8 Family uint8 Port uint16 @@ -64,21 +64,21 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex uint32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Interface uint32 } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Filt [8]uint32 } diff --git a/fn/vendor/golang.org/x/net/ipv6/zsys_openbsd.go b/fn/vendor/golang.org/x/net/ipv6/zsys_openbsd.go index 3e080b78a..86cf3c637 100644 --- a/fn/vendor/golang.org/x/net/ipv6/zsys_openbsd.go +++ b/fn/vendor/golang.org/x/net/ipv6/zsys_openbsd.go @@ -55,16 +55,16 @@ const ( sysIPV6_PORTRANGE_HIGH = 0x1 sysIPV6_PORTRANGE_LOW = 0x2 - sysSizeofSockaddrInet6 = 0x1c - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x20 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 - sysSizeofIPv6Mreq = 0x14 + sizeofIPv6Mreq = 0x14 - sysSizeofICMPv6Filter = 0x20 + sizeofICMPv6Filter = 0x20 ) -type sysSockaddrInet6 struct { +type sockaddrInet6 struct { Len uint8 Family uint8 Port uint16 @@ -73,21 +73,21 @@ type sysSockaddrInet6 struct { Scope_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* 
in6_addr */ Ifindex uint32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Interface uint32 } -type sysICMPv6Filter struct { +type icmpv6Filter struct { Filt [8]uint32 } diff --git a/fn/vendor/golang.org/x/net/ipv6/zsys_solaris.go b/fn/vendor/golang.org/x/net/ipv6/zsys_solaris.go index cdf00c25d..cf1837dd2 100644 --- a/fn/vendor/golang.org/x/net/ipv6/zsys_solaris.go +++ b/fn/vendor/golang.org/x/net/ipv6/zsys_solaris.go @@ -1,8 +1,6 @@ // Created by cgo -godefs - DO NOT EDIT // cgo -godefs defs_solaris.go -// +build solaris - package ipv6 const ( @@ -44,6 +42,13 @@ const ( sysIPV6_RECVDSTOPTS = 0x28 + sysMCAST_JOIN_GROUP = 0x29 + sysMCAST_LEAVE_GROUP = 0x2a + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_JOIN_SOURCE_GROUP = 0x2d + sysMCAST_LEAVE_SOURCE_GROUP = 0x2e + sysIPV6_PREFER_SRC_HOME = 0x1 sysIPV6_PREFER_SRC_COA = 0x2 sysIPV6_PREFER_SRC_PUBLIC = 0x4 @@ -67,16 +72,26 @@ const ( sysICMP6_FILTER = 0x1 - sysSizeofSockaddrInet6 = 0x20 - sysSizeofInet6Pktinfo = 0x14 - sysSizeofIPv6Mtuinfo = 0x24 + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet6 = 0x20 + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x24 - sysSizeofIPv6Mreq = 0x14 + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x104 + sizeofGroupSourceReq = 0x204 - sysSizeofICMPv6Filter = 0x20 + sizeofICMPv6Filter = 0x20 ) -type sysSockaddrInet6 struct { +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +type sockaddrInet6 struct { Family uint16 Port uint16 Flowinfo uint32 @@ -85,21 +100,32 @@ type sysSockaddrInet6 struct { X__sin6_src_id uint32 } -type sysInet6Pktinfo struct { +type inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex uint32 } -type sysIPv6Mtuinfo struct { - Addr sysSockaddrInet6 +type ipv6Mtuinfo struct { + Addr sockaddrInet6 Mtu uint32 } -type 
sysIPv6Mreq struct { +type ipv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Interface uint32 } -type sysICMPv6Filter struct { +type groupReq struct { + Interface uint32 + Pad_cgo_0 [256]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [256]byte + Pad_cgo_1 [256]byte +} + +type icmpv6Filter struct { X__icmp6_filt [8]uint32 } diff --git a/fn/vendor/golang.org/x/net/lex/httplex/httplex.go b/fn/vendor/golang.org/x/net/lex/httplex/httplex.go new file mode 100644 index 000000000..20f2b8940 --- /dev/null +++ b/fn/vendor/golang.org/x/net/lex/httplex/httplex.go @@ -0,0 +1,351 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httplex contains rules around lexical matters of various +// HTTP-related specifications. +// +// This package is shared by the standard library (which vendors it) +// and x/net/http2. It comes with no API stability promise. 
+package httplex + +import ( + "net" + "strings" + "unicode/utf8" + + "golang.org/x/net/idna" +) + +var isTokenTable = [127]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +func IsTokenRune(r rune) bool { + i := int(r) + return i < len(isTokenTable) && isTokenTable[i] +} + +func isNotToken(r rune) bool { + return !IsTokenRune(r) +} + +// HeaderValuesContainsToken reports whether any string in values +// contains the provided token, ASCII case-insensitively. +func HeaderValuesContainsToken(values []string, token string) bool { + for _, v := range values { + if headerValueContainsToken(v, token) { + return true + } + } + return false +} + +// isOWS reports whether b is an optional whitespace byte, as defined +// by RFC 7230 section 3.2.3. +func isOWS(b byte) bool { return b == ' ' || b == '\t' } + +// trimOWS returns x with all optional whitespace removes from the +// beginning and end. +func trimOWS(x string) string { + // TODO: consider using strings.Trim(x, " \t") instead, + // if and when it's fast enough. 
See issue 10292. + // But this ASCII-only code will probably always beat UTF-8 + // aware code. + for len(x) > 0 && isOWS(x[0]) { + x = x[1:] + } + for len(x) > 0 && isOWS(x[len(x)-1]) { + x = x[:len(x)-1] + } + return x +} + +// headerValueContainsToken reports whether v (assumed to be a +// 0#element, in the ABNF extension described in RFC 7230 section 7) +// contains token amongst its comma-separated tokens, ASCII +// case-insensitively. +func headerValueContainsToken(v string, token string) bool { + v = trimOWS(v) + if comma := strings.IndexByte(v, ','); comma != -1 { + return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token) + } + return tokenEqual(v, token) +} + +// lowerASCII returns the ASCII lowercase version of b. +func lowerASCII(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively. +func tokenEqual(t1, t2 string) bool { + if len(t1) != len(t2) { + return false + } + for i, b := range t1 { + if b >= utf8.RuneSelf { + // No UTF-8 or non-ASCII allowed in tokens. + return false + } + if lowerASCII(byte(b)) != lowerASCII(t2[i]) { + return false + } + } + return true +} + +// isLWS reports whether b is linear white space, according +// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 +// LWS = [CRLF] 1*( SP | HT ) +func isLWS(b byte) bool { return b == ' ' || b == '\t' } + +// isCTL reports whether b is a control byte, according +// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 +// CTL = +func isCTL(b byte) bool { + const del = 0x7f // a CTL + return b < ' ' || b == del +} + +// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name. +// HTTP/2 imposes the additional restriction that uppercase ASCII +// letters are not allowed. +// +// RFC 7230 says: +// header-field = field-name ":" OWS field-value OWS +// field-name = token +// token = 1*tchar +// tchar = "!" 
/ "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / +// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +func ValidHeaderFieldName(v string) bool { + if len(v) == 0 { + return false + } + for _, r := range v { + if !IsTokenRune(r) { + return false + } + } + return true +} + +// ValidHostHeader reports whether h is a valid host header. +func ValidHostHeader(h string) bool { + // The latest spec is actually this: + // + // http://tools.ietf.org/html/rfc7230#section-5.4 + // Host = uri-host [ ":" port ] + // + // Where uri-host is: + // http://tools.ietf.org/html/rfc3986#section-3.2.2 + // + // But we're going to be much more lenient for now and just + // search for any byte that's not a valid byte in any of those + // expressions. + for i := 0; i < len(h); i++ { + if !validHostByte[h[i]] { + return false + } + } + return true +} + +// See the validHostHeader comment. +var validHostByte = [256]bool{ + '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true, + '8': true, '9': true, + + 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true, + 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true, + 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true, + 'y': true, 'z': true, + + 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true, + 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true, + 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true, + 'Y': true, 'Z': true, + + '!': true, // sub-delims + '$': true, // sub-delims + '%': true, // pct-encoded (and used in IPv6 zones) + '&': true, // sub-delims + '(': true, // sub-delims + ')': true, // sub-delims + '*': true, // sub-delims + '+': true, // sub-delims + ',': true, // sub-delims + '-': true, // unreserved + '.': true, // unreserved + ':': true, // IPv6address + Host expression's optional 
port + ';': true, // sub-delims + '=': true, // sub-delims + '[': true, + '\'': true, // sub-delims + ']': true, + '_': true, // unreserved + '~': true, // unreserved +} + +// ValidHeaderFieldValue reports whether v is a valid "field-value" according to +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 : +// +// message-header = field-name ":" [ field-value ] +// field-value = *( field-content | LWS ) +// field-content = +// +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 : +// +// TEXT = +// LWS = [CRLF] 1*( SP | HT ) +// CTL = +// +// RFC 7230 says: +// field-value = *( field-content / obs-fold ) +// obj-fold = N/A to http2, and deprecated +// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +// field-vchar = VCHAR / obs-text +// obs-text = %x80-FF +// VCHAR = "any visible [USASCII] character" +// +// http2 further says: "Similarly, HTTP/2 allows header field values +// that are not valid. While most of the values that can be encoded +// will not alter header field parsing, carriage return (CR, ASCII +// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII +// 0x0) might be exploited by an attacker if they are translated +// verbatim. Any request or response that contains a character not +// permitted in a header field value MUST be treated as malformed +// (Section 8.1.2.6). Valid characters are defined by the +// field-content ABNF rule in Section 3.2 of [RFC7230]." +// +// This function does not (yet?) properly handle the rejection of +// strings that begin or end with SP or HTAB. +func ValidHeaderFieldValue(v string) bool { + for i := 0; i < len(v); i++ { + b := v[i] + if isCTL(b) && !isLWS(b) { + return false + } + } + return true +} + +func isASCII(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} + +// PunycodeHostPort returns the IDNA Punycode version +// of the provided "host" or "host:port" string. 
+func PunycodeHostPort(v string) (string, error) { + if isASCII(v) { + return v, nil + } + + host, port, err := net.SplitHostPort(v) + if err != nil { + // The input 'v' argument was just a "host" argument, + // without a port. This error should not be returned + // to the caller. + host = v + port = "" + } + host, err = idna.ToASCII(host) + if err != nil { + // Non-UTF-8? Not representable in Punycode, in any + // case. + return "", err + } + if port == "" { + return host, nil + } + return net.JoinHostPort(host, port), nil +} diff --git a/fn/vendor/golang.org/x/net/lex/httplex/httplex_test.go b/fn/vendor/golang.org/x/net/lex/httplex/httplex_test.go new file mode 100644 index 000000000..f47adc939 --- /dev/null +++ b/fn/vendor/golang.org/x/net/lex/httplex/httplex_test.go @@ -0,0 +1,119 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httplex + +import ( + "testing" +) + +func isChar(c rune) bool { return c <= 127 } + +func isCtl(c rune) bool { return c <= 31 || c == 127 } + +func isSeparator(c rune) bool { + switch c { + case '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', '\t': + return true + } + return false +} + +func TestIsToken(t *testing.T) { + for i := 0; i <= 130; i++ { + r := rune(i) + expected := isChar(r) && !isCtl(r) && !isSeparator(r) + if IsTokenRune(r) != expected { + t.Errorf("isToken(0x%x) = %v", r, !expected) + } + } +} + +func TestHeaderValuesContainsToken(t *testing.T) { + tests := []struct { + vals []string + token string + want bool + }{ + { + vals: []string{"foo"}, + token: "foo", + want: true, + }, + { + vals: []string{"bar", "foo"}, + token: "foo", + want: true, + }, + { + vals: []string{"foo"}, + token: "FOO", + want: true, + }, + { + vals: []string{"foo"}, + token: "bar", + want: false, + }, + { + vals: []string{" foo "}, + token: "FOO", + want: true, + }, + { + vals: 
[]string{"foo,bar"}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar,foo,bar"}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar , foo"}, + token: "FOO", + want: true, + }, + { + vals: []string{"foo ,bar "}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar, foo ,bar"}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar , foo"}, + token: "FOO", + want: true, + }, + } + for _, tt := range tests { + got := HeaderValuesContainsToken(tt.vals, tt.token) + if got != tt.want { + t.Errorf("headerValuesContainsToken(%q, %q) = %v; want %v", tt.vals, tt.token, got, tt.want) + } + } +} + +func TestPunycodeHostPort(t *testing.T) { + tests := []struct { + in, want string + }{ + {"www.google.com", "www.google.com"}, + {"гофер.рф", "xn--c1ae0ajs.xn--p1ai"}, + {"bücher.de", "xn--bcher-kva.de"}, + {"bücher.de:8080", "xn--bcher-kva.de:8080"}, + {"[1::6]:8080", "[1::6]:8080"}, + } + for _, tt := range tests { + got, err := PunycodeHostPort(tt.in) + if tt.want != got || err != nil { + t.Errorf("PunycodeHostPort(%q) = %q, %v, want %q, nil", tt.in, got, err, tt.want) + } + } +} diff --git a/fn/vendor/golang.org/x/net/lif/address.go b/fn/vendor/golang.org/x/net/lif/address.go new file mode 100644 index 000000000..afb957fd8 --- /dev/null +++ b/fn/vendor/golang.org/x/net/lif/address.go @@ -0,0 +1,105 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +import ( + "errors" + "unsafe" +) + +// An Addr represents an address associated with packet routing. +type Addr interface { + // Family returns an address family. + Family() int +} + +// An Inet4Addr represents an internet address for IPv4. +type Inet4Addr struct { + IP [4]byte // IP address + PrefixLen int // address prefix length +} + +// Family implements the Family method of Addr interface. 
+func (a *Inet4Addr) Family() int { return sysAF_INET } + +// An Inet6Addr represents an internet address for IPv6. +type Inet6Addr struct { + IP [16]byte // IP address + PrefixLen int // address prefix length + ZoneID int // zone identifier +} + +// Family implements the Family method of Addr interface. +func (a *Inet6Addr) Family() int { return sysAF_INET6 } + +// Addrs returns a list of interface addresses. +// +// The provided af must be an address family and name must be a data +// link name. The zero value of af or name means a wildcard. +func Addrs(af int, name string) ([]Addr, error) { + eps, err := newEndpoints(af) + if len(eps) == 0 { + return nil, err + } + defer func() { + for _, ep := range eps { + ep.close() + } + }() + lls, err := links(eps, name) + if len(lls) == 0 { + return nil, err + } + var as []Addr + for _, ll := range lls { + var lifr lifreq + for i := 0; i < len(ll.Name); i++ { + lifr.Name[i] = int8(ll.Name[i]) + } + for _, ep := range eps { + ioc := int64(sysSIOCGLIFADDR) + err := ioctl(ep.s, uintptr(ioc), unsafe.Pointer(&lifr)) + if err != nil { + continue + } + sa := (*sockaddrStorage)(unsafe.Pointer(&lifr.Lifru[0])) + l := int(nativeEndian.Uint32(lifr.Lifru1[:4])) + if l == 0 { + continue + } + switch sa.Family { + case sysAF_INET: + a := &Inet4Addr{PrefixLen: l} + copy(a.IP[:], lifr.Lifru[4:8]) + as = append(as, a) + case sysAF_INET6: + a := &Inet6Addr{PrefixLen: l, ZoneID: int(nativeEndian.Uint32(lifr.Lifru[24:28]))} + copy(a.IP[:], lifr.Lifru[8:24]) + as = append(as, a) + } + } + } + return as, nil +} + +func parseLinkAddr(b []byte) ([]byte, error) { + nlen, alen, slen := int(b[1]), int(b[2]), int(b[3]) + l := 4 + nlen + alen + slen + if len(b) < l { + return nil, errors.New("invalid address") + } + b = b[4:] + var addr []byte + if nlen > 0 { + b = b[nlen:] + } + if alen > 0 { + addr = make([]byte, alen) + copy(addr, b[:alen]) + } + return addr, nil +} diff --git a/fn/vendor/golang.org/x/net/lif/address_test.go 
b/fn/vendor/golang.org/x/net/lif/address_test.go new file mode 100644 index 000000000..a25f10b67 --- /dev/null +++ b/fn/vendor/golang.org/x/net/lif/address_test.go @@ -0,0 +1,123 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +import ( + "fmt" + "testing" +) + +type addrFamily int + +func (af addrFamily) String() string { + switch af { + case sysAF_UNSPEC: + return "unspec" + case sysAF_INET: + return "inet4" + case sysAF_INET6: + return "inet6" + default: + return fmt.Sprintf("%d", af) + } +} + +const hexDigit = "0123456789abcdef" + +type llAddr []byte + +func (a llAddr) String() string { + if len(a) == 0 { + return "" + } + buf := make([]byte, 0, len(a)*3-1) + for i, b := range a { + if i > 0 { + buf = append(buf, ':') + } + buf = append(buf, hexDigit[b>>4]) + buf = append(buf, hexDigit[b&0xF]) + } + return string(buf) +} + +type ipAddr []byte + +func (a ipAddr) String() string { + if len(a) == 0 { + return "" + } + if len(a) == 4 { + return fmt.Sprintf("%d.%d.%d.%d", a[0], a[1], a[2], a[3]) + } + if len(a) == 16 { + return fmt.Sprintf("%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15]) + } + s := make([]byte, len(a)*2) + for i, tn := range a { + s[i*2], s[i*2+1] = hexDigit[tn>>4], hexDigit[tn&0xf] + } + return string(s) +} + +func (a *Inet4Addr) String() string { + return fmt.Sprintf("(%s %s %d)", addrFamily(a.Family()), ipAddr(a.IP[:]), a.PrefixLen) +} + +func (a *Inet6Addr) String() string { + return fmt.Sprintf("(%s %s %d %d)", addrFamily(a.Family()), ipAddr(a.IP[:]), a.PrefixLen, a.ZoneID) +} + +type addrPack struct { + af int + as []Addr +} + +func addrPacks() ([]addrPack, error) { + var lastErr error + var aps []addrPack + for _, af := range [...]int{sysAF_UNSPEC, sysAF_INET, 
sysAF_INET6} { + as, err := Addrs(af, "") + if err != nil { + lastErr = err + continue + } + aps = append(aps, addrPack{af: af, as: as}) + } + return aps, lastErr +} + +func TestAddrs(t *testing.T) { + aps, err := addrPacks() + if len(aps) == 0 && err != nil { + t.Fatal(err) + } + lps, err := linkPacks() + if len(lps) == 0 && err != nil { + t.Fatal(err) + } + for _, lp := range lps { + n := 0 + for _, ll := range lp.lls { + as, err := Addrs(lp.af, ll.Name) + if err != nil { + t.Fatal(lp.af, ll.Name, err) + } + t.Logf("af=%s name=%s %v", addrFamily(lp.af), ll.Name, as) + n += len(as) + } + for _, ap := range aps { + if ap.af != lp.af { + continue + } + if n != len(ap.as) { + t.Errorf("af=%s got %d; want %d", addrFamily(lp.af), n, len(ap.as)) + continue + } + } + } +} diff --git a/fn/vendor/golang.org/x/net/lif/binary.go b/fn/vendor/golang.org/x/net/lif/binary.go new file mode 100644 index 000000000..738a94f42 --- /dev/null +++ b/fn/vendor/golang.org/x/net/lif/binary.go @@ -0,0 +1,115 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +// This file contains duplicates of encoding/binary package. +// +// This package is supposed to be used by the net package of standard +// library. Therefore the package set used in the package must be the +// same as net package. 
+ +var ( + littleEndian binaryLittleEndian + bigEndian binaryBigEndian +) + +type binaryByteOrder interface { + Uint16([]byte) uint16 + Uint32([]byte) uint32 + Uint64([]byte) uint64 + PutUint16([]byte, uint16) + PutUint32([]byte, uint32) + PutUint64([]byte, uint64) +} + +type binaryLittleEndian struct{} + +func (binaryLittleEndian) Uint16(b []byte) uint16 { + _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 + return uint16(b[0]) | uint16(b[1])<<8 +} + +func (binaryLittleEndian) PutUint16(b []byte, v uint16) { + _ = b[1] // early bounds check to guarantee safety of writes below + b[0] = byte(v) + b[1] = byte(v >> 8) +} + +func (binaryLittleEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func (binaryLittleEndian) PutUint32(b []byte, v uint32) { + _ = b[3] // early bounds check to guarantee safety of writes below + b[0] = byte(v) + b[1] = byte(v >> 8) + b[2] = byte(v >> 16) + b[3] = byte(v >> 24) +} + +func (binaryLittleEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func (binaryLittleEndian) PutUint64(b []byte, v uint64) { + _ = b[7] // early bounds check to guarantee safety of writes below + b[0] = byte(v) + b[1] = byte(v >> 8) + b[2] = byte(v >> 16) + b[3] = byte(v >> 24) + b[4] = byte(v >> 32) + b[5] = byte(v >> 40) + b[6] = byte(v >> 48) + b[7] = byte(v >> 56) +} + +type binaryBigEndian struct{} + +func (binaryBigEndian) Uint16(b []byte) uint16 { + _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 + return uint16(b[1]) | uint16(b[0])<<8 +} + +func (binaryBigEndian) PutUint16(b []byte, v uint16) { + _ = b[1] // early bounds check to guarantee safety of writes below + 
b[0] = byte(v >> 8) + b[1] = byte(v) +} + +func (binaryBigEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 +} + +func (binaryBigEndian) PutUint32(b []byte, v uint32) { + _ = b[3] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 24) + b[1] = byte(v >> 16) + b[2] = byte(v >> 8) + b[3] = byte(v) +} + +func (binaryBigEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} + +func (binaryBigEndian) PutUint64(b []byte, v uint64) { + _ = b[7] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 56) + b[1] = byte(v >> 48) + b[2] = byte(v >> 40) + b[3] = byte(v >> 32) + b[4] = byte(v >> 24) + b[5] = byte(v >> 16) + b[6] = byte(v >> 8) + b[7] = byte(v) +} diff --git a/fn/vendor/golang.org/x/net/lif/defs_solaris.go b/fn/vendor/golang.org/x/net/lif/defs_solaris.go new file mode 100644 index 000000000..02c19981d --- /dev/null +++ b/fn/vendor/golang.org/x/net/lif/defs_solaris.go @@ -0,0 +1,90 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package lif + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_DGRAM = C.SOCK_DGRAM +) + +type sockaddrStorage C.struct_sockaddr_storage + +const ( + sysLIFC_NOXMIT = C.LIFC_NOXMIT + sysLIFC_EXTERNAL_SOURCE = C.LIFC_EXTERNAL_SOURCE + sysLIFC_TEMPORARY = C.LIFC_TEMPORARY + sysLIFC_ALLZONES = C.LIFC_ALLZONES + sysLIFC_UNDER_IPMP = C.LIFC_UNDER_IPMP + sysLIFC_ENABLED = C.LIFC_ENABLED + + sysSIOCGLIFADDR = C.SIOCGLIFADDR + sysSIOCGLIFDSTADDR = C.SIOCGLIFDSTADDR + sysSIOCGLIFFLAGS = C.SIOCGLIFFLAGS + sysSIOCGLIFMTU = C.SIOCGLIFMTU + sysSIOCGLIFNETMASK = C.SIOCGLIFNETMASK + sysSIOCGLIFMETRIC = C.SIOCGLIFMETRIC + sysSIOCGLIFNUM = C.SIOCGLIFNUM + sysSIOCGLIFINDEX = C.SIOCGLIFINDEX + sysSIOCGLIFSUBNET = C.SIOCGLIFSUBNET + sysSIOCGLIFLNKINFO = C.SIOCGLIFLNKINFO + sysSIOCGLIFCONF = C.SIOCGLIFCONF + sysSIOCGLIFHWADDR = C.SIOCGLIFHWADDR +) + +const ( + sysIFF_UP = C.IFF_UP + sysIFF_BROADCAST = C.IFF_BROADCAST + sysIFF_DEBUG = C.IFF_DEBUG + sysIFF_LOOPBACK = C.IFF_LOOPBACK + sysIFF_POINTOPOINT = C.IFF_POINTOPOINT + sysIFF_NOTRAILERS = C.IFF_NOTRAILERS + sysIFF_RUNNING = C.IFF_RUNNING + sysIFF_NOARP = C.IFF_NOARP + sysIFF_PROMISC = C.IFF_PROMISC + sysIFF_ALLMULTI = C.IFF_ALLMULTI + sysIFF_INTELLIGENT = C.IFF_INTELLIGENT + sysIFF_MULTICAST = C.IFF_MULTICAST + sysIFF_MULTI_BCAST = C.IFF_MULTI_BCAST + sysIFF_UNNUMBERED = C.IFF_UNNUMBERED + sysIFF_PRIVATE = C.IFF_PRIVATE +) + +const ( + sizeofLifnum = C.sizeof_struct_lifnum + sizeofLifreq = C.sizeof_struct_lifreq + sizeofLifconf = C.sizeof_struct_lifconf + sizeofLifIfinfoReq = C.sizeof_struct_lif_ifinfo_req +) + +type lifnum C.struct_lifnum + +type lifreq C.struct_lifreq + +type lifconf C.struct_lifconf + +type lifIfinfoReq C.struct_lif_ifinfo_req + +const ( + sysIFT_IPV4 = C.IFT_IPV4 + 
sysIFT_IPV6 = C.IFT_IPV6 + sysIFT_6TO4 = C.IFT_6TO4 +) diff --git a/fn/vendor/golang.org/x/net/lif/lif.go b/fn/vendor/golang.org/x/net/lif/lif.go new file mode 100644 index 000000000..6e81f81f1 --- /dev/null +++ b/fn/vendor/golang.org/x/net/lif/lif.go @@ -0,0 +1,43 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +// Package lif provides basic functions for the manipulation of +// logical network interfaces and interface addresses on Solaris. +// +// The package supports Solaris 11 or above. +package lif + +import "syscall" + +type endpoint struct { + af int + s uintptr +} + +func (ep *endpoint) close() error { + return syscall.Close(int(ep.s)) +} + +func newEndpoints(af int) ([]endpoint, error) { + var lastErr error + var eps []endpoint + afs := []int{sysAF_INET, sysAF_INET6} + if af != sysAF_UNSPEC { + afs = []int{af} + } + for _, af := range afs { + s, err := syscall.Socket(af, sysSOCK_DGRAM, 0) + if err != nil { + lastErr = err + continue + } + eps = append(eps, endpoint{af: af, s: uintptr(s)}) + } + if len(eps) == 0 { + return nil, lastErr + } + return eps, nil +} diff --git a/fn/vendor/golang.org/x/net/lif/link.go b/fn/vendor/golang.org/x/net/lif/link.go new file mode 100644 index 000000000..913a53e11 --- /dev/null +++ b/fn/vendor/golang.org/x/net/lif/link.go @@ -0,0 +1,126 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +import "unsafe" + +// A Link represents logical data link information. +// +// It also represents base information for logical network interface. +// On Solaris, each logical network interface represents network layer +// adjacency information and the interface has a only single network +// address or address pair for tunneling. 
It's usual that multiple +// logical network interfaces share the same logical data link. +type Link struct { + Name string // name, equivalent to IP interface name + Index int // index, equivalent to IP interface index + Type int // type + Flags int // flags + MTU int // maximum transmission unit, basically link MTU but may differ between IP address families + Addr []byte // address +} + +func (ll *Link) fetch(s uintptr) { + var lifr lifreq + for i := 0; i < len(ll.Name); i++ { + lifr.Name[i] = int8(ll.Name[i]) + } + ioc := int64(sysSIOCGLIFINDEX) + if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil { + ll.Index = int(nativeEndian.Uint32(lifr.Lifru[:4])) + } + ioc = int64(sysSIOCGLIFFLAGS) + if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil { + ll.Flags = int(nativeEndian.Uint64(lifr.Lifru[:8])) + } + ioc = int64(sysSIOCGLIFMTU) + if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil { + ll.MTU = int(nativeEndian.Uint32(lifr.Lifru[:4])) + } + switch ll.Type { + case sysIFT_IPV4, sysIFT_IPV6, sysIFT_6TO4: + default: + ioc = int64(sysSIOCGLIFHWADDR) + if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil { + ll.Addr, _ = parseLinkAddr(lifr.Lifru[4:]) + } + } +} + +// Links returns a list of logical data links. +// +// The provided af must be an address family and name must be a data +// link name. The zero value of af or name means a wildcard. 
+func Links(af int, name string) ([]Link, error) { + eps, err := newEndpoints(af) + if len(eps) == 0 { + return nil, err + } + defer func() { + for _, ep := range eps { + ep.close() + } + }() + return links(eps, name) +} + +func links(eps []endpoint, name string) ([]Link, error) { + var lls []Link + lifn := lifnum{Flags: sysLIFC_NOXMIT | sysLIFC_TEMPORARY | sysLIFC_ALLZONES | sysLIFC_UNDER_IPMP} + lifc := lifconf{Flags: sysLIFC_NOXMIT | sysLIFC_TEMPORARY | sysLIFC_ALLZONES | sysLIFC_UNDER_IPMP} + for _, ep := range eps { + lifn.Family = uint16(ep.af) + ioc := int64(sysSIOCGLIFNUM) + if err := ioctl(ep.s, uintptr(ioc), unsafe.Pointer(&lifn)); err != nil { + continue + } + if lifn.Count == 0 { + continue + } + b := make([]byte, lifn.Count*sizeofLifreq) + lifc.Family = uint16(ep.af) + lifc.Len = lifn.Count * sizeofLifreq + if len(lifc.Lifcu) == 8 { + nativeEndian.PutUint64(lifc.Lifcu[:], uint64(uintptr(unsafe.Pointer(&b[0])))) + } else { + nativeEndian.PutUint32(lifc.Lifcu[:], uint32(uintptr(unsafe.Pointer(&b[0])))) + } + ioc = int64(sysSIOCGLIFCONF) + if err := ioctl(ep.s, uintptr(ioc), unsafe.Pointer(&lifc)); err != nil { + continue + } + nb := make([]byte, 32) // see LIFNAMSIZ in net/if.h + for i := 0; i < int(lifn.Count); i++ { + lifr := (*lifreq)(unsafe.Pointer(&b[i*sizeofLifreq])) + for i := 0; i < 32; i++ { + if lifr.Name[i] == 0 { + nb = nb[:i] + break + } + nb[i] = byte(lifr.Name[i]) + } + llname := string(nb) + nb = nb[:32] + if isDupLink(lls, llname) || name != "" && name != llname { + continue + } + ll := Link{Name: llname, Type: int(lifr.Type)} + ll.fetch(ep.s) + lls = append(lls, ll) + } + } + return lls, nil +} + +func isDupLink(lls []Link, name string) bool { + for _, ll := range lls { + if ll.Name == name { + return true + } + } + return false +} diff --git a/fn/vendor/golang.org/x/net/lif/link_test.go b/fn/vendor/golang.org/x/net/lif/link_test.go new file mode 100644 index 000000000..0cb9b95c6 --- /dev/null +++ 
b/fn/vendor/golang.org/x/net/lif/link_test.go @@ -0,0 +1,63 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +import ( + "fmt" + "testing" +) + +func (ll *Link) String() string { + return fmt.Sprintf("name=%s index=%d type=%d flags=%#x mtu=%d addr=%v", ll.Name, ll.Index, ll.Type, ll.Flags, ll.MTU, llAddr(ll.Addr)) +} + +type linkPack struct { + af int + lls []Link +} + +func linkPacks() ([]linkPack, error) { + var lastErr error + var lps []linkPack + for _, af := range [...]int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + lls, err := Links(af, "") + if err != nil { + lastErr = err + continue + } + lps = append(lps, linkPack{af: af, lls: lls}) + } + return lps, lastErr +} + +func TestLinks(t *testing.T) { + lps, err := linkPacks() + if len(lps) == 0 && err != nil { + t.Fatal(err) + } + for _, lp := range lps { + n := 0 + for _, sll := range lp.lls { + lls, err := Links(lp.af, sll.Name) + if err != nil { + t.Fatal(lp.af, sll.Name, err) + } + for _, ll := range lls { + if ll.Name != sll.Name || ll.Index != sll.Index { + t.Errorf("af=%s got %v; want %v", addrFamily(lp.af), &ll, &sll) + continue + } + t.Logf("af=%s name=%s %v", addrFamily(lp.af), sll.Name, &ll) + n++ + } + } + if n != len(lp.lls) { + t.Errorf("af=%s got %d; want %d", addrFamily(lp.af), n, len(lp.lls)) + continue + } + } +} diff --git a/fn/vendor/golang.org/x/net/lif/sys.go b/fn/vendor/golang.org/x/net/lif/sys.go new file mode 100644 index 000000000..c896041b7 --- /dev/null +++ b/fn/vendor/golang.org/x/net/lif/sys.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build solaris + +package lif + +import "unsafe" + +var nativeEndian binaryByteOrder + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + nativeEndian = littleEndian + } else { + nativeEndian = bigEndian + } +} diff --git a/fn/vendor/golang.org/x/net/lif/sys_solaris_amd64.s b/fn/vendor/golang.org/x/net/lif/sys_solaris_amd64.s new file mode 100644 index 000000000..39d76af79 --- /dev/null +++ b/fn/vendor/golang.org/x/net/lif/sys_solaris_amd64.s @@ -0,0 +1,8 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·sysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·sysvicall6(SB) diff --git a/fn/vendor/golang.org/x/net/lif/syscall.go b/fn/vendor/golang.org/x/net/lif/syscall.go new file mode 100644 index 000000000..aadab2e14 --- /dev/null +++ b/fn/vendor/golang.org/x/net/lif/syscall.go @@ -0,0 +1,28 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build solaris + +package lif + +import ( + "syscall" + "unsafe" +) + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +//go:linkname procIoctl libc_ioctl + +var procIoctl uintptr + +func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) + +func ioctl(s, ioc uintptr, arg unsafe.Pointer) error { + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procIoctl)), 3, s, ioc, uintptr(arg), 0, 0, 0) + if errno != 0 { + return error(errno) + } + return nil +} diff --git a/fn/vendor/golang.org/x/net/lif/zsys_solaris_amd64.go b/fn/vendor/golang.org/x/net/lif/zsys_solaris_amd64.go new file mode 100644 index 000000000..b5e999bec --- /dev/null +++ b/fn/vendor/golang.org/x/net/lif/zsys_solaris_amd64.go @@ -0,0 +1,103 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_solaris.go + +package lif + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1a + + sysSOCK_DGRAM = 0x1 +) + +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +const ( + sysLIFC_NOXMIT = 0x1 + sysLIFC_EXTERNAL_SOURCE = 0x2 + sysLIFC_TEMPORARY = 0x4 + sysLIFC_ALLZONES = 0x8 + sysLIFC_UNDER_IPMP = 0x10 + sysLIFC_ENABLED = 0x20 + + sysSIOCGLIFADDR = -0x3f87968f + sysSIOCGLIFDSTADDR = -0x3f87968d + sysSIOCGLIFFLAGS = -0x3f87968b + sysSIOCGLIFMTU = -0x3f879686 + sysSIOCGLIFNETMASK = -0x3f879683 + sysSIOCGLIFMETRIC = -0x3f879681 + sysSIOCGLIFNUM = -0x3ff3967e + sysSIOCGLIFINDEX = -0x3f87967b + sysSIOCGLIFSUBNET = -0x3f879676 + sysSIOCGLIFLNKINFO = -0x3f879674 + sysSIOCGLIFCONF = -0x3fef965b + sysSIOCGLIFHWADDR = -0x3f879640 +) + +const ( + sysIFF_UP = 0x1 + sysIFF_BROADCAST = 0x2 + sysIFF_DEBUG = 0x4 + sysIFF_LOOPBACK = 0x8 + sysIFF_POINTOPOINT = 0x10 + sysIFF_NOTRAILERS = 0x20 + sysIFF_RUNNING = 0x40 + sysIFF_NOARP = 0x80 + sysIFF_PROMISC = 0x100 + sysIFF_ALLMULTI = 0x200 + sysIFF_INTELLIGENT = 0x400 + sysIFF_MULTICAST = 0x800 + sysIFF_MULTI_BCAST = 0x1000 + sysIFF_UNNUMBERED 
= 0x2000 + sysIFF_PRIVATE = 0x8000 +) + +const ( + sizeofLifnum = 0xc + sizeofLifreq = 0x178 + sizeofLifconf = 0x18 + sizeofLifIfinfoReq = 0x10 +) + +type lifnum struct { + Family uint16 + Pad_cgo_0 [2]byte + Flags int32 + Count int32 +} + +type lifreq struct { + Name [32]int8 + Lifru1 [4]byte + Type uint32 + Lifru [336]byte +} + +type lifconf struct { + Family uint16 + Pad_cgo_0 [2]byte + Flags int32 + Len int32 + Pad_cgo_1 [4]byte + Lifcu [8]byte +} + +type lifIfinfoReq struct { + Maxhops uint8 + Pad_cgo_0 [3]byte + Reachtime uint32 + Reachretrans uint32 + Maxmtu uint32 +} + +const ( + sysIFT_IPV4 = 0xc8 + sysIFT_IPV6 = 0xc9 + sysIFT_6TO4 = 0xca +) diff --git a/fn/vendor/golang.org/x/net/nettest/conntest.go b/fn/vendor/golang.org/x/net/nettest/conntest.go new file mode 100644 index 000000000..5bd3a8c68 --- /dev/null +++ b/fn/vendor/golang.org/x/net/nettest/conntest.go @@ -0,0 +1,456 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package nettest provides utilities for network testing. +package nettest + +import ( + "bytes" + "encoding/binary" + "io" + "io/ioutil" + "math/rand" + "net" + "runtime" + "sync" + "testing" + "time" +) + +var ( + aLongTimeAgo = time.Unix(233431200, 0) + neverTimeout = time.Time{} +) + +// MakePipe creates a connection between two endpoints and returns the pair +// as c1 and c2, such that anything written to c1 is read by c2 and vice-versa. +// The stop function closes all resources, including c1, c2, and the underlying +// net.Listener (if there is one), and should not be nil. +type MakePipe func() (c1, c2 net.Conn, stop func(), err error) + +// TestConn tests that a net.Conn implementation properly satisfies the interface. +// The tests should not produce any false positives, but may experience +// false negatives. Thus, some issues may only be detected when the test is +// run multiple times. 
For maximal effectiveness, run the tests under the +// race detector. +func TestConn(t *testing.T, mp MakePipe) { + testConn(t, mp) +} + +type connTester func(t *testing.T, c1, c2 net.Conn) + +func timeoutWrapper(t *testing.T, mp MakePipe, f connTester) { + c1, c2, stop, err := mp() + if err != nil { + t.Fatalf("unable to make pipe: %v", err) + } + var once sync.Once + defer once.Do(func() { stop() }) + timer := time.AfterFunc(time.Minute, func() { + once.Do(func() { + t.Error("test timed out; terminating pipe") + stop() + }) + }) + defer timer.Stop() + f(t, c1, c2) +} + +// testBasicIO tests that the data sent on c1 is properly received on c2. +func testBasicIO(t *testing.T, c1, c2 net.Conn) { + want := make([]byte, 1<<20) + rand.New(rand.NewSource(0)).Read(want) + + dataCh := make(chan []byte) + go func() { + rd := bytes.NewReader(want) + if err := chunkedCopy(c1, rd); err != nil { + t.Errorf("unexpected c1.Write error: %v", err) + } + if err := c1.Close(); err != nil { + t.Errorf("unexpected c1.Close error: %v", err) + } + }() + + go func() { + wr := new(bytes.Buffer) + if err := chunkedCopy(wr, c2); err != nil { + t.Errorf("unexpected c2.Read error: %v", err) + } + if err := c2.Close(); err != nil { + t.Errorf("unexpected c2.Close error: %v", err) + } + dataCh <- wr.Bytes() + }() + + if got := <-dataCh; !bytes.Equal(got, want) { + t.Errorf("transmitted data differs") + } +} + +// testPingPong tests that the two endpoints can synchronously send data to +// each other in a typical request-response pattern. 
+func testPingPong(t *testing.T, c1, c2 net.Conn) { + var wg sync.WaitGroup + defer wg.Wait() + + pingPonger := func(c net.Conn) { + defer wg.Done() + buf := make([]byte, 8) + var prev uint64 + for { + if _, err := io.ReadFull(c, buf); err != nil { + if err == io.EOF { + break + } + t.Errorf("unexpected Read error: %v", err) + } + + v := binary.LittleEndian.Uint64(buf) + binary.LittleEndian.PutUint64(buf, v+1) + if prev != 0 && prev+2 != v { + t.Errorf("mismatching value: got %d, want %d", v, prev+2) + } + prev = v + if v == 1000 { + break + } + + if _, err := c.Write(buf); err != nil { + t.Errorf("unexpected Write error: %v", err) + break + } + } + if err := c.Close(); err != nil { + t.Errorf("unexpected Close error: %v", err) + } + } + + wg.Add(2) + go pingPonger(c1) + go pingPonger(c2) + + // Start off the chain reaction. + if _, err := c1.Write(make([]byte, 8)); err != nil { + t.Errorf("unexpected c1.Write error: %v", err) + } +} + +// testRacyRead tests that it is safe to mutate the input Read buffer +// immediately after cancelation has occurred. +func testRacyRead(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, rand.New(rand.NewSource(0))) + + var wg sync.WaitGroup + defer wg.Wait() + + c1.SetReadDeadline(time.Now().Add(time.Millisecond)) + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + b1 := make([]byte, 1024) + b2 := make([]byte, 1024) + for j := 0; j < 100; j++ { + _, err := c1.Read(b1) + copy(b1, b2) // Mutate b1 to trigger potential race + if err != nil { + checkForTimeoutError(t, err) + c1.SetReadDeadline(time.Now().Add(time.Millisecond)) + } + } + }() + } +} + +// testRacyWrite tests that it is safe to mutate the input Write buffer +// immediately after cancelation has occurred. 
+func testRacyWrite(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(ioutil.Discard, c2) + + var wg sync.WaitGroup + defer wg.Wait() + + c1.SetWriteDeadline(time.Now().Add(time.Millisecond)) + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + b1 := make([]byte, 1024) + b2 := make([]byte, 1024) + for j := 0; j < 100; j++ { + _, err := c1.Write(b1) + copy(b1, b2) // Mutate b1 to trigger potential race + if err != nil { + checkForTimeoutError(t, err) + c1.SetWriteDeadline(time.Now().Add(time.Millisecond)) + } + } + }() + } +} + +// testReadTimeout tests that Read timeouts do not affect Write. +func testReadTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(ioutil.Discard, c2) + + c1.SetReadDeadline(aLongTimeAgo) + _, err := c1.Read(make([]byte, 1024)) + checkForTimeoutError(t, err) + if _, err := c1.Write(make([]byte, 1024)); err != nil { + t.Errorf("unexpected Write error: %v", err) + } +} + +// testWriteTimeout tests that Write timeouts do not affect Read. +func testWriteTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, rand.New(rand.NewSource(0))) + + c1.SetWriteDeadline(aLongTimeAgo) + _, err := c1.Write(make([]byte, 1024)) + checkForTimeoutError(t, err) + if _, err := c1.Read(make([]byte, 1024)); err != nil { + t.Errorf("unexpected Read error: %v", err) + } +} + +// testPastTimeout tests that a deadline set in the past immediately times out +// Read and Write requests. 
+func testPastTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, c2) + + testRoundtrip(t, c1) + + c1.SetDeadline(aLongTimeAgo) + n, err := c1.Write(make([]byte, 1024)) + if n != 0 { + t.Errorf("unexpected Write count: got %d, want 0", n) + } + checkForTimeoutError(t, err) + n, err = c1.Read(make([]byte, 1024)) + if n != 0 { + t.Errorf("unexpected Read count: got %d, want 0", n) + } + checkForTimeoutError(t, err) + + testRoundtrip(t, c1) +} + +// testPresentTimeout tests that a deadline set while there are pending +// Read and Write operations immediately times out those operations. +func testPresentTimeout(t *testing.T, c1, c2 net.Conn) { + var wg sync.WaitGroup + defer wg.Wait() + wg.Add(3) + + deadlineSet := make(chan bool, 1) + go func() { + defer wg.Done() + time.Sleep(100 * time.Millisecond) + deadlineSet <- true + c1.SetReadDeadline(aLongTimeAgo) + c1.SetWriteDeadline(aLongTimeAgo) + }() + go func() { + defer wg.Done() + n, err := c1.Read(make([]byte, 1024)) + if n != 0 { + t.Errorf("unexpected Read count: got %d, want 0", n) + } + checkForTimeoutError(t, err) + if len(deadlineSet) == 0 { + t.Error("Read timed out before deadline is set") + } + }() + go func() { + defer wg.Done() + var err error + for err == nil { + _, err = c1.Write(make([]byte, 1024)) + } + checkForTimeoutError(t, err) + if len(deadlineSet) == 0 { + t.Error("Write timed out before deadline is set") + } + }() +} + +// testFutureTimeout tests that a future deadline will eventually time out +// Read and Write operations. 
+func testFutureTimeout(t *testing.T, c1, c2 net.Conn) { + var wg sync.WaitGroup + wg.Add(2) + + c1.SetDeadline(time.Now().Add(100 * time.Millisecond)) + go func() { + defer wg.Done() + _, err := c1.Read(make([]byte, 1024)) + checkForTimeoutError(t, err) + }() + go func() { + defer wg.Done() + var err error + for err == nil { + _, err = c1.Write(make([]byte, 1024)) + } + checkForTimeoutError(t, err) + }() + wg.Wait() + + go chunkedCopy(c2, c2) + resyncConn(t, c1) + testRoundtrip(t, c1) +} + +// testCloseTimeout tests that calling Close immediately times out pending +// Read and Write operations. +func testCloseTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, c2) + + var wg sync.WaitGroup + defer wg.Wait() + wg.Add(3) + + // Test for cancelation upon connection closure. + c1.SetDeadline(neverTimeout) + go func() { + defer wg.Done() + time.Sleep(100 * time.Millisecond) + c1.Close() + }() + go func() { + defer wg.Done() + var err error + buf := make([]byte, 1024) + for err == nil { + _, err = c1.Read(buf) + } + }() + go func() { + defer wg.Done() + var err error + buf := make([]byte, 1024) + for err == nil { + _, err = c1.Write(buf) + } + }() +} + +// testConcurrentMethods tests that the methods of net.Conn can safely +// be called concurrently. +func testConcurrentMethods(t *testing.T, c1, c2 net.Conn) { + if runtime.GOOS == "plan9" { + t.Skip("skipping on plan9; see https://golang.org/issue/20489") + } + go chunkedCopy(c2, c2) + + // The results of the calls may be nonsensical, but this should + // not trigger a race detector warning. 
+ var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(7) + go func() { + defer wg.Done() + c1.Read(make([]byte, 1024)) + }() + go func() { + defer wg.Done() + c1.Write(make([]byte, 1024)) + }() + go func() { + defer wg.Done() + c1.SetDeadline(time.Now().Add(10 * time.Millisecond)) + }() + go func() { + defer wg.Done() + c1.SetReadDeadline(aLongTimeAgo) + }() + go func() { + defer wg.Done() + c1.SetWriteDeadline(aLongTimeAgo) + }() + go func() { + defer wg.Done() + c1.LocalAddr() + }() + go func() { + defer wg.Done() + c1.RemoteAddr() + }() + } + wg.Wait() // At worst, the deadline is set 10ms into the future + + resyncConn(t, c1) + testRoundtrip(t, c1) +} + +// checkForTimeoutError checks that the error satisfies the Error interface +// and that Timeout returns true. +func checkForTimeoutError(t *testing.T, err error) { + if nerr, ok := err.(net.Error); ok { + if !nerr.Timeout() { + t.Errorf("err.Timeout() = false, want true") + } + } else { + t.Errorf("got %T, want net.Error", err) + } +} + +// testRoundtrip writes something into c and reads it back. +// It assumes that everything written into c is echoed back to itself. +func testRoundtrip(t *testing.T, c net.Conn) { + if err := c.SetDeadline(neverTimeout); err != nil { + t.Errorf("roundtrip SetDeadline error: %v", err) + } + + const s = "Hello, world!" + buf := []byte(s) + if _, err := c.Write(buf); err != nil { + t.Errorf("roundtrip Write error: %v", err) + } + if _, err := io.ReadFull(c, buf); err != nil { + t.Errorf("roundtrip Read error: %v", err) + } + if string(buf) != s { + t.Errorf("roundtrip data mismatch: got %q, want %q", buf, s) + } +} + +// resyncConn resynchronizes the connection into a sane state. +// It assumes that everything written into c is echoed back to itself. +// It assumes that 0xff is not currently on the wire or in the read buffer. 
+func resyncConn(t *testing.T, c net.Conn) { + c.SetDeadline(neverTimeout) + errCh := make(chan error) + go func() { + _, err := c.Write([]byte{0xff}) + errCh <- err + }() + buf := make([]byte, 1024) + for { + n, err := c.Read(buf) + if n > 0 && bytes.IndexByte(buf[:n], 0xff) == n-1 { + break + } + if err != nil { + t.Errorf("unexpected Read error: %v", err) + break + } + } + if err := <-errCh; err != nil { + t.Errorf("unexpected Write error: %v", err) + } +} + +// chunkedCopy copies from r to w in fixed-width chunks to avoid +// causing a Write that exceeds the maximum packet size for packet-based +// connections like "unixpacket". +// We assume that the maximum packet size is at least 1024. +func chunkedCopy(w io.Writer, r io.Reader) error { + b := make([]byte, 1024) + _, err := io.CopyBuffer(struct{ io.Writer }{w}, struct{ io.Reader }{r}, b) + return err +} diff --git a/fn/vendor/golang.org/x/net/nettest/conntest_go16.go b/fn/vendor/golang.org/x/net/nettest/conntest_go16.go new file mode 100644 index 000000000..4cbf48e35 --- /dev/null +++ b/fn/vendor/golang.org/x/net/nettest/conntest_go16.go @@ -0,0 +1,24 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package nettest + +import "testing" + +func testConn(t *testing.T, mp MakePipe) { + // Avoid using subtests on Go 1.6 and below. 
+ timeoutWrapper(t, mp, testBasicIO) + timeoutWrapper(t, mp, testPingPong) + timeoutWrapper(t, mp, testRacyRead) + timeoutWrapper(t, mp, testRacyWrite) + timeoutWrapper(t, mp, testReadTimeout) + timeoutWrapper(t, mp, testWriteTimeout) + timeoutWrapper(t, mp, testPastTimeout) + timeoutWrapper(t, mp, testPresentTimeout) + timeoutWrapper(t, mp, testFutureTimeout) + timeoutWrapper(t, mp, testCloseTimeout) + timeoutWrapper(t, mp, testConcurrentMethods) +} diff --git a/fn/vendor/golang.org/x/net/nettest/conntest_go17.go b/fn/vendor/golang.org/x/net/nettest/conntest_go17.go new file mode 100644 index 000000000..fa039f03f --- /dev/null +++ b/fn/vendor/golang.org/x/net/nettest/conntest_go17.go @@ -0,0 +1,24 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package nettest + +import "testing" + +func testConn(t *testing.T, mp MakePipe) { + // Use subtests on Go 1.7 and above since it is better organized. 
+ t.Run("BasicIO", func(t *testing.T) { timeoutWrapper(t, mp, testBasicIO) }) + t.Run("PingPong", func(t *testing.T) { timeoutWrapper(t, mp, testPingPong) }) + t.Run("RacyRead", func(t *testing.T) { timeoutWrapper(t, mp, testRacyRead) }) + t.Run("RacyWrite", func(t *testing.T) { timeoutWrapper(t, mp, testRacyWrite) }) + t.Run("ReadTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testReadTimeout) }) + t.Run("WriteTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testWriteTimeout) }) + t.Run("PastTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testPastTimeout) }) + t.Run("PresentTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testPresentTimeout) }) + t.Run("FutureTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testFutureTimeout) }) + t.Run("CloseTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testCloseTimeout) }) + t.Run("ConcurrentMethods", func(t *testing.T) { timeoutWrapper(t, mp, testConcurrentMethods) }) +} diff --git a/fn/vendor/golang.org/x/net/nettest/conntest_test.go b/fn/vendor/golang.org/x/net/nettest/conntest_test.go new file mode 100644 index 000000000..9f9453fb5 --- /dev/null +++ b/fn/vendor/golang.org/x/net/nettest/conntest_test.go @@ -0,0 +1,76 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.8 + +package nettest + +import ( + "net" + "os" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" +) + +func TestTestConn(t *testing.T) { + tests := []struct{ name, network string }{ + {"TCP", "tcp"}, + {"UnixPipe", "unix"}, + {"UnixPacketPipe", "unixpacket"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if !nettest.TestableNetwork(tt.network) { + t.Skipf("not supported on %s", runtime.GOOS) + } + + mp := func() (c1, c2 net.Conn, stop func(), err error) { + ln, err := nettest.NewLocalListener(tt.network) + if err != nil { + return nil, nil, nil, err + } + + // Start a connection between two endpoints. + var err1, err2 error + done := make(chan bool) + go func() { + c2, err2 = ln.Accept() + close(done) + }() + c1, err1 = net.Dial(ln.Addr().Network(), ln.Addr().String()) + <-done + + stop = func() { + if err1 == nil { + c1.Close() + } + if err2 == nil { + c2.Close() + } + ln.Close() + switch tt.network { + case "unix", "unixpacket": + os.Remove(ln.Addr().String()) + } + } + + switch { + case err1 != nil: + stop() + return nil, nil, nil, err1 + case err2 != nil: + stop() + return nil, nil, nil, err2 + default: + return c1, c2, stop, nil + } + } + + TestConn(t, mp) + }) + } +} diff --git a/fn/vendor/golang.org/x/net/netutil/listen.go b/fn/vendor/golang.org/x/net/netutil/listen.go index b317ba2e6..56f43bf65 100644 --- a/fn/vendor/golang.org/x/net/netutil/listen.go +++ b/fn/vendor/golang.org/x/net/netutil/listen.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
diff --git a/fn/vendor/golang.org/x/net/netutil/listen_test.go b/fn/vendor/golang.org/x/net/netutil/listen_test.go index c1a3d5527..5e07d7bea 100644 --- a/fn/vendor/golang.org/x/net/netutil/listen_test.go +++ b/fn/vendor/golang.org/x/net/netutil/listen_test.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/fn/vendor/golang.org/x/net/proxy/proxy.go b/fn/vendor/golang.org/x/net/proxy/proxy.go index 78a8b7bee..553ead7cf 100644 --- a/fn/vendor/golang.org/x/net/proxy/proxy.go +++ b/fn/vendor/golang.org/x/net/proxy/proxy.go @@ -11,6 +11,7 @@ import ( "net" "net/url" "os" + "sync" ) // A Dialer is a means to establish a connection. @@ -27,7 +28,7 @@ type Auth struct { // FromEnvironment returns the dialer specified by the proxy related variables in // the environment. func FromEnvironment() Dialer { - allProxy := os.Getenv("all_proxy") + allProxy := allProxyEnv.Get() if len(allProxy) == 0 { return Direct } @@ -41,7 +42,7 @@ func FromEnvironment() Dialer { return Direct } - noProxy := os.Getenv("no_proxy") + noProxy := noProxyEnv.Get() if len(noProxy) == 0 { return proxy } @@ -92,3 +93,42 @@ func FromURL(u *url.URL, forward Dialer) (Dialer, error) { return nil, errors.New("proxy: unknown scheme: " + u.Scheme) } + +var ( + allProxyEnv = &envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + noProxyEnv = &envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). 
+// (Borrowed from net/http/transport.go) +type envOnce struct { + names []string + once sync.Once + val string +} + +func (e *envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// reset is used by tests +func (e *envOnce) reset() { + e.once = sync.Once{} + e.val = "" +} diff --git a/fn/vendor/golang.org/x/net/proxy/proxy_test.go b/fn/vendor/golang.org/x/net/proxy/proxy_test.go index c19a5c063..0f31e211c 100644 --- a/fn/vendor/golang.org/x/net/proxy/proxy_test.go +++ b/fn/vendor/golang.org/x/net/proxy/proxy_test.go @@ -5,14 +5,73 @@ package proxy import ( + "bytes" + "fmt" "io" "net" "net/url" + "os" "strconv" + "strings" "sync" "testing" ) +type proxyFromEnvTest struct { + allProxyEnv string + noProxyEnv string + wantTypeOf Dialer +} + +func (t proxyFromEnvTest) String() string { + var buf bytes.Buffer + space := func() { + if buf.Len() > 0 { + buf.WriteByte(' ') + } + } + if t.allProxyEnv != "" { + fmt.Fprintf(&buf, "all_proxy=%q", t.allProxyEnv) + } + if t.noProxyEnv != "" { + space() + fmt.Fprintf(&buf, "no_proxy=%q", t.noProxyEnv) + } + return strings.TrimSpace(buf.String()) +} + +func TestFromEnvironment(t *testing.T) { + ResetProxyEnv() + + type dummyDialer struct { + direct + } + + RegisterDialerType("irc", func(_ *url.URL, _ Dialer) (Dialer, error) { + return dummyDialer{}, nil + }) + + proxyFromEnvTests := []proxyFromEnvTest{ + {allProxyEnv: "127.0.0.1:8080", noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: direct{}}, + {allProxyEnv: "ftp://example.com:8000", noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: direct{}}, + {allProxyEnv: "socks5://example.com:8080", noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: &PerHost{}}, + {allProxyEnv: "irc://example.com:8000", wantTypeOf: dummyDialer{}}, + {noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: direct{}}, + {wantTypeOf: direct{}}, + } + + for _, tt := range proxyFromEnvTests { 
+ os.Setenv("ALL_PROXY", tt.allProxyEnv) + os.Setenv("NO_PROXY", tt.noProxyEnv) + ResetCachedEnvironment() + + d := FromEnvironment() + if got, want := fmt.Sprintf("%T", d), fmt.Sprintf("%T", tt.wantTypeOf); got != want { + t.Errorf("%v: got type = %T, want %T", tt, d, tt.wantTypeOf) + } + } +} + func TestFromURL(t *testing.T) { endSystem, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { @@ -140,3 +199,17 @@ func socks5Gateway(t *testing.T, gateway, endSystem net.Listener, typ byte, wg * return } } + +func ResetProxyEnv() { + for _, env := range []*envOnce{allProxyEnv, noProxyEnv} { + for _, v := range env.names { + os.Setenv(v, "") + } + } + ResetCachedEnvironment() +} + +func ResetCachedEnvironment() { + allProxyEnv.reset() + noProxyEnv.reset() +} diff --git a/fn/vendor/golang.org/x/net/proxy/socks5.go b/fn/vendor/golang.org/x/net/proxy/socks5.go index 9b9628239..973f57f19 100644 --- a/fn/vendor/golang.org/x/net/proxy/socks5.go +++ b/fn/vendor/golang.org/x/net/proxy/socks5.go @@ -72,24 +72,28 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { if err != nil { return nil, err } - closeConn := &conn - defer func() { - if closeConn != nil { - (*closeConn).Close() - } - }() - - host, portStr, err := net.SplitHostPort(addr) - if err != nil { + if err := s.connect(conn, addr); err != nil { + conn.Close() return nil, err } + return conn, nil +} + +// connect takes an existing connection to a socks5 proxy server, +// and commands the server to extend that connection to target, +// which must be a canonical address with a host and port. 
+func (s *socks5) connect(conn net.Conn, target string) error { + host, portStr, err := net.SplitHostPort(target) + if err != nil { + return err + } port, err := strconv.Atoi(portStr) if err != nil { - return nil, errors.New("proxy: failed to parse port number: " + portStr) + return errors.New("proxy: failed to parse port number: " + portStr) } if port < 1 || port > 0xffff { - return nil, errors.New("proxy: port number out of range: " + portStr) + return errors.New("proxy: port number out of range: " + portStr) } // the size here is just an estimate @@ -103,17 +107,17 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { } if _, err := conn.Write(buf); err != nil { - return nil, errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) } if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return nil, errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) } if buf[0] != 5 { - return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) } if buf[1] == 0xff { - return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") } if buf[1] == socks5AuthPassword { @@ -125,15 +129,15 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { buf = append(buf, s.password...) 
if _, err := conn.Write(buf); err != nil { - return nil, errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) } if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return nil, errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) } if buf[1] != 0 { - return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") } } @@ -150,7 +154,7 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { buf = append(buf, ip...) } else { if len(host) > 255 { - return nil, errors.New("proxy: destination hostname too long: " + host) + return errors.New("proxy: destination hostname too long: " + host) } buf = append(buf, socks5Domain) buf = append(buf, byte(len(host))) @@ -159,11 +163,11 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { buf = append(buf, byte(port>>8), byte(port)) if _, err := conn.Write(buf); err != nil { - return nil, errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) } if _, err := io.ReadFull(conn, buf[:4]); err != nil { - return nil, errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) } failure := "unknown error" @@ -172,7 +176,7 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { } if len(failure) > 0 { - return nil, errors.New("proxy: 
SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) } bytesToDiscard := 0 @@ -184,11 +188,11 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { case socks5Domain: _, err := io.ReadFull(conn, buf[:1]) if err != nil { - return nil, errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) } bytesToDiscard = int(buf[0]) default: - return nil, errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) } if cap(buf) < bytesToDiscard { @@ -197,14 +201,13 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) { buf = buf[:bytesToDiscard] } if _, err := io.ReadFull(conn, buf); err != nil { - return nil, errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) } // Also need to discard the port number if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return nil, errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) } - closeConn = nil - return conn, nil + return nil } diff --git a/fn/vendor/golang.org/x/net/publicsuffix/gen.go b/fn/vendor/golang.org/x/net/publicsuffix/gen.go index ee2598c38..a2d499529 100644 --- a/fn/vendor/golang.org/x/net/publicsuffix/gen.go +++ b/fn/vendor/golang.org/x/net/publicsuffix/gen.go @@ -6,17 +6,17 @@ package main -// This program generates table.go and table_test.go. 
-// Invoke as: +// This program generates table.go and table_test.go based on the authoritative +// public suffix list at https://publicsuffix.org/list/effective_tld_names.dat // -// go run gen.go -version "xxx" >table.go -// go run gen.go -version "xxx" -test >table_test.go -// -// The version is derived from information found at +// The version is derived from +// https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat +// and a human-readable form is at // https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat // // To fetch a particular git revision, such as 5c70ccd250, pass // -url "https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat" +// and -version "an explicit version string". import ( "bufio" @@ -25,6 +25,7 @@ import ( "fmt" "go/format" "io" + "io/ioutil" "net/http" "os" "regexp" @@ -35,11 +36,13 @@ import ( ) const ( + // These sum of these four values must be no greater than 32. nodesBitsChildren = 9 nodesBitsICANN = 1 nodesBitsTextOffset = 15 nodesBitsTextLength = 6 + // These sum of these four values must be no greater than 32. childrenBitsWildcard = 1 childrenBitsNodeType = 2 childrenBitsHi = 14 @@ -87,25 +90,30 @@ func nodeTypeStr(n int) string { panic("unreachable") } +const ( + defaultURL = "https://publicsuffix.org/list/effective_tld_names.dat" + gitCommitURL = "https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat" +) + var ( labelEncoding = map[string]uint32{} labelsList = []string{} labelsMap = map[string]bool{} rules = []string{} - // validSuffix is used to check that the entries in the public suffix list - // are in canonical form (after Punycode encoding). Specifically, capital - // letters are not allowed. - validSuffix = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`) + // validSuffixRE is used to check that the entries in the public suffix + // list are in canonical form (after Punycode encoding). 
Specifically, + // capital letters are not allowed. + validSuffixRE = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`) - crush = flag.Bool("crush", true, "make the generated node text as small as possible") - subset = flag.Bool("subset", false, "generate only a subset of the full table, for debugging") - url = flag.String("url", - "https://publicsuffix.org/list/effective_tld_names.dat", - "URL of the publicsuffix.org list. If empty, stdin is read instead") - v = flag.Bool("v", false, "verbose output (to stderr)") - version = flag.String("version", "", "the effective_tld_names.dat version") - test = flag.Bool("test", false, "generate table_test.go") + shaRE = regexp.MustCompile(`"sha":"([^"]+)"`) + dateRE = regexp.MustCompile(`"committer":{[^{]+"date":"([^"]+)"`) + + comments = flag.Bool("comments", false, "generate table.go comments, for debugging") + subset = flag.Bool("subset", false, "generate only a subset of the full table, for debugging") + url = flag.String("url", defaultURL, "URL of the publicsuffix.org list. 
If empty, stdin is read instead") + v = flag.Bool("v", false, "verbose output (to stderr)") + version = flag.String("version", "", "the effective_tld_names.dat version") ) func main() { @@ -124,7 +132,14 @@ func main1() error { return fmt.Errorf("not enough bits to encode the children table") } if *version == "" { - return fmt.Errorf("-version was not specified") + if *url != defaultURL { + return fmt.Errorf("-version was not specified, and the -url is not the default one") + } + sha, date, err := gitCommit() + if err != nil { + return err + } + *version = fmt.Sprintf("publicsuffix.org's public_suffix_list.dat, git revision %s (%s)", sha, date) } var r io.Reader = os.Stdin if *url != "" { @@ -141,7 +156,6 @@ func main1() error { var root node icann := false - buf := new(bytes.Buffer) br := bufio.NewReader(r) for { s, err := br.ReadString('\n') @@ -167,7 +181,7 @@ func main1() error { if err != nil { return err } - if !validSuffix.MatchString(s) { + if !validSuffixRE.MatchString(s) { return fmt.Errorf("bad publicsuffix.org list data: %q", s) } @@ -225,20 +239,50 @@ func main1() error { } sort.Strings(labelsList) - p := printReal - if *test { - p = printTest - } - if err := p(buf, &root); err != nil { + if err := generate(printReal, &root, "table.go"); err != nil { return err } + if err := generate(printTest, &root, "table_test.go"); err != nil { + return err + } + return nil +} +func generate(p func(io.Writer, *node) error, root *node, filename string) error { + buf := new(bytes.Buffer) + if err := p(buf, root); err != nil { + return err + } b, err := format.Source(buf.Bytes()) if err != nil { return err } - _, err = os.Stdout.Write(b) - return err + return ioutil.WriteFile(filename, b, 0644) +} + +func gitCommit() (sha, date string, retErr error) { + res, err := http.Get(gitCommitURL) + if err != nil { + return "", "", err + } + if res.StatusCode != http.StatusOK { + return "", "", fmt.Errorf("bad GET status for %s: %d", gitCommitURL, res.Status) + } + defer 
res.Body.Close() + b, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + if m := shaRE.FindSubmatch(b); m != nil { + sha = string(m[1]) + } + if m := dateRE.FindSubmatch(b); m != nil { + date = string(m[1]) + } + if sha == "" || date == "" { + retErr = fmt.Errorf("could not find commit SHA and date in %s", gitCommitURL) + } + return sha, date, retErr } func printTest(w io.Writer, n *node) error { @@ -289,7 +333,7 @@ const numTLD = %d childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo, nodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children)) - text := makeText() + text := combineText(labelsList) if text == "" { return fmt.Errorf("internal error: makeText returned no text") } @@ -299,8 +343,11 @@ const numTLD = %d return fmt.Errorf("internal error: could not find %q in text %q", label, text) } maxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length) - if offset >= 1<= 1<= 1<= 1<>(childrenBitsLo+childrenBitsHi)) & (1<>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType) != 0 - fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n", - c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType)) + if *comments { + fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n", + c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType)) + } else { + fmt.Fprintf(w, "0x%x,\n", c) + } } fmt.Fprintf(w, "}\n\n") fmt.Fprintf(w, "// max children %d (capacity %d)\n", maxChildren, 1<= 1<= 1<= 1<= 1<= 1< 0 && ss[0] == "" { ss = ss[1:] } + return ss +} - // Join strings where one suffix matches another prefix. - for { - // Find best i, j, k such that ss[i][len-k:] == ss[j][:k], - // maximizing overlap length k. - besti := -1 - bestj := -1 - bestk := 0 +// crush combines a list of strings, taking advantage of overlaps. It returns a +// single string that contains each input string as a substring. 
+func crush(ss []string) string { + maxLabelLen := 0 + for _, s := range ss { + if maxLabelLen < len(s) { + maxLabelLen = len(s) + } + } + + for prefixLen := maxLabelLen; prefixLen > 0; prefixLen-- { + prefixes := makePrefixMap(ss, prefixLen) for i, s := range ss { - if s == "" { + if len(s) <= prefixLen { continue } - for j, t := range ss { - if i == j { - continue - } - for k := bestk + 1; k <= len(s) && k <= len(t); k++ { - if s[len(s)-k:] == t[:k] { - besti = i - bestj = j - bestk = k - } - } - } + mergeLabel(ss, i, prefixLen, prefixes) } - if bestk > 0 { - if *v { - fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d) out of (%4d,%4d): %q and %q\n", - bestk, besti, bestj, len(ss), len(ss), ss[besti], ss[bestj]) - } - ss[besti] += ss[bestj][bestk:] - ss[bestj] = "" - continue - } - break } - text := strings.Join(ss, "") - if *v { - fmt.Fprintf(os.Stderr, "crushed %d bytes to become %d bytes\n", beforeLength, len(text)) - } - return text + return strings.Join(ss, "") +} + +// mergeLabel merges the label at ss[i] with the first available matching label +// in prefixMap, where the last "prefixLen" characters in ss[i] match the first +// "prefixLen" characters in the matching label. +// It will merge ss[i] repeatedly until no more matches are available. +// All matching labels merged into ss[i] are replaced by "". +func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) { + s := ss[i] + suffix := s[len(s)-prefixLen:] + for _, j := range prefixes[suffix] { + // Empty strings mean "already used." Also avoid merging with self. + if ss[j] == "" || i == j { + continue + } + if *v { + fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d): %q and %q share %q\n", + prefixLen, i, j, ss[i], ss[j], suffix) + } + ss[i] += ss[j][prefixLen:] + ss[j] = "" + // ss[i] has a new suffix, so merge again if possible. + // Note: we only have to merge again at the same prefix length. Shorter + // prefix lengths will be handled in the next iteration of crush's for loop. 
+ // Can there be matches for longer prefix lengths, introduced by the merge? + // I believe that any such matches would by necessity have been eliminated + // during substring removal or merged at a higher prefix length. For + // instance, in crush("abc", "cde", "bcdef"), combining "abc" and "cde" + // would yield "abcde", which could be merged with "bcdef." However, in + // practice "cde" would already have been elimintated by removeSubstrings. + mergeLabel(ss, i, prefixLen, prefixes) + return + } +} + +// prefixMap maps from a prefix to a list of strings containing that prefix. The +// list of strings is represented as indexes into a slice of strings stored +// elsewhere. +type prefixMap map[string][]int + +// makePrefixMap constructs a prefixMap from a slice of strings. +func makePrefixMap(ss []string, prefixLen int) prefixMap { + prefixes := make(prefixMap) + for i, s := range ss { + // We use < rather than <= because if a label matches on a prefix equal to + // its full length, that's actually a substring match handled by + // removeSubstrings. + if prefixLen < len(s) { + prefix := s[:prefixLen] + prefixes[prefix] = append(prefixes[prefix], i) + } + } + + return prefixes } diff --git a/fn/vendor/golang.org/x/net/publicsuffix/list.go b/fn/vendor/golang.org/x/net/publicsuffix/list.go index 9419ca992..8bbf3bcd7 100644 --- a/fn/vendor/golang.org/x/net/publicsuffix/list.go +++ b/fn/vendor/golang.org/x/net/publicsuffix/list.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:generate go run gen.go + // Package publicsuffix provides a public suffix list based on data from // http://publicsuffix.org/. A public suffix is one under which Internet users // can directly register names. 
diff --git a/fn/vendor/golang.org/x/net/publicsuffix/list_test.go b/fn/vendor/golang.org/x/net/publicsuffix/list_test.go index a08e64eaf..42d79cc43 100644 --- a/fn/vendor/golang.org/x/net/publicsuffix/list_test.go +++ b/fn/vendor/golang.org/x/net/publicsuffix/list_test.go @@ -216,13 +216,13 @@ var publicSuffixTestCases = []struct { {"aaa.xn--p1ai", "xn--p1ai"}, {"www.xxx.yyy.xn--p1ai", "xn--p1ai"}, - // The .zw rules are: - // *.zw - {"zw", "zw"}, - {"www.zw", "www.zw"}, - {"zzz.zw", "zzz.zw"}, - {"www.zzz.zw", "zzz.zw"}, - {"www.xxx.yyy.zzz.zw", "zzz.zw"}, + // The .bd rules are: + // *.bd + {"bd", "bd"}, + {"www.bd", "www.bd"}, + {"zzz.bd", "zzz.bd"}, + {"www.zzz.bd", "zzz.bd"}, + {"www.xxx.yyy.zzz.bd", "zzz.bd"}, // There are no .nosuchtld rules. {"nosuchtld", "nosuchtld"}, diff --git a/fn/vendor/golang.org/x/net/publicsuffix/table.go b/fn/vendor/golang.org/x/net/publicsuffix/table.go index 46a4ddb8a..50f070a92 100644 --- a/fn/vendor/golang.org/x/net/publicsuffix/table.go +++ b/fn/vendor/golang.org/x/net/publicsuffix/table.go @@ -2,7 +2,7 @@ package publicsuffix -const version = "publicsuffix.org's public_suffix_list.dat, git revision 77cb90d (2016-01-15)" +const version = "publicsuffix.org's public_suffix_list.dat, git revision f47d806df99585862c8426c3e064a50eb5a278f5 (2017-06-14T11:49:01Z)" const ( nodesBitsChildren = 9 @@ -23,438 +23,461 @@ const ( ) // numTLD is the number of top level domains. -const numTLD = 1543 +const numTLD = 1549 // Text is the combined text of all labels. 
-const text = "bieszczadygeyachimataipeigersundupontarioddabievatmallorcadaques" + - "anjotateshinanomachintaijinfinitinfoggiabifukagawalmartateyamabi" + - "horologyusuisservicesannanikonantanangerbikedagestangebilbaogaki" + - "evenesannohelsinkitahiroshimarshallstatebankasukabedzin-the-band" + - "aioirasebastopologyuudmurtiabillustrationinohelplfinancialinzain" + - "uyamanouchikuhokuryugasakitashiobarabiomutashinainvestmentsanoka" + - "sumigaurawa-mazowszextraspace-to-rentalstomakomaibarabirdartcent" + - "erprisesakikonaircraftraeumtgeradealstahaugesundurbanamexeterbir" + - "kenesoddtangenovaravennagatorogersvpalanaklodzkodairabirthplaceb" + - "jarkoyuzawabjerkreimmobilieninomiyakonojoshkar-olayangroupaleost" + - "rowiecartoonartdecoffeedbackasuyakutiabjugnirasakis-a-candidateb" + - "lockbusternidurhamburgliwicebloombergbauernrtatsunostrowwlkpmglo" + - "balashovhachinoheguris-a-catererbluedatinglobodoes-itveronaharim" + - "alvikaszubyuzhno-sakhalinskatowicebmoattachmentsantabarbarabmsan" + - "tacruzsantafedexhibitionishiazais-a-celticsfanishigotpantheonish" + - "iharabmwegroweiboltattoolsztynsettlersanukis-a-chefarsundvrdnsao" + - "tomeldalipetskatsushikabeeldengeluidwglogoweirbnpparibaselburglo" + - "ppenzaogashimadachicagoboatsapodhalewismillerbolzanore-og-uvdali" + - "vornobomloansapporobondyndns-homednsaratovalleaostavernishiizuna" + - "zukis-a-conservativefsncfailomzansimagicasadelamonedavvesiidazai" + - "fudaigodoesntexistanbullensakerbonnishikatakazakis-a-cpadoval-da" + - "ostavalleybookingminakamichigangwonishikatsuragivestbytomaritime" + - "keepingmodenakatombetsumidatlanticaseihichisobetsuitairabootsard" + - "egnamsskoganeis-a-cubicle-slavellinowtvalled-aostavropolicebosch" + - "aefflerdalorenskogmxboxfinitybostikatsuyamasfjordenishikawazukan" + - "azawabostonakijinsekikogentingretakamoriokamchatkameokameyamashi" + - "natsukigatakanabeatsardiniabotanicalgardenishimerabotanicgardeni" + - "shinomiyashironobotanyboutiquebecngrimstadyndns-ip6bozentsujiieb" + 
- "radescorporationishinoomotegotsukisosakitagatakamatsukawabrandyw" + - "inevalleybrasiljan-mayenishinoshimatta-varjjataxihuanishiokoppeg" + - "ardyndns-mailotenkawabresciabrindisibenikebristolgalsacebritishc" + - "olumbialowiezagannakadomari-elasticbeanstalkaufenishitosashimizu" + - "naminamiashigarabroadcastlebtimnetzgorabroadwaybroke-itgorybroke" + - "rrypropertiesarlottebronnoysundyndns-office-on-the-webcambridges" + - "tonewspaperbrothermesaverdefensejnybrumunddalottokigawabrunelbla" + - "gdenesnaaseralingenkainanaejrietisalatinabenogatachikawakayamaga" + - "dancebetsukubabia-goracleaningatlantagajobojis-a-democratjeldsun" + - "dyndns-picsarpsborgripebrusselsarufutsunomiyawakasaikaitakoelnis" + - "hiwakis-a-designerbruxellesasayamabryanskjervoyagebryneustarhuba" + - "latinordre-landiscountyumenaval-d-aosta-valleyokozehimejibestadi" + - "scoveryggeelvinckarlsoyomitanobninskarmoyonabaruconnectarnobrzeg" + - "jovikarpaczeladz-1buskerudinewhampshirecipesaro-urbino-pesarourb" + - "inopesaromaniwakuratelekommunikationissandoybuzenissayokoshibahi" + - "kariwanumataketomisatokuyamatteledatabaseballooningriwataraidynd" + - "ns-remotegildeskalmykiabuzzgorzeleccollegersundyndns-serverbania" + - "bwfashionissedalouvrepbodyndns-blogdnsasebofagebzhitomirkutsklep" + - "palermomasvuotnakatsugawacloudfunctionscholarshipschooluroycntkm" + - "axxn--1ck2e1balestrandabergamoarekembroideryonagoyastronomykolai" + - "vanovosibirskiptveterinairebungoonomichinomiyakepnordkappgjemnes" + - "3-eu-west-1colognewmexicoldwarmiamiastapleschulezajskddielddanuo" + - "rrikuzentakatajirissagaeroclubmedecincinnationwidealerimo-i-rana" + - "dexchangeiseiyoichiropracticbcn-north-1colonialwilliamsburgujols" + - "tercoloradoplateaudiocolumbusheycommunitysneschwarzgwangjuifmina" + - "mibosogndalutskfhskhabarovskhakassiacomobaracomparemarkerryhotel" + - "schweizgradcompute-1computerhistoryofscience-fictioncomsecuritys" + - "vardoharuhrcondoshichinohedmarkhangelskypescaravantaaconferencec" + - 
"onstructionconsuladollsciencecentersciencehistoryconsultanthropo" + - "logyconsultingvolluxembourgulencontactmpamperedchefastlycontempo" + - "raryarteducationalchikugojomedicaltanissettaiwanairguardcontract" + - "orskenconventureshinodesashibetsuikimobetsuliguriacookingchannel" + - "veruminamidaitomangotembaixadacoolkuszippodlasiellakasamatsudoos" + - "andiegokaseljordcoopocznorthwesternmutualuxurycopenhagencycloped" + - "icatholicasertaishinomakikuchikuseikarugapartmentsaskatchewanggo" + - "uvicenzacorsicagliaridagawarszawashingtondclkharkivalledaostakko" + - "fueluzerncorvettemasekharkovallee-aosteroycosenzamamibuilderscie" + - "ntistor-elvdalvivano-frankivskhersoncostumedio-campidano-medioca" + - "mpidanomediocouncilcouponscjohnsoncoursescrapper-sitecq-acranbro" + - "okuwanalyticscrappingunmarriottoyokawacreditcardcreditunioncremo" + - "nashorokanaiecrewiiheyaizuwakamatsubushikusakadogawacricketrzync" + - "rimeacrotonewportlligatewaycrowncrsettsurfauskedsmokorsetagayase" + - "lls-for-lessevastopolecruisesevenassisicilycuisinellajollamerica" + - "nexpressexyzjcbnlculturalcentertainmentoyonakagyokutoshimacuneoc" + - "upcakecxn--1ctwolominamatamayukis-a-geekhmelnitskiyamashikecymru" + - "ovatoyonezawacyouthdfcbankhmelnytskyivalleeaosteigenfilminamifur" + - "anofinalfinancefineartshangrilangevagrarboretumbriafinlandfinnoy" + - "firebaseappanamafirenzefirestonextdirectoryfirmdalegolfedjejuego" + - "shikiminokamoenairlinebraskaunbieidsvollfishingonohejis-a-greenf" + - "itjarqhachiojiyahikobeautydalfitnessettlementoyookarasjohkaminoy" + - "amatsuris-a-gurulsandvikcoromantovalle-daostavangerfjalerflickra" + - "gerotikaluganskhvanylveniceflightsharis-a-hard-workerflirumansio" + - "nsharpanasonicdn77-sslattumetlifeinsurancefloguchikuzenfloraflor" + - "encefloridafloristanohatakaharussiafloromskogxn--1lqs03nflowersh" + - "awaiijimarylandflsmidthruhereggiocalabriaflynnhubalsanagochihaya" + - "akasakawaharaumakeupowiathletajimabariakemergencyberlevagangavii" + - 
"kanonjibigawaugustowadaegubs3-external-1fndfolldalfoodnetworkang" + - "erfor-better-thandafor-ourfor-somedizinhistorischeshellaspeziafo" + - "r-theaterforexrothachirogatakanezawaforgotdnshimokawaforli-cesen" + - "a-forlicesenaforlikes-piedmontblancomeereshimokitayamaforsaleika" + - "ngerforsandasuolodingenfortmissoulan-udell-ogliastrakhanawawilli" + - "amhillfortworthadanotogawaforuminamiiselectoyosatotalfosneshimon" + - "itayanagivingzlgfotoyotaris-a-hunterfoxn--1lqs71dfreiburgfreight" + - "cmwinbalsfjordishakotankaruizawaukraanghke164freseniusdecorative" + - "artshimonosekikawafribourgfriuli-v-giuliafriuli-ve-giuliafriuli-" + - "vegiuliafriuli-venezia-giuliafriuli-veneziagiuliafriuli-vgiuliaf" + - "riuliv-giuliafriulive-giuliafriulivegiuliafriulivenezia-giuliafr" + - "iuliveneziagiuliafriulivgiuliafrlfroganshimosuwalkis-a-knightoyo" + - "tomiyazakis-a-landscaperugiafrognfrolandfrom-akrehamnfrom-alfrom" + - "-arfrom-azparaglidingfrom-canonoichikawamisatodayfrom-collection" + - "from-ctoyotsukaidovre-eikerfrom-dcheltenham-radio-operaunitelema" + - "rkautokeinofrom-dellogliastraderfrom-flandershimotsukefrom-gaula" + - "rdalfrom-higashiagatsumagoirmitakeharafrom-iafrom-idfrom-ilfrom-" + - "incheonfrom-kshimotsumafrom-kyknetoyourafrom-lanbibaidarfrom-man" + - "xn--1qqw23afrom-mdfrom-meetoystre-slidrettozawafrom-microsoftban" + - "klabudhabikinokawabarthadselfipirangafrom-mnfrom-modalenfrom-msh" + - "inichinanfrom-mtnfrom-nchelyabinskydivingrongausdalovegaskimitsu" + - "batamicabbottjmaxxxjaworznofrom-ndfrom-nexusgardenfrom-nhktozsde" + - "from-njcparisor-fronfrom-nminamiizukamitondabayashiogamagoriziaf" + - "rom-nvaolbia-tempio-olbiatempioolbialystokkemerovodkagoshimainte" + - "nancefrom-nyfrom-ohkurafrom-oketogurafrom-orfrom-paderbornfrom-p" + - "ratohmaoris-a-lawyerfrom-ris-a-liberalfrom-schoenbrunnfrom-sdnip" + - "ropetrovskmpspbaltimore-og-romsdalimitedunetflixilimoliserniaurs" + - "kog-holandroverhalla-speziaetnagahamaroygardenebakkeshibechambag" + - 
"riculturennebudapest-a-la-masionativeamericanantiques3-ap-southe" + - "ast-1kappleangaviikadenaamesjevuemielnoboribetsucks3-ap-northeas" + - "t-1from-tnfrom-txn--2m4a15efrom-utazuerichardlikescandyndns-at-h" + - "omedepotaruis-a-libertarianfrom-vadsochildrensgardenfrom-vtranby" + - "from-wafrom-wielunnerfrom-wvareserveftparliamentrani-andria-barl" + - "etta-trani-andriafrom-wyfrosinonefrostalowa-wolawafroyahabadajoz" + - "orahkkeravjudygarlandfstcgrouparmafujiiderafujikawaguchikonefuji" + - "minohtawaramotoineppugliafujinomiyadafujiokayamarburgfujisatosho" + - "nairportland-4-salernogiessengerdalaskanittedallasalleaseekloges" + - "uranceofujisawafujishiroishidakabiratoridelmenhorstalbanshinjour" + - "nalismailillehammerfest-mon-blogueurovisionfujitsurugashimarinef" + - "ujixeroxn--30rr7yfujiyoshidafukayabeardubaiduckdnsdojoburgfukuch" + - "iyamadafukudominichernigovernmentjomemorialowiczest-le-patrondhe" + - "imperiafukuis-a-linux-useranishiaritabashikaoizumizakitaurayasud" + - "afukumitsubishigakirkeneshinjukumanofukuokazakirovogradoyfukuroi" + - "shikarikaturindalfukusakiryuohaebaruminamimakis-a-llamarylhurste" + - "inkjerusalembetsukuis-a-musicianfukuyamagatakahashimamakisarazur" + - "e-mobileirfjordfunabashiriuchinadafunagatakahatakaishimoichinose" + - "kigaharafunahashikamiamakusatsumasendaisennangoodyearthagakhanam" + - "igawafundaciofuoiskujukuriyamarcheaparocherkasyzrankoshigayaltai" + - "kis-a-nascarfanfuosskoczowindmillfurnitureggioemiliaromagnakasat" + - "sunairtelecityeatshinkamigotoyohashimotomobellunordreisa-geekmsh" + - "inshinotsurgeonshalloffamelhustkamisunagawafurubiraquarelleasing" + - "leshinshirofurudonostiafurukawairtraffichernihivgucciprianiigata" + - "itoeiheijis-a-doctorayfusodegaurafussaikishiwadafutabayamaguchin" + - "omigawafutboldlygoingnowhere-for-moregontrailroadfuttsurugiminam" + - "iminowafvgfyis-a-nurseoullensvanguardfylkesbiblackfridayfyresdal" + - "hannovarggatraniandriabarlettatraniandriahanyuzenhapmirhappoulvi" + - 
"kolobrzegyptianpachigasakidstvedestrandhareidsbergenharstadharve" + - "stcelebrationhasamarahasaminami-alpssells-for-unzenhashbanghasud" + - "ahasvikomaganehatogayahoooshikamaishimofusartshinyoshitomiokanie" + - "pcehatoyamazakitahatakaokamikitayamatotakadahatsukaichiharahattf" + - "jelldalhayashimamotobuildinghazuminobusells-itranoyhboehringerik" + - "ehembygdsforbundhemneshiojirishirifujiedahemsedalherokussldheroy" + - "hgtvaroyhigashichichibungotakadatsunanjoetsuwanouchikujogaszkola" + - "dbrokesennumamurogawalterhigashihiroshimanehigashiizumozakitakam" + - "iizumisanofiatransportrapaniimimatakatoris-a-personaltrainerhiga" + - "shikagawahigashikagurasoedahigashikawakitaaikitakatakarazukamiko" + - "aniikappulawyhigashikurumeguroroskoleitungsenhigashimatsushimaru" + - "game-hostinghigashimatsuyamakitaakitadaitoigawahigashimurayamala" + - "tvuopmidoris-a-photographerokuappartis-a-playerhigashinarusellsy" + - "ourhomegoodshioyameloyalistockholmestrandhigashinehigashiomihach" + - "imanchesterhigashiosakasayamamotorcycleshirahamatonbetsurgeryhig" + - "ashishirakawamatakasagooglecodespotravelchannelhigashisumiyoshik" + - "awaminamiaikitakyushuaiahigashitsunowruzhgorodoyhigashiurausukit" + - "amidsundhigashiyamatokoriyamanakakogawahigashiyodogawahigashiyos" + - "hinogaris-a-republicancerresearchaeologicaliforniahiraizumisatoh" + - "noshoohirakatashinagawahiranais-a-rockstarachowicehirarahiratsuk" + - "agawahirayaitakasakitamotosumitakaginankokubunjis-a-socialistmei" + - "ndianapolis-a-bloggerhisayamanashiibaghdadultravelersinsurancehi" + - "storichouseshirakoenighitachiomiyaginowaniihamatamakawajimaritim" + - "odellinghitachiotagopartnershiranukanmakiwakunigamihamadahitoyos" + - "himifunehitradinghjartdalhjelmelandholeckobierzyceholidayhomeipa" + - "rtshiraois-a-soxfanhomelinuxn--32vp30hagebostadhomesensembokukit" + - "anakagusukumoduminamiogunicomcastresistancehomeunixn--3bst00mina" + - "misanrikubetsupplyhondahonefosshiraokannamiharuhoneywellhongorge" + - 
"honjyoitakashimarumorimachidahornindalhorseminehortendofinternet" + - "rdhoteleshiratakahagis-a-studentalhotmailhoyangerhoylandetroitsk" + - "omakiyosatokamachippubetsubetsugaruhumanitieshishikuis-a-teacher" + - "kassymantechnologyhurdalhurumajis-a-techietis-a-therapistoiahyll" + - "estadhyogoris-an-accountantshisognehyugawarahyundaiwafunejgorajl" + - "chiryukyuragifuefukihaborokunohealthcareersassaris-a-financialad" + - "visor-aurdalucaniajlljmpartyjnjelenia-gorajoyokaichibahcavuotnag" + - "araholtalenjpmorganichitachinakagawatchandclockazimierz-dolnyjpn" + - "chitosetogitsuldaluccapebretonamiasakuchinotsuchiurakawassamukaw" + - "ataricohdavvenjargamvikazojprshoujis-an-anarchistoricalsocietyju" + - "niperjurkristiansandcatshowakristiansundkrodsheradkrokstadelvald" + - "aostarostwodzislawinnershowtimemerckommunekryminamitanekumatorin" + - "okumejimasudakumenanyokkaichirurgiens-dentisteshriramsterdambula" + - "ncekunisakis-certifiedekakegawakunitachiarailwaykunitomigusukuma" + - "motoyamassa-carrara-massacarraramassabunkyonanaoshimageandsounda" + - "ndvisionkunneppupasadenamsosnowiechloekunstsammlungkunstunddesig" + - "nkuokgroupassagensienarashinokurepairbusantiquest-a-la-maisondre" + - "-landebusinessebykleclerchocolatelevisioniyodogawakurgankurobela" + - "udiblebesbyglandkurogimilitarykuroisoftwarendalenugkuromatsunais" + - "-foundationkurotakikawasakis-gonekurskomonokushirogawakustanais-" + - "into-animeiwamaseratis-an-actorkusupersportrentino-sud-tirolkutc" + - "hanelkutnokuzbassnillfjordkuzumakis-into-carshizukuishimogosenkv" + - "afjordkvalsundkvamlidlugolekagaminord-aurdalvdalipayufuchukotkaf" + - "jordkvanangenkvinesdalkvinnheradkviteseidskogkvitsoykwpspjelkavi" + - "komorotsukamishihoronobeokaminokawanishiaizubangekyotobetsupplie" + - "sigdalkyowariasahikawamishimatsumotofukemissilelmisugitokonamega" + - "takayamatsunomitourismolanciamitoyoakemiuramiyazurewebsiteshikag" + - "amiishibukawamiyotamanomjondalenmlbarcelonagasakijobservercellie" + - 
"rneues3-us-west-1monmouthaibarakitagawamonstermonticellolmontrea" + - "lestatefarmequipmentrentino-sudtirolmonza-brianzaporizhzhekinann" + - "estadmonza-e-della-brianzaporizhzhiamonzabrianzapposlombardiamon" + - "dsimple-urlmonzaebrianzaramonzaedellabrianzamoparachutingmordovi" + - "ajessheiminamiuonumatsumaebashimodatemoriyamatsusakahoginozawaon" + - "senmoriyoshiokamitsuemormoneymoroyamatsushigemortgagemoscowiostr" + - "olekaneyamaxunjargamoseushistorymosjoenmoskenesirdalmosslingmosv" + - "ikongsvingermoviemovistargardmtpccwitdkoninjamisonmtranakayamats" + - "uuramuenstermugithubusercontentrentino-sued-tirolmuikamogawamuko" + - "chikushinonsenergymulhouservebbslupskonskowolancashireisenmultic" + - "hoicemunakatanemuncieszynmuosattemupassenger-associationmurmansk" + - "onsulatrobeermurotorcraftrentino-suedtirolmusashimurayamatsuzaki" + - "s-lostre-toteneis-an-actresshisuifuettertdasnetzwindowshitaramam" + - "usashinoharamuseetrentinoa-adigemuseumverenigingmutsuzawamutuell" + - "evangermypetsmolenskonyveloftrentino-s-tirollagrigentomologyeong" + - "giehtavuoatnagaivuotnagaokakyotambabydgoszczecinemailmyphotoshib" + - "ahccavuotnagareyamaizurubtsovskjakdnepropetrovskiervaapsteiermar" + - "kooris-an-artistjohnmytis-a-bookkeeperminamiyamashirokawanabelgo" + - "rodeophiladelphiaareadmyblogsitephilatelyphilipsyphoenixn--3e0b7" + - "07ephotographysiopiagetmyipaviancapetownpictetrentinoaadigepictu" + - "resokanrapiemontepilotsokndalpinkopervikomitamamurapioneerpippup" + - "iszpittsburghofermobilypiwatepizzapkoryolasiteplanetariuminanopl" + - "antationplantsolarssonplatformincommbankomvuxn--3ds443gplaystati" + - "onplazaplchofunatorientexpressatxn--0trq7p7nnrwhalingrossetouchi" + - "jiwadeltajimicrolightingroundhandlingroznyplombardyndns-at-worki" + - "nggroupfizerplumbingotvbarclaycards3-us-west-2plusterpmnpodzonep" + - "ohlpokerpokrovskosaigawapolitiendapolkowicepoltavalle-aostathell" + - "exuslivinghistorypomorzeszowithgoogleapisa-hockeynutrentinoalto-" + - 
"adigepordenonepornporsangerporsangugeporsgrunnanpoznanpraxis-a-b" + - "ruinsfansologneprdpreservationpresidioprimelbourneprincipeprivne" + - "prochowiceproductionsolundbeckosakaerodromegallupinbananarepubli" + - "cargodaddynathomebuiltarumizusawaustinnaturbruksgymnaturhistoris" + - "ches3-fips-us-gov-west-1proferraraprogressivenneslaskerrylogisti" + - "csolutionsomaprojectrentinoaltoadigepromombetsupportrentinos-tir" + - "olpropertyprotectionprudentialpruszkowithyoutubeneventochiokinos" + - "himalselvendrellprzeworskogptzpvtrentinostirolpwchonanbugattipsc" + - "hmidtre-gauldalucernepzqldqponqslgbtrentinosud-tirolqvchoseiroum" + - "uenchenstudiostudyndns-freemasonryokamikawanehonbetsurutaharastu" + - "ff-4-salestuttgartrentinosuedtirolsurnadalsurreysusakis-uberleet" + - "rentino-aadigesusonosuzakanumazurysuzukanzakiwiensuzukis-very-ba" + - "daddjamalborkdalsvalbardudinkakamigaharasveiosvelvikosherbrookeg" + - "awasvizzeraswedenswidnicapitalonewhollandswiebodzindianmarketing" + - "swiftcoverisignswinoujscienceandhistoryswisshikis-very-evillages" + - "xn--3oq18vl8pn36atunesopotrentinosudtirolturystykarasjoksnesor-o" + - "daltuscanytushuissier-justicetuvalle-d-aostatoilvestnesorfoldves" + - "tre-slidreamhostersorreisahayakawakamiichikaiseis-slickomatsushi" + - "mashikiyosemitevestre-totennishiawakuravestvagoyvevelstadvibo-va" + - "lentiavibovalentiavideovillaskoyabearalvahkihokumakogeniwaizumio" + - "tsukumiyamazonawsabaerobaticketsaritsynologyeongbukoshunantokash" + - "ikizunokunimilanovinnicarbonia-iglesias-carboniaiglesiascarbonia" + - "vinnytsiavipsinaappharmaciensnoasaitamatsukuris-not-certifiedoga" + - "warabikomaezakirunoshiroomuravirginiavirtualvirtuelvisakatakinou" + - "evistaprintuitrevisohughesomnaritakurashikis-saveducatorahimeshi" + - "makanegasakinkobayashikshacknetnedalviterboknowsitallvivoldavlad" + - "ikavkazanvladimirvladivostokaizukarasuyamazoevlogvolkenkundersea" + - "portroandinosaureportrentottoris-very-goodhandsonvolkswagentsort" + - 
"landvologdanskostromahachijorpelandvolvolgogradvolyngdalvoronezh" + - "ytomyrvossevangenvotevotingvotoursoruminnesotaketakatsukis-into-" + - "cartoonshizuokanoyakagevrnworse-thangglidingwowiwatsukiyonowrite" + - "sthisblogspotrogstadwroclawloclawekosugewtchoshibuyachiyodawtfer" + - "rarittogoldpointelligencewuozuwwworldwzmiuwajimaxn--4gq48lf9jeon" + - "namerikawauexn--4it168dxn--4it797kotouraxn--4pvxsouthcarolinazaw" + - "axn--54b7fta0cchromediaxn--55qw42gxn--55qx5dxn--5js045dxn--5rtp4" + - "9chryslerxn--5rtq34kouhokutamakis-an-entertainerxn--5su34j936bgs" + - "gxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a" + - "264chtrainingrpalmspringsakerxn--80adxhksouthwestfalenxn--80ao21" + - "axn--80aqecdr1axn--80asehdbarclaysakuraibmdivtasvuodnakaiwamizaw" + - "australiaisondriodejaneirochesterxn--80aswgxn--80audnedalnxn--8l" + - "tr62kounosunndalxn--8pvr4uxn--8y0a063axn--90a3academydscloudapps" + - "potenzachpomorskiendoftheinternetcimdbarefootballangenoamishiras" + - "atobishimalopolskanlandivttasvuotnakamagayachtsakyotanabellevuel" + - "osangelesjaguarchitecturealtychyattorneyagawalbrzycharternopilaw" + - "alesundiyonaguniversityoriikasaokamiokamiminersalangenayoroceano" + - "graphicsalondonetskashibatakasugaiinetatamotorsaltdalindasiaustr" + - "heimatunduhrennesoyekaterinburgjerdrumckinseyokosukareliancebina" + - "gisoccertificationatuurwetenschappenaumburgjerstadotsuruokamakur" + - "azakisofukushimarnardalaziobiragroks-thisamitsukeisenbahnaturalh" + - "istorymuseumcentereviewskrakowebhopagefrontappagespeedmobilizero" + - "bihirosakikamijimaeroportalabamagasakishimabarackmaze-burggfarme" + - "rseinewyorkshireggio-emilia-romagnakanotoddenasushiobarabruzzool" + - "ogicalvinklein-addrammenuernbergdyniabogadocscbg12000xn--90aisho" + - "baraomoriguchiharagusaarlandxn--90azhair-surveillancexn--9dbhblg" + - "6diethnologyxn--9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aropo" + - "rt-byanagawaxn--asky-iraxn--aurskog-hland-jnbargainstitutelefoni" + - 
"cafederationflatangerxn--avery-yuasakegawaxn--b-5gaxn--b4w605fer" + - "dxn--bck1b9a5dre4chungbukazunoxn--bdddj-mrabdxn--bearalvhki-y4ax" + - "n--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7axn--bidr-5nac" + - "hikatsuuraxn--bievt-0qa2xn--bjarky-fyanaizuxn--bjddar-ptambovers" + - "aillesor-varangerxn--blt-elaborxn--bmlo-graingerxn--bod-2naroyxn" + - "--brnny-wuaccident-investigationjukudoyamaceratabuseat-band-camp" + - "aniamallamadridvagsoyericssonlineat-urlxn--brnnysund-m8accident-" + - "preventionxn--brum-voagatromsakakinokiaxn--btsfjord-9zaxn--c1avg" + - "xn--c2br7gxn--c3s14minternationalfirearmsimbirskongsbergxn--cck2" + - "b3barreauctionfshostrodawarauthordalandroidgcahcesuolocalhistory" + - "azannefrankfurtargets-itargi234xn--cg4bkis-very-nicexn--ciqpnxn-" + - "-clchc0ea0b2g2a9gcdn77-securecreationxn--comunicaes-v6a2oxn--cor" + - "reios-e-telecomunicaes-ghc29axn--czr694barrel-of-knowledgeologyu" + - "kuhashimojiitatebayashijonawatextileksvikashiharautomotivecodyn-" + - "o-saurlandes3-sa-east-1xn--czrs0tromsojavald-aostarnbergxn--czru" + - "2dxn--czrw28barrell-of-knowledgeometre-experts-comptablesalvador" + - "dalibabaikaliszczytnordlandnpalacemersongdalenviknakanojohanamak" + - "inoharautoscanadaejeonbukariyakumoldebinosegawakunedre-eikerhclo" + - "udcontrolledds3-ap-southeast-2xn--d1acj3bashkiriaveroykenvironme" + - "ntalconservationaustdalillesandefjordigitalillyokotebizenakaniik" + - "awatanaguramusementarantomsk-uralsk12xn--d1alfaromeoxn--d1atrust" + - "eexn--d5qv7z876chungnamdalseidfjordyndns-wikindlegnicamerakershu" + - "s-east-1xn--davvenjrga-y4axn--djrs72d6uyxn--djty4kouyamashikokuc" + - "huoxn--dnna-grajewolterskluwerxn--drbak-wuaxn--dyry-iraxn--eckvd" + - "tc9dxn--efvn9sowaxn--efvy88hakatanotteroyxn--ehqz56nxn--elqq16ha" + - "kodatevaksdalxn--estv75gxn--eveni-0qa01gaxn--f6qx53axn--fct429ko" + - "uzushimasoyxn--fhbeiarnxn--finny-yuaxn--fiq228c5hspreadbettingxn" + - "--fiq64basilicataniavocatanzaroweddingjesdalimanowarudasmatartan" + - 
"ddesignieznorddalavagiske12xn--fiqs8spydebergxn--fiqz9srlxn--fjo" + - "rd-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--fpcrj9c3dx" + - "n--frde-grandrapidsrtrentinosued-tirolxn--frna-woaraisaijosoyrov" + - "igorlicexn--frya-hraxn--fzc2c9e2churchaseljeepilepsydneyxn--fzys" + - "8d69uvgmailxn--g2xx48chuvashiaxn--gckr3f0ferreroticampobassociat" + - "esewildlifestylexn--gecrj9circlegallocuscountryestateofdelawared" + - "umbrellahppiacenzakopanerairforcechirealtorlandyndns-workshoppda" + - "lukowhoswhokksundyroyrvikingruexn--ggaviika-8ya47hakonexn--gilde" + - "skl-g0axn--givuotna-8yandexn--3pxu8kotohiradomainsureitrentino-s" + - "tirolxn--gjvik-wuaxn--gk3at1exn--gls-elacaixaxn--gmq050is-very-s" + - "weetrentino-alto-adigexn--gmqw5axn--h-2fairwindsrvdonskoseis-an-" + - "engineeringxn--h1aeghakubankokonoexn--h2brj9circuscultureggio-ca" + - "labriaxn--hbmer-xqaxn--hcesuolo-7ya35basketballfinanz-2xn--hery-" + - "iraxn--hgebostad-g3axn--hmmrfeasta-s4acoachampionshiphopenair-tr" + - "affic-controlleyxn--hnefoss-q1axn--hobl-iraxn--holtlen-hxaxn--hp" + - "mir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b6b1a6a2e" + - "xn--imr513nxn--indery-fyaotsurgutsiracusaitokyotangovtrverranzan" + - "xn--io0a7is-with-thebandoomdnsaliascolipicenord-odalxn--j1aefets" + - "undxn--j1amhakuis-a-painteractivegarsheis-a-patsfanxn--j6w193gxn" + - "--jlq61u9w7batochigiftsalzburgladeloittenrightathomeftpaccessame" + - "gawavoues3-us-gov-west-1xn--jlster-byaroslavlaanderenxn--jrpelan" + - "d-54axn--jvr189misakis-into-gamessinashikitchenxn--k7yn95exn--ka" + - "rmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kfjord-iuaxn--klbu-woaxn--kl" + - "t787dxn--kltp7dxn--kltx9axn--klty5xn--42c2d9axn--koluokta-7ya57h" + - "akusandnessjoenxn--kprw13dxn--kpry57dxn--kpu716fguovdageaidnulmi" + - "namiechizenxn--kput3isleofmandalxn--krager-gyasakaiminatoyakokam" + - "isatohobby-sitexasdaburyatiaarpharmacysnzxn--kranghke-b0axn--krd" + - "sherad-m8axn--krehamn-dxaxn--krjohka-hwab49jetztrentino-altoadig" + - 
"exn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyasugissmarterthanyouxn" + - "--kvnangen-k0axn--l-1faitheguardianquanconagawakuyabukicks-assed" + - "icitadeliverybnikahokutogliattiresauheradxn--l1accentureklamborg" + - "hiniizaxn--laheadju-7yasuokaratexn--langevg-jxaxn--lcvr32dxn--ld" + - "ingen-q1axn--leagaviika-52batsfjordrangedalindesnesamnangerxn--l" + - "esund-huaxn--lgbbat1ad8jevnakerxn--lgrd-poaciticasinorfolkebible" + - "frakkestadyndns-weberlincolnisshinguernseyxn--lhppi-xqaxn--linds" + - "-pramericanartrysilkoshimizumakiyosumyokohamamatsudaxn--lns-qlan" + - "xesstoragexn--loabt-0qaxn--lrdal-sraxn--lrenskog-54axn--lt-liaci" + - "vilaviationxn--lten-granexn--lury-iraxn--mely-iraxn--merker-kuax" + - "n--mgb2ddestordalxn--mgb9awbfidelityxn--mgba3a3ejtulansooxn--mgb" + - "a3a4f16axn--mgba3a4franamizuholdingsmileirvikozagawaxn--mgba7c0b" + - "bn0axn--mgbaakc7dvfidonnakamuratakahamannortonsbergushikamifuran" + - "otairesfranziskanerimamateramochizukiraxn--mgbaam7a8haldenxn--mg" + - "bab2bdxn--mgbai9a5eva00bauhausposts-and-telecommunicationsnasado" + - "dgeorgeorgiaxasnesoddenmarkets3-eu-central-1xn--mgbai9azgqp6jewe" + - "lryxn--mgbayh7gpaduaxn--mgbb9fbpobanazawaxn--mgbbh1a71exn--mgbc0" + - "a9azcgxn--mgbca7dzdownloadxn--mgberp4a5d4a87gxn--mgberp4a5d4arxn" + - "--mgbi4ecexposedxn--mgbpl2fhvalerxn--mgbqly7c0a67fbcivilisationx" + - "n--mgbqly7cvafredrikstadtvstorenburgxn--mgbt3dhdxn--mgbtf8flekke" + - "fjordxn--mgbtx2bbcarrierxn--mgbx4cd0abbvieeexn--mix082fieldxn--m" + - "ix891figuerestaurantoyonoxn--mjndalen-64axn--mk0axindustriesteam" + - "famberkeleyxn--mk1bu44civilizationxn--mkru45iwchernovtsykkylvene" + - "togakushimotoganewjerseyxn--mlatvuopmi-s4axn--mli-tlapyatigorsko" + - "zakis-byxn--mlselv-iuaxn--moreke-juaxn--mori-qsakuhokkaidontexis" + - "teingeekpnxn--mosjen-eyatominamiawajikixn--mot-tlaquilancasterxn" + - "--mre-og-romsdal-qqbbtatarstanhsamsclubindalinkashiwaraxn--msy-u" + - "la0halsaintlouis-a-anarchistoirehabmerxn--mtta-vrjjat-k7afamilyc" + - 
"ompanycivilwarmanagementjxn--11b4c3dxn--muost-0qaxn--mxtq1misasa" + - "guris-leetrentino-a-adigexn--ngbc5azdxn--ngbe9e0axn--ngbrxn--45b" + - "rj9choyodobashichikashukujitawaraxn--nit225kppspiegelxn--nmesjev" + - "uemie-tcbajddarchaeologyxn--nnx388axn--nodessakuragawaxn--nqv7fs" + - "00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery-byaeserveg" + - "ame-serverdalxn--nvuotna-hwaxn--nyqy26axn--o1achattanooganorilsk" + - "odjeffersonxn--o3cw4hammarfeastafricamagichernivtsiciliaxn--od0a" + - "lgxn--od0aq3bbvacationswatch-and-clockerxn--ogbpf8flesbergxn--op" + - "pegrd-ixaxn--ostery-fyatsukaratsuginamikatagamihoboleslawieclaim" + - "savannahgaxn--osyro-wuaxn--p1acfdxn--p1aixn--pbt977clickchristia" + - "nsburguideventsaves-the-whalessandria-trani-barletta-andriatrani" + - "barlettaandriaxn--pgbs0dhlxn--porsgu-sta26filateliaxn--pssu33lxn" + - "--pssy2uxn--q9jyb4clinicateringebudejjuedischesapeakebayernuremb" + - "ergrondarxn--qcka1pmcdonaldstorfjordxn--qqqt11misawaxn--qxamursk" + - "inderoyxn--rady-iraxn--rdal-poaxn--rde-ularvikrasnodarxn--rdy-0n" + - "abarixn--rennesy-v1axn--rhkkervju-01aflakstadaokagakibichuoxn--r" + - "holt-mragowoodsidexn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn--ri" + - "sa-5narusawaxn--risr-iraxn--rland-uuaxn--rlingen-mxaxn--rmskog-b" + - "yatsushiroxn--rny31hamurakamigoriginshintokushimaxn--rovu88bentl" + - "eyurihonjournalistjordalshalsenikiiyamanobeauxartsandcraftsamsun" + - "glassassinationalheritagematsubarakawagoemrxn--rros-granvindafjo" + - "rdxn--rskog-uuaxn--rst-0narutokorozawaxn--rsta-francaiseharaxn--" + - "ryken-vuaxn--ryrvik-byawaraxn--s-1fareastcoastaldefencexn--s9brj" + - "9cliniquenoharaxn--sandnessjen-ogbizhevskrasnoyarskommunalforbun" + - "dxn--sandy-yuaxn--seral-lraxn--ses554gxn--sgne-gratangenxn--skie" + - "rv-utazaskvolloabathsbclintonoshoesavonaplesaxoxn--skjervy-v1axn" + - "--skjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5narviikananporov" + - "noxn--slt-elabourxn--smla-hraxn--smna-gratis-a-bulls-fanxn--snas" + - 
"e-nraxn--sndre-land-0cbremangerxn--snes-poaxn--snsa-roaxn--sr-au" + - "rdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbeppubol" + - "ognagasukepostfoldnavyatkakudamatsuepsonyoursidegreevje-og-hornn" + - "esanfranciscotlandrivelandrobaknoluoktainaibetsubamericanfamilyd" + - "smynasperschlesischesquarezzoologyeongnamegawakembuchikumagayaga" + - "wakkanaikawachinaganoharamcoalaheadjudaicaaarborteaches-yogasawa" + - "racingroks-theatreexn--srfold-byawatahamaxn--srreisa-q1axn--srum" + - "-grazxn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalsen-sqberndunlo" + - "ppacificartierxn--stre-toten-zcbstpetersburgxn--t60b56axn--tckwe" + - "atherchannelxn--tiq49xqyjewishartgalleryxn--tjme-hraxn--tn0agrin" + - "etbankzxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trgstad-r1axn-" + - "-trna-woaxn--troms-zuaxn--tysvr-vraxn--uc0atversicherungxn--uc0a" + - "y4axn--uist22hangoutsystemscloudcontrolapparshintomikasaharaxn--" + - "uisz3gxn--unjrga-rtaobaokinawashirosatobamagazinemurorangemologi" + - "callyngenglandxn--unup4yxn--uuwu58axn--vads-jraxn--vard-jraxn--v" + - "egrshei-c0axn--vermgensberater-ctbeskidynaliascoli-picenord-fron" + - "tierxn--vermgensberatung-pwbestbuyshousesangoceanographiquexn--v" + - "estvgy-ixa6oxn--vg-yiabcgxn--vgan-qoaxn--vgsy-qoa0jfkomforbamble" + - "borkarumaifarmsteadiskstationavigationavuotnakhodkanagawaustevol" + - "lavangenaturalsciencesnaturelles3-external-2xn--vgu402clothingui" + - "tarsbschokoladenxn--vhquvestfoldxn--vler-qoaxn--vre-eiker-k8axn-" + - "-vrggt-xqadxn--vry-yla5gxn--vuq861betainaboxfordeatnuorockartuzy" + - "usuharaxn--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgbh1cloudf" + - "rontdoorxn--wgbl6axn--xhq521bhartiffanynysafetysfjordunsagamihar" + - "axn--xkc2al3hye2axn--xkc2dl3a5ee0hannanmokuizumodernxn--y9a3aqua" + - "riumisconfusedxn--yer-znarvikredstonexn--yfro4i67oxn--ygarden-p1" + - "axn--ygbi2ammxn--45q11christmasakikugawatchesaudaxn--ystre-slidr" + - "e-ujbielawallonieruchomoscienceandindustrynikkoebenhavnikolaever" + - 
"bankashiwazakiyokawaraxn--zbx025dxn--zf0ao64axn--zf0avxn--4gbrim" + - "iningxn--zfr164biellaakesvuemieleccexperiaxz" +const text = "bifukagawalterbihorologybikedagestangeorgeorgiaxasnesoddenmarkha" + + "ngelskjakdnepropetrovskiervaapsteiermarkaragandabruzzoologicalvi" + + "nklein-addrammenuernberggfarmerseine12bilbaogakidsmynasushiobara" + + "gusartsalangeninohekinannestadray-dnsiskinkyotobetsumidatlantica" + + "tholicheltenham-radio-opencraftranagatorodoybillustrationinomiya" + + "konojosoyrorosalondonetskarpaczeladzjavald-aostarnbergladegreevj" + + "e-og-hornnesaltdalimitedraydnsupdaternopilawabioceanographiquebi" + + "rdartcenterprisesakikuchikuseikarugamvikaruizawabirkenesoddtange" + + "novaraumalopolskanlandrivelandrobaknoluoktachikawakembuchikumaga" + + "yagawakkanaibetsubamericanfamilydscloudcontrolledekafjordrudunsa" + + "lvadordalibabalatinord-aurdalvdalaskanittedallasalleasinglesuran" + + "certmgretagajobojinzais-a-candidatebirthplacebjarkoybjerkreimbal" + + "sfjordgcahcesuolocus-1bjugnirasakis-a-catererblockbustermezlglas" + + "sassinationalheritagematsubarakawagoebloombergbauernishiazais-a-" + + "celticsfanishigoddabloxcmsalzburgliwicebluedancebmoattachmentsam" + + "egawabmsamnangerbmwegroweibolzanordkappgafanquannefrankfurtjmaxx" + + "xboxenapponazure-mobilebnpparibaselburglobalashovhachinohedmarka" + + "rumaifarmsteadupontariomutashinais-a-chefarsundurbanamexnethnolo" + + "gybnrweirbonnishiharabookinglobodoes-itvedestrandurhamburglogowf" + + "ashionishiizunazukis-a-conservativefsnillfjordvrcambridgestonexu" + + "s-2bootsamsclubindalimoliserniaboschaefflerdalindashorokanaiebos" + + "tikasaokaminokawanishiaizubangebostonakijinsekikogentingloppenza" + + "ogashimadachicagoboatsamsungmbhartiffanybotanicalgardenishikatak" + + "ayamatta-varjjatjometlifeinsurancebotanicgardenishikatsuragithub" + + "usercontentjxfinitybotanybouncemerckmsdnipropetrovskjervoyagebou" + + "nty-fullensakerrypropertiesandvikcoromantovalle-d-aostatic-acces" + + 
"sanfranciscofreakunemurorangeiseiyoichippubetsubetsugaruhrboutiq" + + "uebecngminakamichiharabozentsujiiebplacedogawarabikomaezakirunor" + + "dlandvrdnsangoppdalindesnesanjournalismailillesandefjordyndns-at" + + "-workinggroupaleobrandywinevalleybrasiliabresciabrindisibenikebr" + + "istoloslocalhistorybritishcolumbialowiezachpomorskienishikawazuk" + + "amitondabayashiogamagoriziabroadcastlegallocalhostrodawaravennag" + + "asukebroadwaybroke-itkmaxxjaworznowtvalled-aostavangerbrokerbron" + + "noysundyndns-blogdnsannanishimerabrothermesaverdeatnurembergmode" + + "nakasatsunais-a-cpadualstackspace-to-rentalstomakomaibarabrowser" + + "safetymarketsannohelplfinancialivornobrumunddalombardiamondsanok" + + "ashibatakashimaseratis-a-cubicle-slavellinotteroybrunelasticbean" + + "stalkashiharabrusselsantabarbarabruxellesantacruzsantafedjeffers" + + "onishinomiyashironobryanskleppalermomahachijorpelandyndns-freebo" + + "x-ostrowwlkpmgmxn--0trq7p7nnishinoomotegobrynewhollandyndns-home" + + "dnsanukis-a-democratmpalmspringsakerbuskerudinewmexicodyn-vpnplu" + + "sterbuzenishinoshimattelefonicarbonia-iglesias-carboniaiglesiasc" + + "arboniabuzzpamperedchefastlylbaltimore-og-romsdalwaysdatabasebal" + + "langenoamishirasatochigiessensiositelemarkarateu-1bwhalingrimsta" + + "dyndns-ipirangaulardalombardynamisches-dnsaotomemergencyachtsapo" + + "dlasiellaktyubinskiptveterinairealtorlandyndns-mailomzaporizhzhe" + + "guris-a-designerimarumorimachidabzhitomirumalselvendrellorenskog" + + "ripescaravantaacondoshichinohealth-carereformitakeharaconference" + + "constructionconsuladoesntexistanbullensvanguardyndns1consultanth" + + "ropologyconsultingvolluroycontactoyotsukaidownloadynnsaskatchewa" + + "ncontemporaryarteducationalchikugodoharuovatoyouracontractorsken" + + "conventureshinodesashibetsuikinderoycookingchannelblagdenesnaase" + + "ralingenkainanaejrietisalatinabenonichernivtsiciliacoolkuszczytn" + + "ore-og-uvdalutskasuyameldaluxembourgrpanamacooperaunitenrightath" + + 
"omeftpanasonichernovtsykkylvenetogakushimotoganewspapercopenhage" + + "ncyclopedichirurgiens-dentistes-en-francecorsicagliaridagawarsza" + + "washingtondclkaszubycorvettevadsoccertificationcosenzagancosidns" + + "dojoetsuwanouchikujogaszkoladbrokesassaris-a-huntercostumedio-ca" + + "mpidano-mediocampidanomediocouchpotatofriesatxn--11b4c3dynv6coun" + + "ciluxurycouponsaudacoursesauheradynvpnchiryukyuragifuchungbukhar" + + "acq-acranbrookuwanalyticsavannahgacreditcardyroyrvikingruecredit" + + "unioncremonashgabadaddjambyluzerncrewiiheyakagecricketrzyncrimea" + + "st-kazakhstanangercrotonextdirectoystre-slidrettozawacrownprovid" + + "ercrsvparaglidinguitarsaves-the-whalessandria-trani-barletta-and" + + "riatranibarlettaandriacruisesavonaplesaxocryptonomichigangwoncui" + + "sinellahppiacenzakopanerairguardiannakadomarinebraskaunjargalsac" + + "eoculturalcentertainmentozsdeltaitogliattiresbschokoladencuneocu" + + "pcakecxn--12c1fe0bradescorporationcyberlevagangaviikanonjis-a-kn" + + "ightpointtokaizukamikitayamatsuris-a-landscapercymrussiacyonabar" + + "ulvikatowicecyouthdfcbankatsushikabeeldengeluidfidonnakamurataji" + + "mibuildingulenfieldfiguerestaurantraniandriabarlettatraniandriaf" + + "ilateliafilegearthachiojiyahoofilminamidaitomangotsukisosakitaga" + + "wafinalfinancefineartschwarzgwangjuifminamiechizenfinlandfinnoyf" + + "irebaseapparisor-fronfirenzefirestonefirmdaleirvikaufenfishingol" + + "ffanschweizwildlifedorainfracloudfrontdoorfitjarmeniafitnessettl" + + "ementranoyfjalerflesbergunmarburguovdageaidnuslivinghistoryflick" + + "ragerotikakamigaharaflightsciencecentersciencehistoryflirflogint" + + "ogurafloraflorencefloridavvesiidazaifudaigojomedizinhistorisches" + + "cientistoragefloripaderbornfloristanohatakahamangyshlakasamatsud" + + "ontexisteingeekautokeinoflorogerscjohnsonflowerscotlandflynnhuba" + + "mblefrakkestadiscountysnes3-sa-east-1fndfoodnetworkshoppingushik" + + "amifuranortonsbergxn--12co0c3b4evalleaostatoilfor-ourfor-someetn" + + 
"edalfor-theaterforexrothachirogatakahatakaishimogosenforgotdnscr" + + "apper-siteforli-cesena-forlicesenaforlikescandynamic-dnscrapping" + + "forsaleitungsenforsandasuolodingenfortmissoulair-traffic-control" + + "leyfortworthadanosegawaforuminamifuranofosneserveftparliamentran" + + "sportransurlfotaruis-a-lawyerfoxfordedyn-ip24freeboxoservegame-s" + + "erversailleservehalflifestylefreemasonryfreetlservehttparmafreib" + + "urgfreightcminamiiselectrapaniimimatakatoris-a-liberalfresenius-" + + "3fribourgfriuli-v-giuliafriuli-ve-giuliafriuli-vegiuliafriuli-ve" + + "nezia-giuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giuliafriu" + + "live-giuliafriulivegiuliafriulivenezia-giuliafriuliveneziagiulia" + + "friulivgiuliafrlfroganservehumourfrognfrolandfrom-akrehamnfrom-a" + + "lfrom-arqhadselfiparocherkasyno-dserveirchitachinakagawassamukaw" + + "ataricohdatsunanjoburgriwataraidyndns-office-on-the-webcampobass" + + "ociatesapporofrom-azfrom-capebretonamiastapleserveminecraftravel" + + "channelfrom-collectionfrom-ctravelersinsurancefrom-dchitosetogit" + + "suldalotenkawafrom-defenseljordfrom-flanderservemp3from-gausdalf" + + "rom-higashiagatsumagoizumizakirkeneservep2parservepicservequakef" + + "rom-iafrom-idfrom-ilfrom-incheonfrom-kservesarcasmatartanddesign" + + "from-kyowariasahikawafrom-lajollamericanexpressexyfrom-maniwakur" + + "atextileksvikazofrom-mdfrom-megurokunohealthcareerservicesettsur" + + "geonshalloffamemorialfrom-microsoftbankazunofrom-mnfrom-modellin" + + "gfrom-msevastopolefrom-mtnfrom-nchloefrom-ndfrom-nefrom-nhktrdfr" + + "om-njcbnlfrom-nminamiizukamisatokamachintaifun-dnsaliasdaburfrom" + + "-nvalledaostavernfrom-nyfrom-ohkurafrom-oketohmannorth-kazakhsta" + + "nfrom-orfrom-padovaksdalfrom-pratohnoshoooshikamaishimodatefrom-" + + "rivnefrom-schoenbrunnfrom-sdfrom-tnfrom-txn--1ck2e1bananarepubli" + + "caseihichisobetsuitainairforcechirealminamiawajikibmdiscoveryomb" + + "ondishakotanavigationavoiitatebayashiibahcavuotnagaraholtaleniwa" + + 
"izumiotsukumiyamazonawsadodgemologicallyngenvironmentalconservat" + + "ionavuotnaklodzkodairassnasabaerobaticketselinogradultashkentata" + + "motors3-ap-northeast-2from-utazuerichardlillehammerfeste-ipartis" + + "-a-libertarianfrom-val-daostavalleyfrom-vtrentino-a-adigefrom-wa" + + "from-wielunnerfrom-wvallee-aosteroyfrom-wyfrosinonefrostalowa-wo" + + "lawafroyahikobeardubaiduckdnsevenassisicilyfstcgroupartnersewill" + + "iamhillfujiiderafujikawaguchikonefujiminohtawaramotoineppubologn" + + "akanotoddenfujinomiyadafujiokayamansionsfranziskanerdpolicefujis" + + "atoshonairtelecityeatsharis-a-linux-useranishiaritabashijonawate" + + "fujisawafujishiroishidakabiratoridefinimakanegasakindlegokasells" + + "-for-lessharpartshawaiijimarugame-hostrolekameokameyamatotakadaf" + + "ujitsurugashimaritimekeepingfujixeroxn--1ctwolominamatakkokamino" + + "yamaxunusualpersonfujiyoshidafukayabeatshellaspeziafukuchiyamada" + + "fukudominichocolatemasekashiwazakiyosatokashikiyosemitefukuis-a-" + + "llamarylandfukumitsubishigakirovogradoyfukuokazakiryuohaebarumin" + + "amimakis-a-musicianfukuroishikarikaturindalfukusakisarazurewebsi" + + "teshikagamiishibukawafukuyamagatakaharustkanoyakumoldeloittexasc" + + "olipicenoipifonynysaarlandfunabashiriuchinadafunagatakahashimama" + + "kishiwadafunahashikamiamakusatsumasendaisennangonohejis-a-nascar" + + "fanfundaciofuoiskujukuriyamanxn--1lqs03nfuosskoczowinbarcelonaga" + + "sakijobserverisignieznord-frontiereviewskrakowedeployomitanobihi" + + "rosakikamijimastronomy-gatewaybomloans3-ap-south-1furnituredston" + + "efurubiraquarelleborkangerfurudonostiaarpartyfurukawairtrafficho" + + "funatoriginsurecifedexhibitionishiokoppegardyndns-picsardegnamss" + + "koganeis-a-doctorayfusodegaurafussaikisofukushimaoris-a-nurserve" + + "bbshimojis-a-painteractivegarsheis-a-patsfanfutabayamaguchinomig" + + "awafutboldlygoingnowhere-for-moregontrailroadfuttsurugimperiafut" + + "urehostingfuturemailingfvgfyis-a-personaltrainerfylkesbiblackfri" + + 
"dayfyresdalhangoutsystemscloudfunctionshimokawahannanmokuizumode" + + "rnhannotaireshimokitayamahanyuzenhapmirhareidsbergenharstadharve" + + "stcelebrationhasamarcheapassagenshimonitayanagitlaborhasaminami-" + + "alpssells-itrentino-aadigehashbanghasudahasura-appassenger-assoc" + + "iationhasvikddielddanuorrikuzentakataiwanairlinedre-eikerhatogay" + + "aitakamoriokalmykiahatoyamazakitahiroshimarnardalhatsukaichikais" + + "eis-a-republicancerresearchaeologicaliforniahattfjelldalhayashim" + + "amotobungotakadapliernewjerseyhazuminobusellsyourhomegoodshimono" + + "sekikawahboehringerikehelsinkitakamiizumisanofidelitysvardollshi" + + "mosuwalkis-a-rockstarachowicehembygdsforbundhemneshimotsukehemse" + + "dalhepforgeherokussldheroyhgtvalleeaosteigenhigashichichibunkyon" + + "anaoshimageandsoundandvisionhigashihiroshimanehigashiizumozakita" + + "katakanabeautydalhigashikagawahigashikagurasoedahigashikawakitaa" + + "ikitakyushuaiahigashikurumeiwamarriottrentino-alto-adigehigashim" + + "atsushimarshallstatebankfhappouhigashimatsuyamakitaakitadaitoiga" + + "wahigashimurayamamotorcycleshimotsumahigashinarusembokukitamidor" + + "is-a-socialistmein-vigorgehigashinehigashiomihachimanchesterhiga" + + "shiosakasayamanakakogawahigashishirakawamatakanezawahigashisumiy" + + "oshikawaminamiaikitamotosumitakagildeskaliszhigashitsunotogawahi" + + "gashiurausukitanakagusukumoduminamiminowahigashiyamatokoriyamana" + + "shifteditchyouripaviancarrierhigashiyodogawahigashiyoshinogaris-" + + "a-soxfanhiraizumisatohobby-sitehirakatashinagawahiranais-a-stude" + + "ntalhirarahiratsukagawahirayaizuwakamatsubushikusakadogawahistor" + + "ichouseshinichinanhitachiomiyaginankokubunjis-a-teacherkassymant" + + "echnologyhitachiotagooglecodespotrentino-altoadigehitraeumtgerad" + + "elmenhorstalbanshinjournalistjohnhjartdalhjelmelandholeckobierzy" + + "ceholidayhomeipfizerhomelinkhakassiahomelinuxn--1lqs71dhomeoffic" + + "ehomesecuritymaceratakaokaluganskolevangerhomesecuritypccwindmil" + + 
"lhomesenseminehomeunixn--1qqw23ahondahoneywellbeingzonehongopocz" + + "northwesternmutualhonjyoitakarazukamakurazakitashiobarahornindal" + + "horseoulminamiogunicomcastresistancehortendofinternet-dnshinjuku" + + "manohospitalhoteleshinkamigotoyohashimotoshimahotmailhoyangerhoy" + + "landetroitskydivinghumanitieshinshinotsurgeryhurdalhurumajis-a-t" + + "echietis-a-therapistoiahyllestadhyogoris-an-accountantshinshiroh" + + "yugawarahyundaiwafunehzchoseiroumuenchenishitosashimizunaminamia" + + "shigarajfkhmelnitskiyamashikejgorajlchoyodobashichikashukujitawa" + + "rajlljmpharmacienshiojirishirifujiedajnjcpgfoggiajoyokaichibahcc" + + "avuotnagareyamalborkdalpha-myqnapcloudapplebesbyglandjpmorganjpn" + + "jprshioyanaizujuniperjurkoshimizumakis-an-engineeringkoshunantok" + + "igawakosugekotohiradomainshirakofuefukihaboromskoguchikuzenkotou" + + "rakouhokutamakis-an-entertainerkounosupplieshiranukamogawakouyam" + + "ashikokuchuokouzushimasoykozagawakozakis-bykpnkppspdnshiraois-ce" + + "rtifieducatorahimeshimamateramochizukirakrasnodarkredirectmelhus" + + "cultureggio-calabriakristiansandcatshiraokanagawakristiansundkro" + + "dsheradkrokstadelvaldaostarostwodzislawindowshiratakahagivestbyk" + + "ryminamisanrikubetsupportrentino-sued-tirolkumatorinokumejimasud" + + "akumenanyokkaichiropractichristmasakikugawatchandclockasukabedzi" + + "n-the-bandaikawachinaganoharamcoachampionshiphoptobishimaizurugb" + + "ydgoszczecinemakeupowiathletajimabariakeisenbahnishiwakis-a-fina" + + "ncialadvisor-aurdalottokonamegatakasugais-a-geekgalaxykunisakis-" + + "foundationkunitachiarailwaykunitomigusukumamotoyamassa-carrara-m" + + "assacarraramassabusinessebytomaritimobarakunneppulawykunstsammlu" + + "ngkunstunddesignkuokgrouphdkureggio-emilia-romagnakatsugawakurga" + + "nkurobelaudiblebtimnetzkurogimilanokuroisoftwarendalenugkuromats" + + "unais-gonekurotakikawasakis-into-animelbournekushirogawakustanai" + + "s-into-carshintomikasaharakusupplykutchanelkutnokuzumakis-into-c" + + 
"artoonshinyoshitomiokamitsuekvafjordkvalsundkvamfamberkeleykvana" + + "ngenkvinesdalkvinnheradkviteseidskogkvitsoykwpspiegelkzmissilewi" + + "smillermisugitokorozawamitourismolancastermitoyoakemiuramiyazumi" + + "yotamanomjondalenmlbfanmonmouthagebostadmonstermonticellolmontre" + + "alestatefarmequipmentrentino-suedtirolmonza-brianzaporizhzhiamon" + + "za-e-della-brianzapposhishikuis-not-certifiedunetbankharkovanylv" + + "enicemonzabrianzaptokuyamatsusakahoginowaniihamatamakawajimaphil" + + "adelphiaareadmyblogsitemonzaebrianzaramonzaedellabrianzamoonscal" + + "exusdecorativeartshisognemoparachutingmordoviajessheiminamitanem" + + "oriyamatsushigemoriyoshimilitarymormoneymoroyamatsuuramortgagemo" + + "scowinnershisuifuelveruminamiuonumatsumotofukemoseushistorymosjo" + + "enmoskeneshitaramamosshizukuishimofusaitamatsukuris-savedmosvikn" + + "x-serveronakatombetsunndalmoteginozawaonsenmoviemovistargardmtpc" + + "hromedicaltanissettairamtranbymuenstermugithubcloudusercontentre" + + "ntinoa-adigemuikamishihoronobeauxartsandcraftshizuokananporovigo" + + "tpantheonsitemukochikushinonsenergymulhouservebeermunakatanemunc" + + "ieszynmuosattemuphilatelymurmanskolobrzegersundmurotorcraftrenti" + + "noaadigemusashimurayamatsuzakis-slickhersonmusashinoharamuseetre" + + "ntinoalto-adigemuseumverenigingmusicargodaddynaliascoli-picenogi" + + "ftshoujis-uberleetrentino-stirolmutsuzawamy-vigorlicemy-wanggouv" + + "icenzamyactivedirectorymyasustor-elvdalmycdn77-securechtrainingm" + + "ydissentrentinoaltoadigemydrobofagemydshowamyeffectrentinos-tiro" + + "lmyfirewallonieruchomoscienceandindustrynmyfritzmyftpaccesshowti" + + "meteorapphilipsynology-diskstationmyfusionmyhome-serverrankoshig" + + "ayanagawamykolaivaporcloudmymailermymediapchryslermyokohamamatsu" + + "damypepsongdalenviknakanojohanamakinoharamypetshriramlidlugoleka" + + "gaminoduminamiyamashirokawanabelembroideryggeelvincklabudhabikin" + + "okawabarthagakhanamigawamyphotoshibajddarchaeologyeongnamegawalb" + + 
"rzycharternidmypsxn--30rr7ymysecuritycamerakermyshopblocksienara" + + "shinomytis-a-bookkeeperugiamyvnchungnamdalseidfjordyndns-remotew" + + "dyndns-serverdalouvreggioemiliaromagnakayamatsumaebashikshacknet" + + "oyookanmakiwakunigamidsundyndns-weberlincolnissandnessjoenissayo" + + "koshibahikariwanumatakazakis-a-greenissedalowiczest-le-patrondhe" + + "immobilienisshingugepicturesilkomaganepiemontepilotsimple-urlpim" + + "ientaketomisatolgapinkomakiyosumy-routerpioneerpippuphonefossigd" + + "alpiszpittsburghofauskedsmokorsetagayasells-for-unzenpiwatepizza" + + "pkomatsushimashikizunokunimihoboleslawiechristiansburgroks-thisa" + + "yamanobeokakudamatsueplanetariuminanoplantationplantsirdalplatfo" + + "rmshangrilanciaplaystationplazaplchurchaseljeepostfoldnavyplumbi" + + "ngopmnpodzonepohlpoivronpokerpokrovskomforbarclays3-us-gov-west-" + + "1politiendapolkowicepoltavalle-aostathellezajskommunalforbundpom" + + "orzeszowioslingpordenonepornporsangerporsanguidellogliastradingp" + + "orsgrunnanpoznanpraxis-a-bruinsfanprdpreservationpresidioprgmrpr" + + "imeloyalistockholmestrandprincipeprivatizehealthinsuranceprochow" + + "iceproductionslupskommuneprofbsbxn--12cfi8ixb8lvivano-frankivska" + + "tsuyamasfjordenprogressivegasiapromombetsurfbx-oscholarshipschoo" + + "lpropertyprotectionprotonetrentinosud-tirolprudentialpruszkowitd" + + "komonoprzeworskogptplusgardenpvtrentinosudtirolpwcirclegnicafede" + + "rationiyodogawapzqldqponqslgbtrentinosued-tirolquicksytesnoasait" + + "omobellevuelosangelesjaguarchitecturealtychyattorneyagawalesundq" + + "uipelementsokanazawaqvcircustomerstuff-4-salestufftoread-booksne" + + "solognestuttgartritonsusakis-very-evillagesusonosuzakaneyamazoes" + + "uzukaniepcesuzukis-very-goodhandsonsvalbardunloppacificitadelive" + + "rysveiosvelvikongsbergsvizzeraswedenswidnicartierswiebodzindiana" + + "polis-a-bloggerswiftcoversicherungswinoujscienceandhistoryswissh" + + "ikis-very-nicesynology-dsolundbeckomorotsukamiokamikoaniikappugl" + + 
"iatushuissier-justicetuvalle-daostaticsomatuxfamilytwmailvennesl" + + "askerrylogisticsomnaritakurashikis-very-badajozoravestfoldvestne" + + "soovestre-slidreamhostersopotrentinosuedtirolvestre-totennishiaw" + + "akuravestvagoyvevelstadvibo-valentiavibovalentiavideovillaskimit" + + "subatamicable-modembetsukuis-very-sweetpeppervinnicartoonartdeco" + + "ffeedbackplaneappspotagervinnytsiavipsinaappiagetmyiphoenixn--32" + + "vp30haibarakitahatakamatsukawavirginiavirtualvirtueeldomeindianm" + + "arketingvirtuelvisakegawavistaprinternationalfirearmsor-odalvite" + + "rboltrogstadvivoldavixn--3bst00minnesotaketakatsukis-into-gamess" + + "inatsukigatakasagotembaixadavlaanderenvladikavkazimierz-dolnyvla" + + "dimirvlogoipictetrentinostirolvolkswagentsor-varangervologdansko" + + "ninjamisonvolvolkenkundenvolyngdalvossevangenvotevotingvotoyonak" + + "agyokutoursorfoldwloclawekonskowolayangroupharmacyshirahamatonbe" + + "tsurnadalwmflabsorreisahayakawakamiichikawamisatotalworldworse-t" + + "handawowithgoogleapisa-hockeynutsiracusakatakinouewritesthisblog" + + "sytewroclawithyoutubeneventoeidsvollwtcitichernigovernmentoyonow" + + "tfbxoschulewuozuwwwiwatsukiyonowruzhgorodeowzmiuwajimaxn--45brj9" + + "civilaviationxn--45q11civilisationxn--4gbriminingxn--4it168dxn--" + + "4it797konyveloftrentino-sudtirolxn--4pvxs4allxn--54b7fta0ccivili" + + "zationxn--55qw42gxn--55qx5dxn--5js045dxn--5rtp49civilwarmanageme" + + "ntoyosatoyakokonoexn--5rtq34kooris-an-anarchistoricalsocietyxn--" + + "5su34j936bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986" + + "b3xlxn--7t0a264claimsarlucaniaxn--80adxhksortlandxn--80ao21axn--" + + "80aqecdr1axn--80asehdbarreauctionflfanfshostrowiecasertaipeiheij" + + "iiyamanouchikuhokuryugasakitaurayasudaukraanghkeymachineustarhub" + + "alsanagochihayaakasakawaharanzanpachigasakicks-assedicasadelamon" + + "edatingjemnes3-ap-southeast-2xn--80aswgxn--80audnedalnxn--8ltr62" + + "kopervikhmelnytskyivaolbia-tempio-olbiatempioolbialystokkepnogat" + + 
"aijis-an-actresshintokushimaxn--8pvr4uxn--8y0a063axn--90a3academ" + + "y-firewall-gatewayxn--90aishobaraomoriguchiharahkkeravjuedisches" + + "apeakebayernrtromsakakinokiaxn--90azhytomyrxn--9dbhblg6dietcimdb" + + "arrel-of-knowledgeologyonagoyaurskog-holandroverhalla-speziaerop" + + "ortalaheadjudaicaaarborteaches-yogasawaracingroks-theatree164xn-" + + "-9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aroport-byandexn--3d" + + "s443gxn--asky-iraxn--aurskog-hland-jnbarrell-of-knowledgeometre-" + + "experts-comptables3-us-west-1xn--avery-yuasakuhokkaidoomdnshome-" + + "webservercellikes-piedmontblancomeeresorumincommbankmpspbarclayc" + + "ards3-us-east-2xn--b-5gaxn--b4w605ferdxn--bck1b9a5dre4cldmailucc" + + "apitalonewportlligatoyotaris-a-gurulsandoyxn--bdddj-mrabdxn--bea" + + "ralvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7ax" + + "n--bidr-5nachikatsuuraxn--bievt-0qa2xn--bjarky-fyaotsurreyxn--bj" + + "ddar-ptamayufuettertdasnetzxn--blt-elabourxn--bmlo-graingerxn--b" + + "od-2naroyxn--brnny-wuaccident-investigation-aptibleaseating-orga" + + "nicbcn-north-1xn--brnnysund-m8accident-prevention-webhopenairbus" + + "antiquest-a-la-maisondre-landebudapest-a-la-masionionjukudoyamag" + + "entositelekommunikationthewifiat-band-campaniaxn--brum-voagatrom" + + "sojampagefrontapphotographysioxn--btsfjord-9zaxn--c1avgxn--c2br7" + + "gxn--c3s14mintelligencexn--cck2b3barsyonlinewhampshirebungoonord" + + "-odalazioceanographics3-us-west-2xn--cg4bkis-with-thebandovre-ei" + + "kerxn--ciqpnxn--clchc0ea0b2g2a9gcdn77-sslattumisakis-leetrentino" + + "-s-tirollagrigentomologyeongbukharkivgucciprianiigataishinomakim" + + "obetsuliguriaxn--comunicaes-v6a2oxn--correios-e-telecomunicaes-g" + + "hc29axn--czr694bashkiriaustevollarvikarasjohkamiminers3-ca-centr" + + "al-1xn--czrs0trusteexn--czru2dxn--czrw28basilicataniaustinnatura" + + "lsciencesnaturelles3-eu-central-1xn--d1acj3basketballfinanzgorau" + + "straliaisondriodejaneirochesterepbodynathomebuiltatarantottoribe" + + 
"staddnskingjerdrumckinseyokosukanzakiwienaturbruksgymnaturhistor" + + "isches3-eu-west-1xn--d1alfaromeoxn--d1atrvarggatroandinosaureise" + + "nxn--d5qv7z876clickasumigaurawa-mazowszextraspacekitagatajirissa" + + "gamiharaxn--davvenjrga-y4axn--djrs72d6uyxn--djty4koryokamikawane" + + "honbetsurutaharaxn--dnna-grajewolterskluwerxn--drbak-wuaxn--dyry" + + "-iraxn--e1a4clinichernihivanovodkagoshimalvikashiwaraxn--eckvdtc" + + "9dxn--efvn9southcarolinazawaxn--efvy88hair-surveillancexn--ehqz5" + + "6nxn--elqq16hakatanoshiroomuraxn--estv75gxn--eveni-0qa01gaxn--f6" + + "qx53axn--fct429kosaigawaxn--fhbeiarnxn--finny-yuaxn--fiq228c5hso" + + "uthwestfalenxn--fiq64batodayonaguniversityoriikariyaltakasakiyok" + + "awaraustrheimatunduhrennesoyokoteastcoastaldefencebinagisochildr" + + "ensgardenatuurwetenschappenaumburgjerstadotsuruokakegawaetnagaha" + + "maroygardenebakkeshibechambagriculturennebudejjudygarlandd-dnsfo" + + "r-better-thanawawdev-myqnapcloudcontrolapplinzi234xn--fiqs8sowax" + + "n--fiqz9spjelkavikomvuxn--2m4a15exn--fjord-lraxn--fjq720axn--fl-" + + "ziaxn--flor-jraxn--flw351exn--fpcrj9c3dxn--frde-grandrapidspread" + + "bettingxn--frna-woaraisaijotrysiljanxn--frya-hraxn--fzc2c9e2clin" + + "iquenoharaxn--fzys8d69uvgmailxn--g2xx48clintonoshoesarpsborgrond" + + "arxn--gckr3f0fedorapeopleirfjordxn--gecrj9clothingrongaxn--ggavi" + + "ika-8ya47hakodatexn--gildeskl-g0axn--givuotna-8yasakaiminatoyone" + + "zawaxn--gjvik-wuaxn--gk3at1exn--gls-elacaixaxn--gmq050isleofmand" + + "alxn--gmqw5axn--h-2failxn--h1aeghakonexn--h2brj9cnsarufutsunomiy" + + "awakasaikaitakoelnxn--h3cuzk1digitalxn--hbmer-xqaxn--hcesuolo-7y" + + "a35batsfjordivtasvuodnakaiwamizawauthordalandroiddnss3-eu-west-2" + + "xn--hery-iraxn--hgebostad-g3axn--hmmrfeasta-s4acctulangevagrarbo" + + "retumbriaxn--hnefoss-q1axn--hobl-iraxn--holtlen-hxaxn--hpmir-xqa" + + "xn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b6b1a6a2exn--imr" + + "513nxn--indery-fyasugissmarterthanyouxn--io0a7iwchoshibuyachiyod" + + 
"avvenjargapartmentsardiniaxn--j1aefedoraprojectrani-andria-barle" + + "tta-trani-andriaxn--j1amhakubaghdadxn--j6w193gxn--jlq61u9w7bauha" + + "usposts-and-telecommunicationsncfdivttasvuotnakamagayahababyklec" + + "lercasinordre-landiyoshiokaracoldwarmiamihamadautomotivecoalipay" + + "okozebinorfolkebibleikangereportateshinanomachimkentateyamagroce" + + "rybnikahokutobamaintenancebetsukubank12xn--jlster-byasuokanraxn-" + + "-jrpeland-54axn--jvr189misasaguris-lostre-toteneis-an-actorxn--k" + + "7yn95exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kfjord-iuaxn--kl" + + "bu-woaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--3e0b707exn--ko" + + "luokta-7ya57hakuis-a-photographerokuappasadenamsosnowiechonanbui" + + "lderschmidtre-gauldalottexn--kprw13dxn--kpry57dxn--kpu716fermoda" + + "lenxn--kput3ixn--krager-gyatomitamamuraxn--kranghke-b0axn--krdsh" + + "erad-m8axn--krehamn-dxaxn--krjohka-hwab49jeonnamerikawauexn--ksn" + + "es-uuaxn--kvfjord-nxaxn--kvitsy-fyatsukanumazuryxn--kvnangen-k0a" + + "xn--l-1fairwindspydebergxn--l1accentureklamborghiniizaxn--lahead" + + "ju-7yatsushiroxn--langevg-jxaxn--lcvr32dxn--ldingen-q1axn--leaga" + + "viika-52bbcateringebugattipschlesisches3-website-ap-northeast-1x" + + "n--lesund-huaxn--lgbbat1ad8jetztrentino-sud-tirolxn--lgrd-poacnt" + + "oyotomiyazakis-a-hard-workerxn--lhppi-xqaxn--linds-pramericanart" + + "unesolutionsokndalxn--lns-qlansrlxn--loabt-0qaxn--lrdal-sraxn--l" + + "renskog-54axn--lt-liacolonialwilliamsburgrossetouchijiwadell-ogl" + + "iastraderxn--lten-granexn--lury-iraxn--m3ch0j3axn--mely-iraxn--m" + + "erker-kuaxn--mgb2ddesrtrentoyokawaxn--mgb9awbferraraxn--mgba3a3e" + + "jtunkongsvingerxn--mgba3a4f16axn--mgba3a4franamizuholdingsmilelx" + + "n--mgba7c0bbn0axn--mgbaakc7dvferrarittogoldpoint2thisamitsukexn-" + + "-mgbaam7a8hakusandiegoodyearxn--mgbab2bdxn--mgbai9a5eva00bbtatto" + + "olsztynsettlers3-website-ap-southeast-1xn--mgbai9azgqp6jevnakers" + + "huscountryestateofdelawarezzoologyxn--mgbayh7gpagespeedmobilizer" + + 
"oxn--mgbb9fbpobanazawaxn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzd" + + "oxn--mgberp4a5d4a87gxn--mgberp4a5d4arxn--mgbi4ecexposedxn--mgbpl" + + "2fhskodjejuegoshikiminokamoenairportland-4-salernoboribetsucksrv" + + "areserveblogspotrevisohughesolarssonxn--mgbqly7c0a67fbcoloradopl" + + "ateaudioxn--mgbqly7cvafredrikstadtvstordalxn--mgbt3dhdxn--mgbtf8" + + "flatangerxn--mgbtx2bbvacationswatch-and-clockerhcloudns3-website" + + "-ap-southeast-2xn--mgbx4cd0abbotturystykannamifunexn--mix082ferr" + + "eroticanonoichinomiyakexn--mix891fetsundxn--mjndalen-64axn--mk0a" + + "xindustriesteambulancexn--mk1bu44columbusheyxn--mkru45ixn--mlatv" + + "uopmi-s4axn--mli-tlanxesstorehabmerxn--mlselv-iuaxn--moreke-juax" + + "n--mori-qsakuragawaxn--mosjen-eyawaraxn--mot-tlapyatigorskypexn-" + + "-mre-og-romsdal-qqbentleyukinfinitintuitaxihuanhlfanhs3-website-" + + "eu-west-1xn--msy-ula0haldenxn--mtta-vrjjat-k7afamilycompanycommu" + + "nitysfjordyndns-wikinkobayashikaoirminamibosogndalucernexn--muos" + + "t-0qaxn--mxtq1misawaxn--ngbc5azdxn--ngbe9e0axn--ngbrxn--3oq18vl8" + + "pn36axn--nit225kosakaerodromegallupinbarefootballooningjovikarat" + + "suginamikatagamiharuconnectatsunobiraugustowadaegubs3-ap-southea" + + "st-1xn--nmesjevuemie-tcbalestrandabergamoarekexn--nnx388axn--nod" + + "exn--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery" + + "-byaeservecounterstrikexn--nvuotna-hwaxn--nyqy26axn--o1achattano" + + "oganordreisa-geekoseis-an-artisteinkjerusalemrxn--o3cw4halsaintl" + + "ouis-a-anarchistoiredumbrellanbibaidarxn--o3cyx2axn--od0algxn--o" + + "d0aq3beppublishproxyzgorzeleccolognewyorkshirecipesaro-urbino-pe" + + "sarourbinopesaromasvuotnaharimamurogawatches3-website-sa-east-1x" + + "n--ogbpf8flekkefjordxn--oppegrd-ixaxn--ostery-fyawatahamaxn--osy" + + "ro-wuaxn--p1acfgujolsterxn--p1aixn--pbt977comobilyxn--pgbs0dhlxn" + + "--porsgu-sta26fhvalerxn--pssu33lxn--pssy2uxn--q9jyb4comparemarke" + + "rryhotelsasayamaxn--qcka1pmcdonaldstorfjordxn--qqqt11misconfused" + + 
"xn--qxamuneuestorjelenia-goraxn--rady-iraxn--rdal-poaxn--rde-ula" + + "quilancashireggiocalabriaxn--rdy-0nabarixn--rennesy-v1axn--rhkke" + + "rvju-01aflakstadaokagakibichuoxn--rholt-mragowoodsidexn--rhqv96g" + + "xn--rht27zxn--rht3dxn--rht61exn--risa-5narusawaxn--risr-iraxn--r" + + "land-uuaxn--rlingen-mxaxn--rmskog-byaxn--rny31hammarfeastafricap" + + "etownnews-stagingxn--rovu88bernuorockartuzyukuhashimoichinosekig" + + "aharautoscanadaejeonbukarasjokarasuyamarylhurstjordalshalsenaust" + + "dalavagiskebizenakaniikawatanaguramusementarnobrzegyptianaturalh" + + "istorymuseumcenterepaircraftarumizusawabogadocscbgdyniabkhaziama" + + "llamagazineat-url-o-g-i-nativeamericanantiques3-ap-northeast-1ka" + + "ppchizippodhaleangaviikadenadexetereit3l3p0rtargets-itargiving12" + + "000emmafanconagawakayamadridvagsoyericssonyoursidealerimo-i-rana" + + "amesjevuemielno-ip6xn--rros-granvindafjordxn--rskog-uuaxn--rst-0" + + "narutokyotangovtuscanyxn--rsta-francaiseharaxn--ryken-vuaxn--ryr" + + "vik-byaxn--s-1faithruherecreationxn--s9brj9compute-1xn--sandness" + + "jen-ogbizxn--sandy-yuaxn--seral-lraxn--ses554gxn--sgne-gratangen" + + "xn--skierv-utazaskoyabearalvahkihokumakogengerdalcestpetersburgx" + + "n--skjervy-v1axn--skjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5" + + "narviikamisunagawaxn--slt-elabbvieeexn--smla-hraxn--smna-gratis-" + + "a-bulls-fanxn--snase-nraxn--sndre-land-0cbremangerxn--snes-poaxn" + + "--snsa-roaxn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-" + + "varanger-ggbeskidyn-o-saurlandes3-website-us-east-1xn--srfold-by" + + "axn--srreisa-q1axn--srum-grazxn--stfold-9xaxn--stjrdal-s1axn--st" + + "jrdalshalsen-sqbestbuyshouses3-website-us-west-1xn--stre-toten-z" + + "cbstreamsterdamnserverbaniaxn--t60b56axn--tckweatherchannelxn--t" + + "iq49xqyjewelryxn--tjme-hraxn--tn0agrinet-freakstudioxn--tnsberg-" + + "q1axn--tor131oxn--trany-yuaxn--trgstad-r1axn--trna-woaxn--troms-" + + "zuaxn--tysvr-vraxn--uc0atvaroyxn--uc0ay4axn--uist22hamurakamigor" + + 
"is-a-playerxn--uisz3gxn--unjrga-rtaobaokinawashirosatochiokinosh" + + "imalatvuopmiasakuchinotsuchiurakawakuyabukievenestudyndns-at-hom" + + "edepotenzamamicrolightingxn--unup4yxn--uuwu58axn--vads-jraxn--va" + + "rd-jraxn--vegrshei-c0axn--vermgensberater-ctbetainaboxfusejnyuri" + + "honjoyentgoryusuharaveroykenglandds3-external-1xn--vermgensberat" + + "ung-pwbieigersundnpalaceu-3utilitiesquare7xn--vestvgy-ixa6oxn--v" + + "g-yiabcgxn--vgan-qoaxn--vgsy-qoa0jewishartgalleryxn--vgu402compu" + + "terhistoryofscience-fictionxn--vhquvbargainstitutelevisionayorov" + + "nobninskarelianceu-2xn--vler-qoaxn--vre-eiker-k8axn--vrggt-xqadx" + + "n--vry-yla5gxn--vuq861bielawalmartjeldsundrangedalillyusuisserve" + + "exchangevents3-website-us-west-2xn--w4r85el8fhu5dnraxn--w4rs40lx" + + "n--wcvs22dxn--wgbh1comsecuritytacticsaseboknowsitallukowhoswhokk" + + "sundyndns-workisboringroundhandlingroznyxn--wgbl6axn--xhq521biel" + + "laakesvuemielecceverbankarlsoyuufcfanikinuyamashinashikitchenikk" + + "oebenhavnikolaevennodessagaeroclubmedecincinnationwidealstahauge" + + "sunderseaportsinfolldalabamagasakishimabarackmazerbaijan-mayendo" + + "ftheinternetflixilovecollegefantasyleaguernseyuzawavocatanzarowe" + + "ddingjesdalavangenaval-d-aosta-valleyolasitehimejibigawaskvolloa" + + "bathsbc66xn--xkc2al3hye2axn--xkc2dl3a5ee0hangglidingxn--y9a3aqua" + + "riumishimatsunoxn--yer-znarvikosherbrookegawaxn--yfro4i67oxn--yg" + + "arden-p1axn--ygbi2ammxn--3pxu8konsulatrobeepilepsydneyxn--ystre-" + + "slidre-ujbieszczadygeyachimataikikonaioirasebastopologyeonggieht" + + "avuoatnagaivuotnagaokakyotambabia-goracleaningatlantabuseekloges" + + "t-mon-blogueurovisionikonantankarmoyxn--zbx025dxn--zf0ao64axn--z" + + "f0avxn--42c2d9axn--zfr164bievatmallorcadaquesakurainvestmentsaky" + + "otanabellunorddalimanowarudavoues3-fips-us-gov-west-1xperiaxz" // nodes is the list of nodes. 
Each node is represented as a uint32, which // encodes the node's children, wildcard bit and node type (as an index into // the children array), ICANN bit and text. // -// In the //-comment after each node's data, the nodes indexes of the children -// are formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The +// If the table was generated with the -comments flag, there is a //-comment +// after each node's data. In it is the nodes-array indexes of the children, +// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The // nodeType is printed as + for normal, ! for exception, and o for parent-only // nodes that have children but don't match a domain label in their own right. // An I denotes an ICANN domain. @@ -466,7869 +489,8268 @@ const text = "bieszczadygeyachimataipeigersundupontarioddabievatmallorcadaques" // [15 bits] text index // [ 6 bits] text length var nodes = [...]uint32{ - 0x0038da43, // n0x0000 c0x0000 (---------------) + I aaa - 0x0034e6c4, // n0x0001 c0x0000 (---------------) + I aarp - 0x00267446, // n0x0002 c0x0000 (---------------) + I abarth - 0x002691c3, // n0x0003 c0x0000 (---------------) + I abb - 0x002691c6, // n0x0004 c0x0000 (---------------) + I abbott - 0x00364906, // n0x0005 c0x0000 (---------------) + I abbvie - 0x00399543, // n0x0006 c0x0000 (---------------) + I abc - 0x00327d04, // n0x0007 c0x0000 (---------------) + I able - 0x00314907, // n0x0008 c0x0000 (---------------) + I abogado - 0x00267088, // n0x0009 c0x0000 (---------------) + I abudhabi - 0x01a00342, // n0x000a c0x0006 (n0x0607-n0x060d) + I ac - 0x00308b87, // n0x000b c0x0000 (---------------) + I academy - 0x00352bc9, // n0x000c c0x0000 (---------------) + I accenture - 0x002ac44a, // n0x000d c0x0000 (---------------) + I accountant - 0x002ac44b, // n0x000e c0x0000 (---------------) + I accountants - 0x002358c3, // n0x000f c0x0000 (---------------) + I aco - 0x003466c6, // n0x0010 c0x0000 (---------------) + I active - 0x0023af45, // 
n0x0011 c0x0000 (---------------) + I actor - 0x01e001c2, // n0x0012 c0x0007 (n0x060d-n0x060e) + I ad - 0x00211344, // n0x0013 c0x0000 (---------------) + I adac - 0x002675c3, // n0x0014 c0x0000 (---------------) + I ads - 0x002a1a05, // n0x0015 c0x0000 (---------------) + I adult - 0x022075c2, // n0x0016 c0x0008 (n0x060e-n0x0616) + I ae - 0x00254583, // n0x0017 c0x0000 (---------------) + I aeg - 0x02632584, // n0x0018 c0x0009 (n0x0616-n0x066d) + I aero - 0x0026f685, // n0x0019 c0x0000 (---------------) + I aetna - 0x02a074c2, // n0x001a c0x000a (n0x066d-n0x0672) + I af - 0x0036be0e, // n0x001b c0x0000 (---------------) + I afamilycompany - 0x00250ec3, // n0x001c c0x0000 (---------------) + I afl - 0x003727c6, // n0x001d c0x0000 (---------------) + I africa - 0x003727cb, // n0x001e c0x0000 (---------------) + I africamagic - 0x02e01b42, // n0x001f c0x000b (n0x0672-n0x0677) + I ag - 0x00282dc7, // n0x0020 c0x0000 (---------------) + I agakhan - 0x0023dd06, // n0x0021 c0x0000 (---------------) + I agency - 0x03200502, // n0x0022 c0x000c (n0x0677-n0x067b) + I ai - 0x002140c3, // n0x0023 c0x0000 (---------------) + I aig - 0x002140c4, // n0x0024 c0x0000 (---------------) + I aigo - 0x002b8886, // n0x0025 c0x0000 (---------------) + I airbus - 0x0033a648, // n0x0026 c0x0000 (---------------) + I airforce - 0x002850c6, // n0x0027 c0x0000 (---------------) + I airtel - 0x002d4804, // n0x0028 c0x0000 (---------------) + I akdn - 0x03600cc2, // n0x0029 c0x000d (n0x067b-n0x0682) + I al - 0x0032cac9, // n0x002a c0x0000 (---------------) + I alfaromeo - 0x00328047, // n0x002b c0x0000 (---------------) + I alibaba - 0x002beb86, // n0x002c c0x0000 (---------------) + I alipay - 0x00340b09, // n0x002d c0x0000 (---------------) + I allfinanz - 0x00203808, // n0x002e c0x0000 (---------------) + I allstate - 0x00396044, // n0x002f c0x0000 (---------------) + I ally - 0x0021ec86, // n0x0030 c0x0000 (---------------) + I alsace - 0x00206946, // n0x0031 c0x0000 (---------------) + I 
alstom - 0x03a01ec2, // n0x0032 c0x000e (n0x0682-n0x0683) + I am - 0x00246e4f, // n0x0033 c0x0000 (---------------) + I americanexpress - 0x0038bc4e, // n0x0034 c0x0000 (---------------) + I americanfamily - 0x00207d44, // n0x0035 c0x0000 (---------------) + I amex - 0x00365f85, // n0x0036 c0x0000 (---------------) + I amfam - 0x002690c5, // n0x0037 c0x0000 (---------------) + I amica - 0x002b4cc9, // n0x0038 c0x0000 (---------------) + I amsterdam - 0x00243109, // n0x0039 c0x0000 (---------------) + I analytics - 0x00321907, // n0x003a c0x0000 (---------------) + I android - 0x00351846, // n0x003b c0x0000 (---------------) + I anquan - 0x002c5b43, // n0x003c c0x0000 (---------------) + I anz - 0x03e02e82, // n0x003d c0x000f (n0x0683-n0x0689) + I ao - 0x0026b183, // n0x003e c0x0000 (---------------) + I aol - 0x0023ea0a, // n0x003f c0x0000 (---------------) + I apartments - 0x00212343, // n0x0040 c0x0000 (---------------) + I app - 0x00271185, // n0x0041 c0x0000 (---------------) + I apple - 0x00200ec2, // n0x0042 c0x0000 (---------------) + I aq - 0x00286c89, // n0x0043 c0x0000 (---------------) + I aquarelle - 0x04200942, // n0x0044 c0x0010 (n0x0689-n0x0692) + I ar - 0x00205844, // n0x0045 c0x0000 (---------------) + I arab - 0x0038d586, // n0x0046 c0x0000 (---------------) + I aramco - 0x002b1a85, // n0x0047 c0x0000 (---------------) + I archi - 0x00349fc4, // n0x0048 c0x0000 (---------------) + I army - 0x04a29144, // n0x0049 c0x0012 (n0x0693-n0x0699) + I arpa - 0x0023a104, // n0x004a c0x0000 (---------------) + I arte - 0x04e03b02, // n0x004b c0x0013 (n0x0699-n0x069a) + I as - 0x0034e404, // n0x004c c0x0000 (---------------) + I asda - 0x0030de04, // n0x004d c0x0000 (---------------) + I asia - 0x00338e0a, // n0x004e c0x0000 (---------------) + I associates - 0x05200482, // n0x004f c0x0014 (n0x069a-n0x06a1) + I at - 0x002535c7, // n0x0050 c0x0000 (---------------) + I athleta - 0x0030b808, // n0x0051 c0x0000 (---------------) + I attorney - 0x05a06142, // 
n0x0052 c0x0016 (n0x06a2-n0x06b4) + I au - 0x003211c7, // n0x0053 c0x0000 (---------------) + I auction - 0x00234404, // n0x0054 c0x0000 (---------------) + I audi - 0x002b9fc7, // n0x0055 c0x0000 (---------------) + I audible - 0x00234405, // n0x0056 c0x0000 (---------------) + I audio - 0x0035e6c7, // n0x0057 c0x0000 (---------------) + I auspost - 0x003216c6, // n0x0058 c0x0000 (---------------) + I author - 0x00263084, // n0x0059 c0x0000 (---------------) + I auto - 0x00329185, // n0x005a c0x0000 (---------------) + I autos - 0x002d76c7, // n0x005b c0x0000 (---------------) + I avianca - 0x06a01bc2, // n0x005c c0x001a (n0x06c2-n0x06c3) + I aw - 0x002f4383, // n0x005d c0x0000 (---------------) + I aws - 0x0021db82, // n0x005e c0x0000 (---------------) + I ax - 0x0035f2c3, // n0x005f c0x0000 (---------------) + I axa - 0x06e06342, // n0x0060 c0x001b (n0x06c3-n0x06cf) + I az - 0x00280f05, // n0x0061 c0x0000 (---------------) + I azure - 0x07202e42, // n0x0062 c0x001c (n0x06cf-n0x06da) + I ba - 0x002d3844, // n0x0063 c0x0000 (---------------) + I baby - 0x0027ba05, // n0x0064 c0x0000 (---------------) + I baidu - 0x00207c87, // n0x0065 c0x0000 (---------------) + I banamex - 0x002e2cce, // n0x0066 c0x0000 (---------------) + I bananarepublic - 0x00203f04, // n0x0067 c0x0000 (---------------) + I band - 0x00203a04, // n0x0068 c0x0000 (---------------) + I bank - 0x00205803, // n0x0069 c0x0000 (---------------) + I bar - 0x002c3889, // n0x006a c0x0000 (---------------) + I barcelona - 0x002dd5cb, // n0x006b c0x0000 (---------------) + I barclaycard - 0x00306688, // n0x006c c0x0000 (---------------) + I barclays - 0x00309948, // n0x006d c0x0000 (---------------) + I barefoot - 0x00317b08, // n0x006e c0x0000 (---------------) + I bargains - 0x0022b948, // n0x006f c0x0000 (---------------) + I baseball - 0x0034094a, // n0x0070 c0x0000 (---------------) + I basketball - 0x0035e5c7, // n0x0071 c0x0000 (---------------) + I bauhaus - 0x00378d46, // n0x0072 c0x0000 
(---------------) + I bayern - 0x07669202, // n0x0073 c0x001d (n0x06da-n0x06e4) + I bb - 0x003643c3, // n0x0074 c0x0000 (---------------) + I bbc - 0x0036a4c3, // n0x0075 c0x0000 (---------------) + I bbt - 0x00373304, // n0x0076 c0x0000 (---------------) + I bbva - 0x00399583, // n0x0077 c0x0000 (---------------) + I bcg - 0x00233703, // n0x0078 c0x0000 (---------------) + I bcn - 0x01719882, // n0x0079 c0x0005 (---------------)* o I bd - 0x07a03c42, // n0x007a c0x001e (n0x06e4-n0x06e6) + I be - 0x0021a3c5, // n0x007b c0x0000 (---------------) + I beats - 0x0024d506, // n0x007c c0x0000 (---------------) + I beauty - 0x002cf244, // n0x007d c0x0000 (---------------) + I beer - 0x0037eec7, // n0x007e c0x0000 (---------------) + I bentley - 0x00356486, // n0x007f c0x0000 (---------------) + I berlin - 0x00227e44, // n0x0080 c0x0000 (---------------) + I best - 0x003986c7, // n0x0081 c0x0000 (---------------) + I bestbuy - 0x00216483, // n0x0082 c0x0000 (---------------) + I bet - 0x07f5a6c2, // n0x0083 c0x001f (n0x06e6-n0x06e7) + I bf - 0x08303f42, // n0x0084 c0x0020 (n0x06e7-n0x070c) + I bg - 0x087117c2, // n0x0085 c0x0021 (n0x070c-n0x0711) + I bh - 0x0039f706, // n0x0086 c0x0000 (---------------) + I bharti - 0x08a00002, // n0x0087 c0x0022 (n0x0711-n0x0716) + I bi - 0x00355ec5, // n0x0088 c0x0000 (---------------) + I bible - 0x0031adc3, // n0x0089 c0x0000 (---------------) + I bid - 0x00202a04, // n0x008a c0x0000 (---------------) + I bike - 0x002dd404, // n0x008b c0x0000 (---------------) + I bing - 0x002dd405, // n0x008c c0x0000 (---------------) + I bingo - 0x00205903, // n0x008d c0x0000 (---------------) + I bio - 0x08f2bd03, // n0x008e c0x0023 (n0x0716-n0x071e) + I biz - 0x09208fc2, // n0x008f c0x0024 (n0x071e-n0x0722) + I bj - 0x0028ab45, // n0x0090 c0x0000 (---------------) + I black - 0x0028ab4b, // n0x0091 c0x0000 (---------------) + I blackfriday - 0x00257786, // n0x0092 c0x0000 (---------------) + I blanco - 0x0020afcb, // n0x0093 c0x0000 
(---------------) + I blockbuster - 0x0022d7c4, // n0x0094 c0x0000 (---------------) + I blog - 0x0020b709, // n0x0095 c0x0000 (---------------) + I bloomberg - 0x0020c804, // n0x0096 c0x0000 (---------------) + I blue - 0x0960d882, // n0x0097 c0x0025 (n0x0722-n0x0727) + I bm - 0x0020dec3, // n0x0098 c0x0000 (---------------) + I bms - 0x0020f143, // n0x0099 c0x0000 (---------------) + I bmw - 0x01610b82, // n0x009a c0x0005 (---------------)* o I bn - 0x00247383, // n0x009b c0x0000 (---------------) + I bnl - 0x00210b8a, // n0x009c c0x0000 (---------------) + I bnpparibas - 0x09a0cb02, // n0x009d c0x0026 (n0x0727-n0x0730) + I bo - 0x002115c5, // n0x009e c0x0000 (---------------) + I boats - 0x00290d0a, // n0x009f c0x0000 (---------------) + I boehringer - 0x0022da44, // n0x00a0 c0x0000 (---------------) + I bofa - 0x00212143, // n0x00a1 c0x0000 (---------------) + I bom - 0x002124c4, // n0x00a2 c0x0000 (---------------) + I bond - 0x00215283, // n0x00a3 c0x0000 (---------------) + I boo - 0x00215284, // n0x00a4 c0x0000 (---------------) + I book - 0x00215287, // n0x00a5 c0x0000 (---------------) + I booking - 0x00216e05, // n0x00a6 c0x0000 (---------------) + I boots - 0x00217ec5, // n0x00a7 c0x0000 (---------------) + I bosch - 0x00218746, // n0x00a8 c0x0000 (---------------) + I bostik - 0x00219146, // n0x00a9 c0x0000 (---------------) + I boston - 0x0021a6c3, // n0x00aa c0x0000 (---------------) + I bot - 0x0021b588, // n0x00ab c0x0000 (---------------) + I boutique - 0x00218503, // n0x00ac c0x0000 (---------------) + I box - 0x09e1bfc2, // n0x00ad c0x0027 (n0x0730-n0x0776) + I br - 0x0021bfc8, // n0x00ae c0x0000 (---------------) + I bradesco - 0x00221e4b, // n0x00af c0x0000 (---------------) + I bridgestone - 0x002209c8, // n0x00b0 c0x0000 (---------------) + I broadway - 0x00220ec6, // n0x00b1 c0x0000 (---------------) + I broker - 0x002222c7, // n0x00b2 c0x0000 (---------------) + I brother - 0x002255c8, // n0x00b3 c0x0000 (---------------) + I brussels - 
0x0a654682, // n0x00b4 c0x0029 (n0x0777-n0x077c) + I bs - 0x0aa206c2, // n0x00b5 c0x002a (n0x077c-n0x0781) + I bt - 0x00270308, // n0x00b6 c0x0000 (---------------) + I budapest - 0x002e7d87, // n0x00b7 c0x0000 (---------------) + I bugatti - 0x00240d45, // n0x00b8 c0x0000 (---------------) + I build - 0x00240d48, // n0x00b9 c0x0000 (---------------) + I builders - 0x002b9188, // n0x00ba c0x0000 (---------------) + I business - 0x002ffc03, // n0x00bb c0x0000 (---------------) + I buy - 0x0022c5c4, // n0x00bc c0x0000 (---------------) + I buzz - 0x00364982, // n0x00bd c0x0000 (---------------) + I bv - 0x0ae2d002, // n0x00be c0x002b (n0x0781-n0x0783) + I bw - 0x0b20d242, // n0x00bf c0x002c (n0x0783-n0x0787) + I by - 0x0ba2dbc2, // n0x00c0 c0x002e (n0x0788-n0x078e) + I bz - 0x0022dbc3, // n0x00c1 c0x0000 (---------------) + I bzh - 0x0be00e02, // n0x00c2 c0x002f (n0x078e-n0x079f) + I ca - 0x00269183, // n0x00c3 c0x0000 (---------------) + I cab - 0x00318004, // n0x00c4 c0x0000 (---------------) + I cafe - 0x0021a843, // n0x00c5 c0x0000 (---------------) + I cal - 0x00396004, // n0x00c6 c0x0000 (---------------) + I call - 0x003140cb, // n0x00c7 c0x0000 (---------------) + I calvinklein - 0x0032dd06, // n0x00c8 c0x0000 (---------------) + I camera - 0x00241bc4, // n0x00c9 c0x0000 (---------------) + I camp - 0x0029e4ce, // n0x00ca c0x0000 (---------------) + I cancerresearch - 0x00261685, // n0x00cb c0x0000 (---------------) + I canon - 0x002d7808, // n0x00cc c0x0000 (---------------) + I capetown - 0x002ed5c7, // n0x00cd c0x0000 (---------------) + I capital - 0x002ed5ca, // n0x00ce c0x0000 (---------------) + I capitalone - 0x0020a143, // n0x00cf c0x0000 (---------------) + I car - 0x00237ac7, // n0x00d0 c0x0000 (---------------) + I caravan - 0x002dd785, // n0x00d1 c0x0000 (---------------) + I cards - 0x002ad904, // n0x00d2 c0x0000 (---------------) + I care - 0x002ad906, // n0x00d3 c0x0000 (---------------) + I career - 0x002ad907, // n0x00d4 c0x0000 
(---------------) + I careers - 0x002bda44, // n0x00d5 c0x0000 (---------------) + I cars - 0x003901c7, // n0x00d6 c0x0000 (---------------) + I cartier - 0x002139c4, // n0x00d7 c0x0000 (---------------) + I casa - 0x00216844, // n0x00d8 c0x0000 (---------------) + I case - 0x00216846, // n0x00d9 c0x0000 (---------------) + I caseih - 0x002cdbc4, // n0x00da c0x0000 (---------------) + I cash - 0x00355bc6, // n0x00db c0x0000 (---------------) + I casino - 0x0020c643, // n0x00dc c0x0000 (---------------) + I cat - 0x003785c8, // n0x00dd c0x0000 (---------------) + I catering - 0x0023e048, // n0x00de c0x0000 (---------------) + I catholic - 0x00249543, // n0x00df c0x0000 (---------------) + I cba - 0x00247343, // n0x00e0 c0x0000 (---------------) + I cbn - 0x00388544, // n0x00e1 c0x0000 (---------------) + I cbre - 0x00390783, // n0x00e2 c0x0000 (---------------) + I cbs - 0x0c22c882, // n0x00e3 c0x0030 (n0x079f-n0x07a3) + I cc - 0x0c650342, // n0x00e4 c0x0031 (n0x07a3-n0x07a4) + I cd - 0x00208f43, // n0x00e5 c0x0000 (---------------) + I ceb - 0x00206f06, // n0x00e6 c0x0000 (---------------) + I center - 0x00279103, // n0x00e7 c0x0000 (---------------) + I ceo - 0x002e8404, // n0x00e8 c0x0000 (---------------) + I cern - 0x0ca135c2, // n0x00e9 c0x0032 (n0x07a4-n0x07a5) + I cf - 0x002135c3, // n0x00ea c0x0000 (---------------) + I cfa - 0x00375783, // n0x00eb c0x0000 (---------------) + I cfd - 0x0021ae02, // n0x00ec c0x0000 (---------------) + I cg - 0x0ce00382, // n0x00ed c0x0033 (n0x07a5-n0x07a6) + I ch - 0x002bcfc6, // n0x00ee c0x0000 (---------------) + I chanel - 0x0023be47, // n0x00ef c0x0000 (---------------) + I channel - 0x00337905, // n0x00f0 c0x0000 (---------------) + I chase - 0x00219b04, // n0x00f1 c0x0000 (---------------) + I chat - 0x002837c5, // n0x00f2 c0x0000 (---------------) + I cheap - 0x00201447, // n0x00f3 c0x0000 (---------------) + I chintai - 0x002b7845, // n0x00f4 c0x0000 (---------------) + I chloe - 0x003a2589, // n0x00f5 c0x0000 
(---------------) + I christmas - 0x00302386, // n0x00f6 c0x0000 (---------------) + I chrome - 0x00303048, // n0x00f7 c0x0000 (---------------) + I chrysler - 0x00337806, // n0x00f8 c0x0000 (---------------) + I church - 0x0d204d82, // n0x00f9 c0x0034 (n0x07a6-n0x07b5) + I ci - 0x00287cc8, // n0x00fa c0x0000 (---------------) + I cipriani - 0x00339686, // n0x00fb c0x0000 (---------------) + I circle - 0x0038b205, // n0x00fc c0x0000 (---------------) + I cisco - 0x00352047, // n0x00fd c0x0000 (---------------) + I citadel - 0x00355ac4, // n0x00fe c0x0000 (---------------) + I citi - 0x00355ac5, // n0x00ff c0x0000 (---------------) + I citic - 0x00285284, // n0x0100 c0x0000 (---------------) + I city - 0x00285288, // n0x0101 c0x0000 (---------------) + I cityeats - 0x0d60a682, // n0x0102 c0x0035 (n0x07b5-n0x07b6)* o I ck - 0x0da17582, // n0x0103 c0x0036 (n0x07b6-n0x07bb) + I cl - 0x00374ec6, // n0x0104 c0x0000 (---------------) + I claims - 0x00224588, // n0x0105 c0x0000 (---------------) + I cleaning - 0x00375cc5, // n0x0106 c0x0000 (---------------) + I click - 0x00378486, // n0x0107 c0x0000 (---------------) + I clinic - 0x00383048, // n0x0108 c0x0000 (---------------) + I clinique - 0x0039bd88, // n0x0109 c0x0000 (---------------) + I clothing - 0x0022e645, // n0x010a c0x0000 (---------------) + I cloud - 0x00232684, // n0x010b c0x0000 (---------------) + I club - 0x00232687, // n0x010c c0x0000 (---------------) + I clubmed - 0x0de5b002, // n0x010d c0x0037 (n0x07bb-n0x07bf) + I cm - 0x0e21b802, // n0x010e c0x0038 (n0x07bf-n0x07ec) + I cn - 0x0fa0a442, // n0x010f c0x003e (n0x07f1-n0x07fe) + I co - 0x00341985, // n0x0110 c0x0000 (---------------) + I coach - 0x0029b6c5, // n0x0111 c0x0000 (---------------) + I codes - 0x0020a446, // n0x0112 c0x0000 (---------------) + I coffee - 0x0022c8c7, // n0x0113 c0x0000 (---------------) + I college - 0x002312c7, // n0x0114 c0x0000 (---------------) + I cologne - 0x10234803, // n0x0115 c0x0040 (n0x07ff-n0x08cf) + I com - 
0x002a6687, // n0x0116 c0x0000 (---------------) + I comcast - 0x002da808, // n0x0117 c0x0000 (---------------) + I commbank - 0x00234809, // n0x0118 c0x0000 (---------------) + I community - 0x0036bfc7, // n0x0119 c0x0000 (---------------) + I company - 0x00235b07, // n0x011a c0x0000 (---------------) + I compare - 0x00236508, // n0x011b c0x0000 (---------------) + I computer - 0x00236d06, // n0x011c c0x0000 (---------------) + I comsec - 0x002372c6, // n0x011d c0x0000 (---------------) + I condos - 0x00237fcc, // n0x011e c0x0000 (---------------) + I construction - 0x0023910a, // n0x011f c0x0000 (---------------) + I consulting - 0x002397c7, // n0x0120 c0x0000 (---------------) + I contact - 0x0023ae0b, // n0x0121 c0x0000 (---------------) + I contractors - 0x0023bc87, // n0x0122 c0x0000 (---------------) + I cooking - 0x0023bc8e, // n0x0123 c0x0000 (---------------) + I cookingchannel - 0x0023c784, // n0x0124 c0x0000 (---------------) + I cool - 0x0023d404, // n0x0125 c0x0000 (---------------) + I coop - 0x0023f207, // n0x0126 c0x0000 (---------------) + I corsica - 0x00339a07, // n0x0127 c0x0000 (---------------) + I country - 0x002424c6, // n0x0128 c0x0000 (---------------) + I coupon - 0x002424c7, // n0x0129 c0x0000 (---------------) + I coupons - 0x00242887, // n0x012a c0x0000 (---------------) + I courses - 0x11607442, // n0x012b c0x0045 (n0x08ed-n0x08f4) + I cr - 0x00243986, // n0x012c c0x0000 (---------------) + I credit - 0x0024398a, // n0x012d c0x0000 (---------------) + I creditcard - 0x00243c0b, // n0x012e c0x0000 (---------------) + I creditunion - 0x00244d07, // n0x012f c0x0000 (---------------) + I cricket - 0x002456c5, // n0x0130 c0x0000 (---------------) + I crown - 0x00245803, // n0x0131 c0x0000 (---------------) + I crs - 0x00246586, // n0x0132 c0x0000 (---------------) + I cruise - 0x00246587, // n0x0133 c0x0000 (---------------) + I cruises - 0x002432c3, // n0x0134 c0x0000 (---------------) + I csc - 0x11a17482, // n0x0135 c0x0046 
(n0x08f4-n0x08fa) + I cu - 0x00246aca, // n0x0136 c0x0000 (---------------) + I cuisinella - 0x11f53d02, // n0x0137 c0x0047 (n0x08fa-n0x08fb) + I cv - 0x122cb842, // n0x0138 c0x0048 (n0x08fb-n0x08ff) + I cw - 0x12648182, // n0x0139 c0x0049 (n0x08ff-n0x0901) + I cx - 0x12a3de02, // n0x013a c0x004a (n0x0901-n0x090e) o I cy - 0x00248ec5, // n0x013b c0x0000 (---------------) + I cymru - 0x00249344, // n0x013c c0x0000 (---------------) + I cyou - 0x13200142, // n0x013d c0x004c (n0x090f-n0x0910) + I cz - 0x0034e485, // n0x013e c0x0000 (---------------) + I dabur - 0x002a19c3, // n0x013f c0x0000 (---------------) + I dad - 0x00224005, // n0x0140 c0x0000 (---------------) + I dance - 0x0020aec4, // n0x0141 c0x0000 (---------------) + I date - 0x0020c906, // n0x0142 c0x0000 (---------------) + I dating - 0x00292786, // n0x0143 c0x0000 (---------------) + I datsun - 0x00261b83, // n0x0144 c0x0000 (---------------) + I day - 0x0023fa44, // n0x0145 c0x0000 (---------------) + I dclk - 0x0032a2c3, // n0x0146 c0x0000 (---------------) + I dds - 0x13607802, // n0x0147 c0x004d (n0x0910-n0x0918) + I de - 0x00207804, // n0x0148 c0x0000 (---------------) + I deal - 0x00232c06, // n0x0149 c0x0000 (---------------) + I dealer - 0x00207805, // n0x014a c0x0000 (---------------) + I deals - 0x0038ab86, // n0x014b c0x0000 (---------------) + I degree - 0x00352148, // n0x014c c0x0000 (---------------) + I delivery - 0x00258984, // n0x014d c0x0000 (---------------) + I dell - 0x00347808, // n0x014e c0x0000 (---------------) + I deloitte - 0x002dc105, // n0x014f c0x0000 (---------------) + I delta - 0x00224c48, // n0x0150 c0x0000 (---------------) + I democrat - 0x002a9706, // n0x0151 c0x0000 (---------------) + I dental - 0x002b4987, // n0x0152 c0x0000 (---------------) + I dentist - 0x00226284, // n0x0153 c0x0000 (---------------) + I desi - 0x00226286, // n0x0154 c0x0000 (---------------) + I design - 0x00376203, // n0x0155 c0x0000 (---------------) + I dev - 0x003775c3, // n0x0156 c0x0000 
(---------------) + I dhl - 0x002c6e88, // n0x0157 c0x0000 (---------------) + I diamonds - 0x00316044, // n0x0158 c0x0000 (---------------) + I diet - 0x0032b907, // n0x0159 c0x0000 (---------------) + I digital - 0x0024b886, // n0x015a c0x0000 (---------------) + I direct - 0x0024b889, // n0x015b c0x0000 (---------------) + I directory - 0x00227388, // n0x015c c0x0000 (---------------) + I discount - 0x00227f88, // n0x015d c0x0000 (---------------) + I discover - 0x0025b344, // n0x015e c0x0000 (---------------) + I dish - 0x0030c183, // n0x015f c0x0000 (---------------) + I diy - 0x0024be02, // n0x0160 c0x0000 (---------------) + I dj - 0x13a30d02, // n0x0161 c0x004e (n0x0918-n0x0919) + I dk - 0x13e04542, // n0x0162 c0x004f (n0x0919-n0x091e) + I dm - 0x00328683, // n0x0163 c0x0000 (---------------) + I dnp - 0x1420cb82, // n0x0164 c0x0050 (n0x091e-n0x0928) + I do - 0x00314a44, // n0x0165 c0x0000 (---------------) + I docs - 0x0035ef85, // n0x0166 c0x0000 (---------------) + I dodge - 0x00244b83, // n0x0167 c0x0000 (---------------) + I dog - 0x002370c4, // n0x0168 c0x0000 (---------------) + I doha - 0x0033cac7, // n0x0169 c0x0000 (---------------) + I domains - 0x0023cf06, // n0x016a c0x0000 (---------------) + I doosan - 0x0030fc43, // n0x016b c0x0000 (---------------) + I dot - 0x00361488, // n0x016c c0x0000 (---------------) + I download - 0x0038b445, // n0x016d c0x0000 (---------------) + I drive - 0x0028c684, // n0x016e c0x0000 (---------------) + I dstv - 0x003635c3, // n0x016f c0x0000 (---------------) + I dtv - 0x0027b985, // n0x0170 c0x0000 (---------------) + I dubai - 0x0027bac4, // n0x0171 c0x0000 (---------------) + I duck - 0x0038fec6, // n0x0172 c0x0000 (---------------) + I dunlop - 0x0039fd04, // n0x0173 c0x0000 (---------------) + I duns - 0x002007c6, // n0x0174 c0x0000 (---------------) + I dupont - 0x00207bc6, // n0x0175 c0x0000 (---------------) + I durban - 0x0031e384, // n0x0176 c0x0000 (---------------) + I dvag - 0x002108c3, // n0x0177 
c0x0000 (---------------) + I dwg - 0x14603cc2, // n0x0178 c0x0051 (n0x0928-n0x0930) + I dz - 0x00282c85, // n0x0179 c0x0000 (---------------) + I earth - 0x0021a403, // n0x017a c0x0000 (---------------) + I eat - 0x14a0a102, // n0x017b c0x0052 (n0x0930-n0x093c) + I ec - 0x002b5505, // n0x017c c0x0000 (---------------) + I edeka - 0x0023a1c3, // n0x017d c0x0000 (---------------) + I edu - 0x0023a1c9, // n0x017e c0x0000 (---------------) + I education - 0x14e0a542, // n0x017f c0x0053 (n0x093c-n0x0946) + I ee - 0x1560c402, // n0x0180 c0x0055 (n0x0947-n0x0950) + I eg - 0x002d3c05, // n0x0181 c0x0000 (---------------) + I email - 0x002b3786, // n0x0182 c0x0000 (---------------) + I emerck - 0x00328847, // n0x0183 c0x0000 (---------------) + I emerson - 0x002cd306, // n0x0184 c0x0000 (---------------) + I energy - 0x0033f008, // n0x0185 c0x0000 (---------------) + I engineer - 0x0033f00b, // n0x0186 c0x0000 (---------------) + I engineering - 0x00206f4b, // n0x0187 c0x0000 (---------------) + I enterprises - 0x0038a245, // n0x0188 c0x0000 (---------------) + I epost - 0x0038a8c5, // n0x0189 c0x0000 (---------------) + I epson - 0x002c52c9, // n0x018a c0x0000 (---------------) + I equipment - 0x01600682, // n0x018b c0x0005 (---------------)* o I er - 0x0031e548, // n0x018c c0x0000 (---------------) + I ericsson - 0x0020b204, // n0x018d c0x0000 (---------------) + I erni - 0x15e00082, // n0x018e c0x0057 (n0x0951-n0x0956) + I es - 0x0038c4c3, // n0x018f c0x0000 (---------------) + I esq - 0x002c5046, // n0x0190 c0x0000 (---------------) + I estate - 0x00278f88, // n0x0191 c0x0000 (---------------) + I esurance - 0x16607e42, // n0x0192 c0x0059 (n0x0957-n0x095f) + I et - 0x00223788, // n0x0193 c0x0000 (---------------) + I etisalat - 0x00207602, // n0x0194 c0x0000 (---------------) + I eu - 0x0027a84a, // n0x0195 c0x0000 (---------------) + I eurovision - 0x00226d83, // n0x0196 c0x0000 (---------------) + I eus - 0x00376246, // n0x0197 c0x0000 (---------------) + I events - 
0x003a3f08, // n0x0198 c0x0000 (---------------) + I everbank - 0x00233048, // n0x0199 c0x0000 (---------------) + I exchange - 0x003279c6, // n0x019a c0x0000 (---------------) + I expert - 0x00362247, // n0x019b c0x0000 (---------------) + I exposed - 0x00247047, // n0x019c c0x0000 (---------------) + I express - 0x002064ca, // n0x019d c0x0000 (---------------) + I extraspace - 0x0022dac4, // n0x019e c0x0000 (---------------) + I fage - 0x00213604, // n0x019f c0x0000 (---------------) + I fail - 0x0033e9c9, // n0x01a0 c0x0000 (---------------) + I fairwinds - 0x00351545, // n0x01a1 c0x0000 (---------------) + I faith - 0x0036be46, // n0x01a2 c0x0000 (---------------) + I family - 0x0020eac3, // n0x01a3 c0x0000 (---------------) + I fan - 0x002e1144, // n0x01a4 c0x0000 (---------------) + I fans - 0x002c51c4, // n0x01a5 c0x0000 (---------------) + I farm - 0x00312ec7, // n0x01a6 c0x0000 (---------------) + I farmers - 0x0022d087, // n0x01a7 c0x0000 (---------------) + I fashion - 0x00239c84, // n0x01a8 c0x0000 (---------------) + I fast - 0x0020e2c5, // n0x01a9 c0x0000 (---------------) + I fedex - 0x0020a508, // n0x01aa c0x0000 (---------------) + I feedback - 0x002fff47, // n0x01ab c0x0000 (---------------) + I ferrari - 0x00338a47, // n0x01ac c0x0000 (---------------) + I ferrero - 0x16a016c2, // n0x01ad c0x005a (n0x095f-n0x0962) + I fi - 0x00294284, // n0x01ae c0x0000 (---------------) + I fiat - 0x0035a708, // n0x01af c0x0000 (---------------) + I fidelity - 0x0035c484, // n0x01b0 c0x0000 (---------------) + I fido - 0x00249d04, // n0x01b1 c0x0000 (---------------) + I film - 0x0024a0c5, // n0x01b2 c0x0000 (---------------) + I final - 0x0024a207, // n0x01b3 c0x0000 (---------------) + I finance - 0x00204c49, // n0x01b4 c0x0000 (---------------) + I financial - 0x0024b004, // n0x01b5 c0x0000 (---------------) + I fire - 0x0024b5c9, // n0x01b6 c0x0000 (---------------) + I firestone - 0x0024bac8, // n0x01b7 c0x0000 (---------------) + I firmdale - 0x0024ca04, 
// n0x01b8 c0x0000 (---------------) + I fish - 0x0024ca07, // n0x01b9 c0x0000 (---------------) + I fishing - 0x0024cfc3, // n0x01ba c0x0000 (---------------) + I fit - 0x0024d747, // n0x01bb c0x0000 (---------------) + I fitness - 0x01618b02, // n0x01bc c0x0005 (---------------)* o I fj - 0x01799cc2, // n0x01bd c0x0005 (---------------)* o I fk - 0x0024ee46, // n0x01be c0x0000 (---------------) + I flickr - 0x0024f6c7, // n0x01bf c0x0000 (---------------) + I flights - 0x0024fd44, // n0x01c0 c0x0000 (---------------) + I flir - 0x002512c7, // n0x01c1 c0x0000 (---------------) + I florist - 0x00251e07, // n0x01c2 c0x0000 (---------------) + I flowers - 0x002523c8, // n0x01c3 c0x0000 (---------------) + I flsmidth - 0x00252a43, // n0x01c4 c0x0000 (---------------) + I fly - 0x00234ec2, // n0x01c5 c0x0000 (---------------) + I fm - 0x00201882, // n0x01c6 c0x0000 (---------------) + I fo - 0x00254c83, // n0x01c7 c0x0000 (---------------) + I foo - 0x00254c8b, // n0x01c8 c0x0000 (---------------) + I foodnetwork - 0x00309a48, // n0x01c9 c0x0000 (---------------) + I football - 0x0039db04, // n0x01ca c0x0000 (---------------) + I ford - 0x00256245, // n0x01cb c0x0000 (---------------) + I forex - 0x00257d87, // n0x01cc c0x0000 (---------------) + I forsale - 0x00259685, // n0x01cd c0x0000 (---------------) + I forum - 0x002bb04a, // n0x01ce c0x0000 (---------------) + I foundation - 0x0025a903, // n0x01cf c0x0000 (---------------) + I fox - 0x16e41542, // n0x01d0 c0x005b (n0x0962-n0x097a) + I fr - 0x002e95c4, // n0x01d1 c0x0000 (---------------) + I free - 0x0025bb49, // n0x01d2 c0x0000 (---------------) + I fresenius - 0x0025f783, // n0x01d3 c0x0000 (---------------) + I frl - 0x0025f847, // n0x01d4 c0x0000 (---------------) + I frogans - 0x0039efc9, // n0x01d5 c0x0000 (---------------) + I frontdoor - 0x00397f08, // n0x01d6 c0x0000 (---------------) + I frontier - 0x00207503, // n0x01d7 c0x0000 (---------------) + I ftr - 0x0027aac7, // n0x01d8 c0x0000 
(---------------) + I fujitsu - 0x0027afc9, // n0x01d9 c0x0000 (---------------) + I fujixerox - 0x00283144, // n0x01da c0x0000 (---------------) + I fund - 0x00284849, // n0x01db c0x0000 (---------------) + I furniture - 0x002891c6, // n0x01dc c0x0000 (---------------) + I futbol - 0x0028a283, // n0x01dd c0x0000 (---------------) + I fyi - 0x00201b82, // n0x01de c0x0000 (---------------) + I ga - 0x0021ec43, // n0x01df c0x0000 (---------------) + I gal - 0x00391847, // n0x01e0 c0x0000 (---------------) + I gallery - 0x00339805, // n0x01e1 c0x0000 (---------------) + I gallo - 0x002e2ac6, // n0x01e2 c0x0000 (---------------) + I gallup - 0x00297004, // n0x01e3 c0x0000 (---------------) + I game - 0x00349705, // n0x01e4 c0x0000 (---------------) + I games - 0x0023e9c3, // n0x01e5 c0x0000 (---------------) + I gap - 0x0021a906, // n0x01e6 c0x0000 (---------------) + I garden - 0x0020b902, // n0x01e7 c0x0000 (---------------) + I gb - 0x00383804, // n0x01e8 c0x0000 (---------------) + I gbiz - 0x00223002, // n0x01e9 c0x0000 (---------------) + I gd - 0x0022d883, // n0x01ea c0x0000 (---------------) + I gdn - 0x17200282, // n0x01eb c0x005c (n0x097a-n0x0981) + I ge - 0x002b6d43, // n0x01ec c0x0000 (---------------) + I gea - 0x002195c4, // n0x01ed c0x0000 (---------------) + I gent - 0x002195c7, // n0x01ee c0x0000 (---------------) + I genting - 0x0035f046, // n0x01ef c0x0000 (---------------) + I george - 0x0025a3c2, // n0x01f0 c0x0000 (---------------) + I gf - 0x17601902, // n0x01f1 c0x005d (n0x0981-n0x0984) + I gg - 0x002281c4, // n0x01f2 c0x0000 (---------------) + I ggee - 0x17a4f782, // n0x01f3 c0x005e (n0x0984-n0x0989) + I gh - 0x17e01942, // n0x01f4 c0x005f (n0x0989-n0x098f) + I gi - 0x00347484, // n0x01f5 c0x0000 (---------------) + I gift - 0x00347485, // n0x01f6 c0x0000 (---------------) + I gifts - 0x00215b85, // n0x01f7 c0x0000 (---------------) + I gives - 0x0025a1c6, // n0x01f8 c0x0000 (---------------) + I giving - 0x1820b542, // n0x01f9 c0x0060 
(n0x098f-n0x0994) + I gl - 0x00347745, // n0x01fa c0x0000 (---------------) + I glade - 0x00380005, // n0x01fb c0x0000 (---------------) + I glass - 0x00286fc3, // n0x01fc c0x0000 (---------------) + I gle - 0x0020bf46, // n0x01fd c0x0000 (---------------) + I global - 0x0020ca45, // n0x01fe c0x0000 (---------------) + I globo - 0x00215402, // n0x01ff c0x0000 (---------------) + I gm - 0x00338185, // n0x0200 c0x0000 (---------------) + I gmail - 0x00216183, // n0x0201 c0x0000 (---------------) + I gmo - 0x00218443, // n0x0202 c0x0000 (---------------) + I gmx - 0x1860aa42, // n0x0203 c0x0061 (n0x0994-n0x099a) + I gn - 0x002e30c7, // n0x0204 c0x0000 (---------------) + I godaddy - 0x003001c4, // n0x0205 c0x0000 (---------------) + I gold - 0x003001c9, // n0x0206 c0x0000 (---------------) + I goldpoint - 0x0024bcc4, // n0x0207 c0x0000 (---------------) + I golf - 0x00230783, // n0x0208 c0x0000 (---------------) + I goo - 0x002fb989, // n0x0209 c0x0000 (---------------) + I goodhands - 0x00282b48, // n0x020a c0x0000 (---------------) + I goodyear - 0x0029b544, // n0x020b c0x0000 (---------------) + I goog - 0x0029b546, // n0x020c c0x0000 (---------------) + I google - 0x002a3483, // n0x020d c0x0000 (---------------) + I gop - 0x0020ec83, // n0x020e c0x0000 (---------------) + I got - 0x002dd4c4, // n0x020f c0x0000 (---------------) + I gotv - 0x0027c5c3, // n0x0210 c0x0000 (---------------) + I gov - 0x18adad82, // n0x0211 c0x0062 (n0x099a-n0x09a0) + I gp - 0x00300d42, // n0x0212 c0x0000 (---------------) + I gq - 0x18e09d42, // n0x0213 c0x0063 (n0x09a0-n0x09a6) + I gr - 0x0031ca08, // n0x0214 c0x0000 (---------------) + I grainger - 0x0030d008, // n0x0215 c0x0000 (---------------) + I graphics - 0x00387986, // n0x0216 c0x0000 (---------------) + I gratis - 0x0024ce85, // n0x0217 c0x0000 (---------------) + I green - 0x00225485, // n0x0218 c0x0000 (---------------) + I gripe - 0x00209d45, // n0x0219 c0x0000 (---------------) + I group - 0x00296a42, // n0x021a c0x0000 
(---------------) + I gs - 0x1923f942, // n0x021b c0x0064 (n0x09a6-n0x09ad) + I gt - 0x0160c442, // n0x021c c0x0005 (---------------)* o I gu - 0x003516c8, // n0x021d c0x0000 (---------------) + I guardian - 0x00287c05, // n0x021e c0x0000 (---------------) + I gucci - 0x002e0844, // n0x021f c0x0000 (---------------) + I guge - 0x00376145, // n0x0220 c0x0000 (---------------) + I guide - 0x0039bf47, // n0x0221 c0x0000 (---------------) + I guitars - 0x0024e304, // n0x0222 c0x0000 (---------------) + I guru - 0x002157c2, // n0x0223 c0x0000 (---------------) + I gw - 0x19602182, // n0x0224 c0x0065 (n0x09ad-n0x09b3) + I gy - 0x00315904, // n0x0225 c0x0000 (---------------) + I hair - 0x0020b3c7, // n0x0226 c0x0000 (---------------) + I hamburg - 0x00394347, // n0x0227 c0x0000 (---------------) + I hangout - 0x0035e684, // n0x0228 c0x0000 (---------------) + I haus - 0x00290cc3, // n0x0229 c0x0000 (---------------) + I hbo - 0x00249484, // n0x022a c0x0000 (---------------) + I hdfc - 0x00249488, // n0x022b c0x0000 (---------------) + I hdfcbank - 0x002ad786, // n0x022c c0x0000 (---------------) + I health - 0x002ad78a, // n0x022d c0x0000 (---------------) + I healthcare - 0x00204b04, // n0x022e c0x0000 (---------------) + I help - 0x00203288, // n0x022f c0x0000 (---------------) + I helsinki - 0x00252644, // n0x0230 c0x0000 (---------------) + I here - 0x002223c6, // n0x0231 c0x0000 (---------------) + I hermes - 0x00292004, // n0x0232 c0x0000 (---------------) + I hgtv - 0x00341c86, // n0x0233 c0x0000 (---------------) + I hiphop - 0x00310989, // n0x0234 c0x0000 (---------------) + I hisamitsu - 0x002a2587, // n0x0235 c0x0000 (---------------) + I hitachi - 0x00287b43, // n0x0236 c0x0000 (---------------) + I hiv - 0x19a09a82, // n0x0237 c0x0066 (n0x09b3-n0x09cb) + I hk - 0x00269e03, // n0x0238 c0x0000 (---------------) + I hkt - 0x0020da82, // n0x0239 c0x0000 (---------------) + I hm - 0x19e0d342, // n0x023a c0x0067 (n0x09cb-n0x09d1) + I hn - 0x002dfac6, // n0x023b 
c0x0000 (---------------) + I hockey - 0x0035b708, // n0x023c c0x0000 (---------------) + I holdings - 0x002a4c87, // n0x023d c0x0000 (---------------) + I holiday - 0x00272fc9, // n0x023e c0x0000 (---------------) + I homedepot - 0x002990c9, // n0x023f c0x0000 (---------------) + I homegoods - 0x002a5bc5, // n0x0240 c0x0000 (---------------) + I homes - 0x002a5bc9, // n0x0241 c0x0000 (---------------) + I homesense - 0x002a7485, // n0x0242 c0x0000 (---------------) + I honda - 0x002a7bc9, // n0x0243 c0x0000 (---------------) + I honeywell - 0x002a8985, // n0x0244 c0x0000 (---------------) + I horse - 0x00297144, // n0x0245 c0x0000 (---------------) + I host - 0x00297147, // n0x0246 c0x0000 (---------------) + I hosting - 0x00235ec3, // n0x0247 c0x0000 (---------------) + I hot - 0x002a9087, // n0x0248 c0x0000 (---------------) + I hoteles - 0x002a9887, // n0x0249 c0x0000 (---------------) + I hotmail - 0x002a2185, // n0x024a c0x0000 (---------------) + I house - 0x0029fb03, // n0x024b c0x0000 (---------------) + I how - 0x1a237242, // n0x024c c0x0068 (n0x09d1-n0x09d6) + I hr - 0x003854c4, // n0x024d c0x0000 (---------------) + I hsbc - 0x1a64f7c2, // n0x024e c0x0069 (n0x09d6-n0x09e7) + I ht - 0x0025af83, // n0x024f c0x0000 (---------------) + I htc - 0x1aa1dc42, // n0x0250 c0x006a (n0x09e7-n0x0a07) + I hu - 0x002f8506, // n0x0251 c0x0000 (---------------) + I hughes - 0x0030b785, // n0x0252 c0x0000 (---------------) + I hyatt - 0x002acb07, // n0x0253 c0x0000 (---------------) + I hyundai - 0x003069c3, // n0x0254 c0x0000 (---------------) + I ibm - 0x00233684, // n0x0255 c0x0000 (---------------) + I icbc - 0x00202443, // n0x0256 c0x0000 (---------------) + I ice - 0x00270043, // n0x0257 c0x0000 (---------------) + I icu - 0x1ae0ae82, // n0x0258 c0x006b (n0x0a07-n0x0a12) + I id - 0x1b600042, // n0x0259 c0x006d (n0x0a13-n0x0a15) + I ie - 0x00364a04, // n0x025a c0x0000 (---------------) + I ieee - 0x00234e83, // n0x025b c0x0000 (---------------) + I ifm - 0x0030d845, 
// n0x025c c0x0000 (---------------) + I iinet - 0x00253fc5, // n0x025d c0x0000 (---------------) + I ikano - 0x1ba02dc2, // n0x025e c0x006e (n0x0a15-n0x0a1d) + I il - 0x1c200402, // n0x025f c0x0070 (n0x0a1e-n0x0a25) + I im - 0x0035d4c6, // n0x0260 c0x0000 (---------------) + I imamat - 0x00309884, // n0x0261 c0x0000 (---------------) + I imdb - 0x00209484, // n0x0262 c0x0000 (---------------) + I immo - 0x0020948a, // n0x0263 c0x0000 (---------------) + I immobilien - 0x1ca01282, // n0x0264 c0x0072 (n0x0a27-n0x0a34) + I in - 0x00365c8a, // n0x0265 c0x0000 (---------------) + I industries - 0x00201648, // n0x0266 c0x0000 (---------------) + I infiniti - 0x1ce01804, // n0x0267 c0x0073 (n0x0a34-n0x0a3e) + I info - 0x0020c9c3, // n0x0268 c0x0000 (---------------) + I ing - 0x00203383, // n0x0269 c0x0000 (---------------) + I ink - 0x00317c49, // n0x026a c0x0000 (---------------) + I institute - 0x00250849, // n0x026b c0x0000 (---------------) + I insurance - 0x0033cbc6, // n0x026c c0x0000 (---------------) + I insure - 0x1d2014c3, // n0x026d c0x0074 (n0x0a3e-n0x0a3f) + I int - 0x00300345, // n0x026e c0x0000 (---------------) + I intel - 0x0032050d, // n0x026f c0x0000 (---------------) + I international - 0x002f8206, // n0x0270 c0x0000 (---------------) + I intuit - 0x00205c0b, // n0x0271 c0x0000 (---------------) + I investments - 0x1d6009c2, // n0x0272 c0x0075 (n0x0a3f-n0x0a45) + I io - 0x00267748, // n0x0273 c0x0000 (---------------) + I ipiranga - 0x1da1b682, // n0x0274 c0x0076 (n0x0a45-n0x0a4b) + I iq - 0x1de03542, // n0x0275 c0x0077 (n0x0a4b-n0x0a54) + I ir - 0x00291685, // n0x0276 c0x0000 (---------------) + I irish - 0x1e2022c2, // n0x0277 c0x0078 (n0x0a54-n0x0a5c) + I is - 0x00259907, // n0x0278 c0x0000 (---------------) + I iselect - 0x0027a0c7, // n0x0279 c0x0000 (---------------) + I ismaili - 0x002143c3, // n0x027a c0x0000 (---------------) + I ist - 0x002143c8, // n0x027b c0x0000 (---------------) + I istanbul - 0x1e601782, // n0x027c c0x0079 
(n0x0a5c-n0x0bcd) + I it - 0x0027dd44, // n0x027d c0x0000 (---------------) + I itau - 0x0020ccc3, // n0x027e c0x0000 (---------------) + I itv - 0x00325dc5, // n0x027f c0x0000 (---------------) + I iveco - 0x00366b03, // n0x0280 c0x0000 (---------------) + I iwc - 0x0030b246, // n0x0281 c0x0000 (---------------) + I jaguar - 0x00326984, // n0x0282 c0x0000 (---------------) + I java - 0x00247303, // n0x0283 c0x0000 (---------------) + I jcb - 0x0026a183, // n0x0284 c0x0000 (---------------) + I jcp - 0x1ea09302, // n0x0285 c0x007a (n0x0bcd-n0x0bd0) + I je - 0x00337a84, // n0x0286 c0x0000 (---------------) + I jeep - 0x0034fac5, // n0x0287 c0x0000 (---------------) + I jetzt - 0x0035ff07, // n0x0288 c0x0000 (---------------) + I jewelry - 0x002779c3, // n0x0289 c0x0000 (---------------) + I jio - 0x002acf83, // n0x028a c0x0000 (---------------) + I jlc - 0x002ae483, // n0x028b c0x0000 (---------------) + I jll - 0x01669342, // n0x028c c0x0005 (---------------)* o I jm - 0x002ae543, // n0x028d c0x0000 (---------------) + I jmp - 0x002ae703, // n0x028e c0x0000 (---------------) + I jnj - 0x1ee01082, // n0x028f c0x007b (n0x0bd0-n0x0bd8) + I jo - 0x002c3c44, // n0x0290 c0x0000 (---------------) + I jobs - 0x0027bd06, // n0x0291 c0x0000 (---------------) + I joburg - 0x00201083, // n0x0292 c0x0000 (---------------) + I jot - 0x002aea83, // n0x0293 c0x0000 (---------------) + I joy - 0x1f2af2c2, // n0x0294 c0x007c (n0x0bd8-n0x0c47) + I jp - 0x002af2c8, // n0x0295 c0x0000 (---------------) + I jpmorgan - 0x002b1684, // n0x0296 c0x0000 (---------------) + I jprs - 0x0024bec6, // n0x0297 c0x0000 (---------------) + I juegos - 0x002b1f87, // n0x0298 c0x0000 (---------------) + I juniper - 0x0021fac6, // n0x0299 c0x0000 (---------------) + I kaufen - 0x00231d04, // n0x029a c0x0000 (---------------) + I kddi - 0x2ce02a82, // n0x029b c0x00b3 (n0x12db-n0x12dc)* o I ke - 0x00235d8b, // n0x029c c0x0000 (---------------) + I kerryhotels - 0x002e4d0e, // n0x029d c0x0000 
(---------------) + I kerrylogistics - 0x00220f8f, // n0x029e c0x0000 (---------------) + I kerryproperties - 0x00235383, // n0x029f c0x0000 (---------------) + I kfh - 0x2d6b8102, // n0x02a0 c0x00b5 (n0x12dd-n0x12e3) + I kg - 0x0160d4c2, // n0x02a1 c0x0005 (---------------)* o I kh - 0x2da02f82, // n0x02a2 c0x00b6 (n0x12e3-n0x12ea) + I ki - 0x0022c503, // n0x02a3 c0x0000 (---------------) + I kia - 0x0023b883, // n0x02a4 c0x0000 (---------------) + I kim - 0x00379fc6, // n0x02a5 c0x0000 (---------------) + I kinder - 0x0032dac6, // n0x02a6 c0x0000 (---------------) + I kindle - 0x00349a07, // n0x02a7 c0x0000 (---------------) + I kitchen - 0x002ebac4, // n0x02a8 c0x0000 (---------------) + I kiwi - 0x2de2ef82, // n0x02a9 c0x00b7 (n0x12ea-n0x12fb) + I km - 0x2e25fdc2, // n0x02aa c0x00b8 (n0x12fb-n0x12ff) + I kn - 0x00225e45, // n0x02ab c0x0000 (---------------) + I koeln - 0x002f1d87, // n0x02ac c0x0000 (---------------) + I komatsu - 0x002eccc6, // n0x02ad c0x0000 (---------------) + I kosher - 0x2e60be82, // n0x02ae c0x00b9 (n0x12ff-n0x1305) + I kp - 0x0020be84, // n0x02af c0x0000 (---------------) + I kpmg - 0x003691c3, // n0x02b0 c0x0000 (---------------) + I kpn - 0x2ea093c2, // n0x02b1 c0x00ba (n0x1305-n0x1323) + I kr - 0x0034ef43, // n0x02b2 c0x0000 (---------------) + I krd - 0x003a1704, // n0x02b3 c0x0000 (---------------) + I kred - 0x002b8049, // n0x02b4 c0x0000 (---------------) + I kuokgroup - 0x016bfd02, // n0x02b5 c0x0005 (---------------)* o I kw - 0x2ee37982, // n0x02b6 c0x00bb (n0x1323-n0x1328) + I ky - 0x00265706, // n0x02b7 c0x0000 (---------------) + I kyknet - 0x002c0c45, // n0x02b8 c0x0000 (---------------) + I kyoto - 0x2f392142, // n0x02b9 c0x00bc (n0x1328-n0x132e) + I kz - 0x2f608942, // n0x02ba c0x00bd (n0x132e-n0x1337) + I la - 0x0033d987, // n0x02bb c0x0000 (---------------) + I lacaixa - 0x00292f89, // n0x02bc c0x0000 (---------------) + I ladbrokes - 0x00352e4b, // n0x02bd c0x0000 (---------------) + I lamborghini - 0x00246e05, // 
n0x02be c0x0000 (---------------) + I lamer - 0x00369d49, // n0x02bf c0x0000 (---------------) + I lancaster - 0x002c25c6, // n0x02c0 c0x0000 (---------------) + I lancia - 0x002577c7, // n0x02c1 c0x0000 (---------------) + I lancome - 0x002272c4, // n0x02c2 c0x0000 (---------------) + I land - 0x0026f189, // n0x02c3 c0x0000 (---------------) + I landrover - 0x00357f47, // n0x02c4 c0x0000 (---------------) + I lanxess - 0x00278bc7, // n0x02c5 c0x0000 (---------------) + I lasalle - 0x002238c3, // n0x02c6 c0x0000 (---------------) + I lat - 0x00227006, // n0x02c7 c0x0000 (---------------) + I latino - 0x002cf107, // n0x02c8 c0x0000 (---------------) + I latrobe - 0x0026d383, // n0x02c9 c0x0000 (---------------) + I law - 0x0026d386, // n0x02ca c0x0000 (---------------) + I lawyer - 0x2fa02e02, // n0x02cb c0x00be (n0x1337-n0x133c) + I lb - 0x2fe3a442, // n0x02cc c0x00bf (n0x133c-n0x1342) + I lc - 0x00224ec3, // n0x02cd c0x0000 (---------------) + I lds - 0x00278d05, // n0x02ce c0x0000 (---------------) + I lease - 0x002b9487, // n0x02cf c0x0000 (---------------) + I leclerc - 0x00355f86, // n0x02d0 c0x0000 (---------------) + I lefrak - 0x00339785, // n0x02d1 c0x0000 (---------------) + I legal - 0x0024bc44, // n0x02d2 c0x0000 (---------------) + I lego - 0x002defc5, // n0x02d3 c0x0000 (---------------) + I lexus - 0x002e8804, // n0x02d4 c0x0000 (---------------) + I lgbt - 0x30204e42, // n0x02d5 c0x00c0 (n0x1342-n0x1343) + I li - 0x00307187, // n0x02d6 c0x0000 (---------------) + I liaison - 0x002be484, // n0x02d7 c0x0000 (---------------) + I lidl - 0x00250744, // n0x02d8 c0x0000 (---------------) + I life - 0x0025074d, // n0x02d9 c0x0000 (---------------) + I lifeinsurance - 0x003391c9, // n0x02da c0x0000 (---------------) + I lifestyle - 0x002dc408, // n0x02db c0x0000 (---------------) + I lighting - 0x00257404, // n0x02dc c0x0000 (---------------) + I like - 0x0032ba85, // n0x02dd c0x0000 (---------------) + I lilly - 0x0026e847, // n0x02de c0x0000 
(---------------) + I limited - 0x0026ec44, // n0x02df c0x0000 (---------------) + I limo - 0x00356547, // n0x02e0 c0x0000 (---------------) + I lincoln - 0x00354a85, // n0x02e1 c0x0000 (---------------) + I linde - 0x0036aac4, // n0x02e2 c0x0000 (---------------) + I link - 0x002d6b05, // n0x02e3 c0x0000 (---------------) + I lipsy - 0x0025e444, // n0x02e4 c0x0000 (---------------) + I live - 0x002df106, // n0x02e5 c0x0000 (---------------) + I living - 0x0026eb45, // n0x02e6 c0x0000 (---------------) + I lixil - 0x3060be42, // n0x02e7 c0x00c1 (n0x1343-n0x1352) + I lk - 0x00212204, // n0x02e8 c0x0000 (---------------) + I loan - 0x00212205, // n0x02e9 c0x0000 (---------------) + I loans - 0x00373886, // n0x02ea c0x0000 (---------------) + I locker - 0x003398c5, // n0x02eb c0x0000 (---------------) + I locus - 0x002d2644, // n0x02ec c0x0000 (---------------) + I loft - 0x002c4d83, // n0x02ed c0x0000 (---------------) + I lol - 0x0030d246, // n0x02ee c0x0000 (---------------) + I london - 0x002213c5, // n0x02ef c0x0000 (---------------) + I lotte - 0x00222b05, // n0x02f0 c0x0000 (---------------) + I lotto - 0x00268c84, // n0x02f1 c0x0000 (---------------) + I love - 0x00204b83, // n0x02f2 c0x0000 (---------------) + I lpl - 0x00204b8c, // n0x02f3 c0x0000 (---------------) + I lplfinancial - 0x30a89b02, // n0x02f4 c0x00c2 (n0x1352-n0x1357) + I lr - 0x30e03302, // n0x02f5 c0x00c3 (n0x1357-n0x1359) + I ls - 0x3120e982, // n0x02f6 c0x00c4 (n0x1359-n0x135b) + I lt - 0x0030dc03, // n0x02f7 c0x0000 (---------------) + I ltd - 0x0030dc04, // n0x02f8 c0x0000 (---------------) + I ltda - 0x316047c2, // n0x02f9 c0x00c5 (n0x135b-n0x135c) + I lu - 0x002e2588, // n0x02fa c0x0000 (---------------) + I lundbeck - 0x002e2b85, // n0x02fb c0x0000 (---------------) + I lupin - 0x00239444, // n0x02fc c0x0000 (---------------) + I luxe - 0x0023da06, // n0x02fd c0x0000 (---------------) + I luxury - 0x31a0d042, // n0x02fe c0x00c6 (n0x135c-n0x1365) + I lv - 0x31e39d82, // n0x02ff c0x00c7 
(n0x1365-n0x136e) + I ly - 0x32200442, // n0x0300 c0x00c8 (n0x136e-n0x1374) + I ma - 0x0034e885, // n0x0301 c0x0000 (---------------) + I macys - 0x0031e246, // n0x0302 c0x0000 (---------------) + I madrid - 0x0039a1c4, // n0x0303 c0x0000 (---------------) + I maif - 0x002b8dc6, // n0x0304 c0x0000 (---------------) + I maison - 0x00253386, // n0x0305 c0x0000 (---------------) + I makeup - 0x002050c3, // n0x0306 c0x0000 (---------------) + I man - 0x0036c38a, // n0x0307 c0x0000 (---------------) + I management - 0x0023c3c5, // n0x0308 c0x0000 (---------------) + I mango - 0x002eddc6, // n0x0309 c0x0000 (---------------) + I market - 0x002eddc9, // n0x030a c0x0000 (---------------) + I marketing - 0x0035f5c7, // n0x030b c0x0000 (---------------) + I markets - 0x002435c8, // n0x030c c0x0000 (---------------) + I marriott - 0x002036c9, // n0x030d c0x0000 (---------------) + I marshalls - 0x002bc348, // n0x030e c0x0000 (---------------) + I maserati - 0x0022b686, // n0x030f c0x0000 (---------------) + I mattel - 0x0023c583, // n0x0310 c0x0000 (---------------) + I mba - 0x32619ac2, // n0x0311 c0x00c9 (n0x1374-n0x1376) + I mc - 0x003794c3, // n0x0312 c0x0000 (---------------) + I mcd - 0x003794c9, // n0x0313 c0x0000 (---------------) + I mcdonalds - 0x0030e948, // n0x0314 c0x0000 (---------------) + I mckinsey - 0x32a4bb82, // n0x0315 c0x00ca (n0x1376-n0x1377) + I md - 0x32e05d82, // n0x0316 c0x00cb (n0x1377-n0x1384) + I me - 0x002127c3, // n0x0317 c0x0000 (---------------) + I med - 0x00302485, // n0x0318 c0x0000 (---------------) + I media - 0x002665c4, // n0x0319 c0x0000 (---------------) + I meet - 0x002e1a49, // n0x031a c0x0000 (---------------) + I melbourne - 0x002b3744, // n0x031b c0x0000 (---------------) + I meme - 0x0027c8c8, // n0x031c c0x0000 (---------------) + I memorial - 0x00205d83, // n0x031d c0x0000 (---------------) + I men - 0x00314544, // n0x031e c0x0000 (---------------) + I menu - 0x00219c83, // n0x031f c0x0000 (---------------) + I meo - 
0x00250687, // n0x0320 c0x0000 (---------------) + I metlife - 0x3320bf02, // n0x0321 c0x00cc (n0x1384-n0x138d) + I mg - 0x00259042, // n0x0322 c0x0000 (---------------) + I mh - 0x00231785, // n0x0323 c0x0000 (---------------) + I miami - 0x00266d09, // n0x0324 c0x0000 (---------------) + I microsoft - 0x002119c3, // n0x0325 c0x0000 (---------------) + I mil - 0x0027c344, // n0x0326 c0x0000 (---------------) + I mini - 0x003204c4, // n0x0327 c0x0000 (---------------) + I mint - 0x00228603, // n0x0328 c0x0000 (---------------) + I mit - 0x0027e14a, // n0x0329 c0x0000 (---------------) + I mitsubishi - 0x33765b42, // n0x032a c0x00cd (n0x138d-n0x1395) + I mk - 0x33a121c2, // n0x032b c0x00ce (n0x1395-n0x139c) + I ml - 0x002c3803, // n0x032c c0x0000 (---------------) + I mlb - 0x00368303, // n0x032d c0x0000 (---------------) + I mls - 0x016094c2, // n0x032e c0x0005 (---------------)* o I mm - 0x00372583, // n0x032f c0x0000 (---------------) + I mma - 0x33e20782, // n0x0330 c0x00cf (n0x139c-n0x13a0) + I mn - 0x00220784, // n0x0331 c0x0000 (---------------) + I mnet - 0x34209502, // n0x0332 c0x00d0 (n0x13a0-n0x13a5) + I mo - 0x34609504, // n0x0333 c0x00d1 (n0x13a5-n0x13a6) + I mobi - 0x002d9446, // n0x0334 c0x0000 (---------------) + I mobily - 0x00267c44, // n0x0335 c0x0000 (---------------) + I moda - 0x0024c2c3, // n0x0336 c0x0000 (---------------) + I moe - 0x00281d83, // n0x0337 c0x0000 (---------------) + I moi - 0x0022e143, // n0x0338 c0x0000 (---------------) + I mom - 0x00243f86, // n0x0339 c0x0000 (---------------) + I monash - 0x002c9605, // n0x033a c0x0000 (---------------) + I money - 0x002c49c7, // n0x033b c0x0000 (---------------) + I monster - 0x00257689, // n0x033c c0x0000 (---------------) + I montblanc - 0x002c7b05, // n0x033d c0x0000 (---------------) + I mopar - 0x002c9546, // n0x033e c0x0000 (---------------) + I mormon - 0x002c9b48, // n0x033f c0x0000 (---------------) + I mortgage - 0x002c9d46, // n0x0340 c0x0000 (---------------) + I moscow - 
0x002772c4, // n0x0341 c0x0000 (---------------) + I moto - 0x0029a74b, // n0x0342 c0x0000 (---------------) + I motorcycles - 0x002cb303, // n0x0343 c0x0000 (---------------) + I mov - 0x002cb305, // n0x0344 c0x0000 (---------------) + I movie - 0x002cb448, // n0x0345 c0x0000 (---------------) + I movistar - 0x00229802, // n0x0346 c0x0000 (---------------) + I mp - 0x0033dc82, // n0x0347 c0x0000 (---------------) + I mq - 0x34a48f42, // n0x0348 c0x00d2 (n0x13a6-n0x13a8) + I mr - 0x34e0df02, // n0x0349 c0x00d3 (n0x13a8-n0x13ad) + I ms - 0x0026e743, // n0x034a c0x0000 (---------------) + I msd - 0x35207682, // n0x034b c0x00d4 (n0x13ad-n0x13b1) + I mt - 0x00268383, // n0x034c c0x0000 (---------------) + I mtn - 0x002cb744, // n0x034d c0x0000 (---------------) + I mtpc - 0x002cbc83, // n0x034e c0x0000 (---------------) + I mtr - 0x35a04582, // n0x034f c0x00d6 (n0x13b2-n0x13b9) + I mu - 0x002cde8b, // n0x0350 c0x0000 (---------------) + I multichoice - 0x35ed1806, // n0x0351 c0x00d7 (n0x13b9-n0x15dd) + I museum - 0x0023d8c6, // n0x0352 c0x0000 (---------------) + I mutual - 0x002d1e48, // n0x0353 c0x0000 (---------------) + I mutuelle - 0x362b14c2, // n0x0354 c0x00d8 (n0x15dd-n0x15eb) + I mv - 0x3660f182, // n0x0355 c0x00d9 (n0x15eb-n0x15f6) + I mw - 0x36a18482, // n0x0356 c0x00da (n0x15f6-n0x15fc) + I mx - 0x36e2c482, // n0x0357 c0x00db (n0x15fc-n0x1604) + I my - 0x37213742, // n0x0358 c0x00dc (n0x1604-n0x1605)* o I mz - 0x0021374b, // n0x0359 c0x0000 (---------------) + I mzansimagic - 0x376012c2, // n0x035a c0x00dd (n0x1605-n0x1616) + I na - 0x0021a343, // n0x035b c0x0000 (---------------) + I nab - 0x00232f85, // n0x035c c0x0000 (---------------) + I nadex - 0x0022fb46, // n0x035d c0x0000 (---------------) + I nagoya - 0x37a07d04, // n0x035e c0x00de (n0x1616-n0x1618) + I name - 0x0038c0c7, // n0x035f c0x0000 (---------------) + I naspers - 0x00232a0a, // n0x0360 c0x0000 (---------------) + I nationwide - 0x00310e06, // n0x0361 c0x0000 (---------------) + I natura - 
0x0038a484, // n0x0362 c0x0000 (---------------) + I navy - 0x0025b103, // n0x0363 c0x0000 (---------------) + I nba - 0x38604d42, // n0x0364 c0x00e1 (n0x161a-n0x161b) + I nc - 0x002030c2, // n0x0365 c0x0000 (---------------) + I ne - 0x00228cc3, // n0x0366 c0x0000 (---------------) + I nec - 0x38a207c3, // n0x0367 c0x00e2 (n0x161b-n0x1650) + I net - 0x00391fc7, // n0x0368 c0x0000 (---------------) + I netbank - 0x0026ea47, // n0x0369 c0x0000 (---------------) + I netflix - 0x00254d87, // n0x036a c0x0000 (---------------) + I network - 0x00226d47, // n0x036b c0x0000 (---------------) + I neustar - 0x00222083, // n0x036c c0x0000 (---------------) + I new - 0x002ed7ca, // n0x036d c0x0000 (---------------) + I newholland - 0x00222084, // n0x036e c0x0000 (---------------) + I news - 0x0024b784, // n0x036f c0x0000 (---------------) + I next - 0x0024b78a, // n0x0370 c0x0000 (---------------) + I nextdirect - 0x002699c5, // n0x0371 c0x0000 (---------------) + I nexus - 0x39e01682, // n0x0372 c0x00e7 (n0x1658-n0x1662) + I nf - 0x00250d83, // n0x0373 c0x0000 (---------------) + I nfl - 0x3a202902, // n0x0374 c0x00e8 (n0x1662-n0x166b) + I ng - 0x00230743, // n0x0375 c0x0000 (---------------) + I ngo - 0x00269dc3, // n0x0376 c0x0000 (---------------) + I nhk - 0x3aa01742, // n0x0377 c0x00ea (n0x166c-n0x167a) o I ni - 0x002a6604, // n0x0378 c0x0000 (---------------) + I nico - 0x0021e984, // n0x0379 c0x0000 (---------------) + I nike - 0x00202645, // n0x037a c0x0000 (---------------) + I nikon - 0x002cba05, // n0x037b c0x0000 (---------------) + I ninja - 0x0022a8c6, // n0x037c c0x0000 (---------------) + I nissan - 0x0022ac06, // n0x037d c0x0000 (---------------) + I nissay - 0x3ae473c2, // n0x037e c0x00eb (n0x167a-n0x167d) + I nl - 0x3b201342, // n0x037f c0x00ec (n0x167d-n0x1953) + I no - 0x0031f885, // n0x0380 c0x0000 (---------------) + I nokia - 0x0023d5d2, // n0x0381 c0x0000 (---------------) + I northwesternmutual - 0x0035ca06, // n0x0382 c0x0000 (---------------) + I 
norton - 0x00217883, // n0x0383 c0x0000 (---------------) + I now - 0x0029c886, // n0x0384 c0x0000 (---------------) + I nowruz - 0x00217885, // n0x0385 c0x0000 (---------------) + I nowtv - 0x01610bc2, // n0x0386 c0x0005 (---------------)* o I np - 0x4360ba82, // n0x0387 c0x010d (n0x197b-n0x1982) + I nr - 0x002d8183, // n0x0388 c0x0000 (---------------) + I nra - 0x002dbb03, // n0x0389 c0x0000 (---------------) + I nrw - 0x00370bc3, // n0x038a c0x0000 (---------------) + I ntt - 0x43a04fc2, // n0x038b c0x010e (n0x1982-n0x1985) + I nu - 0x0036c103, // n0x038c c0x0000 (---------------) + I nyc - 0x43e04ec2, // n0x038d c0x010f (n0x1985-n0x1995) + I nz - 0x00209543, // n0x038e c0x0000 (---------------) + I obi - 0x002c3c88, // n0x038f c0x0000 (---------------) + I observer - 0x0020a483, // n0x0390 c0x0000 (---------------) + I off - 0x00221946, // n0x0391 c0x0000 (---------------) + I office - 0x003955c7, // n0x0392 c0x0000 (---------------) + I okinawa - 0x00209bc6, // n0x0393 c0x0000 (---------------) + I olayan - 0x00209bcb, // n0x0394 c0x0000 (---------------) + I olayangroup - 0x0038a3c7, // n0x0395 c0x0000 (---------------) + I oldnavy - 0x003852c4, // n0x0396 c0x0000 (---------------) + I ollo - 0x44601382, // n0x0397 c0x0111 (n0x1996-n0x199f) + I om - 0x002e2a05, // n0x0398 c0x0000 (---------------) + I omega - 0x00213c03, // n0x0399 c0x0000 (---------------) + I one - 0x00268a83, // n0x039a c0x0000 (---------------) + I ong - 0x0031e6c3, // n0x039b c0x0000 (---------------) + I onl - 0x0031e6c6, // n0x039c c0x0000 (---------------) + I online - 0x0038a98a, // n0x039d c0x0000 (---------------) + I onyourside - 0x0028e5c3, // n0x039e c0x0000 (---------------) + I ooo - 0x0023dbc4, // n0x039f c0x0000 (---------------) + I open - 0x002244c6, // n0x03a0 c0x0000 (---------------) + I oracle - 0x00395d06, // n0x03a1 c0x0000 (---------------) + I orange - 0x44a25403, // n0x03a2 c0x0112 (n0x199f-n0x19dc) + I org - 0x002af387, // n0x03a3 c0x0000 (---------------) + I 
organic - 0x002db44d, // n0x03a4 c0x0000 (---------------) + I orientexpress - 0x0037e787, // n0x03a5 c0x0000 (---------------) + I origins - 0x0029a485, // n0x03a6 c0x0000 (---------------) + I osaka - 0x00262246, // n0x03a7 c0x0000 (---------------) + I otsuka - 0x00221403, // n0x03a8 c0x0000 (---------------) + I ott - 0x0020c183, // n0x03a9 c0x0000 (---------------) + I ovh - 0x46206642, // n0x03aa c0x0118 (n0x1a19-n0x1a24) + I pa - 0x00311884, // n0x03ab c0x0000 (---------------) + I page - 0x002399cc, // n0x03ac c0x0000 (---------------) + I pamperedchef - 0x00250149, // n0x03ad c0x0000 (---------------) + I panasonic - 0x0033a507, // n0x03ae c0x0000 (---------------) + I panerai - 0x0026a205, // n0x03af c0x0000 (---------------) + I paris - 0x00394a44, // n0x03b0 c0x0000 (---------------) + I pars - 0x002a3508, // n0x03b1 c0x0000 (---------------) + I partners - 0x002a4f85, // n0x03b2 c0x0000 (---------------) + I parts - 0x002ae5c5, // n0x03b3 c0x0000 (---------------) + I party - 0x002b8249, // n0x03b4 c0x0000 (---------------) + I passagens - 0x002bec43, // n0x03b5 c0x0000 (---------------) + I pay - 0x002bec44, // n0x03b6 c0x0000 (---------------) + I payu - 0x002cb7c4, // n0x03b7 c0x0000 (---------------) + I pccw - 0x46600582, // n0x03b8 c0x0119 (n0x1a24-n0x1a2c) + I pe - 0x00210243, // n0x03b9 c0x0000 (---------------) + I pet - 0x46add182, // n0x03ba c0x011a (n0x1a2c-n0x1a2f) + I pf - 0x002dd186, // n0x03bb c0x0000 (---------------) + I pfizer - 0x01630e02, // n0x03bc c0x0005 (---------------)* o I pg - 0x46e983c2, // n0x03bd c0x011b (n0x1a2f-n0x1a37) + I ph - 0x0034e788, // n0x03be c0x0000 (---------------) + I pharmacy - 0x002d6a47, // n0x03bf c0x0000 (---------------) + I philips - 0x002983c5, // n0x03c0 c0x0000 (---------------) + I photo - 0x002d70cb, // n0x03c1 c0x0000 (---------------) + I photography - 0x002d3dc6, // n0x03c2 c0x0000 (---------------) + I photos - 0x002d72c6, // n0x03c3 c0x0000 (---------------) + I physio - 0x002d7446, // 
n0x03c4 c0x0000 (---------------) + I piaget - 0x002251c4, // n0x03c5 c0x0000 (---------------) + I pics - 0x002d7a06, // n0x03c6 c0x0000 (---------------) + I pictet - 0x002d7ec8, // n0x03c7 c0x0000 (---------------) + I pictures - 0x00241c83, // n0x03c8 c0x0000 (---------------) + I pid - 0x002160c3, // n0x03c9 c0x0000 (---------------) + I pin - 0x002160c4, // n0x03ca c0x0000 (---------------) + I ping - 0x002d8744, // n0x03cb c0x0000 (---------------) + I pink - 0x002d8cc7, // n0x03cc c0x0000 (---------------) + I pioneer - 0x002d9745, // n0x03cd c0x0000 (---------------) + I pizza - 0x472d9882, // n0x03ce c0x011c (n0x1a37-n0x1a45) + I pk - 0x47604bc2, // n0x03cf c0x011d (n0x1a45-n0x1aea) + I pl - 0x00208e85, // n0x03d0 c0x0000 (---------------) + I place - 0x00298a44, // n0x03d1 c0x0000 (---------------) + I play - 0x002dadcb, // n0x03d2 c0x0000 (---------------) + I playstation - 0x002dd308, // n0x03d3 c0x0000 (---------------) + I plumbing - 0x002ddb84, // n0x03d4 c0x0000 (---------------) + I plus - 0x0020bec2, // n0x03d5 c0x0000 (---------------) + I pm - 0x47e30c02, // n0x03d6 c0x011f (n0x1b19-n0x1b1e) + I pn - 0x002aff83, // n0x03d7 c0x0000 (---------------) + I pnc - 0x002ddfc4, // n0x03d8 c0x0000 (---------------) + I pohl - 0x002de0c5, // n0x03d9 c0x0000 (---------------) + I poker - 0x002de607, // n0x03da c0x0000 (---------------) + I politie - 0x002e0384, // n0x03db c0x0000 (---------------) + I porn - 0x0035e784, // n0x03dc c0x0000 (---------------) + I post - 0x48207082, // n0x03dd c0x0120 (n0x1b1e-n0x1b2b) + I pr - 0x00357049, // n0x03de c0x0000 (---------------) + I pramerica - 0x002e0d85, // n0x03df c0x0000 (---------------) + I praxi - 0x002470c5, // n0x03e0 c0x0000 (---------------) + I press - 0x002e1985, // n0x03e1 c0x0000 (---------------) + I prime - 0x486210c3, // n0x03e2 c0x0121 (n0x1b2b-n0x1b32) + I pro - 0x002e2284, // n0x03e3 c0x0000 (---------------) + I prod - 0x002e228b, // n0x03e4 c0x0000 (---------------) + I productions - 
0x002e4604, // n0x03e5 c0x0000 (---------------) + I prof - 0x002e488b, // n0x03e6 c0x0000 (---------------) + I progressive - 0x002e5905, // n0x03e7 c0x0000 (---------------) + I promo - 0x002210ca, // n0x03e8 c0x0000 (---------------) + I properties - 0x002e6088, // n0x03e9 c0x0000 (---------------) + I property - 0x002e628a, // n0x03ea c0x0000 (---------------) + I protection - 0x002e6503, // n0x03eb c0x0000 (---------------) + I pru - 0x002e650a, // n0x03ec c0x0000 (---------------) + I prudential - 0x48a25342, // n0x03ed c0x0122 (n0x1b32-n0x1b39) + I ps - 0x48e30382, // n0x03ee c0x0123 (n0x1b39-n0x1b42) + I pt - 0x0029e383, // n0x03ef c0x0000 (---------------) + I pub - 0x492e7b82, // n0x03f0 c0x0124 (n0x1b42-n0x1b48) + I pw - 0x002e7b83, // n0x03f1 c0x0000 (---------------) + I pwc - 0x49734942, // n0x03f2 c0x0125 (n0x1b48-n0x1b4f) + I py - 0x49b1b542, // n0x03f3 c0x0126 (n0x1b4f-n0x1b58) + I qa - 0x002e8684, // n0x03f4 c0x0000 (---------------) + I qpon - 0x0021b6c6, // n0x03f5 c0x0000 (---------------) + I quebec - 0x002b8b05, // n0x03f6 c0x0000 (---------------) + I quest - 0x002e8d03, // n0x03f7 c0x0000 (---------------) + I qvc - 0x0038e006, // n0x03f8 c0x0000 (---------------) + I racing - 0x0022be44, // n0x03f9 c0x0000 (---------------) + I raid - 0x49e06842, // n0x03fa c0x0127 (n0x1b58-n0x1b5c) + I re - 0x002d6484, // n0x03fb c0x0000 (---------------) + I read - 0x002c4f4a, // n0x03fc c0x0000 (---------------) + I realestate - 0x0033a907, // n0x03fd c0x0000 (---------------) + I realtor - 0x0030b5c6, // n0x03fe c0x0000 (---------------) + I realty - 0x00229947, // n0x03ff c0x0000 (---------------) + I recipes - 0x00239b03, // n0x0400 c0x0000 (---------------) + I red - 0x003a1748, // n0x0401 c0x0000 (---------------) + I redstone - 0x00339f4b, // n0x0402 c0x0000 (---------------) + I redumbrella - 0x0036b785, // n0x0403 c0x0000 (---------------) + I rehab - 0x002cdd05, // n0x0404 c0x0000 (---------------) + I reise - 0x002cdd06, // n0x0405 c0x0000 
(---------------) + I reisen - 0x0033ccc4, // n0x0406 c0x0000 (---------------) + I reit - 0x0030ed08, // n0x0407 c0x0000 (---------------) + I reliance - 0x00206843, // n0x0408 c0x0000 (---------------) + I ren - 0x00206844, // n0x0409 c0x0000 (---------------) + I rent - 0x00206847, // n0x040a c0x0000 (---------------) + I rentals - 0x002b87c6, // n0x040b c0x0000 (---------------) + I repair - 0x002fb3c6, // n0x040c c0x0000 (---------------) + I report - 0x0029e30a, // n0x040d c0x0000 (---------------) + I republican - 0x0024b644, // n0x040e c0x0000 (---------------) + I rest - 0x0036528a, // n0x040f c0x0000 (---------------) + I restaurant - 0x00311446, // n0x0410 c0x0000 (---------------) + I review - 0x00311447, // n0x0411 c0x0000 (---------------) + I reviews - 0x002562c7, // n0x0412 c0x0000 (---------------) + I rexroth - 0x00272984, // n0x0413 c0x0000 (---------------) + I rich - 0x00272989, // n0x0414 c0x0000 (---------------) + I richardli - 0x002b10c5, // n0x0415 c0x0000 (---------------) + I ricoh - 0x00347a4b, // n0x0416 c0x0000 (---------------) + I rightathome - 0x0024a6c3, // n0x0417 c0x0000 (---------------) + I ril - 0x00200983, // n0x0418 c0x0000 (---------------) + I rio - 0x002254c3, // n0x0419 c0x0000 (---------------) + I rip - 0x002646c4, // n0x041a c0x0000 (---------------) + I rmit - 0x4a202082, // n0x041b c0x0128 (n0x1b5c-n0x1b68) + I ro - 0x00283946, // n0x041c c0x0000 (---------------) + I rocher - 0x0029f885, // n0x041d c0x0000 (---------------) + I rocks - 0x002d6005, // n0x041e c0x0000 (---------------) + I rodeo - 0x00208706, // n0x041f c0x0000 (---------------) + I rogers - 0x002f7584, // n0x0420 c0x0000 (---------------) + I room - 0x4a6006c2, // n0x0421 c0x0129 (n0x1b68-n0x1b6f) + I rs - 0x00208804, // n0x0422 c0x0000 (---------------) + I rsvp - 0x4aa0e0c2, // n0x0423 c0x012a (n0x1b6f-n0x1bf2) + I ru - 0x002371c4, // n0x0424 c0x0000 (---------------) + I ruhr - 0x00222e03, // n0x0425 c0x0000 (---------------) + I run - 
0x4aedbb42, // n0x0426 c0x012b (n0x1bf2-n0x1bfb) + I rw - 0x0030f583, // n0x0427 c0x0000 (---------------) + I rwe - 0x002ad0c6, // n0x0428 c0x0000 (---------------) + I ryukyu - 0x4b200fc2, // n0x0429 c0x012c (n0x1bfb-n0x1c03) + I sa - 0x00315508, // n0x042a c0x0000 (---------------) + I saarland - 0x0039fa44, // n0x042b c0x0000 (---------------) + I safe - 0x0039fa46, // n0x042c c0x0000 (---------------) + I safety - 0x00306846, // n0x042d c0x0000 (---------------) + I sakura - 0x00257e44, // n0x042e c0x0000 (---------------) + I sale - 0x0030d1c5, // n0x042f c0x0000 (---------------) + I salon - 0x0036a7c8, // n0x0430 c0x0000 (---------------) + I samsclub - 0x0037fe87, // n0x0431 c0x0000 (---------------) + I samsung - 0x0024e447, // n0x0432 c0x0000 (---------------) + I sandvik - 0x0024e44f, // n0x0433 c0x0000 (---------------) + I sandvikcoromant - 0x00294186, // n0x0434 c0x0000 (---------------) + I sanofi - 0x002116c3, // n0x0435 c0x0000 (---------------) + I sap - 0x002116c4, // n0x0436 c0x0000 (---------------) + I sapo - 0x00221304, // n0x0437 c0x0000 (---------------) + I sarl - 0x0021fdc3, // n0x0438 c0x0000 (---------------) + I sas - 0x00222504, // n0x0439 c0x0000 (---------------) + I save - 0x00385ac4, // n0x043a c0x0000 (---------------) + I saxo - 0x4b625382, // n0x043b c0x012d (n0x1c03-n0x1c08) + I sb - 0x0028aa83, // n0x043c c0x0000 (---------------) + I sbi - 0x0039c0c3, // n0x043d c0x0000 (---------------) + I sbs - 0x4ba17f42, // n0x043e c0x012e (n0x1c08-n0x1c0d) + I sc - 0x00237a83, // n0x043f c0x0000 (---------------) + I sca - 0x00314b03, // n0x0440 c0x0000 (---------------) + I scb - 0x00217f4a, // n0x0441 c0x0000 (---------------) + I schaeffler - 0x002e7f87, // n0x0442 c0x0000 (---------------) + I schmidt - 0x0022e98c, // n0x0443 c0x0000 (---------------) + I scholarships - 0x0022ec46, // n0x0444 c0x0000 (---------------) + I school - 0x00231a86, // n0x0445 c0x0000 (---------------) + I schule - 0x00234b07, // n0x0446 c0x0000 
(---------------) + I schwarz - 0x00236947, // n0x0447 c0x0000 (---------------) + I science - 0x00242649, // n0x0448 c0x0000 (---------------) + I scjohnson - 0x0021c104, // n0x0449 c0x0000 (---------------) + I scor - 0x0038b284, // n0x044a c0x0000 (---------------) + I scot - 0x4be5bd42, // n0x044b c0x012f (n0x1c0d-n0x1c15) + I sd - 0x4c202342, // n0x044c c0x0130 (n0x1c15-n0x1c3e) + I se - 0x0031dc84, // n0x044d c0x0000 (---------------) + I seat - 0x003235c6, // n0x044e c0x0000 (---------------) + I secure - 0x00236dc8, // n0x044f c0x0000 (---------------) + I security - 0x00278dc4, // n0x0450 c0x0000 (---------------) + I seek - 0x00259946, // n0x0451 c0x0000 (---------------) + I select - 0x002cd2c5, // n0x0452 c0x0000 (---------------) + I sener - 0x00202348, // n0x0453 c0x0000 (---------------) + I services - 0x00207143, // n0x0454 c0x0000 (---------------) + I ses - 0x00246705, // n0x0455 c0x0000 (---------------) + I seven - 0x00339043, // n0x0456 c0x0000 (---------------) + I sew - 0x002471c3, // n0x0457 c0x0000 (---------------) + I sex - 0x002471c4, // n0x0458 c0x0000 (---------------) + I sexy - 0x0035d183, // n0x0459 c0x0000 (---------------) + I sfr - 0x4c669ac2, // n0x045a c0x0131 (n0x1c3e-n0x1c45) + I sg - 0x4ca01202, // n0x045b c0x0132 (n0x1c45-n0x1c4c) + I sh - 0x0024a589, // n0x045c c0x0000 (---------------) + I shangrila - 0x00250045, // n0x045d c0x0000 (---------------) + I sharp - 0x00251f84, // n0x045e c0x0000 (---------------) + I shaw - 0x00255c85, // n0x045f c0x0000 (---------------) + I shell - 0x0020e644, // n0x0460 c0x0000 (---------------) + I shia - 0x002f94c7, // n0x0461 c0x0000 (---------------) + I shiksha - 0x00385785, // n0x0462 c0x0000 (---------------) + I shoes - 0x002b1746, // n0x0463 c0x0000 (---------------) + I shouji - 0x002b25c4, // n0x0464 c0x0000 (---------------) + I show - 0x002b35c8, // n0x0465 c0x0000 (---------------) + I showtime - 0x002b4b87, // n0x0466 c0x0000 (---------------) + I shriram - 0x4ce03342, // 
n0x0467 c0x0133 (n0x1c4c-n0x1c4d) + I si - 0x00357404, // n0x0468 c0x0000 (---------------) + I silk - 0x002f6304, // n0x0469 c0x0000 (---------------) + I sina - 0x00286f07, // n0x046a c0x0000 (---------------) + I singles - 0x00242c44, // n0x046b c0x0000 (---------------) + I site - 0x0024dd42, // n0x046c c0x0000 (---------------) + I sj - 0x4d20d642, // n0x046d c0x0134 (n0x1c4d-n0x1c4e) + I sk - 0x002302c3, // n0x046e c0x0000 (---------------) + I ski - 0x00379f84, // n0x046f c0x0000 (---------------) + I skin - 0x00237943, // n0x0470 c0x0000 (---------------) + I sky - 0x00237945, // n0x0471 c0x0000 (---------------) + I skype - 0x4d617682, // n0x0472 c0x0135 (n0x1c4e-n0x1c53) + I sl - 0x002cadc5, // n0x0473 c0x0000 (---------------) + I sling - 0x00211982, // n0x0474 c0x0000 (---------------) + I sm - 0x00350c05, // n0x0475 c0x0000 (---------------) + I smart - 0x0035b8c5, // n0x0476 c0x0000 (---------------) + I smile - 0x4da13542, // n0x0477 c0x0136 (n0x1c53-n0x1c5b) + I sn - 0x00213544, // n0x0478 c0x0000 (---------------) + I sncf - 0x4de08102, // n0x0479 c0x0137 (n0x1c5b-n0x1c5e) + I so - 0x0030f086, // n0x047a c0x0000 (---------------) + I soccer - 0x002a0d06, // n0x047b c0x0000 (---------------) + I social - 0x00266e48, // n0x047c c0x0000 (---------------) + I softbank - 0x002ba8c8, // n0x047d c0x0000 (---------------) + I software - 0x002f8484, // n0x047e c0x0000 (---------------) + I sohu - 0x002da345, // n0x047f c0x0000 (---------------) + I solar - 0x002e5049, // n0x0480 c0x0000 (---------------) + I solutions - 0x00328944, // n0x0481 c0x0000 (---------------) + I song - 0x0038a944, // n0x0482 c0x0000 (---------------) + I sony - 0x00228503, // n0x0483 c0x0000 (---------------) + I soy - 0x00206605, // n0x0484 c0x0000 (---------------) + I space - 0x0036eb87, // n0x0485 c0x0000 (---------------) + I spiegel - 0x0029b7c4, // n0x0486 c0x0000 (---------------) + I spot - 0x00332c4d, // n0x0487 c0x0000 (---------------) + I spreadbetting - 0x00334d82, 
// n0x0488 c0x0000 (---------------) + I sr - 0x00334d83, // n0x0489 c0x0000 (---------------) + I srl - 0x00336483, // n0x048a c0x0000 (---------------) + I srt - 0x4e202c02, // n0x048b c0x0138 (n0x1c5e-n0x1c6a) + I st - 0x0037ba45, // n0x048c c0x0000 (---------------) + I stada - 0x00231907, // n0x048d c0x0000 (---------------) + I staples - 0x00226e04, // n0x048e c0x0000 (---------------) + I star - 0x00226e07, // n0x048f c0x0000 (---------------) + I starhub - 0x002038c9, // n0x0490 c0x0000 (---------------) + I statebank - 0x002c5089, // n0x0491 c0x0000 (---------------) + I statefarm - 0x002f0a47, // n0x0492 c0x0000 (---------------) + I statoil - 0x00276583, // n0x0493 c0x0000 (---------------) + I stc - 0x00276588, // n0x0494 c0x0000 (---------------) + I stcgroup - 0x00299649, // n0x0495 c0x0000 (---------------) + I stockholm - 0x003580c7, // n0x0496 c0x0000 (---------------) + I storage - 0x00363685, // n0x0497 c0x0000 (---------------) + I store - 0x002e91c6, // n0x0498 c0x0000 (---------------) + I studio - 0x002e9345, // n0x0499 c0x0000 (---------------) + I study - 0x003392c5, // n0x049a c0x0000 (---------------) + I style - 0x4e600702, // n0x049b c0x0139 (n0x1c6a-n0x1c8a) + I su - 0x00271b85, // n0x049c c0x0000 (---------------) + I sucks - 0x002bc84a, // n0x049d c0x0000 (---------------) + I supersport - 0x002c0e48, // n0x049e c0x0000 (---------------) + I supplies - 0x002a7306, // n0x049f c0x0000 (---------------) + I supply - 0x002e5b47, // n0x04a0 c0x0000 (---------------) + I support - 0x00245984, // n0x04a1 c0x0000 (---------------) + I surf - 0x0029ad87, // n0x04a2 c0x0000 (---------------) + I surgery - 0x002ebc46, // n0x04a3 c0x0000 (---------------) + I suzuki - 0x4ea08842, // n0x04a4 c0x013a (n0x1c8a-n0x1c8f) + I sv - 0x00373586, // n0x04a5 c0x0000 (---------------) + I swatch - 0x002ee00a, // n0x04a6 c0x0000 (---------------) + I swiftcover - 0x002ee9c5, // n0x04a7 c0x0000 (---------------) + I swiss - 0x4eeeefc2, // n0x04a8 c0x013b 
(n0x1c8f-n0x1c90) + I sx - 0x4f283b42, // n0x04a9 c0x013c (n0x1c90-n0x1c96) + I sy - 0x00337c86, // n0x04aa c0x0000 (---------------) + I sydney - 0x002ab0c8, // n0x04ab c0x0000 (---------------) + I symantec - 0x00394507, // n0x04ac c0x0000 (---------------) + I systems - 0x4f6000c2, // n0x04ad c0x013d (n0x1c96-n0x1c99) + I sz - 0x0020dc83, // n0x04ae c0x0000 (---------------) + I tab - 0x002004c6, // n0x04af c0x0000 (---------------) + I taipei - 0x0021fa04, // n0x04b0 c0x0000 (---------------) + I talk - 0x00395486, // n0x04b1 c0x0000 (---------------) + I taobao - 0x00322386, // n0x04b2 c0x0000 (---------------) + I target - 0x0030d94a, // n0x04b3 c0x0000 (---------------) + I tatamotors - 0x0036a545, // n0x04b4 c0x0000 (---------------) + I tatar - 0x0020f486, // n0x04b5 c0x0000 (---------------) + I tattoo - 0x0021db43, // n0x04b6 c0x0000 (---------------) + I tax - 0x0021db44, // n0x04b7 c0x0000 (---------------) + I taxi - 0x00206ec2, // n0x04b8 c0x0000 (---------------) + I tc - 0x00309803, // n0x04b9 c0x0000 (---------------) + I tci - 0x4fa0a382, // n0x04ba c0x013e (n0x1c99-n0x1c9a) + I td - 0x002cb903, // n0x04bb c0x0000 (---------------) + I tdk - 0x00365f04, // n0x04bc c0x0000 (---------------) + I team - 0x002ab204, // n0x04bd c0x0000 (---------------) + I tech - 0x002ab20a, // n0x04be c0x0000 (---------------) + I technology - 0x0022a4c3, // n0x04bf c0x0000 (---------------) + I tel - 0x00285188, // n0x04c0 c0x0000 (---------------) + I telecity - 0x00317e0a, // n0x04c1 c0x0000 (---------------) + I telefonica - 0x002403c7, // n0x04c2 c0x0000 (---------------) + I temasek - 0x002f2606, // n0x04c3 c0x0000 (---------------) + I tennis - 0x00331104, // n0x04c4 c0x0000 (---------------) + I teva - 0x0026eac2, // n0x04c5 c0x0000 (---------------) + I tf - 0x002076c2, // n0x04c6 c0x0000 (---------------) + I tg - 0x4fe03e02, // n0x04c7 c0x013f (n0x1c9a-n0x1ca1) + I th - 0x00249443, // n0x04c8 c0x0000 (---------------) + I thd - 0x00256087, // n0x04c9 
c0x0000 (---------------) + I theater - 0x0038e2c7, // n0x04ca c0x0000 (---------------) + I theatre - 0x0035160b, // n0x04cb c0x0000 (---------------) + I theguardian - 0x0034e644, // n0x04cc c0x0000 (---------------) + I tiaa - 0x002f4647, // n0x04cd c0x0000 (---------------) + I tickets - 0x002de706, // n0x04ce c0x0000 (---------------) + I tienda - 0x0039f807, // n0x04cf c0x0000 (---------------) + I tiffany - 0x002e7ec4, // n0x04d0 c0x0000 (---------------) + I tips - 0x00352745, // n0x04d1 c0x0000 (---------------) + I tires - 0x002bcdc5, // n0x04d2 c0x0000 (---------------) + I tirol - 0x50224e02, // n0x04d3 c0x0140 (n0x1ca1-n0x1cb0) + I tj - 0x00269306, // n0x04d4 c0x0000 (---------------) + I tjmaxx - 0x0036c5c3, // n0x04d5 c0x0000 (---------------) + I tjx - 0x00219bc2, // n0x04d6 c0x0000 (---------------) + I tk - 0x0022ef46, // n0x04d7 c0x0000 (---------------) + I tkmaxx - 0x5060f842, // n0x04d8 c0x0141 (n0x1cb0-n0x1cb1) + I tl - 0x50a00c42, // n0x04d9 c0x0142 (n0x1cb1-n0x1cb9) + I tm - 0x00200c45, // n0x04da c0x0000 (---------------) + I tmall - 0x50e2e342, // n0x04db c0x0143 (n0x1cb9-n0x1ccd) + I tn - 0x512042c2, // n0x04dc c0x0144 (n0x1ccd-n0x1cd3) + I to - 0x00261b05, // n0x04dd c0x0000 (---------------) + I today - 0x00344a85, // n0x04de c0x0000 (---------------) + I tokyo - 0x0020f545, // n0x04df c0x0000 (---------------) + I tools - 0x002042c3, // n0x04e0 c0x0000 (---------------) + I top - 0x00288445, // n0x04e1 c0x0000 (---------------) + I toray - 0x002d3e87, // n0x04e2 c0x0000 (---------------) + I toshiba - 0x00259c05, // n0x04e3 c0x0000 (---------------) + I total - 0x002fd745, // n0x04e4 c0x0000 (---------------) + I tours - 0x002d7904, // n0x04e5 c0x0000 (---------------) + I town - 0x0025a486, // n0x04e6 c0x0000 (---------------) + I toyota - 0x00266684, // n0x04e7 c0x0000 (---------------) + I toys - 0x51604882, // n0x04e8 c0x0145 (n0x1cd3-n0x1ce8) + I tr - 0x00263685, // n0x04e9 c0x0000 (---------------) + I trade - 0x002a4287, // 
n0x04ea c0x0000 (---------------) + I trading - 0x00305148, // n0x04eb c0x0000 (---------------) + I training - 0x0029b886, // n0x04ec c0x0000 (---------------) + I travel - 0x0029b88d, // n0x04ed c0x0000 (---------------) + I travelchannel - 0x002a1b09, // n0x04ee c0x0000 (---------------) + I travelers - 0x002a1b12, // n0x04ef c0x0000 (---------------) + I travelersinsurance - 0x0032cec5, // n0x04f0 c0x0000 (---------------) + I trust - 0x00344d43, // n0x04f1 c0x0000 (---------------) + I trv - 0x5220d982, // n0x04f2 c0x0148 (n0x1cea-n0x1cfb) + I tt - 0x002e6b04, // n0x04f3 c0x0000 (---------------) + I tube - 0x002f8283, // n0x04f4 c0x0000 (---------------) + I tui - 0x002ef445, // n0x04f5 c0x0000 (---------------) + I tunes - 0x002f0285, // n0x04f6 c0x0000 (---------------) + I tushu - 0x5260cd02, // n0x04f7 c0x0149 (n0x1cfb-n0x1cff) + I tv - 0x00363603, // n0x04f8 c0x0000 (---------------) + I tvs - 0x52a48342, // n0x04f9 c0x014a (n0x1cff-n0x1d0d) + I tw - 0x52e20842, // n0x04fa c0x014b (n0x1d0d-n0x1d19) + I tz - 0x5321dc82, // n0x04fb c0x014c (n0x1d19-n0x1d68) + I ua - 0x0033f5c5, // n0x04fc c0x0000 (---------------) + I ubank - 0x00254643, // n0x04fd c0x0000 (---------------) + I ubs - 0x00228bc8, // n0x04fe c0x0000 (---------------) + I uconnect - 0x536054c2, // n0x04ff c0x014d (n0x1d68-n0x1d71) + I ug - 0x53a01ac2, // n0x0500 c0x014e (n0x1d71-n0x1d7c) + I uk - 0x002a65c6, // n0x0501 c0x0000 (---------------) + I unicom - 0x0030c34a, // n0x0502 c0x0000 (---------------) + I university - 0x0020bc03, // n0x0503 c0x0000 (---------------) + I uno - 0x00258343, // n0x0504 c0x0000 (---------------) + I uol - 0x002cd843, // n0x0505 c0x0000 (---------------) + I ups - 0x54602202, // n0x0506 c0x0151 (n0x1d7e-n0x1dbd) + I us - 0x62a05002, // n0x0507 c0x018a (n0x1e60-n0x1e66) + I uy - 0x63209182, // n0x0508 c0x018c (n0x1e67-n0x1e6b) + I uz - 0x00200bc2, // n0x0509 c0x0000 (---------------) + I va - 0x00373389, // n0x050a c0x0000 (---------------) + I vacations - 
0x002bf144, // n0x050b c0x0000 (---------------) + I vana - 0x0028a748, // n0x050c c0x0000 (---------------) + I vanguard - 0x636e8d42, // n0x050d c0x018d (n0x1e6b-n0x1e71) + I vc - 0x63a03042, // n0x050e c0x018e (n0x1e71-n0x1e82) + I ve - 0x00268d05, // n0x050f c0x0000 (---------------) + I vegas - 0x0023b248, // n0x0510 c0x0000 (---------------) + I ventures - 0x002ee1c8, // n0x0511 c0x0000 (---------------) + I verisign - 0x00393b0c, // n0x0512 c0x0000 (---------------) + I versicherung - 0x00230403, // n0x0513 c0x0000 (---------------) + I vet - 0x0025de02, // n0x0514 c0x0000 (---------------) + I vg - 0x63e02402, // n0x0515 c0x018f (n0x1e82-n0x1e87) + I vi - 0x002c7f86, // n0x0516 c0x0000 (---------------) + I viajes - 0x002f3485, // n0x0517 c0x0000 (---------------) + I video - 0x00336fc3, // n0x0518 c0x0000 (---------------) + I vig - 0x0033b586, // n0x0519 c0x0000 (---------------) + I viking - 0x002f35c6, // n0x051a c0x0000 (---------------) + I villas - 0x00228303, // n0x051b c0x0000 (---------------) + I vin - 0x002f6243, // n0x051c c0x0000 (---------------) + I vip - 0x002f7746, // n0x051d c0x0000 (---------------) + I virgin - 0x002f7cc4, // n0x051e c0x0000 (---------------) + I visa - 0x0027a946, // n0x051f c0x0000 (---------------) + I vision - 0x002cb4c5, // n0x0520 c0x0000 (---------------) + I vista - 0x002f804a, // n0x0521 c0x0000 (---------------) + I vistaprint - 0x00241384, // n0x0522 c0x0000 (---------------) + I viva - 0x002f9d44, // n0x0523 c0x0000 (---------------) + I vivo - 0x00348aca, // n0x0524 c0x0000 (---------------) + I vlaanderen - 0x642e1f42, // n0x0525 c0x0190 (n0x1e87-n0x1e94) + I vn - 0x0026bc05, // n0x0526 c0x0000 (---------------) + I vodka - 0x002fbc4a, // n0x0527 c0x0000 (---------------) + I volkswagen - 0x002fc8c5, // n0x0528 c0x0000 (---------------) + I volvo - 0x002fd444, // n0x0529 c0x0000 (---------------) + I vote - 0x002fd546, // n0x052a c0x0000 (---------------) + I voting - 0x002fd6c4, // n0x052b c0x0000 
(---------------) + I voto - 0x00226b06, // n0x052c c0x0000 (---------------) + I voyage - 0x6462e282, // n0x052d c0x0191 (n0x1e94-n0x1e98) + I vu - 0x0030af06, // n0x052e c0x0000 (---------------) + I vuelos - 0x0030bfc5, // n0x052f c0x0000 (---------------) + I wales - 0x00201c07, // n0x0530 c0x0000 (---------------) + I walmart - 0x002934c6, // n0x0531 c0x0000 (---------------) + I walter - 0x00234d04, // n0x0532 c0x0000 (---------------) + I wang - 0x0023ee87, // n0x0533 c0x0000 (---------------) + I wanggou - 0x0036c2c6, // n0x0534 c0x0000 (---------------) + I warman - 0x002af885, // n0x0535 c0x0000 (---------------) + I watch - 0x003a2987, // n0x0536 c0x0000 (---------------) + I watches - 0x00390f87, // n0x0537 c0x0000 (---------------) + I weather - 0x00390f8e, // n0x0538 c0x0000 (---------------) + I weatherchannel - 0x00221cc6, // n0x0539 c0x0000 (---------------) + I webcam - 0x00356405, // n0x053a c0x0000 (---------------) + I weber - 0x002c2cc7, // n0x053b c0x0000 (---------------) + I website - 0x002ed303, // n0x053c c0x0000 (---------------) + I wed - 0x003337c7, // n0x053d c0x0000 (---------------) + I wedding - 0x0020f305, // n0x053e c0x0000 (---------------) + I weibo - 0x00210a84, // n0x053f c0x0000 (---------------) + I weir - 0x0022d042, // n0x0540 c0x0000 (---------------) + I wf - 0x0033b107, // n0x0541 c0x0000 (---------------) + I whoswho - 0x002ebb44, // n0x0542 c0x0000 (---------------) + I wien - 0x0032da44, // n0x0543 c0x0000 (---------------) + I wiki - 0x00258ecb, // n0x0544 c0x0000 (---------------) + I williamhill - 0x0021cfc3, // n0x0545 c0x0000 (---------------) + I win - 0x002d0c07, // n0x0546 c0x0000 (---------------) + I windows - 0x0021cfc4, // n0x0547 c0x0000 (---------------) + I wine - 0x002b3447, // n0x0548 c0x0000 (---------------) + I winners - 0x00231483, // n0x0549 c0x0000 (---------------) + I wme - 0x0032f44d, // n0x054a c0x0000 (---------------) + I wolterskluwer - 0x0037c288, // n0x054b c0x0000 (---------------) + 
I woodside - 0x0024fbc4, // n0x054c c0x0000 (---------------) + I work - 0x0033ad45, // n0x054d c0x0000 (---------------) + I works - 0x00300805, // n0x054e c0x0000 (---------------) + I world - 0x002feb03, // n0x054f c0x0000 (---------------) + I wow - 0x64a06402, // n0x0550 c0x0192 (n0x1e98-n0x1e9f) + I ws - 0x002ffa03, // n0x0551 c0x0000 (---------------) + I wtc - 0x002ffec3, // n0x0552 c0x0000 (---------------) + I wtf - 0x002184c4, // n0x0553 c0x0000 (---------------) + I xbox - 0x0027b0c5, // n0x0554 c0x0000 (---------------) + I xerox - 0x00218587, // n0x0555 c0x0000 (---------------) + I xfinity - 0x0021dbc6, // n0x0556 c0x0000 (---------------) + I xihuan - 0x00365c43, // n0x0557 c0x0000 (---------------) + I xin - 0x0036c64b, // n0x0558 c0x0000 (---------------) + I xn--11b4c3d - 0x0022f08b, // n0x0559 c0x0000 (---------------) + I xn--1ck2e1b - 0x0026600b, // n0x055a c0x0000 (---------------) + I xn--1qqw23a - 0x0027b1ca, // n0x055b c0x0000 (---------------) + I xn--30rr7y - 0x002a6c8b, // n0x055c c0x0000 (---------------) + I xn--3bst00m - 0x002dab0b, // n0x055d c0x0000 (---------------) + I xn--3ds443g - 0x002d6dcc, // n0x055e c0x0000 (---------------) + I xn--3e0b707e - 0x002ef011, // n0x055f c0x0000 (---------------) + I xn--3oq18vl8pn36a - 0x0033c68a, // n0x0560 c0x0000 (---------------) + I xn--3pxu8k - 0x0034b88b, // n0x0561 c0x0000 (---------------) + I xn--42c2d9a - 0x0036de4b, // n0x0562 c0x0000 (---------------) + I xn--45brj9c - 0x003a234a, // n0x0563 c0x0000 (---------------) + I xn--45q11c - 0x003a4d8a, // n0x0564 c0x0000 (---------------) + I xn--4gbrim - 0x00300c0d, // n0x0565 c0x0000 (---------------) + I xn--4gq48lf9j - 0x0030204e, // n0x0566 c0x0000 (---------------) + I xn--54b7fta0cc - 0x003025cb, // n0x0567 c0x0000 (---------------) + I xn--55qw42g - 0x0030288a, // n0x0568 c0x0000 (---------------) + I xn--55qx5d - 0x00303c11, // n0x0569 c0x0000 (---------------) + I xn--5su34j936bgsg - 0x0030404a, // n0x056a c0x0000 
(---------------) + I xn--5tzm5g - 0x0030454b, // n0x056b c0x0000 (---------------) + I xn--6frz82g - 0x00304a8e, // n0x056c c0x0000 (---------------) + I xn--6qq986b3xl - 0x0030574c, // n0x056d c0x0000 (---------------) + I xn--80adxhks - 0x00305d8b, // n0x056e c0x0000 (---------------) + I xn--80ao21a - 0x0030604e, // n0x056f c0x0000 (---------------) + I xn--80aqecdr1a - 0x003063cc, // n0x0570 c0x0000 (---------------) + I xn--80asehdb - 0x0030784a, // n0x0571 c0x0000 (---------------) + I xn--80aswg - 0x0030868c, // n0x0572 c0x0000 (---------------) + I xn--8y0a063a - 0x64f0898a, // n0x0573 c0x0193 (n0x1e9f-n0x1ea5) + I xn--90a3ac - 0x00314d49, // n0x0574 c0x0000 (---------------) + I xn--90ais - 0x0031630a, // n0x0575 c0x0000 (---------------) + I xn--9dbq2a - 0x0031658a, // n0x0576 c0x0000 (---------------) + I xn--9et52u - 0x0031680b, // n0x0577 c0x0000 (---------------) + I xn--9krt00a - 0x00318cce, // n0x0578 c0x0000 (---------------) + I xn--b4w605ferd - 0x00319051, // n0x0579 c0x0000 (---------------) + I xn--bck1b9a5dre4c - 0x0031fdc9, // n0x057a c0x0000 (---------------) + I xn--c1avg - 0x0032000a, // n0x057b c0x0000 (---------------) + I xn--c2br7g - 0x00320e0b, // n0x057c c0x0000 (---------------) + I xn--cck2b3b - 0x003227ca, // n0x057d c0x0000 (---------------) + I xn--cg4bki - 0x00322f56, // n0x057e c0x0000 (---------------) + I xn--clchc0ea0b2g2a9gcd - 0x0032474b, // n0x057f c0x0000 (---------------) + I xn--czr694b - 0x003265ca, // n0x0580 c0x0000 (---------------) + I xn--czrs0t - 0x00326e0a, // n0x0581 c0x0000 (---------------) + I xn--czru2d - 0x0032a78b, // n0x0582 c0x0000 (---------------) + I xn--d1acj3b - 0x0032c949, // n0x0583 c0x0000 (---------------) + I xn--d1alf - 0x0032fdcd, // n0x0584 c0x0000 (---------------) + I xn--eckvdtc9d - 0x0033044b, // n0x0585 c0x0000 (---------------) + I xn--efvy88h - 0x0033134b, // n0x0586 c0x0000 (---------------) + I xn--estv75g - 0x00331d0b, // n0x0587 c0x0000 (---------------) + I xn--fct429k - 
0x003322c9, // n0x0588 c0x0000 (---------------) + I xn--fhbei - 0x0033290e, // n0x0589 c0x0000 (---------------) + I xn--fiq228c5hs - 0x00332f8a, // n0x058a c0x0000 (---------------) + I xn--fiq64b - 0x003346ca, // n0x058b c0x0000 (---------------) + I xn--fiqs8s - 0x00334b4a, // n0x058c c0x0000 (---------------) + I xn--fiqz9s - 0x0033518b, // n0x058d c0x0000 (---------------) + I xn--fjq720a - 0x003359cb, // n0x058e c0x0000 (---------------) + I xn--flw351e - 0x00335c8d, // n0x058f c0x0000 (---------------) + I xn--fpcrj9c3d - 0x0033750d, // n0x0590 c0x0000 (---------------) + I xn--fzc2c9e2c - 0x00337e10, // n0x0591 c0x0000 (---------------) + I xn--fzys8d69uvgm - 0x003382cb, // n0x0592 c0x0000 (---------------) + I xn--g2xx48c - 0x0033878c, // n0x0593 c0x0000 (---------------) + I xn--gckr3f0f - 0x0033940b, // n0x0594 c0x0000 (---------------) + I xn--gecrj9c - 0x0033d48b, // n0x0595 c0x0000 (---------------) + I xn--gk3at1e - 0x0033f88b, // n0x0596 c0x0000 (---------------) + I xn--h2brj9c - 0x003431cb, // n0x0597 c0x0000 (---------------) + I xn--hxt814e - 0x00343c4f, // n0x0598 c0x0000 (---------------) + I xn--i1b6b1a6a2e - 0x0034400b, // n0x0599 c0x0000 (---------------) + I xn--imr513n - 0x0034500a, // n0x059a c0x0000 (---------------) + I xn--io0a7i - 0x00345d09, // n0x059b c0x0000 (---------------) + I xn--j1aef - 0x003460c9, // n0x059c c0x0000 (---------------) + I xn--j1amh - 0x00346ccb, // n0x059d c0x0000 (---------------) + I xn--j6w193g - 0x00346f8e, // n0x059e c0x0000 (---------------) + I xn--jlq61u9w7b - 0x0034914b, // n0x059f c0x0000 (---------------) + I xn--jvr189m - 0x0034a44f, // n0x05a0 c0x0000 (---------------) + I xn--kcrx77d1x4a - 0x0034c3cb, // n0x05a1 c0x0000 (---------------) + I xn--kprw13d - 0x0034c68b, // n0x05a2 c0x0000 (---------------) + I xn--kpry57d - 0x0034c94b, // n0x05a3 c0x0000 (---------------) + I xn--kpu716f - 0x0034d2ca, // n0x05a4 c0x0000 (---------------) + I xn--kput3i - 0x00352a49, // n0x05a5 c0x0000 
(---------------) + I xn--l1acc - 0x0035524f, // n0x05a6 c0x0000 (---------------) + I xn--lgbbat1ad8j - 0x00359fcc, // n0x05a7 c0x0000 (---------------) + I xn--mgb2ddes - 0x0035a44c, // n0x05a8 c0x0000 (---------------) + I xn--mgb9awbf - 0x0035a90e, // n0x05a9 c0x0000 (---------------) + I xn--mgba3a3ejt - 0x0035ae4f, // n0x05aa c0x0000 (---------------) + I xn--mgba3a4f16a - 0x0035b20e, // n0x05ab c0x0000 (---------------) + I xn--mgba3a4fra - 0x0035bd10, // n0x05ac c0x0000 (---------------) + I xn--mgba7c0bbn0a - 0x0035c10f, // n0x05ad c0x0000 (---------------) + I xn--mgbaakc7dvf - 0x0035d9ce, // n0x05ae c0x0000 (---------------) + I xn--mgbaam7a8h - 0x0035de8c, // n0x05af c0x0000 (---------------) + I xn--mgbab2bd - 0x0035e192, // n0x05b0 c0x0000 (---------------) + I xn--mgbai9a5eva00b - 0x0035fb11, // n0x05b1 c0x0000 (---------------) + I xn--mgbai9azgqp6j - 0x003600ce, // n0x05b2 c0x0000 (---------------) + I xn--mgbayh7gpa - 0x0036050e, // n0x05b3 c0x0000 (---------------) + I xn--mgbb9fbpob - 0x00360a4e, // n0x05b4 c0x0000 (---------------) + I xn--mgbbh1a71e - 0x00360dcf, // n0x05b5 c0x0000 (---------------) + I xn--mgbc0a9azcg - 0x0036118e, // n0x05b6 c0x0000 (---------------) + I xn--mgbca7dzdo - 0x00361693, // n0x05b7 c0x0000 (---------------) + I xn--mgberp4a5d4a87g - 0x00361b51, // n0x05b8 c0x0000 (---------------) + I xn--mgberp4a5d4ar - 0x00361f8e, // n0x05b9 c0x0000 (---------------) + I xn--mgbi4ecexp - 0x0036240c, // n0x05ba c0x0000 (---------------) + I xn--mgbpl2fh - 0x00362853, // n0x05bb c0x0000 (---------------) + I xn--mgbqly7c0a67fbc - 0x00362fd0, // n0x05bc c0x0000 (---------------) + I xn--mgbqly7cvafr - 0x0036390c, // n0x05bd c0x0000 (---------------) + I xn--mgbt3dhd - 0x00363c0c, // n0x05be c0x0000 (---------------) + I xn--mgbtf8fl - 0x0036414b, // n0x05bf c0x0000 (---------------) + I xn--mgbtx2b - 0x0036460e, // n0x05c0 c0x0000 (---------------) + I xn--mgbx4cd0ab - 0x00364b0b, // n0x05c1 c0x0000 (---------------) + I 
xn--mix082f - 0x00364ecb, // n0x05c2 c0x0000 (---------------) + I xn--mix891f - 0x003662cc, // n0x05c3 c0x0000 (---------------) + I xn--mk1bu44c - 0x0036cc4a, // n0x05c4 c0x0000 (---------------) + I xn--mxtq1m - 0x0036d64c, // n0x05c5 c0x0000 (---------------) + I xn--ngbc5azd - 0x0036d94c, // n0x05c6 c0x0000 (---------------) + I xn--ngbe9e0a - 0x0036dc49, // n0x05c7 c0x0000 (---------------) + I xn--ngbrx - 0x0036f60b, // n0x05c8 c0x0000 (---------------) + I xn--nnx388a - 0x0036f8c8, // n0x05c9 c0x0000 (---------------) + I xn--node - 0x0036fd89, // n0x05ca c0x0000 (---------------) + I xn--nqv7f - 0x0036fd8f, // n0x05cb c0x0000 (---------------) + I xn--nqv7fs00ema - 0x0037170b, // n0x05cc c0x0000 (---------------) + I xn--nyqy26a - 0x003722ca, // n0x05cd c0x0000 (---------------) + I xn--o3cw4h - 0x00373a0c, // n0x05ce c0x0000 (---------------) + I xn--ogbpf8fl - 0x003755c9, // n0x05cf c0x0000 (---------------) + I xn--p1acf - 0x00375848, // n0x05d0 c0x0000 (---------------) + I xn--p1ai - 0x00375a4b, // n0x05d1 c0x0000 (---------------) + I xn--pbt977c - 0x0037738b, // n0x05d2 c0x0000 (---------------) + I xn--pgbs0dh - 0x00377f8a, // n0x05d3 c0x0000 (---------------) + I xn--pssy2u - 0x0037820b, // n0x05d4 c0x0000 (---------------) + I xn--q9jyb4c - 0x0037924c, // n0x05d5 c0x0000 (---------------) + I xn--qcka1pmc - 0x00379d08, // n0x05d6 c0x0000 (---------------) + I xn--qxam - 0x0037c48b, // n0x05d7 c0x0000 (---------------) + I xn--rhqv96g - 0x0037ec4b, // n0x05d8 c0x0000 (---------------) + I xn--rovu88b - 0x00382dcb, // n0x05d9 c0x0000 (---------------) + I xn--s9brj9c - 0x003846cb, // n0x05da c0x0000 (---------------) + I xn--ses554g - 0x00390b0b, // n0x05db c0x0000 (---------------) + I xn--t60b56a - 0x00390dc9, // n0x05dc c0x0000 (---------------) + I xn--tckwe - 0x0039130d, // n0x05dd c0x0000 (---------------) + I xn--tiq49xqyj - 0x0039638a, // n0x05de c0x0000 (---------------) + I xn--unup4y - 0x003972d7, // n0x05df c0x0000 (---------------) + I 
xn--vermgensberater-ctb - 0x00398118, // n0x05e0 c0x0000 (---------------) + I xn--vermgensberatung-pwb - 0x0039c409, // n0x05e1 c0x0000 (---------------) + I xn--vhquv - 0x0039d60b, // n0x05e2 c0x0000 (---------------) + I xn--vuq861b - 0x0039e1d4, // n0x05e3 c0x0000 (---------------) + I xn--w4r85el8fhu5dnra - 0x0039e6cb, // n0x05e4 c0x0000 (---------------) + I xn--w4rs40l - 0x0039ec4a, // n0x05e5 c0x0000 (---------------) + I xn--wgbh1c - 0x0039f20a, // n0x05e6 c0x0000 (---------------) + I xn--wgbl6a - 0x0039f48b, // n0x05e7 c0x0000 (---------------) + I xn--xhq521b - 0x003a0050, // n0x05e8 c0x0000 (---------------) + I xn--xkc2al3hye2a - 0x003a0451, // n0x05e9 c0x0000 (---------------) + I xn--xkc2dl3a5ee0h - 0x003a0d0a, // n0x05ea c0x0000 (---------------) + I xn--y9a3aq - 0x003a194d, // n0x05eb c0x0000 (---------------) + I xn--yfro4i67o - 0x003a204d, // n0x05ec c0x0000 (---------------) + I xn--ygbi2ammx - 0x003a514b, // n0x05ed c0x0000 (---------------) + I xn--zfr164b - 0x003a5906, // n0x05ee c0x0000 (---------------) + I xperia - 0x00269403, // n0x05ef c0x0000 (---------------) + I xxx - 0x00247243, // n0x05f0 c0x0000 (---------------) + I xyz - 0x0030aa46, // n0x05f1 c0x0000 (---------------) + I yachts - 0x0028e505, // n0x05f2 c0x0000 (---------------) + I yahoo - 0x002ca1c7, // n0x05f3 c0x0000 (---------------) + I yamaxun - 0x0033c546, // n0x05f4 c0x0000 (---------------) + I yandex - 0x0161d542, // n0x05f5 c0x0005 (---------------)* o I ye - 0x0036e189, // n0x05f6 c0x0000 (---------------) + I yodobashi - 0x0038de04, // n0x05f7 c0x0000 (---------------) + I yoga - 0x00357948, // n0x05f8 c0x0000 (---------------) + I yokohama - 0x00249383, // n0x05f9 c0x0000 (---------------) + I you - 0x002e6a47, // n0x05fa c0x0000 (---------------) + I youtube - 0x00215d42, // n0x05fb c0x0000 (---------------) + I yt - 0x002acb43, // n0x05fc c0x0000 (---------------) + I yun - 0x65200182, // n0x05fd c0x0194 (n0x1ea5-n0x1eb6) o I za - 0x002c6b86, // n0x05fe c0x0000 
(---------------) + I zappos - 0x002c7584, // n0x05ff c0x0000 (---------------) + I zara - 0x00311f04, // n0x0600 c0x0000 (---------------) + I zero - 0x0023c943, // n0x0601 c0x0000 (---------------) + I zip - 0x0023c945, // n0x0602 c0x0000 (---------------) + I zippo - 0x01700982, // n0x0603 c0x0005 (---------------)* o I zm - 0x002ddec4, // n0x0604 c0x0000 (---------------) + I zone - 0x002728c7, // n0x0605 c0x0000 (---------------) + I zuerich - 0x016d0bc2, // n0x0606 c0x0005 (---------------)* o I zw - 0x00234803, // n0x0607 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x0608 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x0609 c0x0000 (---------------) + I gov - 0x002119c3, // n0x060a c0x0000 (---------------) + I mil - 0x002207c3, // n0x060b c0x0000 (---------------) + I net - 0x00225403, // n0x060c c0x0000 (---------------) + I org - 0x00201343, // n0x060d c0x0000 (---------------) + I nom - 0x00200342, // n0x060e c0x0000 (---------------) + I ac - 0x000ff148, // n0x060f c0x0000 (---------------) + blogspot - 0x0020a442, // n0x0610 c0x0000 (---------------) + I co - 0x0027c5c3, // n0x0611 c0x0000 (---------------) + I gov - 0x002119c3, // n0x0612 c0x0000 (---------------) + I mil - 0x002207c3, // n0x0613 c0x0000 (---------------) + I net - 0x00225403, // n0x0614 c0x0000 (---------------) + I org - 0x00217f43, // n0x0615 c0x0000 (---------------) + I sch - 0x0031d296, // n0x0616 c0x0000 (---------------) + I accident-investigation - 0x0031edd3, // n0x0617 c0x0000 (---------------) + I accident-prevention - 0x002f44c9, // n0x0618 c0x0000 (---------------) + I aerobatic - 0x00232588, // n0x0619 c0x0000 (---------------) + I aeroclub - 0x002e2889, // n0x061a c0x0000 (---------------) + I aerodrome - 0x002fbdc6, // n0x061b c0x0000 (---------------) + I agents - 0x00315950, // n0x061c c0x0000 (---------------) + I air-surveillance - 0x00341e93, // n0x061d c0x0000 (---------------) + I air-traffic-control - 0x00207388, // n0x061e c0x0000 
(---------------) + I aircraft - 0x0024c3c7, // n0x061f c0x0000 (---------------) + I airline - 0x00278047, // n0x0620 c0x0000 (---------------) + I airport - 0x0028778a, // n0x0621 c0x0000 (---------------) + I airtraffic - 0x002b4e89, // n0x0622 c0x0000 (---------------) + I ambulance - 0x0032c289, // n0x0623 c0x0000 (---------------) + I amusement - 0x002ceb4b, // n0x0624 c0x0000 (---------------) + I association - 0x003216c6, // n0x0625 c0x0000 (---------------) + I author - 0x0022ba4a, // n0x0626 c0x0000 (---------------) + I ballooning - 0x00220ec6, // n0x0627 c0x0000 (---------------) + I broker - 0x0038da03, // n0x0628 c0x0000 (---------------) + I caa - 0x002e3005, // n0x0629 c0x0000 (---------------) + I cargo - 0x003785c8, // n0x062a c0x0000 (---------------) + I catering - 0x0030f14d, // n0x062b c0x0000 (---------------) + I certification - 0x00341a4c, // n0x062c c0x0000 (---------------) + I championship - 0x0030bc87, // n0x062d c0x0000 (---------------) + I charter - 0x00358f8d, // n0x062e c0x0000 (---------------) + I civilaviation - 0x00232684, // n0x062f c0x0000 (---------------) + I club - 0x00237d4a, // n0x0630 c0x0000 (---------------) + I conference - 0x00238c4a, // n0x0631 c0x0000 (---------------) + I consultant - 0x0023910a, // n0x0632 c0x0000 (---------------) + I consulting - 0x0032a087, // n0x0633 c0x0000 (---------------) + I control - 0x00242307, // n0x0634 c0x0000 (---------------) + I council - 0x00244344, // n0x0635 c0x0000 (---------------) + I crew - 0x00226286, // n0x0636 c0x0000 (---------------) + I design - 0x00321a84, // n0x0637 c0x0000 (---------------) + I dgca - 0x002f8c08, // n0x0638 c0x0000 (---------------) + I educator - 0x00253a09, // n0x0639 c0x0000 (---------------) + I emergency - 0x0033f006, // n0x063a c0x0000 (---------------) + I engine - 0x0033f008, // n0x063b c0x0000 (---------------) + I engineer - 0x0024768d, // n0x063c c0x0000 (---------------) + I entertainment - 0x002c52c9, // n0x063d c0x0000 
(---------------) + I equipment - 0x00233048, // n0x063e c0x0000 (---------------) + I exchange - 0x00247047, // n0x063f c0x0000 (---------------) + I express - 0x0031808a, // n0x0640 c0x0000 (---------------) + I federation - 0x0024f6c6, // n0x0641 c0x0000 (---------------) + I flight - 0x0025ae47, // n0x0642 c0x0000 (---------------) + I freight - 0x00240004, // n0x0643 c0x0000 (---------------) + I fuel - 0x00261387, // n0x0644 c0x0000 (---------------) + I gliding - 0x0027c5ca, // n0x0645 c0x0000 (---------------) + I government - 0x002dc5ce, // n0x0646 c0x0000 (---------------) + I groundhandling - 0x00209d45, // n0x0647 c0x0000 (---------------) + I group - 0x002fe84b, // n0x0648 c0x0000 (---------------) + I hanggliding - 0x002e3349, // n0x0649 c0x0000 (---------------) + I homebuilt - 0x00250849, // n0x064a c0x0000 (---------------) + I insurance - 0x00279f07, // n0x064b c0x0000 (---------------) + I journal - 0x0037f20a, // n0x064c c0x0000 (---------------) + I journalist - 0x00286e47, // n0x064d c0x0000 (---------------) + I leasing - 0x002e4e49, // n0x064e c0x0000 (---------------) + I logistics - 0x00395a48, // n0x064f c0x0000 (---------------) + I magazine - 0x0026be8b, // n0x0650 c0x0000 (---------------) + I maintenance - 0x00302485, // n0x0651 c0x0000 (---------------) + I media - 0x002dc2ca, // n0x0652 c0x0000 (---------------) + I microlight - 0x002a2fc9, // n0x0653 c0x0000 (---------------) + I modelling - 0x0039a70a, // n0x0654 c0x0000 (---------------) + I navigation - 0x002c7b8b, // n0x0655 c0x0000 (---------------) + I parachuting - 0x0026128b, // n0x0656 c0x0000 (---------------) + I paragliding - 0x002ce8d5, // n0x0657 c0x0000 (---------------) + I passenger-association - 0x002d8445, // n0x0658 c0x0000 (---------------) + I pilot - 0x002470c5, // n0x0659 c0x0000 (---------------) + I press - 0x002e228a, // n0x065a c0x0000 (---------------) + I production - 0x003236ca, // n0x065b c0x0000 (---------------) + I recreation - 0x0022d4c7, // 
n0x065c c0x0000 (---------------) + I repbody - 0x0021e583, // n0x065d c0x0000 (---------------) + I res - 0x0029e648, // n0x065e c0x0000 (---------------) + I research - 0x002cf3ca, // n0x065f c0x0000 (---------------) + I rotorcraft - 0x0039fa46, // n0x0660 c0x0000 (---------------) + I safety - 0x00240f09, // n0x0661 c0x0000 (---------------) + I scientist - 0x00202348, // n0x0662 c0x0000 (---------------) + I services - 0x002b25c4, // n0x0663 c0x0000 (---------------) + I show - 0x00268809, // n0x0664 c0x0000 (---------------) + I skydiving - 0x002ba8c8, // n0x0665 c0x0000 (---------------) + I software - 0x002a9647, // n0x0666 c0x0000 (---------------) + I student - 0x00263686, // n0x0667 c0x0000 (---------------) + I trader - 0x002a4287, // n0x0668 c0x0000 (---------------) + I trading - 0x00294d47, // n0x0669 c0x0000 (---------------) + I trainer - 0x00243d85, // n0x066a c0x0000 (---------------) + I union - 0x002dcecc, // n0x066b c0x0000 (---------------) + I workinggroup - 0x0033ad45, // n0x066c c0x0000 (---------------) + I works - 0x00234803, // n0x066d c0x0000 (---------------) + I com - 0x0023a1c3, // n0x066e c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x066f c0x0000 (---------------) + I gov - 0x002207c3, // n0x0670 c0x0000 (---------------) + I net - 0x00225403, // n0x0671 c0x0000 (---------------) + I org - 0x0020a442, // n0x0672 c0x0000 (---------------) + I co - 0x00234803, // n0x0673 c0x0000 (---------------) + I com - 0x002207c3, // n0x0674 c0x0000 (---------------) + I net - 0x00201343, // n0x0675 c0x0000 (---------------) + I nom - 0x00225403, // n0x0676 c0x0000 (---------------) + I org - 0x00234803, // n0x0677 c0x0000 (---------------) + I com - 0x002207c3, // n0x0678 c0x0000 (---------------) + I net - 0x0020a483, // n0x0679 c0x0000 (---------------) + I off - 0x00225403, // n0x067a c0x0000 (---------------) + I org - 0x000ff148, // n0x067b c0x0000 (---------------) + blogspot - 0x00234803, // n0x067c c0x0000 (---------------) + I 
com - 0x0023a1c3, // n0x067d c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x067e c0x0000 (---------------) + I gov - 0x002119c3, // n0x067f c0x0000 (---------------) + I mil - 0x002207c3, // n0x0680 c0x0000 (---------------) + I net - 0x00225403, // n0x0681 c0x0000 (---------------) + I org - 0x000ff148, // n0x0682 c0x0000 (---------------) + blogspot - 0x0020a442, // n0x0683 c0x0000 (---------------) + I co - 0x00202ac2, // n0x0684 c0x0000 (---------------) + I ed - 0x00239342, // n0x0685 c0x0000 (---------------) + I gv - 0x00201782, // n0x0686 c0x0000 (---------------) + I it - 0x002018c2, // n0x0687 c0x0000 (---------------) + I og - 0x0022d542, // n0x0688 c0x0000 (---------------) + I pb - 0x04634803, // n0x0689 c0x0011 (n0x0692-n0x0693) + I com - 0x0023a1c3, // n0x068a c0x0000 (---------------) + I edu - 0x00211543, // n0x068b c0x0000 (---------------) + I gob - 0x0027c5c3, // n0x068c c0x0000 (---------------) + I gov - 0x002014c3, // n0x068d c0x0000 (---------------) + I int - 0x002119c3, // n0x068e c0x0000 (---------------) + I mil - 0x002207c3, // n0x068f c0x0000 (---------------) + I net - 0x00225403, // n0x0690 c0x0000 (---------------) + I org - 0x0023b303, // n0x0691 c0x0000 (---------------) + I tur - 0x000ff148, // n0x0692 c0x0000 (---------------) + blogspot - 0x0025ba44, // n0x0693 c0x0000 (---------------) + I e164 - 0x00314307, // n0x0694 c0x0000 (---------------) + I in-addr - 0x0021bc03, // n0x0695 c0x0000 (---------------) + I ip6 - 0x002323c4, // n0x0696 c0x0000 (---------------) + I iris - 0x0020c483, // n0x0697 c0x0000 (---------------) + I uri - 0x00279f83, // n0x0698 c0x0000 (---------------) + I urn - 0x0027c5c3, // n0x0699 c0x0000 (---------------) + I gov - 0x00200342, // n0x069a c0x0000 (---------------) + I ac - 0x0012bd03, // n0x069b c0x0000 (---------------) + biz - 0x0560a442, // n0x069c c0x0015 (n0x06a1-n0x06a2) + I co - 0x00239342, // n0x069d c0x0000 (---------------) + I gv - 0x00001804, // n0x069e c0x0000 
(---------------) + info - 0x00200d82, // n0x069f c0x0000 (---------------) + I or - 0x000e1e84, // n0x06a0 c0x0000 (---------------) + priv - 0x000ff148, // n0x06a1 c0x0000 (---------------) + blogspot - 0x002335c3, // n0x06a2 c0x0000 (---------------) + I act - 0x002d0a83, // n0x06a3 c0x0000 (---------------) + I asn - 0x05e34803, // n0x06a4 c0x0017 (n0x06b4-n0x06b5) + I com - 0x00237d44, // n0x06a5 c0x0000 (---------------) + I conf - 0x0623a1c3, // n0x06a6 c0x0018 (n0x06b5-n0x06bd) + I edu - 0x0667c5c3, // n0x06a7 c0x0019 (n0x06bd-n0x06c2) + I gov - 0x0020ae82, // n0x06a8 c0x0000 (---------------) + I id - 0x00201804, // n0x06a9 c0x0000 (---------------) + I info - 0x002207c3, // n0x06aa c0x0000 (---------------) + I net - 0x002ed403, // n0x06ab c0x0000 (---------------) + I nsw - 0x002008c2, // n0x06ac c0x0000 (---------------) + I nt - 0x00225403, // n0x06ad c0x0000 (---------------) + I org - 0x0021bd02, // n0x06ae c0x0000 (---------------) + I oz - 0x002e85c3, // n0x06af c0x0000 (---------------) + I qld - 0x00200fc2, // n0x06b0 c0x0000 (---------------) + I sa - 0x00205683, // n0x06b1 c0x0000 (---------------) + I tas - 0x00202403, // n0x06b2 c0x0000 (---------------) + I vic - 0x00201c02, // n0x06b3 c0x0000 (---------------) + I wa - 0x000ff148, // n0x06b4 c0x0000 (---------------) + blogspot - 0x002335c3, // n0x06b5 c0x0000 (---------------) + I act - 0x002ed403, // n0x06b6 c0x0000 (---------------) + I nsw - 0x002008c2, // n0x06b7 c0x0000 (---------------) + I nt - 0x002e85c3, // n0x06b8 c0x0000 (---------------) + I qld - 0x00200fc2, // n0x06b9 c0x0000 (---------------) + I sa - 0x00205683, // n0x06ba c0x0000 (---------------) + I tas - 0x00202403, // n0x06bb c0x0000 (---------------) + I vic - 0x00201c02, // n0x06bc c0x0000 (---------------) + I wa - 0x002e85c3, // n0x06bd c0x0000 (---------------) + I qld - 0x00200fc2, // n0x06be c0x0000 (---------------) + I sa - 0x00205683, // n0x06bf c0x0000 (---------------) + I tas - 0x00202403, // n0x06c0 
c0x0000 (---------------) + I vic - 0x00201c02, // n0x06c1 c0x0000 (---------------) + I wa - 0x00234803, // n0x06c2 c0x0000 (---------------) + I com - 0x0032bd03, // n0x06c3 c0x0000 (---------------) + I biz - 0x00234803, // n0x06c4 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x06c5 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x06c6 c0x0000 (---------------) + I gov - 0x00201804, // n0x06c7 c0x0000 (---------------) + I info - 0x002014c3, // n0x06c8 c0x0000 (---------------) + I int - 0x002119c3, // n0x06c9 c0x0000 (---------------) + I mil - 0x00207d04, // n0x06ca c0x0000 (---------------) + I name - 0x002207c3, // n0x06cb c0x0000 (---------------) + I net - 0x00225403, // n0x06cc c0x0000 (---------------) + I org - 0x00210c02, // n0x06cd c0x0000 (---------------) + I pp - 0x002210c3, // n0x06ce c0x0000 (---------------) + I pro - 0x000ff148, // n0x06cf c0x0000 (---------------) + blogspot - 0x0020a442, // n0x06d0 c0x0000 (---------------) + I co - 0x00234803, // n0x06d1 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x06d2 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x06d3 c0x0000 (---------------) + I gov - 0x002119c3, // n0x06d4 c0x0000 (---------------) + I mil - 0x002207c3, // n0x06d5 c0x0000 (---------------) + I net - 0x00225403, // n0x06d6 c0x0000 (---------------) + I org - 0x002006c2, // n0x06d7 c0x0000 (---------------) + I rs - 0x0024c704, // n0x06d8 c0x0000 (---------------) + I unbi - 0x0039fd44, // n0x06d9 c0x0000 (---------------) + I unsa - 0x0032bd03, // n0x06da c0x0000 (---------------) + I biz - 0x0020a442, // n0x06db c0x0000 (---------------) + I co - 0x00234803, // n0x06dc c0x0000 (---------------) + I com - 0x0023a1c3, // n0x06dd c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x06de c0x0000 (---------------) + I gov - 0x00201804, // n0x06df c0x0000 (---------------) + I info - 0x002207c3, // n0x06e0 c0x0000 (---------------) + I net - 0x00225403, // n0x06e1 c0x0000 (---------------) + I org - 0x00363685, 
// n0x06e2 c0x0000 (---------------) + I store - 0x0020cd02, // n0x06e3 c0x0000 (---------------) + I tv - 0x00200342, // n0x06e4 c0x0000 (---------------) + I ac - 0x000ff148, // n0x06e5 c0x0000 (---------------) + blogspot - 0x0027c5c3, // n0x06e6 c0x0000 (---------------) + I gov - 0x00251d41, // n0x06e7 c0x0000 (---------------) + I 0 - 0x00229441, // n0x06e8 c0x0000 (---------------) + I 1 - 0x0022f241, // n0x06e9 c0x0000 (---------------) + I 2 - 0x00231001, // n0x06ea c0x0000 (---------------) + I 3 - 0x0025bb01, // n0x06eb c0x0000 (---------------) + I 4 - 0x00272641, // n0x06ec c0x0000 (---------------) + I 5 - 0x0021bc81, // n0x06ed c0x0000 (---------------) + I 6 - 0x00250401, // n0x06ee c0x0000 (---------------) + I 7 - 0x002ef201, // n0x06ef c0x0000 (---------------) + I 8 - 0x00300ec1, // n0x06f0 c0x0000 (---------------) + I 9 - 0x002001c1, // n0x06f1 c0x0000 (---------------) + I a - 0x00200001, // n0x06f2 c0x0000 (---------------) + I b - 0x000ff148, // n0x06f3 c0x0000 (---------------) + blogspot - 0x00200141, // n0x06f4 c0x0000 (---------------) + I c - 0x00200201, // n0x06f5 c0x0000 (---------------) + I d - 0x00200081, // n0x06f6 c0x0000 (---------------) + I e - 0x002016c1, // n0x06f7 c0x0000 (---------------) + I f - 0x00200281, // n0x06f8 c0x0000 (---------------) + I g - 0x002003c1, // n0x06f9 c0x0000 (---------------) + I h - 0x00200041, // n0x06fa c0x0000 (---------------) + I i - 0x00201081, // n0x06fb c0x0000 (---------------) + I j - 0x00201b01, // n0x06fc c0x0000 (---------------) + I k - 0x00200d01, // n0x06fd c0x0000 (---------------) + I l - 0x00200441, // n0x06fe c0x0000 (---------------) + I m - 0x00200781, // n0x06ff c0x0000 (---------------) + I n - 0x00200881, // n0x0700 c0x0000 (---------------) + I o - 0x00200581, // n0x0701 c0x0000 (---------------) + I p - 0x00200f01, // n0x0702 c0x0000 (---------------) + I q - 0x002006c1, // n0x0703 c0x0000 (---------------) + I r - 0x002000c1, // n0x0704 c0x0000 (---------------) + I s 
- 0x002004c1, // n0x0705 c0x0000 (---------------) + I t - 0x00200741, // n0x0706 c0x0000 (---------------) + I u - 0x00200bc1, // n0x0707 c0x0000 (---------------) + I v - 0x00201c01, // n0x0708 c0x0000 (---------------) + I w - 0x00206501, // n0x0709 c0x0000 (---------------) + I x - 0x00200241, // n0x070a c0x0000 (---------------) + I y - 0x00200101, // n0x070b c0x0000 (---------------) + I z - 0x00234803, // n0x070c c0x0000 (---------------) + I com - 0x0023a1c3, // n0x070d c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x070e c0x0000 (---------------) + I gov - 0x002207c3, // n0x070f c0x0000 (---------------) + I net - 0x00225403, // n0x0710 c0x0000 (---------------) + I org - 0x0020a442, // n0x0711 c0x0000 (---------------) + I co - 0x00234803, // n0x0712 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x0713 c0x0000 (---------------) + I edu - 0x00200d82, // n0x0714 c0x0000 (---------------) + I or - 0x00225403, // n0x0715 c0x0000 (---------------) + I org - 0x00108d47, // n0x0716 c0x0000 (---------------) + dscloud - 0x00012586, // n0x0717 c0x0000 (---------------) + dyndns - 0x0005508a, // n0x0718 c0x0000 (---------------) + for-better - 0x00089748, // n0x0719 c0x0000 (---------------) + for-more - 0x00055688, // n0x071a c0x0000 (---------------) + for-some - 0x00055f87, // n0x071b c0x0000 (---------------) + for-the - 0x00067646, // n0x071c c0x0000 (---------------) + selfip - 0x00111746, // n0x071d c0x0000 (---------------) + webhop - 0x002ceb44, // n0x071e c0x0000 (---------------) + I asso - 0x00321087, // n0x071f c0x0000 (---------------) + I barreau - 0x000ff148, // n0x0720 c0x0000 (---------------) + blogspot - 0x0023ef84, // n0x0721 c0x0000 (---------------) + I gouv - 0x00234803, // n0x0722 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x0723 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x0724 c0x0000 (---------------) + I gov - 0x002207c3, // n0x0725 c0x0000 (---------------) + I net - 0x00225403, // n0x0726 c0x0000 
(---------------) + I org - 0x00234803, // n0x0727 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x0728 c0x0000 (---------------) + I edu - 0x00211543, // n0x0729 c0x0000 (---------------) + I gob - 0x0027c5c3, // n0x072a c0x0000 (---------------) + I gov - 0x002014c3, // n0x072b c0x0000 (---------------) + I int - 0x002119c3, // n0x072c c0x0000 (---------------) + I mil - 0x002207c3, // n0x072d c0x0000 (---------------) + I net - 0x00225403, // n0x072e c0x0000 (---------------) + I org - 0x0020cd02, // n0x072f c0x0000 (---------------) + I tv - 0x002c60c3, // n0x0730 c0x0000 (---------------) + I adm - 0x002adf83, // n0x0731 c0x0000 (---------------) + I adv - 0x0024a8c3, // n0x0732 c0x0000 (---------------) + I agr - 0x00201ec2, // n0x0733 c0x0000 (---------------) + I am - 0x0024d0c3, // n0x0734 c0x0000 (---------------) + I arq - 0x00201d03, // n0x0735 c0x0000 (---------------) + I art - 0x00208643, // n0x0736 c0x0000 (---------------) + I ato - 0x00200001, // n0x0737 c0x0000 (---------------) + I b - 0x00205903, // n0x0738 c0x0000 (---------------) + I bio - 0x0022d7c4, // n0x0739 c0x0000 (---------------) + I blog - 0x00306a03, // n0x073a c0x0000 (---------------) + I bmd - 0x00309843, // n0x073b c0x0000 (---------------) + I cim - 0x0021b803, // n0x073c c0x0000 (---------------) + I cng - 0x0022eec3, // n0x073d c0x0000 (---------------) + I cnt - 0x0a234803, // n0x073e c0x0028 (n0x0776-n0x0777) + I com - 0x0023d404, // n0x073f c0x0000 (---------------) + I coop - 0x0021b7c3, // n0x0740 c0x0000 (---------------) + I ecn - 0x0020a403, // n0x0741 c0x0000 (---------------) + I eco - 0x0023a1c3, // n0x0742 c0x0000 (---------------) + I edu - 0x00239f03, // n0x0743 c0x0000 (---------------) + I emp - 0x00210703, // n0x0744 c0x0000 (---------------) + I eng - 0x0029b783, // n0x0745 c0x0000 (---------------) + I esp - 0x003097c3, // n0x0746 c0x0000 (---------------) + I etc - 0x00223783, // n0x0747 c0x0000 (---------------) + I eti - 0x0020fc83, // n0x0748 
c0x0000 (---------------) + I far - 0x00250a84, // n0x0749 c0x0000 (---------------) + I flog - 0x00234ec2, // n0x074a c0x0000 (---------------) + I fm - 0x00254a03, // n0x074b c0x0000 (---------------) + I fnd - 0x0025a403, // n0x074c c0x0000 (---------------) + I fot - 0x00276543, // n0x074d c0x0000 (---------------) + I fst - 0x00314bc3, // n0x074e c0x0000 (---------------) + I g12 - 0x00312e43, // n0x074f c0x0000 (---------------) + I ggf - 0x0027c5c3, // n0x0750 c0x0000 (---------------) + I gov - 0x00320a43, // n0x0751 c0x0000 (---------------) + I imb - 0x0021e783, // n0x0752 c0x0000 (---------------) + I ind - 0x00201643, // n0x0753 c0x0000 (---------------) + I inf - 0x00218b43, // n0x0754 c0x0000 (---------------) + I jor - 0x002f0583, // n0x0755 c0x0000 (---------------) + I jus - 0x0022c983, // n0x0756 c0x0000 (---------------) + I leg - 0x002c1b43, // n0x0757 c0x0000 (---------------) + I lel - 0x00200443, // n0x0758 c0x0000 (---------------) + I mat - 0x002127c3, // n0x0759 c0x0000 (---------------) + I med - 0x002119c3, // n0x075a c0x0000 (---------------) + I mil - 0x00229802, // n0x075b c0x0000 (---------------) + I mp - 0x00280603, // n0x075c c0x0000 (---------------) + I mus - 0x002207c3, // n0x075d c0x0000 (---------------) + I net - 0x01601343, // n0x075e c0x0005 (---------------)* o I nom - 0x00259483, // n0x075f c0x0000 (---------------) + I not - 0x0023ae83, // n0x0760 c0x0000 (---------------) + I ntr - 0x0020cb43, // n0x0761 c0x0000 (---------------) + I odo - 0x00225403, // n0x0762 c0x0000 (---------------) + I org - 0x00230dc3, // n0x0763 c0x0000 (---------------) + I ppg - 0x002210c3, // n0x0764 c0x0000 (---------------) + I pro - 0x0022ec03, // n0x0765 c0x0000 (---------------) + I psc - 0x002f62c3, // n0x0766 c0x0000 (---------------) + I psi - 0x002e8783, // n0x0767 c0x0000 (---------------) + I qsl - 0x00262b05, // n0x0768 c0x0000 (---------------) + I radio - 0x00229943, // n0x0769 c0x0000 (---------------) + I rec - 0x002e87c3, // 
n0x076a c0x0000 (---------------) + I slg - 0x0033ebc3, // n0x076b c0x0000 (---------------) + I srv - 0x0021db44, // n0x076c c0x0000 (---------------) + I taxi - 0x00339cc3, // n0x076d c0x0000 (---------------) + I teo - 0x00239943, // n0x076e c0x0000 (---------------) + I tmp - 0x002a8fc3, // n0x076f c0x0000 (---------------) + I trd - 0x0023b303, // n0x0770 c0x0000 (---------------) + I tur - 0x0020cd02, // n0x0771 c0x0000 (---------------) + I tv - 0x00230403, // n0x0772 c0x0000 (---------------) + I vet - 0x002fab44, // n0x0773 c0x0000 (---------------) + I vlog - 0x0032da44, // n0x0774 c0x0000 (---------------) + I wiki - 0x0025a343, // n0x0775 c0x0000 (---------------) + I zlg - 0x000ff148, // n0x0776 c0x0000 (---------------) + blogspot - 0x00234803, // n0x0777 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x0778 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x0779 c0x0000 (---------------) + I gov - 0x002207c3, // n0x077a c0x0000 (---------------) + I net - 0x00225403, // n0x077b c0x0000 (---------------) + I org - 0x00234803, // n0x077c c0x0000 (---------------) + I com - 0x0023a1c3, // n0x077d c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x077e c0x0000 (---------------) + I gov - 0x002207c3, // n0x077f c0x0000 (---------------) + I net - 0x00225403, // n0x0780 c0x0000 (---------------) + I org - 0x0020a442, // n0x0781 c0x0000 (---------------) + I co - 0x00225403, // n0x0782 c0x0000 (---------------) + I org - 0x0b634803, // n0x0783 c0x002d (n0x0787-n0x0788) + I com - 0x0027c5c3, // n0x0784 c0x0000 (---------------) + I gov - 0x002119c3, // n0x0785 c0x0000 (---------------) + I mil - 0x0020a482, // n0x0786 c0x0000 (---------------) + I of - 0x000ff148, // n0x0787 c0x0000 (---------------) + blogspot - 0x00234803, // n0x0788 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x0789 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x078a c0x0000 (---------------) + I gov - 0x002207c3, // n0x078b c0x0000 (---------------) + I net - 
0x00225403, // n0x078c c0x0000 (---------------) + I org - 0x00000182, // n0x078d c0x0000 (---------------) + za - 0x00200ac2, // n0x078e c0x0000 (---------------) + I ab - 0x00221d42, // n0x078f c0x0000 (---------------) + I bc - 0x000ff148, // n0x0790 c0x0000 (---------------) + blogspot - 0x0000a442, // n0x0791 c0x0000 (---------------) + co - 0x0023be02, // n0x0792 c0x0000 (---------------) + I gc - 0x0020b442, // n0x0793 c0x0000 (---------------) + I mb - 0x002144c2, // n0x0794 c0x0000 (---------------) + I nb - 0x00201682, // n0x0795 c0x0000 (---------------) + I nf - 0x002473c2, // n0x0796 c0x0000 (---------------) + I nl - 0x0020d602, // n0x0797 c0x0000 (---------------) + I ns - 0x002008c2, // n0x0798 c0x0000 (---------------) + I nt - 0x00204fc2, // n0x0799 c0x0000 (---------------) + I nu - 0x00200882, // n0x079a c0x0000 (---------------) + I on - 0x00200582, // n0x079b c0x0000 (---------------) + I pe - 0x00379342, // n0x079c c0x0000 (---------------) + I qc - 0x0020d642, // n0x079d c0x0000 (---------------) + I sk - 0x0022c4c2, // n0x079e c0x0000 (---------------) + I yk - 0x00147d09, // n0x079f c0x0000 (---------------) + ftpaccess - 0x00170fcb, // n0x07a0 c0x0000 (---------------) + game-server - 0x000d3d48, // n0x07a1 c0x0000 (---------------) + myphotos - 0x00043309, // n0x07a2 c0x0000 (---------------) + scrapping - 0x0027c5c3, // n0x07a3 c0x0000 (---------------) + I gov - 0x000ff148, // n0x07a4 c0x0000 (---------------) + blogspot - 0x000ff148, // n0x07a5 c0x0000 (---------------) + blogspot - 0x00200342, // n0x07a6 c0x0000 (---------------) + I ac - 0x002ceb44, // n0x07a7 c0x0000 (---------------) + I asso - 0x0020a442, // n0x07a8 c0x0000 (---------------) + I co - 0x00234803, // n0x07a9 c0x0000 (---------------) + I com - 0x00202ac2, // n0x07aa c0x0000 (---------------) + I ed - 0x0023a1c3, // n0x07ab c0x0000 (---------------) + I edu - 0x0020ec82, // n0x07ac c0x0000 (---------------) + I go - 0x0023ef84, // n0x07ad c0x0000 (---------------) + 
I gouv - 0x002014c3, // n0x07ae c0x0000 (---------------) + I int - 0x0024bb82, // n0x07af c0x0000 (---------------) + I md - 0x002207c3, // n0x07b0 c0x0000 (---------------) + I net - 0x00200d82, // n0x07b1 c0x0000 (---------------) + I or - 0x00225403, // n0x07b2 c0x0000 (---------------) + I org - 0x002470c6, // n0x07b3 c0x0000 (---------------) + I presse - 0x00316dcf, // n0x07b4 c0x0000 (---------------) + I xn--aroport-bya - 0x00700783, // n0x07b5 c0x0001 (---------------) ! I www - 0x000ff148, // n0x07b6 c0x0000 (---------------) + blogspot - 0x0020a442, // n0x07b7 c0x0000 (---------------) + I co - 0x00211543, // n0x07b8 c0x0000 (---------------) + I gob - 0x0027c5c3, // n0x07b9 c0x0000 (---------------) + I gov - 0x002119c3, // n0x07ba c0x0000 (---------------) + I mil - 0x0020a442, // n0x07bb c0x0000 (---------------) + I co - 0x00234803, // n0x07bc c0x0000 (---------------) + I com - 0x0027c5c3, // n0x07bd c0x0000 (---------------) + I gov - 0x002207c3, // n0x07be c0x0000 (---------------) + I net - 0x00200342, // n0x07bf c0x0000 (---------------) + I ac - 0x002034c2, // n0x07c0 c0x0000 (---------------) + I ah - 0x0e6f4209, // n0x07c1 c0x0039 (n0x07ec-n0x07ed) o I amazonaws - 0x00208fc2, // n0x07c2 c0x0000 (---------------) + I bj - 0x0ee34803, // n0x07c3 c0x003b (n0x07ee-n0x07ef) + I com - 0x00242d42, // n0x07c4 c0x0000 (---------------) + I cq - 0x0023a1c3, // n0x07c5 c0x0000 (---------------) + I edu - 0x00218b02, // n0x07c6 c0x0000 (---------------) + I fj - 0x00223002, // n0x07c7 c0x0000 (---------------) + I gd - 0x0027c5c3, // n0x07c8 c0x0000 (---------------) + I gov - 0x00296a42, // n0x07c9 c0x0000 (---------------) + I gs - 0x00251b02, // n0x07ca c0x0000 (---------------) + I gx - 0x0025a302, // n0x07cb c0x0000 (---------------) + I gz - 0x002037c2, // n0x07cc c0x0000 (---------------) + I ha - 0x0028dd82, // n0x07cd c0x0000 (---------------) + I hb - 0x00203282, // n0x07ce c0x0000 (---------------) + I he - 0x002003c2, // n0x07cf c0x0000 
(---------------) + I hi - 0x00209a82, // n0x07d0 c0x0000 (---------------) + I hk - 0x00253642, // n0x07d1 c0x0000 (---------------) + I hl - 0x0020d342, // n0x07d2 c0x0000 (---------------) + I hn - 0x002acf82, // n0x07d3 c0x0000 (---------------) + I jl - 0x00231c82, // n0x07d4 c0x0000 (---------------) + I js - 0x0031a2c2, // n0x07d5 c0x0000 (---------------) + I jx - 0x00225f02, // n0x07d6 c0x0000 (---------------) + I ln - 0x002119c3, // n0x07d7 c0x0000 (---------------) + I mil - 0x00209502, // n0x07d8 c0x0000 (---------------) + I mo - 0x002207c3, // n0x07d9 c0x0000 (---------------) + I net - 0x0023d882, // n0x07da c0x0000 (---------------) + I nm - 0x00265fc2, // n0x07db c0x0000 (---------------) + I nx - 0x00225403, // n0x07dc c0x0000 (---------------) + I org - 0x0024d142, // n0x07dd c0x0000 (---------------) + I qh - 0x00217f42, // n0x07de c0x0000 (---------------) + I sc - 0x0025bd42, // n0x07df c0x0000 (---------------) + I sd - 0x00201202, // n0x07e0 c0x0000 (---------------) + I sh - 0x00213542, // n0x07e1 c0x0000 (---------------) + I sn - 0x002eefc2, // n0x07e2 c0x0000 (---------------) + I sx - 0x00224e02, // n0x07e3 c0x0000 (---------------) + I tj - 0x00248342, // n0x07e4 c0x0000 (---------------) + I tw - 0x00269482, // n0x07e5 c0x0000 (---------------) + I xj - 0x0030288a, // n0x07e6 c0x0000 (---------------) + I xn--55qx5d - 0x0034500a, // n0x07e7 c0x0000 (---------------) + I xn--io0a7i - 0x00372e0a, // n0x07e8 c0x0000 (---------------) + I xn--od0alg - 0x003a5a82, // n0x07e9 c0x0000 (---------------) + I xz - 0x0020f702, // n0x07ea c0x0000 (---------------) + I yn - 0x002472c2, // n0x07eb c0x0000 (---------------) + I zj - 0x0e8362c7, // n0x07ec c0x003a (n0x07ed-n0x07ee) + compute - 0x0003374a, // n0x07ed c0x0000 (---------------) + cn-north-1 - 0x0f2f4209, // n0x07ee c0x003c (n0x07ef-n0x07f0) o I amazonaws - 0x0f63374a, // n0x07ef c0x003d (n0x07f0-n0x07f1) o I cn-north-1 - 0x00030fc2, // n0x07f0 c0x0000 (---------------) + s3 - 
0x0024a4c4, // n0x07f1 c0x0000 (---------------) + I arts - 0x0fe34803, // n0x07f2 c0x003f (n0x07fe-n0x07ff) + I com - 0x0023a1c3, // n0x07f3 c0x0000 (---------------) + I edu - 0x0024bac4, // n0x07f4 c0x0000 (---------------) + I firm - 0x0027c5c3, // n0x07f5 c0x0000 (---------------) + I gov - 0x00201804, // n0x07f6 c0x0000 (---------------) + I info - 0x002014c3, // n0x07f7 c0x0000 (---------------) + I int - 0x002119c3, // n0x07f8 c0x0000 (---------------) + I mil - 0x002207c3, // n0x07f9 c0x0000 (---------------) + I net - 0x00201343, // n0x07fa c0x0000 (---------------) + I nom - 0x00225403, // n0x07fb c0x0000 (---------------) + I org - 0x00229943, // n0x07fc c0x0000 (---------------) + I rec - 0x00221cc3, // n0x07fd c0x0000 (---------------) + I web - 0x000ff148, // n0x07fe c0x0000 (---------------) + blogspot - 0x00071105, // n0x07ff c0x0000 (---------------) + 1kapp - 0x00108602, // n0x0800 c0x0000 (---------------) + 4u - 0x001727c6, // n0x0801 c0x0000 (---------------) + africa - 0x106f4209, // n0x0802 c0x0041 (n0x08cf-n0x08e1) o I amazonaws - 0x00108f07, // n0x0803 c0x0000 (---------------) + appspot - 0x00000942, // n0x0804 c0x0000 (---------------) + ar - 0x0019d88a, // n0x0805 c0x0000 (---------------) + betainabox - 0x0002d7c7, // n0x0806 c0x0000 (---------------) + blogdns - 0x000ff148, // n0x0807 c0x0000 (---------------) + blogspot - 0x0001bfc2, // n0x0808 c0x0000 (---------------) + br - 0x0013a7c7, // n0x0809 c0x0000 (---------------) + cechire - 0x001946cf, // n0x080a c0x0000 (---------------) + cloudcontrolapp - 0x00129f4f, // n0x080b c0x0000 (---------------) + cloudcontrolled - 0x0001b802, // n0x080c c0x0000 (---------------) + cn - 0x0000a442, // n0x080d c0x0000 (---------------) + co - 0x0009b6c8, // n0x080e c0x0000 (---------------) + codespot - 0x00007802, // n0x080f c0x0000 (---------------) + de - 0x001456c8, // n0x0810 c0x0000 (---------------) + dnsalias - 0x0007bbc7, // n0x0811 c0x0000 (---------------) + dnsdojo - 0x000141cb, // 
n0x0812 c0x0000 (---------------) + doesntexist - 0x00168e09, // n0x0813 c0x0000 (---------------) + dontexist - 0x001455c7, // n0x0814 c0x0000 (---------------) + doomdns - 0x000f11cc, // n0x0815 c0x0000 (---------------) + dreamhosters - 0x0018bfc7, // n0x0816 c0x0000 (---------------) + dsmynas - 0x00125f0a, // n0x0817 c0x0000 (---------------) + dyn-o-saur - 0x00197988, // n0x0818 c0x0000 (---------------) + dynalias - 0x00072d4e, // n0x0819 c0x0000 (---------------) + dyndns-at-home - 0x000dcc4e, // n0x081a c0x0000 (---------------) + dyndns-at-work - 0x0002d60b, // n0x081b c0x0000 (---------------) + dyndns-blog - 0x000e940b, // n0x081c c0x0000 (---------------) + dyndns-free - 0x0001258b, // n0x081d c0x0000 (---------------) + dyndns-home - 0x0001ba49, // n0x081e c0x0000 (---------------) + dyndns-ip - 0x0001e08b, // n0x081f c0x0000 (---------------) + dyndns-mail - 0x0002178d, // n0x0820 c0x0000 (---------------) + dyndns-office - 0x0002500b, // n0x0821 c0x0000 (---------------) + dyndns-pics - 0x0002bf0d, // n0x0822 c0x0000 (---------------) + dyndns-remote - 0x0002cb8d, // n0x0823 c0x0000 (---------------) + dyndns-server - 0x0015624a, // n0x0824 c0x0000 (---------------) + dyndns-web - 0x0012d88b, // n0x0825 c0x0000 (---------------) + dyndns-wiki - 0x0013ab8b, // n0x0826 c0x0000 (---------------) + dyndns-work - 0x0001f710, // n0x0827 c0x0000 (---------------) + elasticbeanstalk - 0x000b8b8f, // n0x0828 c0x0000 (---------------) + est-a-la-maison - 0x0007044f, // n0x0829 c0x0000 (---------------) + est-a-la-masion - 0x0007cc0d, // n0x082a c0x0000 (---------------) + est-le-patron - 0x0007a510, // n0x082b c0x0000 (---------------) + est-mon-blogueur - 0x00007602, // n0x082c c0x0000 (---------------) + eu - 0x0018be48, // n0x082d c0x0000 (---------------) + familyds - 0x0004b00b, // n0x082e c0x0000 (---------------) + firebaseapp - 0x00052a48, // n0x082f c0x0000 (---------------) + flynnhub - 0x00060a07, // n0x0830 c0x0000 (---------------) + from-ak - 
0x00060d47, // n0x0831 c0x0000 (---------------) + from-al - 0x00060f07, // n0x0832 c0x0000 (---------------) + from-ar - 0x00061547, // n0x0833 c0x0000 (---------------) + from-ca - 0x00062007, // n0x0834 c0x0000 (---------------) + from-ct - 0x000626c7, // n0x0835 c0x0000 (---------------) + from-dc - 0x000632c7, // n0x0836 c0x0000 (---------------) + from-de - 0x00063807, // n0x0837 c0x0000 (---------------) + from-fl - 0x00063d87, // n0x0838 c0x0000 (---------------) + from-ga - 0x00064107, // n0x0839 c0x0000 (---------------) + from-hi - 0x00064987, // n0x083a c0x0000 (---------------) + from-ia - 0x00064b47, // n0x083b c0x0000 (---------------) + from-id - 0x00064d07, // n0x083c c0x0000 (---------------) + from-il - 0x00064ec7, // n0x083d c0x0000 (---------------) + from-in - 0x000651c7, // n0x083e c0x0000 (---------------) + from-ks - 0x000655c7, // n0x083f c0x0000 (---------------) + from-ky - 0x00065e07, // n0x0840 c0x0000 (---------------) + from-ma - 0x000662c7, // n0x0841 c0x0000 (---------------) + from-md - 0x00066bc7, // n0x0842 c0x0000 (---------------) + from-mi - 0x00067947, // n0x0843 c0x0000 (---------------) + from-mn - 0x00067b07, // n0x0844 c0x0000 (---------------) + from-mo - 0x00067e07, // n0x0845 c0x0000 (---------------) + from-ms - 0x00068247, // n0x0846 c0x0000 (---------------) + from-mt - 0x00068447, // n0x0847 c0x0000 (---------------) + from-nc - 0x000696c7, // n0x0848 c0x0000 (---------------) + from-nd - 0x00069887, // n0x0849 c0x0000 (---------------) + from-ne - 0x00069c87, // n0x084a c0x0000 (---------------) + from-nh - 0x0006a007, // n0x084b c0x0000 (---------------) + from-nj - 0x0006a507, // n0x084c c0x0000 (---------------) + from-nm - 0x0006afc7, // n0x084d c0x0000 (---------------) + from-nv - 0x0006c307, // n0x084e c0x0000 (---------------) + from-oh - 0x0006c5c7, // n0x084f c0x0000 (---------------) + from-ok - 0x0006c947, // n0x0850 c0x0000 (---------------) + from-or - 0x0006cb07, // n0x0851 c0x0000 
(---------------) + from-pa - 0x0006ce87, // n0x0852 c0x0000 (---------------) + from-pr - 0x0006d507, // n0x0853 c0x0000 (---------------) + from-ri - 0x0006d987, // n0x0854 c0x0000 (---------------) + from-sc - 0x0006dd87, // n0x0855 c0x0000 (---------------) + from-sd - 0x000720c7, // n0x0856 c0x0000 (---------------) + from-tn - 0x00072287, // n0x0857 c0x0000 (---------------) + from-tx - 0x000726c7, // n0x0858 c0x0000 (---------------) + from-ut - 0x000736c7, // n0x0859 c0x0000 (---------------) + from-va - 0x00073d07, // n0x085a c0x0000 (---------------) + from-vt - 0x00074007, // n0x085b c0x0000 (---------------) + from-wa - 0x000741c7, // n0x085c c0x0000 (---------------) + from-wi - 0x00074547, // n0x085d c0x0000 (---------------) + from-wv - 0x00075447, // n0x085e c0x0000 (---------------) + from-wy - 0x0000b902, // n0x085f c0x0000 (---------------) + gb - 0x000d7507, // n0x0860 c0x0000 (---------------) + getmyip - 0x000cc391, // n0x0861 c0x0000 (---------------) + githubusercontent - 0x000df7ca, // n0x0862 c0x0000 (---------------) + googleapis - 0x0009b54a, // n0x0863 c0x0000 (---------------) + googlecode - 0x000569c6, // n0x0864 c0x0000 (---------------) + gotdns - 0x0000ec8b, // n0x0865 c0x0000 (---------------) + gotpantheon - 0x00009d42, // n0x0866 c0x0000 (---------------) + gr - 0x00098609, // n0x0867 c0x0000 (---------------) + herokuapp - 0x00091c49, // n0x0868 c0x0000 (---------------) + herokussl - 0x00009a82, // n0x0869 c0x0000 (---------------) + hk - 0x0014e14a, // n0x086a c0x0000 (---------------) + hobby-site - 0x000a54c9, // n0x086b c0x0000 (---------------) + homelinux - 0x000a6ac8, // n0x086c c0x0000 (---------------) + homeunix - 0x0001dc42, // n0x086d c0x0000 (---------------) + hu - 0x0011e089, // n0x086e c0x0000 (---------------) + iamallama - 0x0016b38e, // n0x086f c0x0000 (---------------) + is-a-anarchist - 0x000a124c, // n0x0870 c0x0000 (---------------) + is-a-blogger - 0x000d55cf, // n0x0871 c0x0000 (---------------) + 
is-a-bookkeeper - 0x00187a8e, // n0x0872 c0x0000 (---------------) + is-a-bulls-fan - 0x0000c50c, // n0x0873 c0x0000 (---------------) + is-a-caterer - 0x0000fa89, // n0x0874 c0x0000 (---------------) + is-a-chef - 0x000130d1, // n0x0875 c0x0000 (---------------) + is-a-conservative - 0x00014c08, // n0x0876 c0x0000 (---------------) + is-a-cpa - 0x00017352, // n0x0877 c0x0000 (---------------) + is-a-cubicle-slave - 0x00024b0d, // n0x0878 c0x0000 (---------------) + is-a-democrat - 0x0002614d, // n0x0879 c0x0000 (---------------) + is-a-designer - 0x0008824b, // n0x087a c0x0000 (---------------) + is-a-doctor - 0x000adc15, // n0x087b c0x0000 (---------------) + is-a-financialadvisor - 0x000487c9, // n0x087c c0x0000 (---------------) + is-a-geek - 0x0004cd4a, // n0x087d c0x0000 (---------------) + is-a-green - 0x0004e1c9, // n0x087e c0x0000 (---------------) + is-a-guru - 0x0004f950, // n0x087f c0x0000 (---------------) + is-a-hard-worker - 0x0005a64b, // n0x0880 c0x0000 (---------------) + is-a-hunter - 0x0006024f, // n0x0881 c0x0000 (---------------) + is-a-landscaper - 0x0006d24b, // n0x0882 c0x0000 (---------------) + is-a-lawyer - 0x0006d68c, // n0x0883 c0x0000 (---------------) + is-a-liberal - 0x000732d0, // n0x0884 c0x0000 (---------------) + is-a-libertarian - 0x0007fb4a, // n0x0885 c0x0000 (---------------) + is-a-llama - 0x000804cd, // n0x0886 c0x0000 (---------------) + is-a-musician - 0x0008404e, // n0x0887 c0x0000 (---------------) + is-a-nascarfan - 0x0008a30a, // n0x0888 c0x0000 (---------------) + is-a-nurse - 0x001463cc, // n0x0889 c0x0000 (---------------) + is-a-painter - 0x00094a14, // n0x088a c0x0000 (---------------) + is-a-personaltrainer - 0x00098291, // n0x088b c0x0000 (---------------) + is-a-photographer - 0x0009890b, // n0x088c c0x0000 (---------------) + is-a-player - 0x0009e1cf, // n0x088d c0x0000 (---------------) + is-a-republican - 0x0009f74d, // n0x088e c0x0000 (---------------) + is-a-rockstar - 0x000a0bce, // n0x088f c0x0000 
(---------------) + is-a-socialist - 0x000a950c, // n0x0890 c0x0000 (---------------) + is-a-student - 0x000aad0c, // n0x0891 c0x0000 (---------------) + is-a-teacher - 0x000ab7cb, // n0x0892 c0x0000 (---------------) + is-a-techie - 0x000abace, // n0x0893 c0x0000 (---------------) + is-a-therapist - 0x000ac2d0, // n0x0894 c0x0000 (---------------) + is-an-accountant - 0x000bc50b, // n0x0895 c0x0000 (---------------) + is-an-actor - 0x000d03cd, // n0x0896 c0x0000 (---------------) + is-an-actress - 0x000b188f, // n0x0897 c0x0000 (---------------) + is-an-anarchist - 0x000d510c, // n0x0898 c0x0000 (---------------) + is-an-artist - 0x0013ee8e, // n0x0899 c0x0000 (---------------) + is-an-engineer - 0x001037d1, // n0x089a c0x0000 (---------------) + is-an-entertainer - 0x000b528c, // n0x089b c0x0000 (---------------) + is-certified - 0x000bb687, // n0x089c c0x0000 (---------------) + is-gone - 0x000bbf4d, // n0x089d c0x0000 (---------------) + is-into-anime - 0x000bd84c, // n0x089e c0x0000 (---------------) + is-into-cars - 0x000fde10, // n0x089f c0x0000 (---------------) + is-into-cartoons - 0x0014950d, // n0x08a0 c0x0000 (---------------) + is-into-games - 0x0016d0c7, // n0x08a1 c0x0000 (---------------) + is-leet - 0x000f6b50, // n0x08a2 c0x0000 (---------------) + is-not-certified - 0x000f1bc8, // n0x08a3 c0x0000 (---------------) + is-slick - 0x000ead4b, // n0x08a4 c0x0000 (---------------) + is-uberleet - 0x0014524f, // n0x08a5 c0x0000 (---------------) + is-with-theband - 0x00085d48, // n0x08a6 c0x0000 (---------------) + isa-geek - 0x000df9cd, // n0x08a7 c0x0000 (---------------) + isa-hockeynut - 0x00150b90, // n0x08a8 c0x0000 (---------------) + issmarterthanyou - 0x000aff43, // n0x08a9 c0x0000 (---------------) + jpn - 0x000093c2, // n0x08aa c0x0000 (---------------) + kr - 0x00057409, // n0x08ab c0x0000 (---------------) + likes-pie - 0x00072b4a, // n0x08ac c0x0000 (---------------) + likescandy - 0x00007d83, // n0x08ad c0x0000 (---------------) + mex - 
0x0011e7c8, // n0x08ae c0x0000 (---------------) + neat-url - 0x00121347, // n0x08af c0x0000 (---------------) + nfshost - 0x00001342, // n0x08b0 c0x0000 (---------------) + no - 0x00062c8a, // n0x08b1 c0x0000 (---------------) + operaunite - 0x0019444f, // n0x08b2 c0x0000 (---------------) + outsystemscloud - 0x0011188c, // n0x08b3 c0x0000 (---------------) + pagefrontapp - 0x00111b52, // n0x08b4 c0x0000 (---------------) + pagespeedmobilizer - 0x0011b543, // n0x08b5 c0x0000 (---------------) + qa2 - 0x00179342, // n0x08b6 c0x0000 (---------------) + qc - 0x00112b48, // n0x08b7 c0x0000 (---------------) + rackmaze - 0x00129ec7, // n0x08b8 c0x0000 (---------------) + rhcloud - 0x00002082, // n0x08b9 c0x0000 (---------------) + ro - 0x0000e0c2, // n0x08ba c0x0000 (---------------) + ru - 0x00000fc2, // n0x08bb c0x0000 (---------------) + sa - 0x00176390, // n0x08bc c0x0000 (---------------) + saves-the-whales - 0x00002342, // n0x08bd c0x0000 (---------------) + se - 0x00067646, // n0x08be c0x0000 (---------------) + selfip - 0x00045f8e, // n0x08bf c0x0000 (---------------) + sells-for-less - 0x0008d90b, // n0x08c0 c0x0000 (---------------) + sells-for-u - 0x000cd608, // n0x08c1 c0x0000 (---------------) + servebbs - 0x000c704a, // n0x08c2 c0x0000 (---------------) + simple-url - 0x000f6307, // n0x08c3 c0x0000 (---------------) + sinaapp - 0x0000660d, // n0x08c4 c0x0000 (---------------) + space-to-rent - 0x0018dc0c, // n0x08c5 c0x0000 (---------------) + teaches-yoga - 0x00001ac2, // n0x08c6 c0x0000 (---------------) + uk - 0x00002202, // n0x08c7 c0x0000 (---------------) + us - 0x00005002, // n0x08c8 c0x0000 (---------------) + uy - 0x000f624a, // n0x08c9 c0x0000 (---------------) + vipsinaapp - 0x000df6ca, // n0x08ca c0x0000 (---------------) + withgoogle - 0x000e694b, // n0x08cb c0x0000 (---------------) + withyoutube - 0x000feece, // n0x08cc c0x0000 (---------------) + writesthisblog - 0x000d9988, // n0x08cd c0x0000 (---------------) + yolasite - 0x00000182, // 
n0x08ce c0x0000 (---------------) + za - 0x108362c7, // n0x08cf c0x0042 (n0x08e1-n0x08ea) + compute - 0x10c362c9, // n0x08d0 c0x0043 (n0x08ea-n0x08ec) + compute-1 - 0x00010e03, // n0x08d1 c0x0000 (---------------) + elb - 0x1135f80c, // n0x08d2 c0x0044 (n0x08ec-n0x08ed) o I eu-central-1 - 0x00030fc2, // n0x08d3 c0x0000 (---------------) + s3 - 0x00071c91, // n0x08d4 c0x0000 (---------------) + s3-ap-northeast-1 - 0x00070d11, // n0x08d5 c0x0000 (---------------) + s3-ap-southeast-1 - 0x0012a351, // n0x08d6 c0x0000 (---------------) + s3-ap-southeast-2 - 0x0015f74f, // n0x08d7 c0x0000 (---------------) + s3-eu-central-1 - 0x00030fcc, // n0x08d8 c0x0000 (---------------) + s3-eu-west-1 - 0x000546cd, // n0x08d9 c0x0000 (---------------) + s3-external-1 - 0x0019b7cd, // n0x08da c0x0000 (---------------) + s3-external-2 - 0x000e40d5, // n0x08db c0x0000 (---------------) + s3-fips-us-gov-west-1 - 0x001262cc, // n0x08dc c0x0000 (---------------) + s3-sa-east-1 - 0x00148210, // n0x08dd c0x0000 (---------------) + s3-us-gov-west-1 - 0x000c414c, // n0x08de c0x0000 (---------------) + s3-us-west-1 - 0x000dd88c, // n0x08df c0x0000 (---------------) + s3-us-west-2 - 0x0012dfc9, // n0x08e0 c0x0000 (---------------) + us-east-1 - 0x00071d4e, // n0x08e1 c0x0000 (---------------) + ap-northeast-1 - 0x00070dce, // n0x08e2 c0x0000 (---------------) + ap-southeast-1 - 0x0012a40e, // n0x08e3 c0x0000 (---------------) + ap-southeast-2 - 0x0015f80c, // n0x08e4 c0x0000 (---------------) + eu-central-1 - 0x00031089, // n0x08e5 c0x0000 (---------------) + eu-west-1 - 0x00126389, // n0x08e6 c0x0000 (---------------) + sa-east-1 - 0x000e42cd, // n0x08e7 c0x0000 (---------------) + us-gov-west-1 - 0x000c4209, // n0x08e8 c0x0000 (---------------) + us-west-1 - 0x000dd949, // n0x08e9 c0x0000 (---------------) + us-west-2 - 0x000293c3, // n0x08ea c0x0000 (---------------) + z-1 - 0x00140d03, // n0x08eb c0x0000 (---------------) + z-2 - 0x00030fc2, // n0x08ec c0x0000 (---------------) + s3 - 
0x00200342, // n0x08ed c0x0000 (---------------) + I ac - 0x0020a442, // n0x08ee c0x0000 (---------------) + I co - 0x00202ac2, // n0x08ef c0x0000 (---------------) + I ed - 0x002016c2, // n0x08f0 c0x0000 (---------------) + I fi - 0x0020ec82, // n0x08f1 c0x0000 (---------------) + I go - 0x00200d82, // n0x08f2 c0x0000 (---------------) + I or - 0x00200fc2, // n0x08f3 c0x0000 (---------------) + I sa - 0x00234803, // n0x08f4 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x08f5 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x08f6 c0x0000 (---------------) + I gov - 0x00201643, // n0x08f7 c0x0000 (---------------) + I inf - 0x002207c3, // n0x08f8 c0x0000 (---------------) + I net - 0x00225403, // n0x08f9 c0x0000 (---------------) + I org - 0x000ff148, // n0x08fa c0x0000 (---------------) + blogspot - 0x00234803, // n0x08fb c0x0000 (---------------) + I com - 0x0023a1c3, // n0x08fc c0x0000 (---------------) + I edu - 0x002207c3, // n0x08fd c0x0000 (---------------) + I net - 0x00225403, // n0x08fe c0x0000 (---------------) + I org - 0x0003e083, // n0x08ff c0x0000 (---------------) + ath - 0x0027c5c3, // n0x0900 c0x0000 (---------------) + I gov - 0x00200342, // n0x0901 c0x0000 (---------------) + I ac - 0x0032bd03, // n0x0902 c0x0000 (---------------) + I biz - 0x12e34803, // n0x0903 c0x004b (n0x090e-n0x090f) + I com - 0x00278e47, // n0x0904 c0x0000 (---------------) + I ekloges - 0x0027c5c3, // n0x0905 c0x0000 (---------------) + I gov - 0x0030dc03, // n0x0906 c0x0000 (---------------) + I ltd - 0x00207d04, // n0x0907 c0x0000 (---------------) + I name - 0x002207c3, // n0x0908 c0x0000 (---------------) + I net - 0x00225403, // n0x0909 c0x0000 (---------------) + I org - 0x0027498a, // n0x090a c0x0000 (---------------) + I parliament - 0x002470c5, // n0x090b c0x0000 (---------------) + I press - 0x002210c3, // n0x090c c0x0000 (---------------) + I pro - 0x00200c42, // n0x090d c0x0000 (---------------) + I tm - 0x000ff148, // n0x090e c0x0000 
(---------------) + blogspot - 0x000ff148, // n0x090f c0x0000 (---------------) + blogspot - 0x000ff148, // n0x0910 c0x0000 (---------------) + blogspot - 0x00034803, // n0x0911 c0x0000 (---------------) + com - 0x000d084f, // n0x0912 c0x0000 (---------------) + fuettertdasnetz - 0x00168f8a, // n0x0913 c0x0000 (---------------) + isteingeek - 0x000a0e87, // n0x0914 c0x0000 (---------------) + istmein - 0x0002064a, // n0x0915 c0x0000 (---------------) + lebtimnetz - 0x000968ca, // n0x0916 c0x0000 (---------------) + leitungsen - 0x0000754d, // n0x0917 c0x0000 (---------------) + traeumtgerade - 0x000ff148, // n0x0918 c0x0000 (---------------) + blogspot - 0x00234803, // n0x0919 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x091a c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x091b c0x0000 (---------------) + I gov - 0x002207c3, // n0x091c c0x0000 (---------------) + I net - 0x00225403, // n0x091d c0x0000 (---------------) + I org - 0x00201d03, // n0x091e c0x0000 (---------------) + I art - 0x00234803, // n0x091f c0x0000 (---------------) + I com - 0x0023a1c3, // n0x0920 c0x0000 (---------------) + I edu - 0x00211543, // n0x0921 c0x0000 (---------------) + I gob - 0x0027c5c3, // n0x0922 c0x0000 (---------------) + I gov - 0x002119c3, // n0x0923 c0x0000 (---------------) + I mil - 0x002207c3, // n0x0924 c0x0000 (---------------) + I net - 0x00225403, // n0x0925 c0x0000 (---------------) + I org - 0x00291e03, // n0x0926 c0x0000 (---------------) + I sld - 0x00221cc3, // n0x0927 c0x0000 (---------------) + I web - 0x00201d03, // n0x0928 c0x0000 (---------------) + I art - 0x002ceb44, // n0x0929 c0x0000 (---------------) + I asso - 0x00234803, // n0x092a c0x0000 (---------------) + I com - 0x0023a1c3, // n0x092b c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x092c c0x0000 (---------------) + I gov - 0x002207c3, // n0x092d c0x0000 (---------------) + I net - 0x00225403, // n0x092e c0x0000 (---------------) + I org - 0x00204343, // n0x092f c0x0000 
(---------------) + I pol - 0x00234803, // n0x0930 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x0931 c0x0000 (---------------) + I edu - 0x002016c3, // n0x0932 c0x0000 (---------------) + I fin - 0x00211543, // n0x0933 c0x0000 (---------------) + I gob - 0x0027c5c3, // n0x0934 c0x0000 (---------------) + I gov - 0x00201804, // n0x0935 c0x0000 (---------------) + I info - 0x0032c883, // n0x0936 c0x0000 (---------------) + I k12 - 0x002127c3, // n0x0937 c0x0000 (---------------) + I med - 0x002119c3, // n0x0938 c0x0000 (---------------) + I mil - 0x002207c3, // n0x0939 c0x0000 (---------------) + I net - 0x00225403, // n0x093a c0x0000 (---------------) + I org - 0x002210c3, // n0x093b c0x0000 (---------------) + I pro - 0x00200503, // n0x093c c0x0000 (---------------) + I aip - 0x15234803, // n0x093d c0x0054 (n0x0946-n0x0947) + I com - 0x0023a1c3, // n0x093e c0x0000 (---------------) + I edu - 0x002b5483, // n0x093f c0x0000 (---------------) + I fie - 0x0027c5c3, // n0x0940 c0x0000 (---------------) + I gov - 0x0026d7c3, // n0x0941 c0x0000 (---------------) + I lib - 0x002127c3, // n0x0942 c0x0000 (---------------) + I med - 0x00225403, // n0x0943 c0x0000 (---------------) + I org - 0x00207083, // n0x0944 c0x0000 (---------------) + I pri - 0x0030c604, // n0x0945 c0x0000 (---------------) + I riik - 0x000ff148, // n0x0946 c0x0000 (---------------) + blogspot - 0x15a34803, // n0x0947 c0x0056 (n0x0950-n0x0951) + I com - 0x0023a1c3, // n0x0948 c0x0000 (---------------) + I edu - 0x002a6b83, // n0x0949 c0x0000 (---------------) + I eun - 0x0027c5c3, // n0x094a c0x0000 (---------------) + I gov - 0x002119c3, // n0x094b c0x0000 (---------------) + I mil - 0x00207d04, // n0x094c c0x0000 (---------------) + I name - 0x002207c3, // n0x094d c0x0000 (---------------) + I net - 0x00225403, // n0x094e c0x0000 (---------------) + I org - 0x0021e603, // n0x094f c0x0000 (---------------) + I sci - 0x000ff148, // n0x0950 c0x0000 (---------------) + blogspot - 0x16234803, // 
n0x0951 c0x0058 (n0x0956-n0x0957) + I com - 0x0023a1c3, // n0x0952 c0x0000 (---------------) + I edu - 0x00211543, // n0x0953 c0x0000 (---------------) + I gob - 0x00201343, // n0x0954 c0x0000 (---------------) + I nom - 0x00225403, // n0x0955 c0x0000 (---------------) + I org - 0x000ff148, // n0x0956 c0x0000 (---------------) + blogspot - 0x0032bd03, // n0x0957 c0x0000 (---------------) + I biz - 0x00234803, // n0x0958 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x0959 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x095a c0x0000 (---------------) + I gov - 0x00201804, // n0x095b c0x0000 (---------------) + I info - 0x00207d04, // n0x095c c0x0000 (---------------) + I name - 0x002207c3, // n0x095d c0x0000 (---------------) + I net - 0x00225403, // n0x095e c0x0000 (---------------) + I org - 0x00321885, // n0x095f c0x0000 (---------------) + I aland - 0x000ff148, // n0x0960 c0x0000 (---------------) + blogspot - 0x0003b843, // n0x0961 c0x0000 (---------------) + iki - 0x00312448, // n0x0962 c0x0000 (---------------) + I aeroport - 0x00351ec7, // n0x0963 c0x0000 (---------------) + I assedic - 0x002ceb44, // n0x0964 c0x0000 (---------------) + I asso - 0x003334c6, // n0x0965 c0x0000 (---------------) + I avocat - 0x003480c6, // n0x0966 c0x0000 (---------------) + I avoues - 0x000ff148, // n0x0967 c0x0000 (---------------) + blogspot - 0x00287c83, // n0x0968 c0x0000 (---------------) + I cci - 0x0026fe49, // n0x0969 c0x0000 (---------------) + I chambagri - 0x002b4695, // n0x096a c0x0000 (---------------) + I chirurgiens-dentistes - 0x00234803, // n0x096b c0x0000 (---------------) + I com - 0x003279d2, // n0x096c c0x0000 (---------------) + I experts-comptables - 0x0032778f, // n0x096d c0x0000 (---------------) + I geometre-expert - 0x0023ef84, // n0x096e c0x0000 (---------------) + I gouv - 0x00219745, // n0x096f c0x0000 (---------------) + I greta - 0x002f0350, // n0x0970 c0x0000 (---------------) + I huissier-justice - 0x00232787, // n0x0971 c0x0000 
(---------------) + I medecin - 0x00201343, // n0x0972 c0x0000 (---------------) + I nom - 0x0035cfc8, // n0x0973 c0x0000 (---------------) + I notaires - 0x002f648a, // n0x0974 c0x0000 (---------------) + I pharmacien - 0x00245344, // n0x0975 c0x0000 (---------------) + I port - 0x002e13c3, // n0x0976 c0x0000 (---------------) + I prd - 0x002470c6, // n0x0977 c0x0000 (---------------) + I presse - 0x00200c42, // n0x0978 c0x0000 (---------------) + I tm - 0x0023040b, // n0x0979 c0x0000 (---------------) + I veterinaire - 0x00234803, // n0x097a c0x0000 (---------------) + I com - 0x0023a1c3, // n0x097b c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x097c c0x0000 (---------------) + I gov - 0x002119c3, // n0x097d c0x0000 (---------------) + I mil - 0x002207c3, // n0x097e c0x0000 (---------------) + I net - 0x00225403, // n0x097f c0x0000 (---------------) + I org - 0x002e7783, // n0x0980 c0x0000 (---------------) + I pvt - 0x0020a442, // n0x0981 c0x0000 (---------------) + I co - 0x002207c3, // n0x0982 c0x0000 (---------------) + I net - 0x00225403, // n0x0983 c0x0000 (---------------) + I org - 0x00234803, // n0x0984 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x0985 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x0986 c0x0000 (---------------) + I gov - 0x002119c3, // n0x0987 c0x0000 (---------------) + I mil - 0x00225403, // n0x0988 c0x0000 (---------------) + I org - 0x00234803, // n0x0989 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x098a c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x098b c0x0000 (---------------) + I gov - 0x0030dc03, // n0x098c c0x0000 (---------------) + I ltd - 0x002161c3, // n0x098d c0x0000 (---------------) + I mod - 0x00225403, // n0x098e c0x0000 (---------------) + I org - 0x0020a442, // n0x098f c0x0000 (---------------) + I co - 0x00234803, // n0x0990 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x0991 c0x0000 (---------------) + I edu - 0x002207c3, // n0x0992 c0x0000 (---------------) + I net - 
0x00225403, // n0x0993 c0x0000 (---------------) + I org - 0x00200342, // n0x0994 c0x0000 (---------------) + I ac - 0x00234803, // n0x0995 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x0996 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x0997 c0x0000 (---------------) + I gov - 0x002207c3, // n0x0998 c0x0000 (---------------) + I net - 0x00225403, // n0x0999 c0x0000 (---------------) + I org - 0x002ceb44, // n0x099a c0x0000 (---------------) + I asso - 0x00234803, // n0x099b c0x0000 (---------------) + I com - 0x0023a1c3, // n0x099c c0x0000 (---------------) + I edu - 0x00209504, // n0x099d c0x0000 (---------------) + I mobi - 0x002207c3, // n0x099e c0x0000 (---------------) + I net - 0x00225403, // n0x099f c0x0000 (---------------) + I org - 0x000ff148, // n0x09a0 c0x0000 (---------------) + blogspot - 0x00234803, // n0x09a1 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x09a2 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x09a3 c0x0000 (---------------) + I gov - 0x002207c3, // n0x09a4 c0x0000 (---------------) + I net - 0x00225403, // n0x09a5 c0x0000 (---------------) + I org - 0x00234803, // n0x09a6 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x09a7 c0x0000 (---------------) + I edu - 0x00211543, // n0x09a8 c0x0000 (---------------) + I gob - 0x0021e783, // n0x09a9 c0x0000 (---------------) + I ind - 0x002119c3, // n0x09aa c0x0000 (---------------) + I mil - 0x002207c3, // n0x09ab c0x0000 (---------------) + I net - 0x00225403, // n0x09ac c0x0000 (---------------) + I org - 0x0020a442, // n0x09ad c0x0000 (---------------) + I co - 0x00234803, // n0x09ae c0x0000 (---------------) + I com - 0x0023a1c3, // n0x09af c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x09b0 c0x0000 (---------------) + I gov - 0x002207c3, // n0x09b1 c0x0000 (---------------) + I net - 0x00225403, // n0x09b2 c0x0000 (---------------) + I org - 0x000ff148, // n0x09b3 c0x0000 (---------------) + blogspot - 0x00234803, // n0x09b4 c0x0000 
(---------------) + I com - 0x0023a1c3, // n0x09b5 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x09b6 c0x0000 (---------------) + I gov - 0x0031e343, // n0x09b7 c0x0000 (---------------) + I idv - 0x00028343, // n0x09b8 c0x0000 (---------------) + inc - 0x0010dc03, // n0x09b9 c0x0000 (---------------) + ltd - 0x002207c3, // n0x09ba c0x0000 (---------------) + I net - 0x00225403, // n0x09bb c0x0000 (---------------) + I org - 0x0030288a, // n0x09bc c0x0000 (---------------) + I xn--55qx5d - 0x00322d09, // n0x09bd c0x0000 (---------------) + I xn--ciqpn - 0x0033db4b, // n0x09be c0x0000 (---------------) + I xn--gmq050i - 0x0033e58a, // n0x09bf c0x0000 (---------------) + I xn--gmqw5a - 0x0034500a, // n0x09c0 c0x0000 (---------------) + I xn--io0a7i - 0x00353bcb, // n0x09c1 c0x0000 (---------------) + I xn--lcvr32d - 0x00365a4a, // n0x09c2 c0x0000 (---------------) + I xn--mk0axi - 0x0036cc4a, // n0x09c3 c0x0000 (---------------) + I xn--mxtq1m - 0x00372e0a, // n0x09c4 c0x0000 (---------------) + I xn--od0alg - 0x0037308b, // n0x09c5 c0x0000 (---------------) + I xn--od0aq3b - 0x00391d09, // n0x09c6 c0x0000 (---------------) + I xn--tn0ag - 0x003938ca, // n0x09c7 c0x0000 (---------------) + I xn--uc0atv - 0x00393e0b, // n0x09c8 c0x0000 (---------------) + I xn--uc0ay4a - 0x0039e98b, // n0x09c9 c0x0000 (---------------) + I xn--wcvs22d - 0x003a4b4a, // n0x09ca c0x0000 (---------------) + I xn--zf0avx - 0x00234803, // n0x09cb c0x0000 (---------------) + I com - 0x0023a1c3, // n0x09cc c0x0000 (---------------) + I edu - 0x00211543, // n0x09cd c0x0000 (---------------) + I gob - 0x002119c3, // n0x09ce c0x0000 (---------------) + I mil - 0x002207c3, // n0x09cf c0x0000 (---------------) + I net - 0x00225403, // n0x09d0 c0x0000 (---------------) + I org - 0x000ff148, // n0x09d1 c0x0000 (---------------) + blogspot - 0x00234803, // n0x09d2 c0x0000 (---------------) + I com - 0x00260a04, // n0x09d3 c0x0000 (---------------) + I from - 0x00212ec2, // n0x09d4 c0x0000 
(---------------) + I iz - 0x00207d04, // n0x09d5 c0x0000 (---------------) + I name - 0x002a1a05, // n0x09d6 c0x0000 (---------------) + I adult - 0x00201d03, // n0x09d7 c0x0000 (---------------) + I art - 0x002ceb44, // n0x09d8 c0x0000 (---------------) + I asso - 0x00234803, // n0x09d9 c0x0000 (---------------) + I com - 0x0023d404, // n0x09da c0x0000 (---------------) + I coop - 0x0023a1c3, // n0x09db c0x0000 (---------------) + I edu - 0x0024bac4, // n0x09dc c0x0000 (---------------) + I firm - 0x0023ef84, // n0x09dd c0x0000 (---------------) + I gouv - 0x00201804, // n0x09de c0x0000 (---------------) + I info - 0x002127c3, // n0x09df c0x0000 (---------------) + I med - 0x002207c3, // n0x09e0 c0x0000 (---------------) + I net - 0x00225403, // n0x09e1 c0x0000 (---------------) + I org - 0x00294b45, // n0x09e2 c0x0000 (---------------) + I perso - 0x00204343, // n0x09e3 c0x0000 (---------------) + I pol - 0x002210c3, // n0x09e4 c0x0000 (---------------) + I pro - 0x00286d83, // n0x09e5 c0x0000 (---------------) + I rel - 0x0033ae44, // n0x09e6 c0x0000 (---------------) + I shop - 0x00314c44, // n0x09e7 c0x0000 (---------------) + I 2000 - 0x0024a8c5, // n0x09e8 c0x0000 (---------------) + I agrar - 0x000ff148, // n0x09e9 c0x0000 (---------------) + blogspot - 0x0020f3c4, // n0x09ea c0x0000 (---------------) + I bolt - 0x00355bc6, // n0x09eb c0x0000 (---------------) + I casino - 0x00285284, // n0x09ec c0x0000 (---------------) + I city - 0x0020a442, // n0x09ed c0x0000 (---------------) + I co - 0x00338b47, // n0x09ee c0x0000 (---------------) + I erotica - 0x0024f047, // n0x09ef c0x0000 (---------------) + I erotika - 0x00249d04, // n0x09f0 c0x0000 (---------------) + I film - 0x00259685, // n0x09f1 c0x0000 (---------------) + I forum - 0x00349705, // n0x09f2 c0x0000 (---------------) + I games - 0x00235ec5, // n0x09f3 c0x0000 (---------------) + I hotel - 0x00201804, // n0x09f4 c0x0000 (---------------) + I info - 0x002246c8, // n0x09f5 c0x0000 
(---------------) + I ingatlan - 0x00292d86, // n0x09f6 c0x0000 (---------------) + I jogasz - 0x002d24c8, // n0x09f7 c0x0000 (---------------) + I konyvelo - 0x0023cc45, // n0x09f8 c0x0000 (---------------) + I lakas - 0x00302485, // n0x09f9 c0x0000 (---------------) + I media - 0x00222084, // n0x09fa c0x0000 (---------------) + I news - 0x00225403, // n0x09fb c0x0000 (---------------) + I org - 0x002e1e84, // n0x09fc c0x0000 (---------------) + I priv - 0x00352d86, // n0x09fd c0x0000 (---------------) + I reklam - 0x002471c3, // n0x09fe c0x0000 (---------------) + I sex - 0x0033ae44, // n0x09ff c0x0000 (---------------) + I shop - 0x00294445, // n0x0a00 c0x0000 (---------------) + I sport - 0x0023ba44, // n0x0a01 c0x0000 (---------------) + I suli - 0x00206444, // n0x0a02 c0x0000 (---------------) + I szex - 0x00200c42, // n0x0a03 c0x0000 (---------------) + I tm - 0x00269e86, // n0x0a04 c0x0000 (---------------) + I tozsde - 0x003850c6, // n0x0a05 c0x0000 (---------------) + I utazas - 0x002f3485, // n0x0a06 c0x0000 (---------------) + I video - 0x00200342, // n0x0a07 c0x0000 (---------------) + I ac - 0x0032bd03, // n0x0a08 c0x0000 (---------------) + I biz - 0x1b20a442, // n0x0a09 c0x006c (n0x0a12-n0x0a13) + I co - 0x0023b544, // n0x0a0a c0x0000 (---------------) + I desa - 0x0020ec82, // n0x0a0b c0x0000 (---------------) + I go - 0x002119c3, // n0x0a0c c0x0000 (---------------) + I mil - 0x0022c482, // n0x0a0d c0x0000 (---------------) + I my - 0x002207c3, // n0x0a0e c0x0000 (---------------) + I net - 0x00200d82, // n0x0a0f c0x0000 (---------------) + I or - 0x00217f43, // n0x0a10 c0x0000 (---------------) + I sch - 0x00221cc3, // n0x0a11 c0x0000 (---------------) + I web - 0x000ff148, // n0x0a12 c0x0000 (---------------) + blogspot - 0x000ff148, // n0x0a13 c0x0000 (---------------) + blogspot - 0x0027c5c3, // n0x0a14 c0x0000 (---------------) + I gov - 0x00200342, // n0x0a15 c0x0000 (---------------) + I ac - 0x1be0a442, // n0x0a16 c0x006f (n0x0a1d-n0x0a1e) 
+ I co - 0x0027c5c3, // n0x0a17 c0x0000 (---------------) + I gov - 0x00264c83, // n0x0a18 c0x0000 (---------------) + I idf - 0x0032c883, // n0x0a19 c0x0000 (---------------) + I k12 - 0x0022a684, // n0x0a1a c0x0000 (---------------) + I muni - 0x002207c3, // n0x0a1b c0x0000 (---------------) + I net - 0x00225403, // n0x0a1c c0x0000 (---------------) + I org - 0x000ff148, // n0x0a1d c0x0000 (---------------) + blogspot - 0x00200342, // n0x0a1e c0x0000 (---------------) + I ac - 0x1c60a442, // n0x0a1f c0x0071 (n0x0a25-n0x0a27) + I co - 0x00234803, // n0x0a20 c0x0000 (---------------) + I com - 0x002207c3, // n0x0a21 c0x0000 (---------------) + I net - 0x00225403, // n0x0a22 c0x0000 (---------------) + I org - 0x0020d982, // n0x0a23 c0x0000 (---------------) + I tt - 0x0020cd02, // n0x0a24 c0x0000 (---------------) + I tv - 0x0030dc03, // n0x0a25 c0x0000 (---------------) + I ltd - 0x002db1c3, // n0x0a26 c0x0000 (---------------) + I plc - 0x00200342, // n0x0a27 c0x0000 (---------------) + I ac - 0x000ff148, // n0x0a28 c0x0000 (---------------) + blogspot - 0x0020a442, // n0x0a29 c0x0000 (---------------) + I co - 0x0023a1c3, // n0x0a2a c0x0000 (---------------) + I edu - 0x0024bac4, // n0x0a2b c0x0000 (---------------) + I firm - 0x002082c3, // n0x0a2c c0x0000 (---------------) + I gen - 0x0027c5c3, // n0x0a2d c0x0000 (---------------) + I gov - 0x0021e783, // n0x0a2e c0x0000 (---------------) + I ind - 0x002119c3, // n0x0a2f c0x0000 (---------------) + I mil - 0x002207c3, // n0x0a30 c0x0000 (---------------) + I net - 0x0021a7c3, // n0x0a31 c0x0000 (---------------) + I nic - 0x00225403, // n0x0a32 c0x0000 (---------------) + I org - 0x0021e583, // n0x0a33 c0x0000 (---------------) + I res - 0x001249d3, // n0x0a34 c0x0000 (---------------) + barrel-of-knowledge - 0x00127314, // n0x0a35 c0x0000 (---------------) + barrell-of-knowledge - 0x00012586, // n0x0a36 c0x0000 (---------------) + dyndns - 0x000554c7, // n0x0a37 c0x0000 (---------------) + for-our - 
0x0018e149, // n0x0a38 c0x0000 (---------------) + groks-the - 0x001107ca, // n0x0a39 c0x0000 (---------------) + groks-this - 0x0008960d, // n0x0a3a c0x0000 (---------------) + here-for-more - 0x000f9aca, // n0x0a3b c0x0000 (---------------) + knowsitall - 0x00067646, // n0x0a3c c0x0000 (---------------) + selfip - 0x00111746, // n0x0a3d c0x0000 (---------------) + webhop - 0x00207602, // n0x0a3e c0x0000 (---------------) + I eu - 0x00234803, // n0x0a3f c0x0000 (---------------) + I com - 0x000cc386, // n0x0a40 c0x0000 (---------------) + github - 0x0018e105, // n0x0a41 c0x0000 (---------------) + ngrok - 0x0000b283, // n0x0a42 c0x0000 (---------------) + nid - 0x0000ed48, // n0x0a43 c0x0000 (---------------) + pantheon - 0x000b2408, // n0x0a44 c0x0000 (---------------) + sandcats - 0x00234803, // n0x0a45 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x0a46 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x0a47 c0x0000 (---------------) + I gov - 0x002119c3, // n0x0a48 c0x0000 (---------------) + I mil - 0x002207c3, // n0x0a49 c0x0000 (---------------) + I net - 0x00225403, // n0x0a4a c0x0000 (---------------) + I org - 0x00200342, // n0x0a4b c0x0000 (---------------) + I ac - 0x0020a442, // n0x0a4c c0x0000 (---------------) + I co - 0x0027c5c3, // n0x0a4d c0x0000 (---------------) + I gov - 0x0020ae82, // n0x0a4e c0x0000 (---------------) + I id - 0x002207c3, // n0x0a4f c0x0000 (---------------) + I net - 0x00225403, // n0x0a50 c0x0000 (---------------) + I org - 0x00217f43, // n0x0a51 c0x0000 (---------------) + I sch - 0x0035ae4f, // n0x0a52 c0x0000 (---------------) + I xn--mgba3a4f16a - 0x0035b20e, // n0x0a53 c0x0000 (---------------) + I xn--mgba3a4fra - 0x000ff148, // n0x0a54 c0x0000 (---------------) + blogspot - 0x00234803, // n0x0a55 c0x0000 (---------------) + I com - 0x00047fc7, // n0x0a56 c0x0000 (---------------) + cupcake - 0x0023a1c3, // n0x0a57 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x0a58 c0x0000 (---------------) + I gov - 
0x002014c3, // n0x0a59 c0x0000 (---------------) + I int - 0x002207c3, // n0x0a5a c0x0000 (---------------) + I net - 0x00225403, // n0x0a5b c0x0000 (---------------) + I org - 0x0021ce03, // n0x0a5c c0x0000 (---------------) + I abr - 0x00313dc7, // n0x0a5d c0x0000 (---------------) + I abruzzo - 0x00201b42, // n0x0a5e c0x0000 (---------------) + I ag - 0x002d2b49, // n0x0a5f c0x0000 (---------------) + I agrigento - 0x00200cc2, // n0x0a60 c0x0000 (---------------) + I al - 0x0037668b, // n0x0a61 c0x0000 (---------------) + I alessandria - 0x002dfeca, // n0x0a62 c0x0000 (---------------) + I alto-adige - 0x002e56c9, // n0x0a63 c0x0000 (---------------) + I altoadige - 0x00201002, // n0x0a64 c0x0000 (---------------) + I an - 0x00351946, // n0x0a65 c0x0000 (---------------) + I ancona - 0x00274d55, // n0x0a66 c0x0000 (---------------) + I andria-barletta-trani - 0x003767d5, // n0x0a67 c0x0000 (---------------) + I andria-trani-barletta - 0x0028b413, // n0x0a68 c0x0000 (---------------) + I andriabarlettatrani - 0x00376d53, // n0x0a69 c0x0000 (---------------) + I andriatranibarletta - 0x00202e82, // n0x0a6a c0x0000 (---------------) + I ao - 0x00212b85, // n0x0a6b c0x0000 (---------------) + I aosta - 0x0022788c, // n0x0a6c c0x0000 (---------------) + I aosta-valley - 0x00214fcb, // n0x0a6d c0x0000 (---------------) + I aostavalley - 0x00240885, // n0x0a6e c0x0000 (---------------) + I aoste - 0x00211702, // n0x0a6f c0x0000 (---------------) + I ap - 0x00200ec2, // n0x0a70 c0x0000 (---------------) + I aq - 0x00369c46, // n0x0a71 c0x0000 (---------------) + I aquila - 0x00200942, // n0x0a72 c0x0000 (---------------) + I ar - 0x0038c5c6, // n0x0a73 c0x0000 (---------------) + I arezzo - 0x00397b0d, // n0x0a74 c0x0000 (---------------) + I ascoli-piceno - 0x0034584c, // n0x0a75 c0x0000 (---------------) + I ascolipiceno - 0x0021f784, // n0x0a76 c0x0000 (---------------) + I asti - 0x00200482, // n0x0a77 c0x0000 (---------------) + I at - 0x00208482, // n0x0a78 
c0x0000 (---------------) + I av - 0x00217708, // n0x0a79 c0x0000 (---------------) + I avellino - 0x00202e42, // n0x0a7a c0x0000 (---------------) + I ba - 0x00252c06, // n0x0a7b c0x0000 (---------------) + I balsan - 0x00253884, // n0x0a7c c0x0000 (---------------) + I bari - 0x00274f15, // n0x0a7d c0x0000 (---------------) + I barletta-trani-andria - 0x0028b593, // n0x0a7e c0x0000 (---------------) + I barlettatraniandria - 0x00204203, // n0x0a7f c0x0000 (---------------) + I bas - 0x003331ca, // n0x0a80 c0x0000 (---------------) + I basilicata - 0x00285a87, // n0x0a81 c0x0000 (---------------) + I belluno - 0x002e6b89, // n0x0a82 c0x0000 (---------------) + I benevento - 0x0022f5c7, // n0x0a83 c0x0000 (---------------) + I bergamo - 0x00303f42, // n0x0a84 c0x0000 (---------------) + I bg - 0x00200002, // n0x0a85 c0x0000 (---------------) + I bi - 0x003a53c6, // n0x0a86 c0x0000 (---------------) + I biella - 0x0020afc2, // n0x0a87 c0x0000 (---------------) + I bl - 0x000ff148, // n0x0a88 c0x0000 (---------------) + blogspot - 0x00210b82, // n0x0a89 c0x0000 (---------------) + I bn - 0x0020cb02, // n0x0a8a c0x0000 (---------------) + I bo - 0x00389f47, // n0x0a8b c0x0000 (---------------) + I bologna - 0x00211b47, // n0x0a8c c0x0000 (---------------) + I bolzano - 0x0021bcc5, // n0x0a8d c0x0000 (---------------) + I bozen - 0x0021bfc2, // n0x0a8e c0x0000 (---------------) + I br - 0x0021e547, // n0x0a8f c0x0000 (---------------) + I brescia - 0x0021e708, // n0x0a90 c0x0000 (---------------) + I brindisi - 0x00254682, // n0x0a91 c0x0000 (---------------) + I bs - 0x002206c2, // n0x0a92 c0x0000 (---------------) + I bt - 0x0022dbc2, // n0x0a93 c0x0000 (---------------) + I bz - 0x00200e02, // n0x0a94 c0x0000 (---------------) + I ca - 0x0023f348, // n0x0a95 c0x0000 (---------------) + I cagliari - 0x0021a843, // n0x0a96 c0x0000 (---------------) + I cal - 0x00252848, // n0x0a97 c0x0000 (---------------) + I calabria - 0x0023a7cd, // n0x0a98 c0x0000 
(---------------) + I caltanissetta - 0x00221d83, // n0x0a99 c0x0000 (---------------) + I cam - 0x0031df08, // n0x0a9a c0x0000 (---------------) + I campania - 0x00241bcf, // n0x0a9b c0x0000 (---------------) + I campidano-medio - 0x00241f8e, // n0x0a9c c0x0000 (---------------) + I campidanomedio - 0x00338c8a, // n0x0a9d c0x0000 (---------------) + I campobasso - 0x002f5591, // n0x0a9e c0x0000 (---------------) + I carbonia-iglesias - 0x002f5a10, // n0x0a9f c0x0000 (---------------) + I carboniaiglesias - 0x002b630d, // n0x0aa0 c0x0000 (---------------) + I carrara-massa - 0x002b664c, // n0x0aa1 c0x0000 (---------------) + I carraramassa - 0x0023e207, // n0x0aa2 c0x0000 (---------------) + I caserta - 0x00333347, // n0x0aa3 c0x0000 (---------------) + I catania - 0x00333589, // n0x0aa4 c0x0000 (---------------) + I catanzaro - 0x0021f882, // n0x0aa5 c0x0000 (---------------) + I cb - 0x00202482, // n0x0aa6 c0x0000 (---------------) + I ce - 0x00256ecc, // n0x0aa7 c0x0000 (---------------) + I cesena-forli - 0x002571cb, // n0x0aa8 c0x0000 (---------------) + I cesenaforli - 0x00200382, // n0x0aa9 c0x0000 (---------------) + I ch - 0x002ab986, // n0x0aaa c0x0000 (---------------) + I chieti - 0x00204d82, // n0x0aab c0x0000 (---------------) + I ci - 0x00217582, // n0x0aac c0x0000 (---------------) + I cl - 0x0021b802, // n0x0aad c0x0000 (---------------) + I cn - 0x0020a442, // n0x0aae c0x0000 (---------------) + I co - 0x00235904, // n0x0aaf c0x0000 (---------------) + I como - 0x00240a87, // n0x0ab0 c0x0000 (---------------) + I cosenza - 0x00207442, // n0x0ab1 c0x0000 (---------------) + I cr - 0x00243ec7, // n0x0ab2 c0x0000 (---------------) + I cremona - 0x00245147, // n0x0ab3 c0x0000 (---------------) + I crotone - 0x0020ea42, // n0x0ab4 c0x0000 (---------------) + I cs - 0x00228d42, // n0x0ab5 c0x0000 (---------------) + I ct - 0x00247e85, // n0x0ab6 c0x0000 (---------------) + I cuneo - 0x00200142, // n0x0ab7 c0x0000 (---------------) + I cz - 0x0025898e, 
// n0x0ab8 c0x0000 (---------------) + I dell-ogliastra - 0x0026340d, // n0x0ab9 c0x0000 (---------------) + I dellogliastra - 0x0023a1c3, // n0x0aba c0x0000 (---------------) + I edu - 0x0031354e, // n0x0abb c0x0000 (---------------) + I emilia-romagna - 0x00284b8d, // n0x0abc c0x0000 (---------------) + I emiliaromagna - 0x00380a03, // n0x0abd c0x0000 (---------------) + I emr - 0x00203082, // n0x0abe c0x0000 (---------------) + I en - 0x00208504, // n0x0abf c0x0000 (---------------) + I enna - 0x00249502, // n0x0ac0 c0x0000 (---------------) + I fc - 0x0020a502, // n0x0ac1 c0x0000 (---------------) + I fe - 0x002d9385, // n0x0ac2 c0x0000 (---------------) + I fermo - 0x002e46c7, // n0x0ac3 c0x0000 (---------------) + I ferrara - 0x0034cbc2, // n0x0ac4 c0x0000 (---------------) + I fg - 0x002016c2, // n0x0ac5 c0x0000 (---------------) + I fi - 0x0024b407, // n0x0ac6 c0x0000 (---------------) + I firenze - 0x00250f08, // n0x0ac7 c0x0000 (---------------) + I florence - 0x00234ec2, // n0x0ac8 c0x0000 (---------------) + I fm - 0x00201886, // n0x0ac9 c0x0000 (---------------) + I foggia - 0x00256d4c, // n0x0aca c0x0000 (---------------) + I forli-cesena - 0x0025708b, // n0x0acb c0x0000 (---------------) + I forlicesena - 0x00241542, // n0x0acc c0x0000 (---------------) + I fr - 0x0025c68f, // n0x0acd c0x0000 (---------------) + I friuli-v-giulia - 0x0025ca50, // n0x0ace c0x0000 (---------------) + I friuli-ve-giulia - 0x0025ce4f, // n0x0acf c0x0000 (---------------) + I friuli-vegiulia - 0x0025d215, // n0x0ad0 c0x0000 (---------------) + I friuli-venezia-giulia - 0x0025d754, // n0x0ad1 c0x0000 (---------------) + I friuli-veneziagiulia - 0x0025dc4e, // n0x0ad2 c0x0000 (---------------) + I friuli-vgiulia - 0x0025dfce, // n0x0ad3 c0x0000 (---------------) + I friuliv-giulia - 0x0025e34f, // n0x0ad4 c0x0000 (---------------) + I friulive-giulia - 0x0025e70e, // n0x0ad5 c0x0000 (---------------) + I friulivegiulia - 0x0025ea94, // n0x0ad6 c0x0000 (---------------) + I 
friulivenezia-giulia - 0x0025ef93, // n0x0ad7 c0x0000 (---------------) + I friuliveneziagiulia - 0x0025f44d, // n0x0ad8 c0x0000 (---------------) + I friulivgiulia - 0x00275609, // n0x0ad9 c0x0000 (---------------) + I frosinone - 0x0028a1c3, // n0x0ada c0x0000 (---------------) + I fvg - 0x00200282, // n0x0adb c0x0000 (---------------) + I ge - 0x00309cc5, // n0x0adc c0x0000 (---------------) + I genoa - 0x002082c6, // n0x0add c0x0000 (---------------) + I genova - 0x0020ec82, // n0x0ade c0x0000 (---------------) + I go - 0x0026ae07, // n0x0adf c0x0000 (---------------) + I gorizia - 0x0027c5c3, // n0x0ae0 c0x0000 (---------------) + I gov - 0x00209d42, // n0x0ae1 c0x0000 (---------------) + I gr - 0x002dbd08, // n0x0ae2 c0x0000 (---------------) + I grosseto - 0x002f57d1, // n0x0ae3 c0x0000 (---------------) + I iglesias-carbonia - 0x002f5c10, // n0x0ae4 c0x0000 (---------------) + I iglesiascarbonia - 0x00200402, // n0x0ae5 c0x0000 (---------------) + I im - 0x0027d007, // n0x0ae6 c0x0000 (---------------) + I imperia - 0x002022c2, // n0x0ae7 c0x0000 (---------------) + I is - 0x0026ed87, // n0x0ae8 c0x0000 (---------------) + I isernia - 0x002093c2, // n0x0ae9 c0x0000 (---------------) + I kr - 0x0026f489, // n0x0aea c0x0000 (---------------) + I la-spezia - 0x00369c07, // n0x0aeb c0x0000 (---------------) + I laquila - 0x00255d88, // n0x0aec c0x0000 (---------------) + I laspezia - 0x002238c6, // n0x0aed c0x0000 (---------------) + I latina - 0x002db0c3, // n0x0aee c0x0000 (---------------) + I laz - 0x00310585, // n0x0aef c0x0000 (---------------) + I lazio - 0x0023a442, // n0x0af0 c0x0000 (---------------) + I lc - 0x00209ec2, // n0x0af1 c0x0000 (---------------) + I le - 0x003a57c5, // n0x0af2 c0x0000 (---------------) + I lecce - 0x0022c805, // n0x0af3 c0x0000 (---------------) + I lecco - 0x00204e42, // n0x0af4 c0x0000 (---------------) + I li - 0x0023bac3, // n0x0af5 c0x0000 (---------------) + I lig - 0x0023bac7, // n0x0af6 c0x0000 (---------------) + 
I liguria - 0x00211f87, // n0x0af7 c0x0000 (---------------) + I livorno - 0x00200d42, // n0x0af8 c0x0000 (---------------) + I lo - 0x002583c4, // n0x0af9 c0x0000 (---------------) + I lodi - 0x002136c3, // n0x0afa c0x0000 (---------------) + I lom - 0x002c6d09, // n0x0afb c0x0000 (---------------) + I lombardia - 0x002dcac8, // n0x0afc c0x0000 (---------------) + I lombardy - 0x0020e982, // n0x0afd c0x0000 (---------------) + I lt - 0x002047c2, // n0x0afe c0x0000 (---------------) + I lu - 0x002ae2c7, // n0x0aff c0x0000 (---------------) + I lucania - 0x002b0445, // n0x0b00 c0x0000 (---------------) + I lucca - 0x0031da08, // n0x0b01 c0x0000 (---------------) + I macerata - 0x0024e707, // n0x0b02 c0x0000 (---------------) + I mantova - 0x00201cc3, // n0x0b03 c0x0000 (---------------) + I mar - 0x00283706, // n0x0b04 c0x0000 (---------------) + I marche - 0x002b618d, // n0x0b05 c0x0000 (---------------) + I massa-carrara - 0x002b650c, // n0x0b06 c0x0000 (---------------) + I massacarrara - 0x0035d586, // n0x0b07 c0x0000 (---------------) + I matera - 0x0020b442, // n0x0b08 c0x0000 (---------------) + I mb - 0x00219ac2, // n0x0b09 c0x0000 (---------------) + I mc - 0x00205d82, // n0x0b0a c0x0000 (---------------) + I me - 0x00241a4f, // n0x0b0b c0x0000 (---------------) + I medio-campidano - 0x00241e4e, // n0x0b0c c0x0000 (---------------) + I mediocampidano - 0x00349787, // n0x0b0d c0x0000 (---------------) + I messina - 0x00206082, // n0x0b0e c0x0000 (---------------) + I mi - 0x002f52c5, // n0x0b0f c0x0000 (---------------) + I milan - 0x002f52c6, // n0x0b10 c0x0000 (---------------) + I milano - 0x00220782, // n0x0b11 c0x0000 (---------------) + I mn - 0x00209502, // n0x0b12 c0x0000 (---------------) + I mo - 0x002161c6, // n0x0b13 c0x0000 (---------------) + I modena - 0x0026ecc3, // n0x0b14 c0x0000 (---------------) + I mol - 0x0026ecc6, // n0x0b15 c0x0000 (---------------) + I molise - 0x002c5905, // n0x0b16 c0x0000 (---------------) + I monza - 0x002c590d, 
// n0x0b17 c0x0000 (---------------) + I monza-brianza - 0x002c6155, // n0x0b18 c0x0000 (---------------) + I monza-e-della-brianza - 0x002c690c, // n0x0b19 c0x0000 (---------------) + I monzabrianza - 0x002c72cd, // n0x0b1a c0x0000 (---------------) + I monzaebrianza - 0x002c7692, // n0x0b1b c0x0000 (---------------) + I monzaedellabrianza - 0x0020df02, // n0x0b1c c0x0000 (---------------) + I ms - 0x00207682, // n0x0b1d c0x0000 (---------------) + I mt - 0x002012c2, // n0x0b1e c0x0000 (---------------) + I na - 0x00385986, // n0x0b1f c0x0000 (---------------) + I naples - 0x002a1106, // n0x0b20 c0x0000 (---------------) + I napoli - 0x00201342, // n0x0b21 c0x0000 (---------------) + I no - 0x00208346, // n0x0b22 c0x0000 (---------------) + I novara - 0x00204fc2, // n0x0b23 c0x0000 (---------------) + I nu - 0x0039dcc5, // n0x0b24 c0x0000 (---------------) + I nuoro - 0x002018c2, // n0x0b25 c0x0000 (---------------) + I og - 0x00258ac9, // n0x0b26 c0x0000 (---------------) + I ogliastra - 0x0026b1cc, // n0x0b27 c0x0000 (---------------) + I olbia-tempio - 0x0026b50b, // n0x0b28 c0x0000 (---------------) + I olbiatempio - 0x00200d82, // n0x0b29 c0x0000 (---------------) + I or - 0x00251348, // n0x0b2a c0x0000 (---------------) + I oristano - 0x002010c2, // n0x0b2b c0x0000 (---------------) + I ot - 0x00206642, // n0x0b2c c0x0000 (---------------) + I pa - 0x00214d86, // n0x0b2d c0x0000 (---------------) + I padova - 0x003603c5, // n0x0b2e c0x0000 (---------------) + I padua - 0x0022e007, // n0x0b2f c0x0000 (---------------) + I palermo - 0x00276745, // n0x0b30 c0x0000 (---------------) + I parma - 0x002d7685, // n0x0b31 c0x0000 (---------------) + I pavia - 0x00248042, // n0x0b32 c0x0000 (---------------) + I pc - 0x0033af42, // n0x0b33 c0x0000 (---------------) + I pd - 0x00200582, // n0x0b34 c0x0000 (---------------) + I pe - 0x00260547, // n0x0b35 c0x0000 (---------------) + I perugia - 0x00229a4d, // n0x0b36 c0x0000 (---------------) + I pesaro-urbino - 
0x00229dcc, // n0x0b37 c0x0000 (---------------) + I pesarourbino - 0x00237a07, // n0x0b38 c0x0000 (---------------) + I pescara - 0x00230e02, // n0x0b39 c0x0000 (---------------) + I pg - 0x002160c2, // n0x0b3a c0x0000 (---------------) + I pi - 0x0033a288, // n0x0b3b c0x0000 (---------------) + I piacenza - 0x00257588, // n0x0b3c c0x0000 (---------------) + I piedmont - 0x002d8248, // n0x0b3d c0x0000 (---------------) + I piemonte - 0x002df984, // n0x0b3e c0x0000 (---------------) + I pisa - 0x002abd47, // n0x0b3f c0x0000 (---------------) + I pistoia - 0x002ddd43, // n0x0b40 c0x0000 (---------------) + I pmn - 0x00230c02, // n0x0b41 c0x0000 (---------------) + I pn - 0x00200842, // n0x0b42 c0x0000 (---------------) + I po - 0x002e0149, // n0x0b43 c0x0000 (---------------) + I pordenone - 0x00309007, // n0x0b44 c0x0000 (---------------) + I potenza - 0x00207082, // n0x0b45 c0x0000 (---------------) + I pr - 0x0026cfc5, // n0x0b46 c0x0000 (---------------) + I prato - 0x00230382, // n0x0b47 c0x0000 (---------------) + I pt - 0x00236382, // n0x0b48 c0x0000 (---------------) + I pu - 0x002774c3, // n0x0b49 c0x0000 (---------------) + I pug - 0x002774c6, // n0x0b4a c0x0000 (---------------) + I puglia - 0x002e7782, // n0x0b4b c0x0000 (---------------) + I pv - 0x002e8542, // n0x0b4c c0x0000 (---------------) + I pz - 0x00204102, // n0x0b4d c0x0000 (---------------) + I ra - 0x00315406, // n0x0b4e c0x0000 (---------------) + I ragusa - 0x00208447, // n0x0b4f c0x0000 (---------------) + I ravenna - 0x00200dc2, // n0x0b50 c0x0000 (---------------) + I rc - 0x00206842, // n0x0b51 c0x0000 (---------------) + I re - 0x0033fdcf, // n0x0b52 c0x0000 (---------------) + I reggio-calabria - 0x0031338d, // n0x0b53 c0x0000 (---------------) + I reggio-emilia - 0x002526ce, // n0x0b54 c0x0000 (---------------) + I reggiocalabria - 0x00284a0c, // n0x0b55 c0x0000 (---------------) + I reggioemilia - 0x0020b502, // n0x0b56 c0x0000 (---------------) + I rg - 0x00200982, // n0x0b57 
c0x0000 (---------------) + I ri - 0x00223705, // n0x0b58 c0x0000 (---------------) + I rieti - 0x003a4f46, // n0x0b59 c0x0000 (---------------) + I rimini - 0x00222442, // n0x0b5a c0x0000 (---------------) + I rm - 0x0020b242, // n0x0b5b c0x0000 (---------------) + I rn - 0x00202082, // n0x0b5c c0x0000 (---------------) + I ro - 0x0022a1c4, // n0x0b5d c0x0000 (---------------) + I roma - 0x002e29c4, // n0x0b5e c0x0000 (---------------) + I rome - 0x00336f46, // n0x0b5f c0x0000 (---------------) + I rovigo - 0x00200fc2, // n0x0b60 c0x0000 (---------------) + I sa - 0x002783c7, // n0x0b61 c0x0000 (---------------) + I salerno - 0x002128c3, // n0x0b62 c0x0000 (---------------) + I sar - 0x00216f08, // n0x0b63 c0x0000 (---------------) + I sardegna - 0x0021a4c8, // n0x0b64 c0x0000 (---------------) + I sardinia - 0x002ada87, // n0x0b65 c0x0000 (---------------) + I sassari - 0x00385886, // n0x0b66 c0x0000 (---------------) + I savona - 0x00203342, // n0x0b67 c0x0000 (---------------) + I si - 0x0023f2c3, // n0x0b68 c0x0000 (---------------) + I sic - 0x00372c47, // n0x0b69 c0x0000 (---------------) + I sicilia - 0x00246946, // n0x0b6a c0x0000 (---------------) + I sicily - 0x002b8445, // n0x0b6b c0x0000 (---------------) + I siena - 0x00344848, // n0x0b6c c0x0000 (---------------) + I siracusa - 0x00208102, // n0x0b6d c0x0000 (---------------) + I so - 0x00307287, // n0x0b6e c0x0000 (---------------) + I sondrio - 0x00206602, // n0x0b6f c0x0000 (---------------) + I sp - 0x00334d82, // n0x0b70 c0x0000 (---------------) + I sr - 0x00202302, // n0x0b71 c0x0000 (---------------) + I ss - 0x002cf849, // n0x0b72 c0x0000 (---------------) + I suedtirol - 0x00208842, // n0x0b73 c0x0000 (---------------) + I sv - 0x002004c2, // n0x0b74 c0x0000 (---------------) + I ta - 0x00237c83, // n0x0b75 c0x0000 (---------------) + I taa - 0x0032c487, // n0x0b76 c0x0000 (---------------) + I taranto - 0x00201182, // n0x0b77 c0x0000 (---------------) + I te - 0x0026b34c, // n0x0b78 
c0x0000 (---------------) + I tempio-olbia - 0x0026b64b, // n0x0b79 c0x0000 (---------------) + I tempioolbia - 0x0035d606, // n0x0b7a c0x0000 (---------------) + I teramo - 0x0020b1c5, // n0x0b7b c0x0000 (---------------) + I terni - 0x0022e342, // n0x0b7c c0x0000 (---------------) + I tn - 0x002042c2, // n0x0b7d c0x0000 (---------------) + I to - 0x002b3ec6, // n0x0b7e c0x0000 (---------------) + I torino - 0x0021fd43, // n0x0b7f c0x0000 (---------------) + I tos - 0x00329207, // n0x0b80 c0x0000 (---------------) + I toscana - 0x0020ed02, // n0x0b81 c0x0000 (---------------) + I tp - 0x00204882, // n0x0b82 c0x0000 (---------------) + I tr - 0x00274bd5, // n0x0b83 c0x0000 (---------------) + I trani-andria-barletta - 0x00376995, // n0x0b84 c0x0000 (---------------) + I trani-barletta-andria - 0x0028b2d3, // n0x0b85 c0x0000 (---------------) + I traniandriabarletta - 0x00376ed3, // n0x0b86 c0x0000 (---------------) + I tranibarlettaandria - 0x00294547, // n0x0b87 c0x0000 (---------------) + I trapani - 0x002bca88, // n0x0b88 c0x0000 (---------------) + I trentino - 0x0036d250, // n0x0b89 c0x0000 (---------------) + I trentino-a-adige - 0x002eafcf, // n0x0b8a c0x0000 (---------------) + I trentino-aadige - 0x0033e0d3, // n0x0b8b c0x0000 (---------------) + I trentino-alto-adige - 0x0034fbd2, // n0x0b8c c0x0000 (---------------) + I trentino-altoadige - 0x002d2710, // n0x0b8d c0x0000 (---------------) + I trentino-s-tirol - 0x0033cd8f, // n0x0b8e c0x0000 (---------------) + I trentino-stirol - 0x002bca92, // n0x0b8f c0x0000 (---------------) + I trentino-sud-tirol - 0x002c54d1, // n0x0b90 c0x0000 (---------------) + I trentino-sudtirol - 0x002cc793, // n0x0b91 c0x0000 (---------------) + I trentino-sued-tirol - 0x002cf612, // n0x0b92 c0x0000 (---------------) + I trentino-suedtirol - 0x002d144f, // n0x0b93 c0x0000 (---------------) + I trentinoa-adige - 0x002d7b4e, // n0x0b94 c0x0000 (---------------) + I trentinoaadige - 0x002dfcd2, // n0x0b95 c0x0000 
(---------------) + I trentinoalto-adige - 0x002e54d1, // n0x0b96 c0x0000 (---------------) + I trentinoaltoadige - 0x002e5ccf, // n0x0b97 c0x0000 (---------------) + I trentinos-tirol - 0x002e780e, // n0x0b98 c0x0000 (---------------) + I trentinostirol - 0x002e88d1, // n0x0b99 c0x0000 (---------------) + I trentinosud-tirol - 0x002ef650, // n0x0b9a c0x0000 (---------------) + I trentinosudtirol - 0x00336512, // n0x0b9b c0x0000 (---------------) + I trentinosued-tirol - 0x002ea451, // n0x0b9c c0x0000 (---------------) + I trentinosuedtirol - 0x002fb506, // n0x0b9d c0x0000 (---------------) + I trento - 0x002f8347, // n0x0b9e c0x0000 (---------------) + I treviso - 0x00365dc7, // n0x0b9f c0x0000 (---------------) + I trieste - 0x00205e42, // n0x0ba0 c0x0000 (---------------) + I ts - 0x0027f245, // n0x0ba1 c0x0000 (---------------) + I turin - 0x002f00c7, // n0x0ba2 c0x0000 (---------------) + I tuscany - 0x0020cd02, // n0x0ba3 c0x0000 (---------------) + I tv - 0x00204502, // n0x0ba4 c0x0000 (---------------) + I ud - 0x00229605, // n0x0ba5 c0x0000 (---------------) + I udine - 0x0021f083, // n0x0ba6 c0x0000 (---------------) + I umb - 0x0024ab46, // n0x0ba7 c0x0000 (---------------) + I umbria - 0x00229c0d, // n0x0ba8 c0x0000 (---------------) + I urbino-pesaro - 0x00229f4c, // n0x0ba9 c0x0000 (---------------) + I urbinopesaro - 0x00200bc2, // n0x0baa c0x0000 (---------------) + I va - 0x0022770b, // n0x0bab c0x0000 (---------------) + I val-d-aosta - 0x00214e8a, // n0x0bac c0x0000 (---------------) + I val-daosta - 0x00326a0a, // n0x0bad c0x0000 (---------------) + I vald-aosta - 0x002b2f09, // n0x0bae c0x0000 (---------------) + I valdaosta - 0x002dec0b, // n0x0baf c0x0000 (---------------) + I valle-aosta - 0x002f07cd, // n0x0bb0 c0x0000 (---------------) + I valle-d-aosta - 0x0024e84c, // n0x0bb1 c0x0000 (---------------) + I valle-daosta - 0x00212a4a, // n0x0bb2 c0x0000 (---------------) + I valleaosta - 0x0021798c, // n0x0bb3 c0x0000 (---------------) + I 
valled-aosta - 0x0023fc8b, // n0x0bb4 c0x0000 (---------------) + I valledaosta - 0x002406cc, // n0x0bb5 c0x0000 (---------------) + I vallee-aoste - 0x0024994b, // n0x0bb6 c0x0000 (---------------) + I valleeaoste - 0x0026b143, // n0x0bb7 c0x0000 (---------------) + I vao - 0x002746c6, // n0x0bb8 c0x0000 (---------------) + I varese - 0x002dd582, // n0x0bb9 c0x0000 (---------------) + I vb - 0x002e8d42, // n0x0bba c0x0000 (---------------) + I vc - 0x00211ec3, // n0x0bbb c0x0000 (---------------) + I vda - 0x00203042, // n0x0bbc c0x0000 (---------------) + I ve - 0x00203043, // n0x0bbd c0x0000 (---------------) + I ven - 0x00366f06, // n0x0bbe c0x0000 (---------------) + I veneto - 0x0025d3c7, // n0x0bbf c0x0000 (---------------) + I venezia - 0x0024f546, // n0x0bc0 c0x0000 (---------------) + I venice - 0x0022ce08, // n0x0bc1 c0x0000 (---------------) + I verbania - 0x002c3dc8, // n0x0bc2 c0x0000 (---------------) + I vercelli - 0x0020cd46, // n0x0bc3 c0x0000 (---------------) + I verona - 0x00202402, // n0x0bc4 c0x0000 (---------------) + I vi - 0x002f2e4d, // n0x0bc5 c0x0000 (---------------) + I vibo-valentia - 0x002f318c, // n0x0bc6 c0x0000 (---------------) + I vibovalentia - 0x0023f047, // n0x0bc7 c0x0000 (---------------) + I vicenza - 0x002f9907, // n0x0bc8 c0x0000 (---------------) + I viterbo - 0x0020fe42, // n0x0bc9 c0x0000 (---------------) + I vr - 0x00235642, // n0x0bca c0x0000 (---------------) + I vs - 0x00273e42, // n0x0bcb c0x0000 (---------------) + I vt - 0x00213d42, // n0x0bcc c0x0000 (---------------) + I vv - 0x0020a442, // n0x0bcd c0x0000 (---------------) + I co - 0x002207c3, // n0x0bce c0x0000 (---------------) + I net - 0x00225403, // n0x0bcf c0x0000 (---------------) + I org - 0x00234803, // n0x0bd0 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x0bd1 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x0bd2 c0x0000 (---------------) + I gov - 0x002119c3, // n0x0bd3 c0x0000 (---------------) + I mil - 0x00207d04, // n0x0bd4 
c0x0000 (---------------) + I name - 0x002207c3, // n0x0bd5 c0x0000 (---------------) + I net - 0x00225403, // n0x0bd6 c0x0000 (---------------) + I org - 0x00217f43, // n0x0bd7 c0x0000 (---------------) + I sch - 0x00200342, // n0x0bd8 c0x0000 (---------------) + I ac - 0x002001c2, // n0x0bd9 c0x0000 (---------------) + I ad - 0x1f68fc85, // n0x0bda c0x007d (n0x0c47-n0x0c7b) + I aichi - 0x1fa055c5, // n0x0bdb c0x007e (n0x0c7b-n0x0c97) + I akita - 0x1ff150c6, // n0x0bdc c0x007f (n0x0c97-n0x0cad) + I aomori - 0x000ff148, // n0x0bdd c0x0000 (---------------) + blogspot - 0x202aec45, // n0x0bde c0x0080 (n0x0cad-n0x0ce7) + I chiba - 0x0020a442, // n0x0bdf c0x0000 (---------------) + I co - 0x00202ac2, // n0x0be0 c0x0000 (---------------) + I ed - 0x20627c85, // n0x0be1 c0x0081 (n0x0ce7-n0x0cfd) + I ehime - 0x20a7d1c5, // n0x0be2 c0x0082 (n0x0cfd-n0x0d0c) + I fukui - 0x20e7e907, // n0x0be3 c0x0083 (n0x0d0c-n0x0d4b) + I fukuoka - 0x213101c9, // n0x0be4 c0x0084 (n0x0d4b-n0x0d7e) + I fukushima - 0x216ad2c4, // n0x0be5 c0x0085 (n0x0d7e-n0x0da4) + I gifu - 0x0020ec82, // n0x0be6 c0x0000 (---------------) + I go - 0x00209d42, // n0x0be7 c0x0000 (---------------) + I gr - 0x21a43505, // n0x0be8 c0x0086 (n0x0da4-n0x0dc8) + I gunma - 0x21e03509, // n0x0be9 c0x0087 (n0x0dc8-n0x0de1) + I hiroshima - 0x22368c88, // n0x0bea c0x0088 (n0x0de1-n0x0e6f) + I hokkaido - 0x226ac145, // n0x0beb c0x0089 (n0x0e6f-n0x0e9d) + I hyogo - 0x22ac4687, // n0x0bec c0x008a (n0x0e9d-n0x0ed0) + I ibaraki - 0x22e18cc8, // n0x0bed c0x008b (n0x0ed0-n0x0ee3) + I ishikawa - 0x232d9605, // n0x0bee c0x008c (n0x0ee3-n0x0f05) + I iwate - 0x23601b06, // n0x0bef c0x008d (n0x0f05-n0x0f14) + I kagawa - 0x23a6bcc9, // n0x0bf0 c0x008e (n0x0f14-n0x0f28) + I kagoshima - 0x23f9ac48, // n0x0bf1 c0x008f (n0x0f28-n0x0f46) + I kanagawa - 0x242bb4c8, // n0x0bf2 c0x0090 (n0x0f46-n0x0f47)* o I kawasaki - 0x2469c2ca, // n0x0bf3 c0x0091 (n0x0f47-n0x0f48)* o I kitakyushu - 0x24a4d484, // n0x0bf4 c0x0092 (n0x0f48-n0x0f49)* o I kobe 
- 0x24eccf85, // n0x0bf5 c0x0093 (n0x0f49-n0x0f68) + I kochi - 0x252b5f08, // n0x0bf6 c0x0094 (n0x0f68-n0x0f82) + I kumamoto - 0x256c0c45, // n0x0bf7 c0x0095 (n0x0f82-n0x0fa1) + I kyoto - 0x0021a8c2, // n0x0bf8 c0x0000 (---------------) + I lg - 0x25a71843, // n0x0bf9 c0x0096 (n0x0fa1-n0x0fbf) + I mie - 0x25ea2786, // n0x0bfa c0x0097 (n0x0fbf-n0x0fe0) + I miyagi - 0x26260088, // n0x0bfb c0x0098 (n0x0fe0-n0x0ffb) + I miyazaki - 0x2678d3c6, // n0x0bfc c0x0099 (n0x0ffb-n0x1046) + I nagano - 0x26ac3a48, // n0x0bfd c0x009a (n0x1046-n0x105c) + I nagasaki - 0x26e2fb46, // n0x0bfe c0x009b (n0x105c-n0x105d)* o I nagoya - 0x272b8504, // n0x0bff c0x009c (n0x105d-n0x1083) + I nara - 0x002030c2, // n0x0c00 c0x0000 (---------------) + I ne - 0x27687e47, // n0x0c01 c0x009d (n0x1083-n0x10a5) + I niigata - 0x27aa8144, // n0x0c02 c0x009e (n0x10a5-n0x10b8) + I oita - 0x27e77a47, // n0x0c03 c0x009f (n0x10b8-n0x10d2) + I okayama - 0x283955c7, // n0x0c04 c0x00a0 (n0x10d2-n0x10fc) + I okinawa - 0x00200d82, // n0x0c05 c0x0000 (---------------) + I or - 0x2869a485, // n0x0c06 c0x00a1 (n0x10fc-n0x112e) + I osaka - 0x28a324c4, // n0x0c07 c0x00a2 (n0x112e-n0x1148) + I saga - 0x28ef6807, // n0x0c08 c0x00a3 (n0x1148-n0x118d) + I saitama - 0x29212307, // n0x0c09 c0x00a4 (n0x118d-n0x118e)* o I sapporo - 0x29682846, // n0x0c0a c0x00a5 (n0x118e-n0x118f)* o I sendai - 0x29a20245, // n0x0c0b c0x00a6 (n0x118f-n0x11a6) + I shiga - 0x29e93907, // n0x0c0c c0x00a7 (n0x11a6-n0x11bd) + I shimane - 0x2a2fe1c8, // n0x0c0d c0x00a8 (n0x11bd-n0x11e1) + I shizuoka - 0x2a747347, // n0x0c0e c0x00a9 (n0x11e1-n0x1200) + I tochigi - 0x2ab7ea09, // n0x0c0f c0x00aa (n0x1200-n0x1211) + I tokushima - 0x2af44a85, // n0x0c10 c0x00ab (n0x1211-n0x124a) + I tokyo - 0x2b2fb607, // n0x0c11 c0x00ac (n0x124a-n0x1257) + I tottori - 0x2b68f146, // n0x0c12 c0x00ad (n0x1257-n0x126f) + I toyama - 0x2ba23d88, // n0x0c13 c0x00ae (n0x126f-n0x128c) + I wakayama - 0x002db80d, // n0x0c14 c0x0000 (---------------) + I xn--0trq7p7nn - 
0x002481c9, // n0x0c15 c0x0000 (---------------) + I xn--1ctwo - 0x00251b4b, // n0x0c16 c0x0000 (---------------) + I xn--1lqs03n - 0x0025a98b, // n0x0c17 c0x0000 (---------------) + I xn--1lqs71d - 0x0027240b, // n0x0c18 c0x0000 (---------------) + I xn--2m4a15e - 0x002a56cb, // n0x0c19 c0x0000 (---------------) + I xn--32vp30h - 0x0030130b, // n0x0c1a c0x0000 (---------------) + I xn--4it168d - 0x003015cb, // n0x0c1b c0x0000 (---------------) + I xn--4it797k - 0x00301a09, // n0x0c1c c0x0000 (---------------) + I xn--4pvxs - 0x00302b0b, // n0x0c1d c0x0000 (---------------) + I xn--5js045d - 0x00302dcb, // n0x0c1e c0x0000 (---------------) + I xn--5rtp49c - 0x0030324b, // n0x0c1f c0x0000 (---------------) + I xn--5rtq34k - 0x003042ca, // n0x0c20 c0x0000 (---------------) + I xn--6btw5a - 0x0030480a, // n0x0c21 c0x0000 (---------------) + I xn--6orx2r - 0x00304e0c, // n0x0c22 c0x0000 (---------------) + I xn--7t0a264c - 0x00307e8b, // n0x0c23 c0x0000 (---------------) + I xn--8ltr62k - 0x0030840a, // n0x0c24 c0x0000 (---------------) + I xn--8pvr4u - 0x0032028a, // n0x0c25 c0x0000 (---------------) + I xn--c3s14m - 0x0032d08e, // n0x0c26 c0x0000 (---------------) + I xn--d5qv7z876c - 0x0032e68e, // n0x0c27 c0x0000 (---------------) + I xn--djrs72d6uy - 0x0032ea0a, // n0x0c28 c0x0000 (---------------) + I xn--djty4k - 0x0033010a, // n0x0c29 c0x0000 (---------------) + I xn--efvn9s - 0x00330a4b, // n0x0c2a c0x0000 (---------------) + I xn--ehqz56n - 0x00330d0b, // n0x0c2b c0x0000 (---------------) + I xn--elqq16h - 0x00331a4b, // n0x0c2c c0x0000 (---------------) + I xn--f6qx53a - 0x00349bcb, // n0x0c2d c0x0000 (---------------) + I xn--k7yn95e - 0x0034a1ca, // n0x0c2e c0x0000 (---------------) + I xn--kbrq7o - 0x0034ae8b, // n0x0c2f c0x0000 (---------------) + I xn--klt787d - 0x0034b14a, // n0x0c30 c0x0000 (---------------) + I xn--kltp7d - 0x0034b3ca, // n0x0c31 c0x0000 (---------------) + I xn--kltx9a - 0x0034b64a, // n0x0c32 c0x0000 (---------------) + I 
xn--klty5x - 0x0036688b, // n0x0c33 c0x0000 (---------------) + I xn--mkru45i - 0x0036e84b, // n0x0c34 c0x0000 (---------------) + I xn--nit225k - 0x0037048e, // n0x0c35 c0x0000 (---------------) + I xn--ntso0iqx3a - 0x0037080b, // n0x0c36 c0x0000 (---------------) + I xn--ntsq17g - 0x00377ccb, // n0x0c37 c0x0000 (---------------) + I xn--pssu33l - 0x0037990b, // n0x0c38 c0x0000 (---------------) + I xn--qqqt11m - 0x0037c74a, // n0x0c39 c0x0000 (---------------) + I xn--rht27z - 0x0037c9c9, // n0x0c3a c0x0000 (---------------) + I xn--rht3d - 0x0037cc0a, // n0x0c3b c0x0000 (---------------) + I xn--rht61e - 0x0037e28a, // n0x0c3c c0x0000 (---------------) + I xn--rny31h - 0x0039258b, // n0x0c3d c0x0000 (---------------) + I xn--tor131o - 0x003940cb, // n0x0c3e c0x0000 (---------------) + I xn--uist22h - 0x00394f0a, // n0x0c3f c0x0000 (---------------) + I xn--uisz3g - 0x0039660b, // n0x0c40 c0x0000 (---------------) + I xn--uuwu58a - 0x0039bb0b, // n0x0c41 c0x0000 (---------------) + I xn--vgu402c - 0x003a458b, // n0x0c42 c0x0000 (---------------) + I xn--zbx025d - 0x2be80908, // n0x0c43 c0x00af (n0x128c-n0x12ae) + I yamagata - 0x2c288d89, // n0x0c44 c0x00b0 (n0x12ae-n0x12be) + I yamaguchi - 0x2c6a1649, // n0x0c45 c0x00b1 (n0x12be-n0x12da) + I yamanashi - 0x2cb57948, // n0x0c46 c0x00b2 (n0x12da-n0x12db)* o I yokohama - 0x00336cc5, // n0x0c47 c0x0000 (---------------) + I aisai - 0x00201ec3, // n0x0c48 c0x0000 (---------------) + I ama - 0x00201004, // n0x0c49 c0x0000 (---------------) + I anjo - 0x0038a145, // n0x0c4a c0x0000 (---------------) + I asuke - 0x002ad006, // n0x0c4b c0x0000 (---------------) + I chiryu - 0x002af505, // n0x0c4c c0x0000 (---------------) + I chita - 0x00288584, // n0x0c4d c0x0000 (---------------) + I fuso - 0x0026ad08, // n0x0c4e c0x0000 (---------------) + I gamagori - 0x00255385, // n0x0c4f c0x0000 (---------------) + I handa - 0x00290704, // n0x0c50 c0x0000 (---------------) + I hazu - 0x002c5e07, // n0x0c51 c0x0000 (---------------) 
+ I hekinan - 0x0029cc0a, // n0x0c52 c0x0000 (---------------) + I higashiura - 0x0023090a, // n0x0c53 c0x0000 (---------------) + I ichinomiya - 0x00301e87, // n0x0c54 c0x0000 (---------------) + I inazawa - 0x00204f87, // n0x0c55 c0x0000 (---------------) + I inuyama - 0x002eea47, // n0x0c56 c0x0000 (---------------) + I isshiki - 0x0022a307, // n0x0c57 c0x0000 (---------------) + I iwakura - 0x0028eec5, // n0x0c58 c0x0000 (---------------) + I kanie - 0x00329606, // n0x0c59 c0x0000 (---------------) + I kariya - 0x0030d6c7, // n0x0c5a c0x0000 (---------------) + I kasugai - 0x0035d8c4, // n0x0c5b c0x0000 (---------------) + I kira - 0x00357786, // n0x0c5c c0x0000 (---------------) + I kiyosu - 0x002a9fc6, // n0x0c5d c0x0000 (---------------) + I komaki - 0x002026c5, // n0x0c5e c0x0000 (---------------) + I konan - 0x0025b484, // n0x0c5f c0x0000 (---------------) + I kota - 0x002a3c46, // n0x0c60 c0x0000 (---------------) + I mihama - 0x0029be07, // n0x0c61 c0x0000 (---------------) + I miyoshi - 0x0021dd06, // n0x0c62 c0x0000 (---------------) + I nishio - 0x003566c7, // n0x0c63 c0x0000 (---------------) + I nisshin - 0x0027bd43, // n0x0c64 c0x0000 (---------------) + I obu - 0x00250b06, // n0x0c65 c0x0000 (---------------) + I oguchi - 0x00237105, // n0x0c66 c0x0000 (---------------) + I oharu - 0x0027ea07, // n0x0c67 c0x0000 (---------------) + I okazaki - 0x002c120a, // n0x0c68 c0x0000 (---------------) + I owariasahi - 0x002b0144, // n0x0c69 c0x0000 (---------------) + I seto - 0x00215908, // n0x0c6a c0x0000 (---------------) + I shikatsu - 0x00287089, // n0x0c6b c0x0000 (---------------) + I shinshiro - 0x002d0d87, // n0x0c6c c0x0000 (---------------) + I shitara - 0x002e9dc6, // n0x0c6d c0x0000 (---------------) + I tahara - 0x0035c7c8, // n0x0c6e c0x0000 (---------------) + I takahama - 0x0030a049, // n0x0c6f c0x0000 (---------------) + I tobishima - 0x00288044, // n0x0c70 c0x0000 (---------------) + I toei - 0x00300144, // n0x0c71 c0x0000 
(---------------) + I togo - 0x002fa645, // n0x0c72 c0x0000 (---------------) + I tokai - 0x002c1d88, // n0x0c73 c0x0000 (---------------) + I tokoname - 0x002c27c7, // n0x0c74 c0x0000 (---------------) + I toyoake - 0x002856c9, // n0x0c75 c0x0000 (---------------) + I toyohashi - 0x00243788, // n0x0c76 c0x0000 (---------------) + I toyokawa - 0x002490c6, // n0x0c77 c0x0000 (---------------) + I toyone - 0x0025a486, // n0x0c78 c0x0000 (---------------) + I toyota - 0x00296d88, // n0x0c79 c0x0000 (---------------) + I tsushima - 0x00369586, // n0x0c7a c0x0000 (---------------) + I yatomi - 0x002055c5, // n0x0c7b c0x0000 (---------------) + I akita - 0x00282906, // n0x0c7c c0x0000 (---------------) + I daisen - 0x00277d48, // n0x0c7d c0x0000 (---------------) + I fujisato - 0x0023a5c6, // n0x0c7e c0x0000 (---------------) + I gojome - 0x0025644b, // n0x0c7f c0x0000 (---------------) + I hachirogata - 0x0028bdc6, // n0x0c80 c0x0000 (---------------) + I happou - 0x00298bcd, // n0x0c81 c0x0000 (---------------) + I higashinaruse - 0x0037f145, // n0x0c82 c0x0000 (---------------) + I honjo - 0x002a8006, // n0x0c83 c0x0000 (---------------) + I honjyo - 0x00218d85, // n0x0c84 c0x0000 (---------------) + I ikawa - 0x00295e89, // n0x0c85 c0x0000 (---------------) + I kamikoani - 0x0030c807, // n0x0c86 c0x0000 (---------------) + I kamioka - 0x003749c8, // n0x0c87 c0x0000 (---------------) + I katagami - 0x00319606, // n0x0c88 c0x0000 (---------------) + I kazuno - 0x00297709, // n0x0c89 c0x0000 (---------------) + I kitaakita - 0x002e2746, // n0x0c8a c0x0000 (---------------) + I kosaka - 0x002c1185, // n0x0c8b c0x0000 (---------------) + I kyowa - 0x0022b406, // n0x0c8c c0x0000 (---------------) + I misato - 0x002b3c46, // n0x0c8d c0x0000 (---------------) + I mitane - 0x002c90c9, // n0x0c8e c0x0000 (---------------) + I moriyoshi - 0x00352386, // n0x0c8f c0x0000 (---------------) + I nikaho - 0x002f7447, // n0x0c90 c0x0000 (---------------) + I noshiro - 0x002c8805, // 
n0x0c91 c0x0000 (---------------) + I odate - 0x00202ec3, // n0x0c92 c0x0000 (---------------) + I oga - 0x00223b05, // n0x0c93 c0x0000 (---------------) + I ogata - 0x002a5d87, // n0x0c94 c0x0000 (---------------) + I semboku - 0x0032bb86, // n0x0c95 c0x0000 (---------------) + I yokote - 0x0037f049, // n0x0c96 c0x0000 (---------------) + I yurihonjo - 0x003150c6, // n0x0c97 c0x0000 (---------------) + I aomori - 0x0024cb86, // n0x0c98 c0x0000 (---------------) + I gonohe - 0x0020c209, // n0x0c99 c0x0000 (---------------) + I hachinohe - 0x00282309, // n0x0c9a c0x0000 (---------------) + I hashikami - 0x0029f5c7, // n0x0c9b c0x0000 (---------------) + I hiranai - 0x00312088, // n0x0c9c c0x0000 (---------------) + I hirosaki - 0x0025a009, // n0x0c9d c0x0000 (---------------) + I itayanagi - 0x0027eec8, // n0x0c9e c0x0000 (---------------) + I kuroishi - 0x00379b86, // n0x0c9f c0x0000 (---------------) + I misawa - 0x002d1c05, // n0x0ca0 c0x0000 (---------------) + I mutsu - 0x0021f44a, // n0x0ca1 c0x0000 (---------------) + I nakadomari - 0x0024cc06, // n0x0ca2 c0x0000 (---------------) + I noheji - 0x00204086, // n0x0ca3 c0x0000 (---------------) + I oirase - 0x002a2945, // n0x0ca4 c0x0000 (---------------) + I owani - 0x002ad608, // n0x0ca5 c0x0000 (---------------) + I rokunohe - 0x00203147, // n0x0ca6 c0x0000 (---------------) + I sannohe - 0x0023740a, // n0x0ca7 c0x0000 (---------------) + I shichinohe - 0x0024ca86, // n0x0ca8 c0x0000 (---------------) + I shingo - 0x0023fec5, // n0x0ca9 c0x0000 (---------------) + I takko - 0x00254446, // n0x0caa c0x0000 (---------------) + I towada - 0x002aa707, // n0x0cab c0x0000 (---------------) + I tsugaru - 0x002e9c87, // n0x0cac c0x0000 (---------------) + I tsuruta - 0x002f70c5, // n0x0cad c0x0000 (---------------) + I abiko - 0x002c1345, // n0x0cae c0x0000 (---------------) + I asahi - 0x002e7c06, // n0x0caf c0x0000 (---------------) + I chonan - 0x002e8d86, // n0x0cb0 c0x0000 (---------------) + I chosei - 
0x002ffa86, // n0x0cb1 c0x0000 (---------------) + I choshi - 0x0032efc4, // n0x0cb2 c0x0000 (---------------) + I chuo - 0x002813c9, // n0x0cb3 c0x0000 (---------------) + I funabashi - 0x00289c46, // n0x0cb4 c0x0000 (---------------) + I futtsu - 0x00282eca, // n0x0cb5 c0x0000 (---------------) + I hanamigawa - 0x0028fcc8, // n0x0cb6 c0x0000 (---------------) + I ichihara - 0x00261808, // n0x0cb7 c0x0000 (---------------) + I ichikawa - 0x0023090a, // n0x0cb8 c0x0000 (---------------) + I ichinomiya - 0x00204e85, // n0x0cb9 c0x0000 (---------------) + I inzai - 0x0029bd45, // n0x0cba c0x0000 (---------------) + I isumi - 0x0030a8c8, // n0x0cbb c0x0000 (---------------) + I kamagaya - 0x002ccd08, // n0x0cbc c0x0000 (---------------) + I kamogawa - 0x0036ab87, // n0x0cbd c0x0000 (---------------) + I kashiwa - 0x002948c6, // n0x0cbe c0x0000 (---------------) + I katori - 0x0031b088, // n0x0cbf c0x0000 (---------------) + I katsuura - 0x00268e47, // n0x0cc0 c0x0000 (---------------) + I kimitsu - 0x00280dc8, // n0x0cc1 c0x0000 (---------------) + I kisarazu - 0x00367f86, // n0x0cc2 c0x0000 (---------------) + I kozaki - 0x00283488, // n0x0cc3 c0x0000 (---------------) + I kujukuri - 0x002b6a06, // n0x0cc4 c0x0000 (---------------) + I kyonan - 0x0023cdc7, // n0x0cc5 c0x0000 (---------------) + I matsudo - 0x00298146, // n0x0cc6 c0x0000 (---------------) + I midori - 0x002a3c46, // n0x0cc7 c0x0000 (---------------) + I mihama - 0x00234f0a, // n0x0cc8 c0x0000 (---------------) + I minamiboso - 0x00235986, // n0x0cc9 c0x0000 (---------------) + I mobara - 0x002d1c09, // n0x0cca c0x0000 (---------------) + I mutsuzawa - 0x002aef46, // n0x0ccb c0x0000 (---------------) + I nagara - 0x002d424a, // n0x0ccc c0x0000 (---------------) + I nagareyama - 0x002b8509, // n0x0ccd c0x0000 (---------------) + I narashino - 0x002f8706, // n0x0cce c0x0000 (---------------) + I narita - 0x0037ac44, // n0x0ccf c0x0000 (---------------) + I noda - 0x00309d8d, // n0x0cd0 c0x0000 
(---------------) + I oamishirasato - 0x00289007, // n0x0cd1 c0x0000 (---------------) + I omigawa - 0x0031d786, // n0x0cd2 c0x0000 (---------------) + I onjuku - 0x002bb385, // n0x0cd3 c0x0000 (---------------) + I otaki - 0x002e27c5, // n0x0cd4 c0x0000 (---------------) + I sakae - 0x00306846, // n0x0cd5 c0x0000 (---------------) + I sakura - 0x0028e889, // n0x0cd6 c0x0000 (---------------) + I shimofusa - 0x002a22c7, // n0x0cd7 c0x0000 (---------------) + I shirako - 0x002794c6, // n0x0cd8 c0x0000 (---------------) + I shiroi - 0x002d06c6, // n0x0cd9 c0x0000 (---------------) + I shisui - 0x00288609, // n0x0cda c0x0000 (---------------) + I sodegaura - 0x0021c884, // n0x0cdb c0x0000 (---------------) + I sosa - 0x00225dc4, // n0x0cdc c0x0000 (---------------) + I tako - 0x00201d88, // n0x0cdd c0x0000 (---------------) + I tateyama - 0x003672c6, // n0x0cde c0x0000 (---------------) + I togane - 0x0029ef48, // n0x0cdf c0x0000 (---------------) + I tohnosho - 0x0022b388, // n0x0ce0 c0x0000 (---------------) + I tomisato - 0x0027de07, // n0x0ce1 c0x0000 (---------------) + I urayasu - 0x00200309, // n0x0ce2 c0x0000 (---------------) + I yachimata - 0x002ffc87, // n0x0ce3 c0x0000 (---------------) + I yachiyo - 0x002aeb0a, // n0x0ce4 c0x0000 (---------------) + I yokaichiba - 0x0022ad4f, // n0x0ce5 c0x0000 (---------------) + I yokoshibahikari - 0x0026220a, // n0x0ce6 c0x0000 (---------------) + I yotsukaido - 0x00223505, // n0x0ce7 c0x0000 (---------------) + I ainan - 0x00277f85, // n0x0ce8 c0x0000 (---------------) + I honai - 0x00214985, // n0x0ce9 c0x0000 (---------------) + I ikata - 0x002537c7, // n0x0cea c0x0000 (---------------) + I imabari - 0x00233303, // n0x0ceb c0x0000 (---------------) + I iyo - 0x00312288, // n0x0cec c0x0000 (---------------) + I kamijima - 0x002f3a86, // n0x0ced c0x0000 (---------------) + I kihoku - 0x002f3b89, // n0x0cee c0x0000 (---------------) + I kumakogen - 0x003a2706, // n0x0cef c0x0000 (---------------) + I masaki - 
0x002c2187, // n0x0cf0 c0x0000 (---------------) + I matsuno - 0x002974c9, // n0x0cf1 c0x0000 (---------------) + I matsuyama - 0x003748c8, // n0x0cf2 c0x0000 (---------------) + I namikata - 0x002a2a07, // n0x0cf3 c0x0000 (---------------) + I niihama - 0x003006c3, // n0x0cf4 c0x0000 (---------------) + I ozu - 0x00336d45, // n0x0cf5 c0x0000 (---------------) + I saijo - 0x00233285, // n0x0cf6 c0x0000 (---------------) + I seiyo - 0x0032ee0b, // n0x0cf7 c0x0000 (---------------) + I shikokuchuo - 0x002c0d04, // n0x0cf8 c0x0000 (---------------) + I tobe - 0x0020a204, // n0x0cf9 c0x0000 (---------------) + I toon - 0x00276d06, // n0x0cfa c0x0000 (---------------) + I uchiko - 0x00300a47, // n0x0cfb c0x0000 (---------------) + I uwajima - 0x0038e7ca, // n0x0cfc c0x0000 (---------------) + I yawatahama - 0x0034d107, // n0x0cfd c0x0000 (---------------) + I echizen - 0x002880c7, // n0x0cfe c0x0000 (---------------) + I eiheiji - 0x0027d1c5, // n0x0cff c0x0000 (---------------) + I fukui - 0x00202a45, // n0x0d00 c0x0000 (---------------) + I ikeda - 0x00218889, // n0x0d01 c0x0000 (---------------) + I katsuyama - 0x002a3c46, // n0x0d02 c0x0000 (---------------) + I mihama - 0x0034cf8d, // n0x0d03 c0x0000 (---------------) + I minamiechizen - 0x00395985, // n0x0d04 c0x0000 (---------------) + I obama - 0x0029f143, // n0x0d05 c0x0000 (---------------) + I ohi - 0x00209903, // n0x0d06 c0x0000 (---------------) + I ono - 0x002f4405, // n0x0d07 c0x0000 (---------------) + I sabae - 0x0034db85, // n0x0d08 c0x0000 (---------------) + I sakai - 0x0035c7c8, // n0x0d09 c0x0000 (---------------) + I takahama - 0x0027abc7, // n0x0d0a c0x0000 (---------------) + I tsuruga - 0x00225b46, // n0x0d0b c0x0000 (---------------) + I wakasa - 0x0029d2c6, // n0x0d0c c0x0000 (---------------) + I ashiya - 0x0022ab05, // n0x0d0d c0x0000 (---------------) + I buzen - 0x0023a487, // n0x0d0e c0x0000 (---------------) + I chikugo - 0x00205207, // n0x0d0f c0x0000 (---------------) + I chikuho - 
0x00292c47, // n0x0d10 c0x0000 (---------------) + I chikujo - 0x002cd00a, // n0x0d11 c0x0000 (---------------) + I chikushino - 0x00250bc8, // n0x0d12 c0x0000 (---------------) + I chikuzen - 0x0032efc4, // n0x0d13 c0x0000 (---------------) + I chuo - 0x00213ec7, // n0x0d14 c0x0000 (---------------) + I dazaifu - 0x0027be87, // n0x0d15 c0x0000 (---------------) + I fukuchi - 0x003306c6, // n0x0d16 c0x0000 (---------------) + I hakata - 0x00264247, // n0x0d17 c0x0000 (---------------) + I higashi - 0x002d5c48, // n0x0d18 c0x0000 (---------------) + I hirokawa - 0x002a1548, // n0x0d19 c0x0000 (---------------) + I hisayama - 0x0026a7c6, // n0x0d1a c0x0000 (---------------) + I iizuka - 0x00219fc8, // n0x0d1b c0x0000 (---------------) + I inatsuki - 0x002c8c84, // n0x0d1c c0x0000 (---------------) + I kaho - 0x0030d6c6, // n0x0d1d c0x0000 (---------------) + I kasuga - 0x0020a6c6, // n0x0d1e c0x0000 (---------------) + I kasuya - 0x003a4406, // n0x0d1f c0x0000 (---------------) + I kawara - 0x00310bc6, // n0x0d20 c0x0000 (---------------) + I keisen - 0x002171c4, // n0x0d21 c0x0000 (---------------) + I koga - 0x0022a3c6, // n0x0d22 c0x0000 (---------------) + I kurate - 0x002ba406, // n0x0d23 c0x0000 (---------------) + I kurogi - 0x00296506, // n0x0d24 c0x0000 (---------------) + I kurume - 0x00220086, // n0x0d25 c0x0000 (---------------) + I minami - 0x002097c6, // n0x0d26 c0x0000 (---------------) + I miyako - 0x002d5a86, // n0x0d27 c0x0000 (---------------) + I miyama - 0x00225a48, // n0x0d28 c0x0000 (---------------) + I miyawaka - 0x00357608, // n0x0d29 c0x0000 (---------------) + I mizumaki - 0x002ce148, // n0x0d2a c0x0000 (---------------) + I munakata - 0x002af708, // n0x0d2b c0x0000 (---------------) + I nakagawa - 0x0030a846, // n0x0d2c c0x0000 (---------------) + I nakama - 0x0020e5c5, // n0x0d2d c0x0000 (---------------) + I nishi - 0x00223ac6, // n0x0d2e c0x0000 (---------------) + I nogata - 0x002ac1c5, // n0x0d2f c0x0000 (---------------) + I ogori - 
0x0037bb87, // n0x0d30 c0x0000 (---------------) + I okagaki - 0x00243845, // n0x0d31 c0x0000 (---------------) + I okawa - 0x00215303, // n0x0d32 c0x0000 (---------------) + I oki - 0x00205985, // n0x0d33 c0x0000 (---------------) + I omuta - 0x00268a84, // n0x0d34 c0x0000 (---------------) + I onga - 0x00209905, // n0x0d35 c0x0000 (---------------) + I onojo - 0x0020ffc3, // n0x0d36 c0x0000 (---------------) + I oto - 0x002de447, // n0x0d37 c0x0000 (---------------) + I saigawa - 0x0036cf08, // n0x0d38 c0x0000 (---------------) + I sasaguri - 0x00356786, // n0x0d39 c0x0000 (---------------) + I shingu - 0x0028eb4d, // n0x0d3a c0x0000 (---------------) + I shinyoshitomi - 0x00277f46, // n0x0d3b c0x0000 (---------------) + I shonai - 0x00295585, // n0x0d3c c0x0000 (---------------) + I soeda - 0x002c9483, // n0x0d3d c0x0000 (---------------) + I sue - 0x002b58c9, // n0x0d3e c0x0000 (---------------) + I tachiarai - 0x002c4846, // n0x0d3f c0x0000 (---------------) + I tagawa - 0x00232206, // n0x0d40 c0x0000 (---------------) + I takata - 0x0034e0c4, // n0x0d41 c0x0000 (---------------) + I toho - 0x00262187, // n0x0d42 c0x0000 (---------------) + I toyotsu - 0x0023b786, // n0x0d43 c0x0000 (---------------) + I tsuiki - 0x002ad445, // n0x0d44 c0x0000 (---------------) + I ukiha - 0x00206043, // n0x0d45 c0x0000 (---------------) + I umi - 0x00202204, // n0x0d46 c0x0000 (---------------) + I usui - 0x0027c046, // n0x0d47 c0x0000 (---------------) + I yamada - 0x002993c4, // n0x0d48 c0x0000 (---------------) + I yame - 0x00317108, // n0x0d49 c0x0000 (---------------) + I yanagawa - 0x00324f89, // n0x0d4a c0x0000 (---------------) + I yukuhashi - 0x002c0a09, // n0x0d4b c0x0000 (---------------) + I aizubange - 0x0029ed4a, // n0x0d4c c0x0000 (---------------) + I aizumisato - 0x0024458d, // n0x0d4d c0x0000 (---------------) + I aizuwakamatsu - 0x00253087, // n0x0d4e c0x0000 (---------------) + I asakawa - 0x00203f06, // n0x0d4f c0x0000 (---------------) + I bandai - 
0x0020aec4, // n0x0d50 c0x0000 (---------------) + I date - 0x003101c9, // n0x0d51 c0x0000 (---------------) + I fukushima - 0x002872c8, // n0x0d52 c0x0000 (---------------) + I furudono - 0x00288c06, // n0x0d53 c0x0000 (---------------) + I futaba - 0x00258d46, // n0x0d54 c0x0000 (---------------) + I hanawa - 0x00264247, // n0x0d55 c0x0000 (---------------) + I higashi - 0x002a9246, // n0x0d56 c0x0000 (---------------) + I hirata - 0x0021b286, // n0x0d57 c0x0000 (---------------) + I hirono - 0x00325286, // n0x0d58 c0x0000 (---------------) + I iitate - 0x0039564a, // n0x0d59 c0x0000 (---------------) + I inawashiro - 0x00218cc8, // n0x0d5a c0x0000 (---------------) + I ishikawa - 0x00226045, // n0x0d5b c0x0000 (---------------) + I iwaki - 0x0027db49, // n0x0d5c c0x0000 (---------------) + I izumizaki - 0x002c2f4a, // n0x0d5d c0x0000 (---------------) + I kagamiishi - 0x002ca0c8, // n0x0d5e c0x0000 (---------------) + I kaneyama - 0x0029b248, // n0x0d5f c0x0000 (---------------) + I kawamata - 0x00295b08, // n0x0d60 c0x0000 (---------------) + I kitakata - 0x0020560c, // n0x0d61 c0x0000 (---------------) + I kitashiobara - 0x002d5005, // n0x0d62 c0x0000 (---------------) + I koori - 0x0029d548, // n0x0d63 c0x0000 (---------------) + I koriyama - 0x002f51c6, // n0x0d64 c0x0000 (---------------) + I kunimi - 0x002a7a46, // n0x0d65 c0x0000 (---------------) + I miharu - 0x002c1587, // n0x0d66 c0x0000 (---------------) + I mishima - 0x0034d005, // n0x0d67 c0x0000 (---------------) + I namie - 0x00282a85, // n0x0d68 c0x0000 (---------------) + I nango - 0x002c08c9, // n0x0d69 c0x0000 (---------------) + I nishiaizu - 0x0020eb47, // n0x0d6a c0x0000 (---------------) + I nishigo - 0x002f3b45, // n0x0d6b c0x0000 (---------------) + I okuma - 0x0021c587, // n0x0d6c c0x0000 (---------------) + I omotego - 0x00209903, // n0x0d6d c0x0000 (---------------) + I ono - 0x002c3405, // n0x0d6e c0x0000 (---------------) + I otama - 0x00347f08, // n0x0d6f c0x0000 (---------------) 
+ I samegawa - 0x002bdd07, // n0x0d70 c0x0000 (---------------) + I shimogo - 0x0029b109, // n0x0d71 c0x0000 (---------------) + I shirakawa - 0x002b25c5, // n0x0d72 c0x0000 (---------------) + I showa - 0x002e5244, // n0x0d73 c0x0000 (---------------) + I soma - 0x0029ff48, // n0x0d74 c0x0000 (---------------) + I sukagawa - 0x0023e347, // n0x0d75 c0x0000 (---------------) + I taishin - 0x002a2bc8, // n0x0d76 c0x0000 (---------------) + I tamakawa - 0x0032c0c8, // n0x0d77 c0x0000 (---------------) + I tanagura - 0x002d02c5, // n0x0d78 c0x0000 (---------------) + I tenei - 0x00351c46, // n0x0d79 c0x0000 (---------------) + I yabuki - 0x0028f806, // n0x0d7a c0x0000 (---------------) + I yamato - 0x0024dfc9, // n0x0d7b c0x0000 (---------------) + I yamatsuri - 0x0031b907, // n0x0d7c c0x0000 (---------------) + I yanaizu - 0x002ac906, // n0x0d7d c0x0000 (---------------) + I yugawa - 0x0028c347, // n0x0d7e c0x0000 (---------------) + I anpachi - 0x00216283, // n0x0d7f c0x0000 (---------------) + I ena - 0x002ad2c4, // n0x0d80 c0x0000 (---------------) + I gifu - 0x002a0885, // n0x0d81 c0x0000 (---------------) + I ginan - 0x00214144, // n0x0d82 c0x0000 (---------------) + I godo - 0x00233e84, // n0x0d83 c0x0000 (---------------) + I gujo - 0x00280b87, // n0x0d84 c0x0000 (---------------) + I hashima - 0x00216987, // n0x0d85 c0x0000 (---------------) + I hichiso - 0x00279684, // n0x0d86 c0x0000 (---------------) + I hida - 0x0029af50, // n0x0d87 c0x0000 (---------------) + I higashishirakawa - 0x00254187, // n0x0d88 c0x0000 (---------------) + I ibigawa - 0x00202a45, // n0x0d89 c0x0000 (---------------) + I ikeda - 0x002ec70c, // n0x0d8a c0x0000 (---------------) + I kakamigahara - 0x00278944, // n0x0d8b c0x0000 (---------------) + I kani - 0x00394d08, // n0x0d8c c0x0000 (---------------) + I kasahara - 0x0023ccc9, // n0x0d8d c0x0000 (---------------) + I kasamatsu - 0x00301186, // n0x0d8e c0x0000 (---------------) + I kawaue - 0x0021c988, // n0x0d8f c0x0000 
(---------------) + I kitagata - 0x0024c144, // n0x0d90 c0x0000 (---------------) + I mino - 0x0024c148, // n0x0d91 c0x0000 (---------------) + I minokamo - 0x00264706, // n0x0d92 c0x0000 (---------------) + I mitake - 0x0021ff08, // n0x0d93 c0x0000 (---------------) + I mizunami - 0x002a0586, // n0x0d94 c0x0000 (---------------) + I motosu - 0x0022e38b, // n0x0d95 c0x0000 (---------------) + I nakatsugawa - 0x00202ec5, // n0x0d96 c0x0000 (---------------) + I ogaki - 0x002c8c08, // n0x0d97 c0x0000 (---------------) + I sakahogi - 0x00219444, // n0x0d98 c0x0000 (---------------) + I seki - 0x00281f8a, // n0x0d99 c0x0000 (---------------) + I sekigahara - 0x0029b109, // n0x0d9a c0x0000 (---------------) + I shirakawa - 0x002dc1c6, // n0x0d9b c0x0000 (---------------) + I tajimi - 0x002c2008, // n0x0d9c c0x0000 (---------------) + I takayama - 0x002731c5, // n0x0d9d c0x0000 (---------------) + I tarui - 0x00222bc4, // n0x0d9e c0x0000 (---------------) + I toki - 0x00394c06, // n0x0d9f c0x0000 (---------------) + I tomika - 0x00292b08, // n0x0da0 c0x0000 (---------------) + I wanouchi - 0x00280908, // n0x0da1 c0x0000 (---------------) + I yamagata - 0x003445c6, // n0x0da2 c0x0000 (---------------) + I yaotsu - 0x0030cdc4, // n0x0da3 c0x0000 (---------------) + I yoro - 0x0021f3c6, // n0x0da4 c0x0000 (---------------) + I annaka - 0x002ffd07, // n0x0da5 c0x0000 (---------------) + I chiyoda - 0x00277947, // n0x0da6 c0x0000 (---------------) + I fujioka - 0x0026424f, // n0x0da7 c0x0000 (---------------) + I higashiagatsuma - 0x00207107, // n0x0da8 c0x0000 (---------------) + I isesaki - 0x002f87c7, // n0x0da9 c0x0000 (---------------) + I itakura - 0x002a7905, // n0x0daa c0x0000 (---------------) + I kanna - 0x002d8105, // n0x0dab c0x0000 (---------------) + I kanra - 0x0029f289, // n0x0dac c0x0000 (---------------) + I katashina - 0x00267386, // n0x0dad c0x0000 (---------------) + I kawaba - 0x0027f5c5, // n0x0dae c0x0000 (---------------) + I kiryu - 0x00282607, // 
n0x0daf c0x0000 (---------------) + I kusatsu - 0x002c85c8, // n0x0db0 c0x0000 (---------------) + I maebashi - 0x002bc205, // n0x0db1 c0x0000 (---------------) + I meiwa - 0x00298146, // n0x0db2 c0x0000 (---------------) + I midori - 0x00215448, // n0x0db3 c0x0000 (---------------) + I minakami - 0x0038d3ca, // n0x0db4 c0x0000 (---------------) + I naganohara - 0x00328c48, // n0x0db5 c0x0000 (---------------) + I nakanojo - 0x003a0907, // n0x0db6 c0x0000 (---------------) + I nanmoku - 0x0022b186, // n0x0db7 c0x0000 (---------------) + I numata - 0x0027db06, // n0x0db8 c0x0000 (---------------) + I oizumi - 0x0021c243, // n0x0db9 c0x0000 (---------------) + I ora - 0x002010c3, // n0x0dba c0x0000 (---------------) + I ota - 0x002c3109, // n0x0dbb c0x0000 (---------------) + I shibukawa - 0x00259e89, // n0x0dbc c0x0000 (---------------) + I shimonita - 0x0037e906, // n0x0dbd c0x0000 (---------------) + I shinto - 0x002b25c5, // n0x0dbe c0x0000 (---------------) + I showa - 0x002a0308, // n0x0dbf c0x0000 (---------------) + I takasaki - 0x002c2008, // n0x0dc0 c0x0000 (---------------) + I takayama - 0x002d8ac8, // n0x0dc1 c0x0000 (---------------) + I tamamura - 0x0032530b, // n0x0dc2 c0x0000 (---------------) + I tatebayashi - 0x0028ed87, // n0x0dc3 c0x0000 (---------------) + I tomioka - 0x002fec89, // n0x0dc4 c0x0000 (---------------) + I tsukiyono - 0x002644c8, // n0x0dc5 c0x0000 (---------------) + I tsumagoi - 0x003831c4, // n0x0dc6 c0x0000 (---------------) + I ueno - 0x002c91c8, // n0x0dc7 c0x0000 (---------------) + I yoshioka - 0x0028d589, // n0x0dc8 c0x0000 (---------------) + I asaminami - 0x002acc05, // n0x0dc9 c0x0000 (---------------) + I daiwa - 0x002536c7, // n0x0dca c0x0000 (---------------) + I etajima - 0x002bed45, // n0x0dcb c0x0000 (---------------) + I fuchu - 0x00280808, // n0x0dcc c0x0000 (---------------) + I fukuyama - 0x0028fb0b, // n0x0dcd c0x0000 (---------------) + I hatsukaichi - 0x00293650, // n0x0dce c0x0000 (---------------) + I 
higashihiroshima - 0x002a7e05, // n0x0dcf c0x0000 (---------------) + I hongo - 0x0021938c, // n0x0dd0 c0x0000 (---------------) + I jinsekikogen - 0x00225d05, // n0x0dd1 c0x0000 (---------------) + I kaita - 0x0027d243, // n0x0dd2 c0x0000 (---------------) + I kui - 0x0027e786, // n0x0dd3 c0x0000 (---------------) + I kumano - 0x002b8744, // n0x0dd4 c0x0000 (---------------) + I kure - 0x0039fec6, // n0x0dd5 c0x0000 (---------------) + I mihara - 0x0029be07, // n0x0dd6 c0x0000 (---------------) + I miyoshi - 0x002154c4, // n0x0dd7 c0x0000 (---------------) + I naka - 0x00230808, // n0x0dd8 c0x0000 (---------------) + I onomichi - 0x0031214d, // n0x0dd9 c0x0000 (---------------) + I osakikamijima - 0x002fdac5, // n0x0dda c0x0000 (---------------) + I otake - 0x00244a84, // n0x0ddb c0x0000 (---------------) + I saka - 0x00223244, // n0x0ddc c0x0000 (---------------) + I sera - 0x0027d5c9, // n0x0ddd c0x0000 (---------------) + I seranishi - 0x00267f88, // n0x0dde c0x0000 (---------------) + I shinichi - 0x00314f47, // n0x0ddf c0x0000 (---------------) + I shobara - 0x00264788, // n0x0de0 c0x0000 (---------------) + I takehara - 0x00281488, // n0x0de1 c0x0000 (---------------) + I abashiri - 0x002797c5, // n0x0de2 c0x0000 (---------------) + I abira - 0x0038ba47, // n0x0de3 c0x0000 (---------------) + I aibetsu - 0x00279747, // n0x0de4 c0x0000 (---------------) + I akabira - 0x0026fc07, // n0x0de5 c0x0000 (---------------) + I akkeshi - 0x002c1349, // n0x0de6 c0x0000 (---------------) + I asahikawa - 0x0023b609, // n0x0de7 c0x0000 (---------------) + I ashibetsu - 0x00244046, // n0x0de8 c0x0000 (---------------) + I ashoro - 0x002b6846, // n0x0de9 c0x0000 (---------------) + I assabu - 0x00264486, // n0x0dea c0x0000 (---------------) + I atsuma - 0x00265c05, // n0x0deb c0x0000 (---------------) + I bibai - 0x0024c784, // n0x0dec c0x0000 (---------------) + I biei - 0x00201a06, // n0x0ded c0x0000 (---------------) + I bifuka - 0x00201f86, // n0x0dee c0x0000 
(---------------) + I bihoro - 0x00279808, // n0x0def c0x0000 (---------------) + I biratori - 0x002aa3cb, // n0x0df0 c0x0000 (---------------) + I chippubetsu - 0x002b0007, // n0x0df1 c0x0000 (---------------) + I chitose - 0x0020aec4, // n0x0df2 c0x0000 (---------------) + I date - 0x00224106, // n0x0df3 c0x0000 (---------------) + I ebetsu - 0x00280287, // n0x0df4 c0x0000 (---------------) + I embetsu - 0x002f3d45, // n0x0df5 c0x0000 (---------------) + I eniwa - 0x00232d05, // n0x0df6 c0x0000 (---------------) + I erimo - 0x00200f84, // n0x0df7 c0x0000 (---------------) + I esan - 0x0023b586, // n0x0df8 c0x0000 (---------------) + I esashi - 0x00201a88, // n0x0df9 c0x0000 (---------------) + I fukagawa - 0x003101c9, // n0x0dfa c0x0000 (---------------) + I fukushima - 0x00249f46, // n0x0dfb c0x0000 (---------------) + I furano - 0x00286ac8, // n0x0dfc c0x0000 (---------------) + I furubira - 0x002ad506, // n0x0dfd c0x0000 (---------------) + I haboro - 0x00330f88, // n0x0dfe c0x0000 (---------------) + I hakodate - 0x0029ab0c, // n0x0dff c0x0000 (---------------) + I hamatonbetsu - 0x00279686, // n0x0e00 c0x0000 (---------------) + I hidaka - 0x0029524d, // n0x0e01 c0x0000 (---------------) + I higashikagura - 0x002956cb, // n0x0e02 c0x0000 (---------------) + I higashikawa - 0x002f7505, // n0x0e03 c0x0000 (---------------) + I hiroo - 0x00205347, // n0x0e04 c0x0000 (---------------) + I hokuryu - 0x00352486, // n0x0e05 c0x0000 (---------------) + I hokuto - 0x002e9b48, // n0x0e06 c0x0000 (---------------) + I honbetsu - 0x002440c9, // n0x0e07 c0x0000 (---------------) + I horokanai - 0x002c0408, // n0x0e08 c0x0000 (---------------) + I horonobe - 0x00202a45, // n0x0e09 c0x0000 (---------------) + I ikeda - 0x002f8fc7, // n0x0e0a c0x0000 (---------------) + I imakane - 0x0027efc8, // n0x0e0b c0x0000 (---------------) + I ishikari - 0x00306e09, // n0x0e0c c0x0000 (---------------) + I iwamizawa - 0x0023ab06, // n0x0e0d c0x0000 (---------------) + I iwanai - 
0x0035cdca, // n0x0e0e c0x0000 (---------------) + I kamifurano - 0x002e98c8, // n0x0e0f c0x0000 (---------------) + I kamikawa - 0x002c024b, // n0x0e10 c0x0000 (---------------) + I kamishihoro - 0x002867cc, // n0x0e11 c0x0000 (---------------) + I kamisunagawa - 0x0024c248, // n0x0e12 c0x0000 (---------------) + I kamoenai - 0x0027b786, // n0x0e13 c0x0000 (---------------) + I kayabe - 0x0038cb88, // n0x0e14 c0x0000 (---------------) + I kembuchi - 0x00207247, // n0x0e15 c0x0000 (---------------) + I kikonai - 0x0023b889, // n0x0e16 c0x0000 (---------------) + I kimobetsu - 0x0020340d, // n0x0e17 c0x0000 (---------------) + I kitahiroshima - 0x0029cf46, // n0x0e18 c0x0000 (---------------) + I kitami - 0x002aa0c8, // n0x0e19 c0x0000 (---------------) + I kiyosato - 0x003574c9, // n0x0e1a c0x0000 (---------------) + I koshimizu - 0x002b7208, // n0x0e1b c0x0000 (---------------) + I kunneppu - 0x00283588, // n0x0e1c c0x0000 (---------------) + I kuriyama - 0x002baccc, // n0x0e1d c0x0000 (---------------) + I kuromatsunai - 0x002bbac7, // n0x0e1e c0x0000 (---------------) + I kushiro - 0x002bcf07, // n0x0e1f c0x0000 (---------------) + I kutchan - 0x002c1185, // n0x0e20 c0x0000 (---------------) + I kyowa - 0x00248d07, // n0x0e21 c0x0000 (---------------) + I mashike - 0x002c8488, // n0x0e22 c0x0000 (---------------) + I matsumae - 0x00394c86, // n0x0e23 c0x0000 (---------------) + I mikasa - 0x00249dcc, // n0x0e24 c0x0000 (---------------) + I minamifurano - 0x002e59c8, // n0x0e25 c0x0000 (---------------) + I mombetsu - 0x002ca4c8, // n0x0e26 c0x0000 (---------------) + I moseushi - 0x002b0ec6, // n0x0e27 c0x0000 (---------------) + I mukawa - 0x00395c47, // n0x0e28 c0x0000 (---------------) + I muroran - 0x00244244, // n0x0e29 c0x0000 (---------------) + I naie - 0x002af708, // n0x0e2a c0x0000 (---------------) + I nakagawa - 0x00284e4c, // n0x0e2b c0x0000 (---------------) + I nakasatsunai - 0x002162cc, // n0x0e2c c0x0000 (---------------) + I nakatombetsu - 
0x00223585, // n0x0e2d c0x0000 (---------------) + I nanae - 0x00386e07, // n0x0e2e c0x0000 (---------------) + I nanporo - 0x0030cd46, // n0x0e2f c0x0000 (---------------) + I nayoro - 0x00395bc6, // n0x0e30 c0x0000 (---------------) + I nemuro - 0x00296048, // n0x0e31 c0x0000 (---------------) + I niikappu - 0x0037f784, // n0x0e32 c0x0000 (---------------) + I niki - 0x0021dd0b, // n0x0e33 c0x0000 (---------------) + I nishiokoppe - 0x0027194b, // n0x0e34 c0x0000 (---------------) + I noboribetsu - 0x0022b186, // n0x0e35 c0x0000 (---------------) + I numata - 0x00311fc7, // n0x0e36 c0x0000 (---------------) + I obihiro - 0x00310685, // n0x0e37 c0x0000 (---------------) + I obira - 0x0026c705, // n0x0e38 c0x0000 (---------------) + I oketo - 0x0021de46, // n0x0e39 c0x0000 (---------------) + I okoppe - 0x00273185, // n0x0e3a c0x0000 (---------------) + I otaru - 0x002c0cc5, // n0x0e3b c0x0000 (---------------) + I otobe - 0x002c1847, // n0x0e3c c0x0000 (---------------) + I otofuke - 0x00277309, // n0x0e3d c0x0000 (---------------) + I otoineppu - 0x002e8f44, // n0x0e3e c0x0000 (---------------) + I oumu - 0x00275f85, // n0x0e3f c0x0000 (---------------) + I ozora - 0x002d8e85, // n0x0e40 c0x0000 (---------------) + I pippu - 0x00283c08, // n0x0e41 c0x0000 (---------------) + I rankoshi - 0x00230645, // n0x0e42 c0x0000 (---------------) + I rebun - 0x002a7149, // n0x0e43 c0x0000 (---------------) + I rikubetsu - 0x002916c7, // n0x0e44 c0x0000 (---------------) + I rishiri - 0x002916cb, // n0x0e45 c0x0000 (---------------) + I rishirifuji - 0x0022a146, // n0x0e46 c0x0000 (---------------) + I saroma - 0x00225789, // n0x0e47 c0x0000 (---------------) + I sarufutsu - 0x0025b3c8, // n0x0e48 c0x0000 (---------------) + I shakotan - 0x0024f845, // n0x0e49 c0x0000 (---------------) + I shari - 0x0026fd08, // n0x0e4a c0x0000 (---------------) + I shibecha - 0x0023b648, // n0x0e4b c0x0000 (---------------) + I shibetsu - 0x00210487, // n0x0e4c c0x0000 (---------------) + I 
shikabe - 0x0027d9c7, // n0x0e4d c0x0000 (---------------) + I shikaoi - 0x00280c09, // n0x0e4e c0x0000 (---------------) + I shimamaki - 0x0021fe47, // n0x0e4f c0x0000 (---------------) + I shimizu - 0x00256b09, // n0x0e50 c0x0000 (---------------) + I shimokawa - 0x00285f8c, // n0x0e51 c0x0000 (---------------) + I shinshinotsu - 0x0037e908, // n0x0e52 c0x0000 (---------------) + I shintoku - 0x002a36c9, // n0x0e53 c0x0000 (---------------) + I shiranuka - 0x002a5087, // n0x0e54 c0x0000 (---------------) + I shiraoi - 0x00281549, // n0x0e55 c0x0000 (---------------) + I shiriuchi - 0x00216ac7, // n0x0e56 c0x0000 (---------------) + I sobetsu - 0x002868c8, // n0x0e57 c0x0000 (---------------) + I sunagawa - 0x00283f45, // n0x0e58 c0x0000 (---------------) + I taiki - 0x0030d646, // n0x0e59 c0x0000 (---------------) + I takasu - 0x002bb3c8, // n0x0e5a c0x0000 (---------------) + I takikawa - 0x002f7e48, // n0x0e5b c0x0000 (---------------) + I takinoue - 0x002c2e09, // n0x0e5c c0x0000 (---------------) + I teshikaga - 0x002c0d07, // n0x0e5d c0x0000 (---------------) + I tobetsu - 0x0026d085, // n0x0e5e c0x0000 (---------------) + I tohma - 0x00206a09, // n0x0e5f c0x0000 (---------------) + I tomakomai - 0x00215d86, // n0x0e60 c0x0000 (---------------) + I tomari - 0x0028f144, // n0x0e61 c0x0000 (---------------) + I toya - 0x0034ddc6, // n0x0e62 c0x0000 (---------------) + I toyako - 0x0025ff08, // n0x0e63 c0x0000 (---------------) + I toyotomi - 0x00265847, // n0x0e64 c0x0000 (---------------) + I toyoura - 0x002aa5c8, // n0x0e65 c0x0000 (---------------) + I tsubetsu - 0x0021a089, // n0x0e66 c0x0000 (---------------) + I tsukigata - 0x002b0c47, // n0x0e67 c0x0000 (---------------) + I urakawa - 0x0029cdc6, // n0x0e68 c0x0000 (---------------) + I urausu - 0x00205404, // n0x0e69 c0x0000 (---------------) + I uryu - 0x00205a09, // n0x0e6a c0x0000 (---------------) + I utashinai - 0x0038d008, // n0x0e6b c0x0000 (---------------) + I wakkanai - 0x002b0d87, // n0x0e6c 
c0x0000 (---------------) + I wassamu - 0x00329706, // n0x0e6d c0x0000 (---------------) + I yakumo - 0x00233346, // n0x0e6e c0x0000 (---------------) + I yoichi - 0x00204004, // n0x0e6f c0x0000 (---------------) + I aioi - 0x002a8206, // n0x0e70 c0x0000 (---------------) + I akashi - 0x00206ac3, // n0x0e71 c0x0000 (---------------) + I ako - 0x00312749, // n0x0e72 c0x0000 (---------------) + I amagasaki - 0x00202e86, // n0x0e73 c0x0000 (---------------) + I aogaki - 0x0029b485, // n0x0e74 c0x0000 (---------------) + I asago - 0x0029d2c6, // n0x0e75 c0x0000 (---------------) + I ashiya - 0x002a2d05, // n0x0e76 c0x0000 (---------------) + I awaji - 0x0027f448, // n0x0e77 c0x0000 (---------------) + I fukusaki - 0x0024bf87, // n0x0e78 c0x0000 (---------------) + I goshiki - 0x0020cec6, // n0x0e79 c0x0000 (---------------) + I harima - 0x00227cc6, // n0x0e7a c0x0000 (---------------) + I himeji - 0x00261808, // n0x0e7b c0x0000 (---------------) + I ichikawa - 0x0029f407, // n0x0e7c c0x0000 (---------------) + I inagawa - 0x0029cf85, // n0x0e7d c0x0000 (---------------) + I itami - 0x0029d7c8, // n0x0e7e c0x0000 (---------------) + I kakogawa - 0x0037e648, // n0x0e7f c0x0000 (---------------) + I kamigori - 0x002e98c8, // n0x0e80 c0x0000 (---------------) + I kamikawa - 0x00225bc5, // n0x0e81 c0x0000 (---------------) + I kasai - 0x0030d6c6, // n0x0e82 c0x0000 (---------------) + I kasuga - 0x002c07c9, // n0x0e83 c0x0000 (---------------) + I kawanishi - 0x0028f684, // n0x0e84 c0x0000 (---------------) + I miki - 0x0036968b, // n0x0e85 c0x0000 (---------------) + I minamiawaji - 0x0021af8b, // n0x0e86 c0x0000 (---------------) + I nishinomiya - 0x00225f49, // n0x0e87 c0x0000 (---------------) + I nishiwaki - 0x00209903, // n0x0e88 c0x0000 (---------------) + I ono - 0x002581c5, // n0x0e89 c0x0000 (---------------) + I sanda - 0x00202506, // n0x0e8a c0x0000 (---------------) + I sannan - 0x00226688, // n0x0e8b c0x0000 (---------------) + I sasayama - 0x0022acc4, // 
n0x0e8c c0x0000 (---------------) + I sayo - 0x00356786, // n0x0e8d c0x0000 (---------------) + I shingu - 0x002cd149, // n0x0e8e c0x0000 (---------------) + I shinonsen - 0x002ac6c5, // n0x0e8f c0x0000 (---------------) + I shiso - 0x002c1786, // n0x0e90 c0x0000 (---------------) + I sumoto - 0x0023e346, // n0x0e91 c0x0000 (---------------) + I taishi - 0x00214a44, // n0x0e92 c0x0000 (---------------) + I taka - 0x00295c8a, // n0x0e93 c0x0000 (---------------) + I takarazuka - 0x0029b3c8, // n0x0e94 c0x0000 (---------------) + I takasago - 0x002f7e46, // n0x0e95 c0x0000 (---------------) + I takino - 0x002d3785, // n0x0e96 c0x0000 (---------------) + I tamba - 0x0020bb07, // n0x0e97 c0x0000 (---------------) + I tatsuno - 0x0024db07, // n0x0e98 c0x0000 (---------------) + I toyooka - 0x00351c44, // n0x0e99 c0x0000 (---------------) + I yabu - 0x0021b1c7, // n0x0e9a c0x0000 (---------------) + I yashiro - 0x00243804, // n0x0e9b c0x0000 (---------------) + I yoka - 0x00243806, // n0x0e9c c0x0000 (---------------) + I yokawa - 0x00215583, // n0x0e9d c0x0000 (---------------) + I ami - 0x002c1345, // n0x0e9e c0x0000 (---------------) + I asahi - 0x00345505, // n0x0e9f c0x0000 (---------------) + I bando - 0x0023e6c8, // n0x0ea0 c0x0000 (---------------) + I chikusei - 0x00214085, // n0x0ea1 c0x0000 (---------------) + I daigo - 0x002793c9, // n0x0ea2 c0x0000 (---------------) + I fujishiro - 0x002a2587, // n0x0ea3 c0x0000 (---------------) + I hitachi - 0x002af54b, // n0x0ea4 c0x0000 (---------------) + I hitachinaka - 0x002a258c, // n0x0ea5 c0x0000 (---------------) + I hitachiomiya - 0x002a320a, // n0x0ea6 c0x0000 (---------------) + I hitachiota - 0x002c4687, // n0x0ea7 c0x0000 (---------------) + I ibaraki - 0x00201283, // n0x0ea8 c0x0000 (---------------) + I ina - 0x00349888, // n0x0ea9 c0x0000 (---------------) + I inashiki - 0x00225d85, // n0x0eaa c0x0000 (---------------) + I itako - 0x002bc285, // n0x0eab c0x0000 (---------------) + I iwama - 0x00336e04, // 
n0x0eac c0x0000 (---------------) + I joso - 0x002867c6, // n0x0ead c0x0000 (---------------) + I kamisu - 0x0023ccc6, // n0x0eae c0x0000 (---------------) + I kasama - 0x002a8247, // n0x0eaf c0x0000 (---------------) + I kashima - 0x00205f8b, // n0x0eb0 c0x0000 (---------------) + I kasumigaura - 0x002171c4, // n0x0eb1 c0x0000 (---------------) + I koga - 0x00374b44, // n0x0eb2 c0x0000 (---------------) + I miho - 0x0026a944, // n0x0eb3 c0x0000 (---------------) + I mito - 0x002c8946, // n0x0eb4 c0x0000 (---------------) + I moriya - 0x002154c4, // n0x0eb5 c0x0000 (---------------) + I naka - 0x002c1e88, // n0x0eb6 c0x0000 (---------------) + I namegata - 0x00336c05, // n0x0eb7 c0x0000 (---------------) + I oarai - 0x00244bc5, // n0x0eb8 c0x0000 (---------------) + I ogawa - 0x002d8a07, // n0x0eb9 c0x0000 (---------------) + I omitama - 0x00205449, // n0x0eba c0x0000 (---------------) + I ryugasaki - 0x0034db85, // n0x0ebb c0x0000 (---------------) + I sakai - 0x0036fb0a, // n0x0ebc c0x0000 (---------------) + I sakuragawa - 0x002c8709, // n0x0ebd c0x0000 (---------------) + I shimodate - 0x0026534a, // n0x0ebe c0x0000 (---------------) + I shimotsuma - 0x00395789, // n0x0ebf c0x0000 (---------------) + I shirosato - 0x00330344, // n0x0ec0 c0x0000 (---------------) + I sowa - 0x002d0785, // n0x0ec1 c0x0000 (---------------) + I suifu - 0x002a9348, // n0x0ec2 c0x0000 (---------------) + I takahagi - 0x002f68cb, // n0x0ec3 c0x0000 (---------------) + I tamatsukuri - 0x002fa645, // n0x0ec4 c0x0000 (---------------) + I tokai - 0x00285986, // n0x0ec5 c0x0000 (---------------) + I tomobe - 0x00222004, // n0x0ec6 c0x0000 (---------------) + I tone - 0x00279906, // n0x0ec7 c0x0000 (---------------) + I toride - 0x002b0ac9, // n0x0ec8 c0x0000 (---------------) + I tsuchiura - 0x002241c7, // n0x0ec9 c0x0000 (---------------) + I tsukuba - 0x00315288, // n0x0eca c0x0000 (---------------) + I uchihara - 0x00244906, // n0x0ecb c0x0000 (---------------) + I ushiku - 
0x002ffc87, // n0x0ecc c0x0000 (---------------) + I yachiyo - 0x00280908, // n0x0ecd c0x0000 (---------------) + I yamagata - 0x00382546, // n0x0ece c0x0000 (---------------) + I yawara - 0x00248704, // n0x0ecf c0x0000 (---------------) + I yuki - 0x0035b547, // n0x0ed0 c0x0000 (---------------) + I anamizu - 0x003462c5, // n0x0ed1 c0x0000 (---------------) + I hakui - 0x0034bfc7, // n0x0ed2 c0x0000 (---------------) + I hakusan - 0x00201b04, // n0x0ed3 c0x0000 (---------------) + I kaga - 0x00352406, // n0x0ed4 c0x0000 (---------------) + I kahoku - 0x00218f48, // n0x0ed5 c0x0000 (---------------) + I kanazawa - 0x00295888, // n0x0ed6 c0x0000 (---------------) + I kawakita - 0x002f1d87, // n0x0ed7 c0x0000 (---------------) + I komatsu - 0x00313848, // n0x0ed8 c0x0000 (---------------) + I nakanoto - 0x002b6ac5, // n0x0ed9 c0x0000 (---------------) + I nanao - 0x00209744, // n0x0eda c0x0000 (---------------) + I nomi - 0x00261708, // n0x0edb c0x0000 (---------------) + I nonoichi - 0x00259484, // n0x0edc c0x0000 (---------------) + I noto - 0x00210485, // n0x0edd c0x0000 (---------------) + I shika - 0x002eb884, // n0x0ede c0x0000 (---------------) + I suzu - 0x00268f47, // n0x0edf c0x0000 (---------------) + I tsubata - 0x00289d07, // n0x0ee0 c0x0000 (---------------) + I tsurugi - 0x00281688, // n0x0ee1 c0x0000 (---------------) + I uchinada - 0x002a2d46, // n0x0ee2 c0x0000 (---------------) + I wajima - 0x00214005, // n0x0ee3 c0x0000 (---------------) + I fudai - 0x002791c8, // n0x0ee4 c0x0000 (---------------) + I fujisawa - 0x00328e48, // n0x0ee5 c0x0000 (---------------) + I hanamaki - 0x0029ec89, // n0x0ee6 c0x0000 (---------------) + I hiraizumi - 0x0021b286, // n0x0ee7 c0x0000 (---------------) + I hirono - 0x00237488, // n0x0ee8 c0x0000 (---------------) + I ichinohe - 0x00281e0a, // n0x0ee9 c0x0000 (---------------) + I ichinoseki - 0x002f3dc8, // n0x0eea c0x0000 (---------------) + I iwaizumi - 0x002d9605, // n0x0eeb c0x0000 (---------------) + I iwate 
- 0x002249c6, // n0x0eec c0x0000 (---------------) + I joboji - 0x0028e748, // n0x0eed c0x0000 (---------------) + I kamaishi - 0x002f908a, // n0x0eee c0x0000 (---------------) + I kanegasaki - 0x0039a0c7, // n0x0eef c0x0000 (---------------) + I karumai - 0x002876c5, // n0x0ef0 c0x0000 (---------------) + I kawai - 0x00293e48, // n0x0ef1 c0x0000 (---------------) + I kitakami - 0x0036e5c4, // n0x0ef2 c0x0000 (---------------) + I kuji - 0x002ad686, // n0x0ef3 c0x0000 (---------------) + I kunohe - 0x002bd688, // n0x0ef4 c0x0000 (---------------) + I kuzumaki - 0x002097c6, // n0x0ef5 c0x0000 (---------------) + I miyako - 0x002e3648, // n0x0ef6 c0x0000 (---------------) + I mizusawa - 0x00219907, // n0x0ef7 c0x0000 (---------------) + I morioka - 0x00204a06, // n0x0ef8 c0x0000 (---------------) + I ninohe - 0x0037ac44, // n0x0ef9 c0x0000 (---------------) + I noda - 0x002db2c7, // n0x0efa c0x0000 (---------------) + I ofunato - 0x002f4cc4, // n0x0efb c0x0000 (---------------) + I oshu - 0x002b0a87, // n0x0efc c0x0000 (---------------) + I otsuchi - 0x0023204d, // n0x0efd c0x0000 (---------------) + I rikuzentakata - 0x00225fc5, // n0x0efe c0x0000 (---------------) + I shiwa - 0x002bdb0b, // n0x0eff c0x0000 (---------------) + I shizukuishi - 0x002a0686, // n0x0f00 c0x0000 (---------------) + I sumita - 0x00251448, // n0x0f01 c0x0000 (---------------) + I tanohata - 0x00385684, // n0x0f02 c0x0000 (---------------) + I tono - 0x00275d46, // n0x0f03 c0x0000 (---------------) + I yahaba - 0x0027c046, // n0x0f04 c0x0000 (---------------) + I yamada - 0x0038cec7, // n0x0f05 c0x0000 (---------------) + I ayagawa - 0x00294f0d, // n0x0f06 c0x0000 (---------------) + I higashikagawa - 0x00254007, // n0x0f07 c0x0000 (---------------) + I kanonji - 0x0033c8c8, // n0x0f08 c0x0000 (---------------) + I kotohira - 0x0035c945, // n0x0f09 c0x0000 (---------------) + I manno - 0x00296f08, // n0x0f0a c0x0000 (---------------) + I marugame - 0x002c2746, // n0x0f0b c0x0000 
(---------------) + I mitoyo - 0x002b6b48, // n0x0f0c c0x0000 (---------------) + I naoshima - 0x0020f946, // n0x0f0d c0x0000 (---------------) + I sanuki - 0x0030fbc7, // n0x0f0e c0x0000 (---------------) + I tadotsu - 0x0021cb09, // n0x0f0f c0x0000 (---------------) + I takamatsu - 0x00385687, // n0x0f10 c0x0000 (---------------) + I tonosho - 0x00288ec8, // n0x0f11 c0x0000 (---------------) + I uchinomi - 0x00272805, // n0x0f12 c0x0000 (---------------) + I utazu - 0x0021bd48, // n0x0f13 c0x0000 (---------------) + I zentsuji - 0x00329b85, // n0x0f14 c0x0000 (---------------) + I akune - 0x00240c05, // n0x0f15 c0x0000 (---------------) + I amami - 0x002e6e05, // n0x0f16 c0x0000 (---------------) + I hioki - 0x00223803, // n0x0f17 c0x0000 (---------------) + I isa - 0x00282984, // n0x0f18 c0x0000 (---------------) + I isen - 0x0027db45, // n0x0f19 c0x0000 (---------------) + I izumi - 0x0026bcc9, // n0x0f1a c0x0000 (---------------) + I kagoshima - 0x002fe346, // n0x0f1b c0x0000 (---------------) + I kanoya - 0x002d5d48, // n0x0f1c c0x0000 (---------------) + I kawanabe - 0x002f9285, // n0x0f1d c0x0000 (---------------) + I kinko - 0x0032ec47, // n0x0f1e c0x0000 (---------------) + I kouyama - 0x0030feca, // n0x0f1f c0x0000 (---------------) + I makurazaki - 0x002c16c9, // n0x0f20 c0x0000 (---------------) + I matsumoto - 0x002b3b4a, // n0x0f21 c0x0000 (---------------) + I minamitane - 0x002ce1c8, // n0x0f22 c0x0000 (---------------) + I nakatane - 0x0021c3cc, // n0x0f23 c0x0000 (---------------) + I nishinoomote - 0x0028268d, // n0x0f24 c0x0000 (---------------) + I satsumasendai - 0x0035ad83, // n0x0f25 c0x0000 (---------------) + I soo - 0x002e3548, // n0x0f26 c0x0000 (---------------) + I tarumizu - 0x002021c5, // n0x0f27 c0x0000 (---------------) + I yusui - 0x0038d186, // n0x0f28 c0x0000 (---------------) + I aikawa - 0x00374746, // n0x0f29 c0x0000 (---------------) + I atsugi - 0x00245ec5, // n0x0f2a c0x0000 (---------------) + I ayase - 0x0028c449, // 
n0x0f2b c0x0000 (---------------) + I chigasaki - 0x0030eec5, // n0x0f2c c0x0000 (---------------) + I ebina - 0x002791c8, // n0x0f2d c0x0000 (---------------) + I fujisawa - 0x00259386, // n0x0f2e c0x0000 (---------------) + I hadano - 0x0033bc46, // n0x0f2f c0x0000 (---------------) + I hakone - 0x0029fe09, // n0x0f30 c0x0000 (---------------) + I hiratsuka - 0x00381d47, // n0x0f31 c0x0000 (---------------) + I isehara - 0x002f1a86, // n0x0f32 c0x0000 (---------------) + I kaisei - 0x0030fe48, // n0x0f33 c0x0000 (---------------) + I kamakura - 0x003a4308, // n0x0f34 c0x0000 (---------------) + I kiyokawa - 0x00357b47, // n0x0f35 c0x0000 (---------------) + I matsuda - 0x0022008e, // n0x0f36 c0x0000 (---------------) + I minamiashigara - 0x002c2985, // n0x0f37 c0x0000 (---------------) + I miura - 0x00306d05, // n0x0f38 c0x0000 (---------------) + I nakai - 0x002096c8, // n0x0f39 c0x0000 (---------------) + I ninomiya - 0x00321547, // n0x0f3a c0x0000 (---------------) + I odawara - 0x00204082, // n0x0f3b c0x0000 (---------------) + I oi - 0x002ba844, // n0x0f3c c0x0000 (---------------) + I oiso - 0x0039fdca, // n0x0f3d c0x0000 (---------------) + I sagamihara - 0x002b0e48, // n0x0f3e c0x0000 (---------------) + I samukawa - 0x00280386, // n0x0f3f c0x0000 (---------------) + I tsukui - 0x00297608, // n0x0f40 c0x0000 (---------------) + I yamakita - 0x0028f806, // n0x0f41 c0x0000 (---------------) + I yamato - 0x0030eb08, // n0x0f42 c0x0000 (---------------) + I yokosuka - 0x002ac908, // n0x0f43 c0x0000 (---------------) + I yugawara - 0x00240bc4, // n0x0f44 c0x0000 (---------------) + I zama - 0x00332045, // n0x0f45 c0x0000 (---------------) + I zushi - 0x00685284, // n0x0f46 c0x0001 (---------------) ! I city - 0x00685284, // n0x0f47 c0x0001 (---------------) ! I city - 0x00685284, // n0x0f48 c0x0001 (---------------) ! 
I city - 0x00202f43, // n0x0f49 c0x0000 (---------------) + I aki - 0x002331c6, // n0x0f4a c0x0000 (---------------) + I geisei - 0x00279686, // n0x0f4b c0x0000 (---------------) + I hidaka - 0x0029c60c, // n0x0f4c c0x0000 (---------------) + I higashitsuno - 0x00204a43, // n0x0f4d c0x0000 (---------------) + I ino - 0x002be6c6, // n0x0f4e c0x0000 (---------------) + I kagami - 0x00215544, // n0x0f4f c0x0000 (---------------) + I kami - 0x002c47c8, // n0x0f50 c0x0000 (---------------) + I kitagawa - 0x002ccf85, // n0x0f51 c0x0000 (---------------) + I kochi - 0x0039fec6, // n0x0f52 c0x0000 (---------------) + I mihara - 0x002b6008, // n0x0f53 c0x0000 (---------------) + I motoyama - 0x002cf346, // n0x0f54 c0x0000 (---------------) + I muroto - 0x0020ce46, // n0x0f55 c0x0000 (---------------) + I nahari - 0x0035c5c8, // n0x0f56 c0x0000 (---------------) + I nakamura - 0x002a0907, // n0x0f57 c0x0000 (---------------) + I nankoku - 0x0021fc09, // n0x0f58 c0x0000 (---------------) + I nishitosa - 0x002b9a0a, // n0x0f59 c0x0000 (---------------) + I niyodogawa - 0x00252e04, // n0x0f5a c0x0000 (---------------) + I ochi - 0x00243845, // n0x0f5b c0x0000 (---------------) + I okawa - 0x0025a445, // n0x0f5c c0x0000 (---------------) + I otoyo - 0x0021c706, // n0x0f5d c0x0000 (---------------) + I otsuki - 0x002530c6, // n0x0f5e c0x0000 (---------------) + I sakawa - 0x002a61c6, // n0x0f5f c0x0000 (---------------) + I sukumo - 0x002eac06, // n0x0f60 c0x0000 (---------------) + I susaki - 0x0021fd44, // n0x0f61 c0x0000 (---------------) + I tosa - 0x0021fd4b, // n0x0f62 c0x0000 (---------------) + I tosashimizu - 0x00243784, // n0x0f63 c0x0000 (---------------) + I toyo - 0x0020bb85, // n0x0f64 c0x0000 (---------------) + I tsuno - 0x002ab6c5, // n0x0f65 c0x0000 (---------------) + I umaji - 0x0027dec6, // n0x0f66 c0x0000 (---------------) + I yasuda - 0x0039dfc8, // n0x0f67 c0x0000 (---------------) + I yusuhara - 0x00282547, // n0x0f68 c0x0000 (---------------) + I amakusa 
- 0x00315044, // n0x0f69 c0x0000 (---------------) + I arao - 0x00250203, // n0x0f6a c0x0000 (---------------) + I aso - 0x0036e0c5, // n0x0f6b c0x0000 (---------------) + I choyo - 0x00247b87, // n0x0f6c c0x0000 (---------------) + I gyokuto - 0x002a3e49, // n0x0f6d c0x0000 (---------------) + I hitoyoshi - 0x0028244b, // n0x0f6e c0x0000 (---------------) + I kamiamakusa - 0x002a8247, // n0x0f6f c0x0000 (---------------) + I kashima - 0x0023e5c7, // n0x0f70 c0x0000 (---------------) + I kikuchi - 0x002de3c4, // n0x0f71 c0x0000 (---------------) + I kosa - 0x002b5f08, // n0x0f72 c0x0000 (---------------) + I kumamoto - 0x002f2007, // n0x0f73 c0x0000 (---------------) + I mashiki - 0x002a4086, // n0x0f74 c0x0000 (---------------) + I mifune - 0x00248488, // n0x0f75 c0x0000 (---------------) + I minamata - 0x002a63cb, // n0x0f76 c0x0000 (---------------) + I minamioguni - 0x0038a086, // n0x0f77 c0x0000 (---------------) + I nagasu - 0x0020ef09, // n0x0f78 c0x0000 (---------------) + I nishihara - 0x002a6545, // n0x0f79 c0x0000 (---------------) + I oguni - 0x003006c3, // n0x0f7a c0x0000 (---------------) + I ozu - 0x002c1786, // n0x0f7b c0x0000 (---------------) + I sumoto - 0x00219808, // n0x0f7c c0x0000 (---------------) + I takamori - 0x0020fa03, // n0x0f7d c0x0000 (---------------) + I uki - 0x00247c83, // n0x0f7e c0x0000 (---------------) + I uto - 0x00223e86, // n0x0f7f c0x0000 (---------------) + I yamaga - 0x0028f806, // n0x0f80 c0x0000 (---------------) + I yamato - 0x0037e00a, // n0x0f81 c0x0000 (---------------) + I yatsushiro - 0x0027b7c5, // n0x0f82 c0x0000 (---------------) + I ayabe - 0x0027be8b, // n0x0f83 c0x0000 (---------------) + I fukuchiyama - 0x0029d20b, // n0x0f84 c0x0000 (---------------) + I higashiyama - 0x0022f9c3, // n0x0f85 c0x0000 (---------------) + I ide - 0x0021d003, // n0x0f86 c0x0000 (---------------) + I ine - 0x002aea84, // n0x0f87 c0x0000 (---------------) + I joyo - 0x00219c07, // n0x0f88 c0x0000 (---------------) + I kameoka - 
0x00219884, // n0x0f89 c0x0000 (---------------) + I kamo - 0x00203404, // n0x0f8a c0x0000 (---------------) + I kita - 0x002f5044, // n0x0f8b c0x0000 (---------------) + I kizu - 0x002f40c8, // n0x0f8c c0x0000 (---------------) + I kumiyama - 0x002d36c8, // n0x0f8d c0x0000 (---------------) + I kyotamba - 0x0030ac09, // n0x0f8e c0x0000 (---------------) + I kyotanabe - 0x00344b08, // n0x0f8f c0x0000 (---------------) + I kyotango - 0x002d4447, // n0x0f90 c0x0000 (---------------) + I maizuru - 0x00220086, // n0x0f91 c0x0000 (---------------) + I minami - 0x002d598f, // n0x0f92 c0x0000 (---------------) + I minamiyamashiro - 0x002c2ac6, // n0x0f93 c0x0000 (---------------) + I miyazu - 0x002ccf04, // n0x0f94 c0x0000 (---------------) + I muko - 0x002d350a, // n0x0f95 c0x0000 (---------------) + I nagaokakyo - 0x00247a87, // n0x0f96 c0x0000 (---------------) + I nakagyo - 0x00202746, // n0x0f97 c0x0000 (---------------) + I nantan - 0x0028f189, // n0x0f98 c0x0000 (---------------) + I oyamazaki - 0x0030ab85, // n0x0f99 c0x0000 (---------------) + I sakyo - 0x0023e805, // n0x0f9a c0x0000 (---------------) + I seika - 0x0030acc6, // n0x0f9b c0x0000 (---------------) + I tanabe - 0x0021be83, // n0x0f9c c0x0000 (---------------) + I uji - 0x0036e609, // n0x0f9d c0x0000 (---------------) + I ujitawara - 0x00218e46, // n0x0f9e c0x0000 (---------------) + I wazuka - 0x00219e49, // n0x0f9f c0x0000 (---------------) + I yamashina - 0x0038e7c6, // n0x0fa0 c0x0000 (---------------) + I yawata - 0x002c1345, // n0x0fa1 c0x0000 (---------------) + I asahi - 0x00223985, // n0x0fa2 c0x0000 (---------------) + I inabe - 0x00207103, // n0x0fa3 c0x0000 (---------------) + I ise - 0x00219d48, // n0x0fa4 c0x0000 (---------------) + I kameyama - 0x00380887, // n0x0fa5 c0x0000 (---------------) + I kawagoe - 0x002f3a84, // n0x0fa6 c0x0000 (---------------) + I kiho - 0x0021c808, // n0x0fa7 c0x0000 (---------------) + I kisosaki - 0x002a39c4, // n0x0fa8 c0x0000 (---------------) + I kiwa - 
0x002bb946, // n0x0fa9 c0x0000 (---------------) + I komono - 0x0027e786, // n0x0faa c0x0000 (---------------) + I kumano - 0x00243046, // n0x0fab c0x0000 (---------------) + I kuwana - 0x002c8ac9, // n0x0fac c0x0000 (---------------) + I matsusaka - 0x002bc205, // n0x0fad c0x0000 (---------------) + I meiwa - 0x002a3c46, // n0x0fae c0x0000 (---------------) + I mihama - 0x00259789, // n0x0faf c0x0000 (---------------) + I minamiise - 0x002c1c06, // n0x0fb0 c0x0000 (---------------) + I misugi - 0x002d5a86, // n0x0fb1 c0x0000 (---------------) + I miyama - 0x0037afc6, // n0x0fb2 c0x0000 (---------------) + I nabari - 0x00203605, // n0x0fb3 c0x0000 (---------------) + I shima - 0x002eb886, // n0x0fb4 c0x0000 (---------------) + I suzuka - 0x0030fbc4, // n0x0fb5 c0x0000 (---------------) + I tado - 0x00283f45, // n0x0fb6 c0x0000 (---------------) + I taiki - 0x002bb3c4, // n0x0fb7 c0x0000 (---------------) + I taki - 0x00303686, // n0x0fb8 c0x0000 (---------------) + I tamaki - 0x00395944, // n0x0fb9 c0x0000 (---------------) + I toba - 0x0020bb83, // n0x0fba c0x0000 (---------------) + I tsu - 0x00287385, // n0x0fbb c0x0000 (---------------) + I udono - 0x0023b348, // n0x0fbc c0x0000 (---------------) + I ureshino - 0x0022bd47, // n0x0fbd c0x0000 (---------------) + I watarai - 0x002b4509, // n0x0fbe c0x0000 (---------------) + I yokkaichi - 0x002875c8, // n0x0fbf c0x0000 (---------------) + I furukawa - 0x00296b51, // n0x0fc0 c0x0000 (---------------) + I higashimatsushima - 0x0023e3ca, // n0x0fc1 c0x0000 (---------------) + I ishinomaki - 0x0022b0c7, // n0x0fc2 c0x0000 (---------------) + I iwanuma - 0x0038a606, // n0x0fc3 c0x0000 (---------------) + I kakuda - 0x00215544, // n0x0fc4 c0x0000 (---------------) + I kami - 0x002bb4c8, // n0x0fc5 c0x0000 (---------------) + I kawasaki - 0x00293109, // n0x0fc6 c0x0000 (---------------) + I kesennuma - 0x002a8388, // n0x0fc7 c0x0000 (---------------) + I marumori - 0x00296d0a, // n0x0fc8 c0x0000 (---------------) + I 
matsushima - 0x002a6f0d, // n0x0fc9 c0x0000 (---------------) + I minamisanriku - 0x0022b406, // n0x0fca c0x0000 (---------------) + I misato - 0x0035c6c6, // n0x0fcb c0x0000 (---------------) + I murata - 0x002db386, // n0x0fcc c0x0000 (---------------) + I natori - 0x002f6f47, // n0x0fcd c0x0000 (---------------) + I ogawara - 0x0029f145, // n0x0fce c0x0000 (---------------) + I ohira - 0x00351a07, // n0x0fcf c0x0000 (---------------) + I onagawa - 0x0021c8c5, // n0x0fd0 c0x0000 (---------------) + I osaki - 0x00291804, // n0x0fd1 c0x0000 (---------------) + I rifu - 0x002a8a46, // n0x0fd2 c0x0000 (---------------) + I semine - 0x0030d507, // n0x0fd3 c0x0000 (---------------) + I shibata - 0x0036e30d, // n0x0fd4 c0x0000 (---------------) + I shichikashuku - 0x0028e687, // n0x0fd5 c0x0000 (---------------) + I shikama - 0x0026ac08, // n0x0fd6 c0x0000 (---------------) + I shiogama - 0x002794c9, // n0x0fd7 c0x0000 (---------------) + I shiroishi - 0x002248c6, // n0x0fd8 c0x0000 (---------------) + I tagajo - 0x0023aa85, // n0x0fd9 c0x0000 (---------------) + I taiwa - 0x00210004, // n0x0fda c0x0000 (---------------) + I tome - 0x00260006, // n0x0fdb c0x0000 (---------------) + I tomiya - 0x00351b46, // n0x0fdc c0x0000 (---------------) + I wakuya - 0x002b0fc6, // n0x0fdd c0x0000 (---------------) + I watari - 0x0029a648, // n0x0fde c0x0000 (---------------) + I yamamoto - 0x00211103, // n0x0fdf c0x0000 (---------------) + I zao - 0x00209c43, // n0x0fe0 c0x0000 (---------------) + I aya - 0x00329905, // n0x0fe1 c0x0000 (---------------) + I ebino - 0x0023d146, // n0x0fe2 c0x0000 (---------------) + I gokase - 0x002ac8c5, // n0x0fe3 c0x0000 (---------------) + I hyuga - 0x00244b08, // n0x0fe4 c0x0000 (---------------) + I kadogawa - 0x0029bfca, // n0x0fe5 c0x0000 (---------------) + I kawaminami - 0x002c3bc4, // n0x0fe6 c0x0000 (---------------) + I kijo - 0x002c47c8, // n0x0fe7 c0x0000 (---------------) + I kitagawa - 0x00295b08, // n0x0fe8 c0x0000 (---------------) 
+ I kitakata - 0x0027dd07, // n0x0fe9 c0x0000 (---------------) + I kitaura - 0x002f9349, // n0x0fea c0x0000 (---------------) + I kobayashi - 0x002b5c08, // n0x0feb c0x0000 (---------------) + I kunitomi - 0x00310247, // n0x0fec c0x0000 (---------------) + I kushima - 0x00294746, // n0x0fed c0x0000 (---------------) + I mimata - 0x002097ca, // n0x0fee c0x0000 (---------------) + I miyakonojo - 0x00260088, // n0x0fef c0x0000 (---------------) + I miyazaki - 0x002c0089, // n0x0ff0 c0x0000 (---------------) + I morotsuka - 0x00268048, // n0x0ff1 c0x0000 (---------------) + I nichinan - 0x0021aa49, // n0x0ff2 c0x0000 (---------------) + I nishimera - 0x002c0507, // n0x0ff3 c0x0000 (---------------) + I nobeoka - 0x003449c5, // n0x0ff4 c0x0000 (---------------) + I saito - 0x002a17c6, // n0x0ff5 c0x0000 (---------------) + I shiiba - 0x00394b08, // n0x0ff6 c0x0000 (---------------) + I shintomi - 0x002515c8, // n0x0ff7 c0x0000 (---------------) + I takaharu - 0x0021a248, // n0x0ff8 c0x0000 (---------------) + I takanabe - 0x00214a48, // n0x0ff9 c0x0000 (---------------) + I takazaki - 0x0020bb85, // n0x0ffa c0x0000 (---------------) + I tsuno - 0x00200344, // n0x0ffb c0x0000 (---------------) + I achi - 0x00380588, // n0x0ffc c0x0000 (---------------) + I agematsu - 0x00202844, // n0x0ffd c0x0000 (---------------) + I anan - 0x00395584, // n0x0ffe c0x0000 (---------------) + I aoki - 0x002c1345, // n0x0fff c0x0000 (---------------) + I asahi - 0x00290747, // n0x1000 c0x0000 (---------------) + I azumino - 0x00205209, // n0x1001 c0x0000 (---------------) + I chikuhoku - 0x0038ccc7, // n0x1002 c0x0000 (---------------) + I chikuma - 0x0020c285, // n0x1003 c0x0000 (---------------) + I chino - 0x00276f06, // n0x1004 c0x0000 (---------------) + I fujimi - 0x0033f506, // n0x1005 c0x0000 (---------------) + I hakuba - 0x0020f044, // n0x1006 c0x0000 (---------------) + I hara - 0x002a0146, // n0x1007 c0x0000 (---------------) + I hiraya - 0x00213e44, // n0x1008 c0x0000 
(---------------) + I iida - 0x002520c6, // n0x1009 c0x0000 (---------------) + I iijima - 0x0037f846, // n0x100a c0x0000 (---------------) + I iiyama - 0x00212e86, // n0x100b c0x0000 (---------------) + I iizuna - 0x00202a45, // n0x100c c0x0000 (---------------) + I ikeda - 0x002449c7, // n0x100d c0x0000 (---------------) + I ikusaka - 0x00201283, // n0x100e c0x0000 (---------------) + I ina - 0x0025b5c9, // n0x100f c0x0000 (---------------) + I karuizawa - 0x002f1788, // n0x1010 c0x0000 (---------------) + I kawakami - 0x0021c804, // n0x1011 c0x0000 (---------------) + I kiso - 0x003100cd, // n0x1012 c0x0000 (---------------) + I kisofukushima - 0x00295988, // n0x1013 c0x0000 (---------------) + I kitaaiki - 0x0028e188, // n0x1014 c0x0000 (---------------) + I komagane - 0x002c0006, // n0x1015 c0x0000 (---------------) + I komoro - 0x0021cc09, // n0x1016 c0x0000 (---------------) + I matsukawa - 0x002c16c9, // n0x1017 c0x0000 (---------------) + I matsumoto - 0x002b07c5, // n0x1018 c0x0000 (---------------) + I miasa - 0x0029c0ca, // n0x1019 c0x0000 (---------------) + I minamiaiki - 0x0027f90a, // n0x101a c0x0000 (---------------) + I minamimaki - 0x00289ecc, // n0x101b c0x0000 (---------------) + I minamiminowa - 0x0028a046, // n0x101c c0x0000 (---------------) + I minowa - 0x002777c6, // n0x101d c0x0000 (---------------) + I miyada - 0x002c3346, // n0x101e c0x0000 (---------------) + I miyota - 0x0035d709, // n0x101f c0x0000 (---------------) + I mochizuki - 0x0038d3c6, // n0x1020 c0x0000 (---------------) + I nagano - 0x00286946, // n0x1021 c0x0000 (---------------) + I nagawa - 0x0030ef86, // n0x1022 c0x0000 (---------------) + I nagiso - 0x002af708, // n0x1023 c0x0000 (---------------) + I nakagawa - 0x00313846, // n0x1024 c0x0000 (---------------) + I nakano - 0x002c8e0b, // n0x1025 c0x0000 (---------------) + I nozawaonsen - 0x002908c5, // n0x1026 c0x0000 (---------------) + I obuse - 0x00244bc5, // n0x1027 c0x0000 (---------------) + I ogawa - 
0x00277a45, // n0x1028 c0x0000 (---------------) + I okaya - 0x00201386, // n0x1029 c0x0000 (---------------) + I omachi - 0x00209783, // n0x102a c0x0000 (---------------) + I omi - 0x00242fc6, // n0x102b c0x0000 (---------------) + I ookuwa - 0x0028e607, // n0x102c c0x0000 (---------------) + I ooshika - 0x002bb385, // n0x102d c0x0000 (---------------) + I otaki - 0x0025a545, // n0x102e c0x0000 (---------------) + I otari - 0x002e27c5, // n0x102f c0x0000 (---------------) + I sakae - 0x0031f706, // n0x1030 c0x0000 (---------------) + I sakaki - 0x002b0884, // n0x1031 c0x0000 (---------------) + I saku - 0x00368b86, // n0x1032 c0x0000 (---------------) + I sakuho - 0x0025f9c9, // n0x1033 c0x0000 (---------------) + I shimosuwa - 0x0020120c, // n0x1034 c0x0000 (---------------) + I shinanomachi - 0x00291548, // n0x1035 c0x0000 (---------------) + I shiojiri - 0x0025fb04, // n0x1036 c0x0000 (---------------) + I suwa - 0x002eb506, // n0x1037 c0x0000 (---------------) + I suzaka - 0x002a0786, // n0x1038 c0x0000 (---------------) + I takagi - 0x00219808, // n0x1039 c0x0000 (---------------) + I takamori - 0x002c2008, // n0x103a c0x0000 (---------------) + I takayama - 0x00201109, // n0x103b c0x0000 (---------------) + I tateshina - 0x0020bb07, // n0x103c c0x0000 (---------------) + I tatsuno - 0x00367009, // n0x103d c0x0000 (---------------) + I togakushi - 0x0026c7c6, // n0x103e c0x0000 (---------------) + I togura - 0x0022b384, // n0x103f c0x0000 (---------------) + I tomi - 0x0020c884, // n0x1040 c0x0000 (---------------) + I ueda - 0x002544c4, // n0x1041 c0x0000 (---------------) + I wada - 0x00280908, // n0x1042 c0x0000 (---------------) + I yamagata - 0x0020504a, // n0x1043 c0x0000 (---------------) + I yamanouchi - 0x0034db06, // n0x1044 c0x0000 (---------------) + I yasaka - 0x00353547, // n0x1045 c0x0000 (---------------) + I yasuoka - 0x002dbf47, // n0x1046 c0x0000 (---------------) + I chijiwa - 0x00225885, // n0x1047 c0x0000 (---------------) + I futsu - 
0x00285644, // n0x1048 c0x0000 (---------------) + I goto - 0x0028d546, // n0x1049 c0x0000 (---------------) + I hasami - 0x0033c9c6, // n0x104a c0x0000 (---------------) + I hirado - 0x0023b843, // n0x104b c0x0000 (---------------) + I iki - 0x002f15c7, // n0x104c c0x0000 (---------------) + I isahaya - 0x0032bfc8, // n0x104d c0x0000 (---------------) + I kawatana - 0x002b090a, // n0x104e c0x0000 (---------------) + I kuchinotsu - 0x002cbf08, // n0x104f c0x0000 (---------------) + I matsuura - 0x002c3a48, // n0x1050 c0x0000 (---------------) + I nagasaki - 0x00395985, // n0x1051 c0x0000 (---------------) + I obama - 0x002f7605, // n0x1052 c0x0000 (---------------) + I omura - 0x002b0105, // n0x1053 c0x0000 (---------------) + I oseto - 0x00225c46, // n0x1054 c0x0000 (---------------) + I saikai - 0x0022d946, // n0x1055 c0x0000 (---------------) + I sasebo - 0x002168c5, // n0x1056 c0x0000 (---------------) + I seihi - 0x00312989, // n0x1057 c0x0000 (---------------) + I shimabara - 0x0028544c, // n0x1058 c0x0000 (---------------) + I shinkamigoto - 0x002b01c7, // n0x1059 c0x0000 (---------------) + I togitsu - 0x00296d88, // n0x105a c0x0000 (---------------) + I tsushima - 0x0028db85, // n0x105b c0x0000 (---------------) + I unzen - 0x00685284, // n0x105c c0x0001 (---------------) ! 
I city - 0x0022a9c4, // n0x105d c0x0000 (---------------) + I ando - 0x002bde44, // n0x105e c0x0000 (---------------) + I gose - 0x0020c3c6, // n0x105f c0x0000 (---------------) + I heguri - 0x0029dd8e, // n0x1060 c0x0000 (---------------) + I higashiyoshino - 0x0023e887, // n0x1061 c0x0000 (---------------) + I ikaruga - 0x0028e145, // n0x1062 c0x0000 (---------------) + I ikoma - 0x0028f60c, // n0x1063 c0x0000 (---------------) + I kamikitayama - 0x002a3887, // n0x1064 c0x0000 (---------------) + I kanmaki - 0x0030d487, // n0x1065 c0x0000 (---------------) + I kashiba - 0x00325a09, // n0x1066 c0x0000 (---------------) + I kashihara - 0x002159c9, // n0x1067 c0x0000 (---------------) + I katsuragi - 0x002876c5, // n0x1068 c0x0000 (---------------) + I kawai - 0x002f1788, // n0x1069 c0x0000 (---------------) + I kawakami - 0x002c07c9, // n0x106a c0x0000 (---------------) + I kawanishi - 0x002d98c5, // n0x106b c0x0000 (---------------) + I koryo - 0x002bb2c8, // n0x106c c0x0000 (---------------) + I kurotaki - 0x002c93c6, // n0x106d c0x0000 (---------------) + I mitsue - 0x00230a86, // n0x106e c0x0000 (---------------) + I miyake - 0x002b8504, // n0x106f c0x0000 (---------------) + I nara - 0x003299c8, // n0x1070 c0x0000 (---------------) + I nosegawa - 0x00224a83, // n0x1071 c0x0000 (---------------) + I oji - 0x00308e44, // n0x1072 c0x0000 (---------------) + I ouda - 0x0036e145, // n0x1073 c0x0000 (---------------) + I oyodo - 0x00306847, // n0x1074 c0x0000 (---------------) + I sakurai - 0x00398a05, // n0x1075 c0x0000 (---------------) + I sango - 0x00281cc9, // n0x1076 c0x0000 (---------------) + I shimoichi - 0x00257a4d, // n0x1077 c0x0000 (---------------) + I shimokitayama - 0x00279e06, // n0x1078 c0x0000 (---------------) + I shinjo - 0x00250244, // n0x1079 c0x0000 (---------------) + I soni - 0x00294848, // n0x107a c0x0000 (---------------) + I takatori - 0x0027714a, // n0x107b c0x0000 (---------------) + I tawaramoto - 0x0021e387, // n0x107c c0x0000 
(---------------) + I tenkawa - 0x00347985, // n0x107d c0x0000 (---------------) + I tenri - 0x00214043, // n0x107e c0x0000 (---------------) + I uda - 0x0029d3ce, // n0x107f c0x0000 (---------------) + I yamatokoriyama - 0x0028f80c, // n0x1080 c0x0000 (---------------) + I yamatotakada - 0x002fa987, // n0x1081 c0x0000 (---------------) + I yamazoe - 0x0029df47, // n0x1082 c0x0000 (---------------) + I yoshino - 0x00201b43, // n0x1083 c0x0000 (---------------) + I aga - 0x0038d405, // n0x1084 c0x0000 (---------------) + I agano - 0x002bde45, // n0x1085 c0x0000 (---------------) + I gosen - 0x002979c8, // n0x1086 c0x0000 (---------------) + I itoigawa - 0x00293c89, // n0x1087 c0x0000 (---------------) + I izumozaki - 0x00292986, // n0x1088 c0x0000 (---------------) + I joetsu - 0x00219884, // n0x1089 c0x0000 (---------------) + I kamo - 0x0022b006, // n0x108a c0x0000 (---------------) + I kariwa - 0x003a40cb, // n0x108b c0x0000 (---------------) + I kashiwazaki - 0x002c820c, // n0x108c c0x0000 (---------------) + I minamiuonuma - 0x00310a87, // n0x108d c0x0000 (---------------) + I mitsuke - 0x002ccc45, // n0x108e c0x0000 (---------------) + I muika - 0x0037e548, // n0x108f c0x0000 (---------------) + I murakami - 0x00357905, // n0x1090 c0x0000 (---------------) + I myoko - 0x002d3507, // n0x1091 c0x0000 (---------------) + I nagaoka - 0x00287e47, // n0x1092 c0x0000 (---------------) + I niigata - 0x0024d2c5, // n0x1093 c0x0000 (---------------) + I ojiya - 0x00209783, // n0x1094 c0x0000 (---------------) + I omi - 0x0035ef04, // n0x1095 c0x0000 (---------------) + I sado - 0x00200fc5, // n0x1096 c0x0000 (---------------) + I sanjo - 0x002e8e45, // n0x1097 c0x0000 (---------------) + I seiro - 0x002e8e46, // n0x1098 c0x0000 (---------------) + I seirou - 0x0025c288, // n0x1099 c0x0000 (---------------) + I sekikawa - 0x0030d507, // n0x109a c0x0000 (---------------) + I shibata - 0x00374a46, // n0x109b c0x0000 (---------------) + I tagami - 0x0038b946, // n0x109c 
c0x0000 (---------------) + I tainai - 0x002e6d46, // n0x109d c0x0000 (---------------) + I tochio - 0x002aa249, // n0x109e c0x0000 (---------------) + I tokamachi - 0x0038bb47, // n0x109f c0x0000 (---------------) + I tsubame - 0x00292806, // n0x10a0 c0x0000 (---------------) + I tsunan - 0x002c8386, // n0x10a1 c0x0000 (---------------) + I uonuma - 0x0024d386, // n0x10a2 c0x0000 (---------------) + I yahiko - 0x002a8105, // n0x10a3 c0x0000 (---------------) + I yoita - 0x00209146, // n0x10a4 c0x0000 (---------------) + I yuzawa - 0x00389e05, // n0x10a5 c0x0000 (---------------) + I beppu - 0x002306c8, // n0x10a6 c0x0000 (---------------) + I bungoono - 0x0029254b, // n0x10a7 c0x0000 (---------------) + I bungotakada - 0x0028d346, // n0x10a8 c0x0000 (---------------) + I hasama - 0x002dbf84, // n0x10a9 c0x0000 (---------------) + I hiji - 0x002f8e49, // n0x10aa c0x0000 (---------------) + I himeshima - 0x002a2584, // n0x10ab c0x0000 (---------------) + I hita - 0x002c9348, // n0x10ac c0x0000 (---------------) + I kamitsue - 0x0033f6c7, // n0x10ad c0x0000 (---------------) + I kokonoe - 0x00283484, // n0x10ae c0x0000 (---------------) + I kuju - 0x002b50c8, // n0x10af c0x0000 (---------------) + I kunisaki - 0x002bc7c4, // n0x10b0 c0x0000 (---------------) + I kusu - 0x002a8144, // n0x10b1 c0x0000 (---------------) + I oita - 0x00288905, // n0x10b2 c0x0000 (---------------) + I saiki - 0x002fdb06, // n0x10b3 c0x0000 (---------------) + I taketa - 0x002f4007, // n0x10b4 c0x0000 (---------------) + I tsukumi - 0x00244a43, // n0x10b5 c0x0000 (---------------) + I usa - 0x0029ce85, // n0x10b6 c0x0000 (---------------) + I usuki - 0x002becc4, // n0x10b7 c0x0000 (---------------) + I yufu - 0x00306d46, // n0x10b8 c0x0000 (---------------) + I akaiwa - 0x002b0848, // n0x10b9 c0x0000 (---------------) + I asakuchi - 0x0032bd05, // n0x10ba c0x0000 (---------------) + I bizen - 0x002901c9, // n0x10bb c0x0000 (---------------) + I hayashima - 0x00206c05, // n0x10bc c0x0000 
(---------------) + I ibara - 0x002be6c8, // n0x10bd c0x0000 (---------------) + I kagamino - 0x0030c6c7, // n0x10be c0x0000 (---------------) + I kasaoka - 0x0037bcc8, // n0x10bf c0x0000 (---------------) + I kibichuo - 0x002b4347, // n0x10c0 c0x0000 (---------------) + I kumenan - 0x002f8889, // n0x10c1 c0x0000 (---------------) + I kurashiki - 0x0022a246, // n0x10c2 c0x0000 (---------------) + I maniwa - 0x003493c6, // n0x10c3 c0x0000 (---------------) + I misaki - 0x0025a144, // n0x10c4 c0x0000 (---------------) + I nagi - 0x00294685, // n0x10c5 c0x0000 (---------------) + I niimi - 0x002f26cc, // n0x10c6 c0x0000 (---------------) + I nishiawakura - 0x00277a47, // n0x10c7 c0x0000 (---------------) + I okayama - 0x00277e47, // n0x10c8 c0x0000 (---------------) + I satosho - 0x002dbe08, // n0x10c9 c0x0000 (---------------) + I setouchi - 0x00279e06, // n0x10ca c0x0000 (---------------) + I shinjo - 0x0029f084, // n0x10cb c0x0000 (---------------) + I shoo - 0x00326904, // n0x10cc c0x0000 (---------------) + I soja - 0x00280a89, // n0x10cd c0x0000 (---------------) + I takahashi - 0x002c3446, // n0x10ce c0x0000 (---------------) + I tamano - 0x00218907, // n0x10cf c0x0000 (---------------) + I tsuyama - 0x0038cb04, // n0x10d0 c0x0000 (---------------) + I wake - 0x002fe446, // n0x10d1 c0x0000 (---------------) + I yakage - 0x0030c2c5, // n0x10d2 c0x0000 (---------------) + I aguni - 0x002a2887, // n0x10d3 c0x0000 (---------------) + I ginowan - 0x002c8d86, // n0x10d4 c0x0000 (---------------) + I ginoza - 0x0035cc89, // n0x10d5 c0x0000 (---------------) + I gushikami - 0x0027f747, // n0x10d6 c0x0000 (---------------) + I haebaru - 0x00264247, // n0x10d7 c0x0000 (---------------) + I higashi - 0x0029fc86, // n0x10d8 c0x0000 (---------------) + I hirara - 0x00244485, // n0x10d9 c0x0000 (---------------) + I iheya - 0x0027e2c8, // n0x10da c0x0000 (---------------) + I ishigaki - 0x00218cc8, // n0x10db c0x0000 (---------------) + I ishikawa - 0x0023c306, // n0x10dc 
c0x0000 (---------------) + I itoman - 0x0032bd45, // n0x10dd c0x0000 (---------------) + I izena - 0x00271486, // n0x10de c0x0000 (---------------) + I kadena - 0x00215343, // n0x10df c0x0000 (---------------) + I kin - 0x00297849, // n0x10e0 c0x0000 (---------------) + I kitadaito - 0x002a5f4e, // n0x10e1 c0x0000 (---------------) + I kitanakagusuku - 0x002b4048, // n0x10e2 c0x0000 (---------------) + I kumejima - 0x002a3ac8, // n0x10e3 c0x0000 (---------------) + I kunigami - 0x0023c10b, // n0x10e4 c0x0000 (---------------) + I minamidaito - 0x00290406, // n0x10e5 c0x0000 (---------------) + I motobu - 0x0022fb44, // n0x10e6 c0x0000 (---------------) + I nago - 0x0020ce44, // n0x10e7 c0x0000 (---------------) + I naha - 0x002a604a, // n0x10e8 c0x0000 (---------------) + I nakagusuku - 0x00219287, // n0x10e9 c0x0000 (---------------) + I nakijin - 0x002928c5, // n0x10ea c0x0000 (---------------) + I nanjo - 0x0020ef09, // n0x10eb c0x0000 (---------------) + I nishihara - 0x002ba4c5, // n0x10ec c0x0000 (---------------) + I ogimi - 0x003955c7, // n0x10ed c0x0000 (---------------) + I okinawa - 0x00300f84, // n0x10ee c0x0000 (---------------) + I onna - 0x00325107, // n0x10ef c0x0000 (---------------) + I shimoji - 0x0022b288, // n0x10f0 c0x0000 (---------------) + I taketomi - 0x002d0e46, // n0x10f1 c0x0000 (---------------) + I tarama - 0x002f4e89, // n0x10f2 c0x0000 (---------------) + I tokashiki - 0x002b5d0a, // n0x10f3 c0x0000 (---------------) + I tomigusuku - 0x00219206, // n0x10f4 c0x0000 (---------------) + I tonaki - 0x002954c6, // n0x10f5 c0x0000 (---------------) + I urasoe - 0x002ab645, // n0x10f6 c0x0000 (---------------) + I uruma - 0x00370dc5, // n0x10f7 c0x0000 (---------------) + I yaese - 0x00228587, // n0x10f8 c0x0000 (---------------) + I yomitan - 0x00228a08, // n0x10f9 c0x0000 (---------------) + I yonabaru - 0x0030c208, // n0x10fa c0x0000 (---------------) + I yonaguni - 0x00240bc6, // n0x10fb c0x0000 (---------------) + I zamami - 
0x00223a05, // n0x10fc c0x0000 (---------------) + I abeno - 0x00252e4e, // n0x10fd c0x0000 (---------------) + I chihayaakasaka - 0x0032efc4, // n0x10fe c0x0000 (---------------) + I chuo - 0x0023c285, // n0x10ff c0x0000 (---------------) + I daito - 0x00276889, // n0x1100 c0x0000 (---------------) + I fujiidera - 0x00267188, // n0x1101 c0x0000 (---------------) + I habikino - 0x003a0846, // n0x1102 c0x0000 (---------------) + I hannan - 0x0029a2cc, // n0x1103 c0x0000 (---------------) + I higashiosaka - 0x0029bbd0, // n0x1104 c0x0000 (---------------) + I higashisumiyoshi - 0x0029d9cf, // n0x1105 c0x0000 (---------------) + I higashiyodogawa - 0x0029f188, // n0x1106 c0x0000 (---------------) + I hirakata - 0x002c4687, // n0x1107 c0x0000 (---------------) + I ibaraki - 0x00202a45, // n0x1108 c0x0000 (---------------) + I ikeda - 0x0027db45, // n0x1109 c0x0000 (---------------) + I izumi - 0x002f3e89, // n0x110a c0x0000 (---------------) + I izumiotsu - 0x00294049, // n0x110b c0x0000 (---------------) + I izumisano - 0x0021f4c6, // n0x110c c0x0000 (---------------) + I kadoma - 0x002fa6c7, // n0x110d c0x0000 (---------------) + I kaizuka - 0x00386d85, // n0x110e c0x0000 (---------------) + I kanan - 0x0036ab89, // n0x110f c0x0000 (---------------) + I kashiwara - 0x00330746, // n0x1110 c0x0000 (---------------) + I katano - 0x0038d20d, // n0x1111 c0x0000 (---------------) + I kawachinagano - 0x002889c9, // n0x1112 c0x0000 (---------------) + I kishiwada - 0x00203404, // n0x1113 c0x0000 (---------------) + I kita - 0x002b3dc8, // n0x1114 c0x0000 (---------------) + I kumatori - 0x00380649, // n0x1115 c0x0000 (---------------) + I matsubara - 0x0034dcc6, // n0x1116 c0x0000 (---------------) + I minato - 0x00277005, // n0x1117 c0x0000 (---------------) + I minoh - 0x003493c6, // n0x1118 c0x0000 (---------------) + I misaki - 0x00315149, // n0x1119 c0x0000 (---------------) + I moriguchi - 0x0030b948, // n0x111a c0x0000 (---------------) + I neyagawa - 0x0020e5c5, // 
n0x111b c0x0000 (---------------) + I nishi - 0x0025c204, // n0x111c c0x0000 (---------------) + I nose - 0x0029a48b, // n0x111d c0x0000 (---------------) + I osakasayama - 0x0034db85, // n0x111e c0x0000 (---------------) + I sakai - 0x00226706, // n0x111f c0x0000 (---------------) + I sayama - 0x002829c6, // n0x1120 c0x0000 (---------------) + I sennan - 0x00245886, // n0x1121 c0x0000 (---------------) + I settsu - 0x0032550b, // n0x1122 c0x0000 (---------------) + I shijonawate - 0x002902c9, // n0x1123 c0x0000 (---------------) + I shimamoto - 0x00216c05, // n0x1124 c0x0000 (---------------) + I suita - 0x0037ba87, // n0x1125 c0x0000 (---------------) + I tadaoka - 0x0023e346, // n0x1126 c0x0000 (---------------) + I taishi - 0x00232306, // n0x1127 c0x0000 (---------------) + I tajiri - 0x00281b88, // n0x1128 c0x0000 (---------------) + I takaishi - 0x002fdc09, // n0x1129 c0x0000 (---------------) + I takatsuki - 0x0026a9cc, // n0x112a c0x0000 (---------------) + I tondabayashi - 0x00247988, // n0x112b c0x0000 (---------------) + I toyonaka - 0x003654c6, // n0x112c c0x0000 (---------------) + I toyono - 0x003445c3, // n0x112d c0x0000 (---------------) + I yao - 0x002538c6, // n0x112e c0x0000 (---------------) + I ariake - 0x0027d805, // n0x112f c0x0000 (---------------) + I arita - 0x0027c1c8, // n0x1130 c0x0000 (---------------) + I fukudomi - 0x00223406, // n0x1131 c0x0000 (---------------) + I genkai - 0x002a2ac8, // n0x1132 c0x0000 (---------------) + I hamatama - 0x0034d185, // n0x1133 c0x0000 (---------------) + I hizen - 0x0027ae05, // n0x1134 c0x0000 (---------------) + I imari - 0x0030c948, // n0x1135 c0x0000 (---------------) + I kamimine - 0x002eb987, // n0x1136 c0x0000 (---------------) + I kanzaki - 0x00374687, // n0x1137 c0x0000 (---------------) + I karatsu - 0x002a8247, // n0x1138 c0x0000 (---------------) + I kashima - 0x0021c988, // n0x1139 c0x0000 (---------------) + I kitagata - 0x0028f348, // n0x113a c0x0000 (---------------) + I kitahata - 
0x00248c06, // n0x113b c0x0000 (---------------) + I kiyama - 0x003034c7, // n0x113c c0x0000 (---------------) + I kouhoku - 0x002ad187, // n0x113d c0x0000 (---------------) + I kyuragi - 0x0027d6ca, // n0x113e c0x0000 (---------------) + I nishiarita - 0x00278543, // n0x113f c0x0000 (---------------) + I ogi - 0x00201386, // n0x1140 c0x0000 (---------------) + I omachi - 0x00205185, // n0x1141 c0x0000 (---------------) + I ouchi - 0x002324c4, // n0x1142 c0x0000 (---------------) + I saga - 0x002794c9, // n0x1143 c0x0000 (---------------) + I shiroishi - 0x002f8804, // n0x1144 c0x0000 (---------------) + I taku - 0x0022bdc4, // n0x1145 c0x0000 (---------------) + I tara - 0x002a0604, // n0x1146 c0x0000 (---------------) + I tosu - 0x0029df4b, // n0x1147 c0x0000 (---------------) + I yoshinogari - 0x003807c7, // n0x1148 c0x0000 (---------------) + I arakawa - 0x00253085, // n0x1149 c0x0000 (---------------) + I asaka - 0x002923c8, // n0x114a c0x0000 (---------------) + I chichibu - 0x00276f06, // n0x114b c0x0000 (---------------) + I fujimi - 0x00276f08, // n0x114c c0x0000 (---------------) + I fujimino - 0x0027b706, // n0x114d c0x0000 (---------------) + I fukaya - 0x0028b005, // n0x114e c0x0000 (---------------) + I hanno - 0x0028ba45, // n0x114f c0x0000 (---------------) + I hanyu - 0x0028dec6, // n0x1150 c0x0000 (---------------) + I hasuda - 0x0028e388, // n0x1151 c0x0000 (---------------) + I hatogaya - 0x0028f0c8, // n0x1152 c0x0000 (---------------) + I hatoyama - 0x00279686, // n0x1153 c0x0000 (---------------) + I hidaka - 0x0029220f, // n0x1154 c0x0000 (---------------) + I higashichichibu - 0x00297310, // n0x1155 c0x0000 (---------------) + I higashimatsuyama - 0x0037f145, // n0x1156 c0x0000 (---------------) + I honjo - 0x00201283, // n0x1157 c0x0000 (---------------) + I ina - 0x0024fdc5, // n0x1158 c0x0000 (---------------) + I iruma - 0x002febc8, // n0x1159 c0x0000 (---------------) + I iwatsuki - 0x00293f49, // n0x115a c0x0000 (---------------) + I 
kamiizumi - 0x002e98c8, // n0x115b c0x0000 (---------------) + I kamikawa - 0x0034df48, // n0x115c c0x0000 (---------------) + I kamisato - 0x00203ac8, // n0x115d c0x0000 (---------------) + I kasukabe - 0x00380887, // n0x115e c0x0000 (---------------) + I kawagoe - 0x00276bc9, // n0x115f c0x0000 (---------------) + I kawaguchi - 0x002a2cc8, // n0x1160 c0x0000 (---------------) + I kawajima - 0x002b1584, // n0x1161 c0x0000 (---------------) + I kazo - 0x002a0488, // n0x1162 c0x0000 (---------------) + I kitamoto - 0x00283cc9, // n0x1163 c0x0000 (---------------) + I koshigaya - 0x00308107, // n0x1164 c0x0000 (---------------) + I kounosu - 0x002a5ec4, // n0x1165 c0x0000 (---------------) + I kuki - 0x0038cd88, // n0x1166 c0x0000 (---------------) + I kumagaya - 0x0024478a, // n0x1167 c0x0000 (---------------) + I matsubushi - 0x002d9e06, // n0x1168 c0x0000 (---------------) + I minano - 0x0022b406, // n0x1169 c0x0000 (---------------) + I misato - 0x0021b149, // n0x116a c0x0000 (---------------) + I miyashiro - 0x0029be07, // n0x116b c0x0000 (---------------) + I miyoshi - 0x002c9748, // n0x116c c0x0000 (---------------) + I moroyama - 0x00208588, // n0x116d c0x0000 (---------------) + I nagatoro - 0x0038c988, // n0x116e c0x0000 (---------------) + I namegawa - 0x00353085, // n0x116f c0x0000 (---------------) + I niiza - 0x00371d85, // n0x1170 c0x0000 (---------------) + I ogano - 0x00244bc5, // n0x1171 c0x0000 (---------------) + I ogawa - 0x002bde05, // n0x1172 c0x0000 (---------------) + I ogose - 0x002ecf07, // n0x1173 c0x0000 (---------------) + I okegawa - 0x00209785, // n0x1174 c0x0000 (---------------) + I omiya - 0x002bb385, // n0x1175 c0x0000 (---------------) + I otaki - 0x00344e86, // n0x1176 c0x0000 (---------------) + I ranzan - 0x002e9807, // n0x1177 c0x0000 (---------------) + I ryokami - 0x002f6807, // n0x1178 c0x0000 (---------------) + I saitama - 0x00244a86, // n0x1179 c0x0000 (---------------) + I sakado - 0x002ce705, // n0x117a c0x0000 
(---------------) + I satte - 0x00226706, // n0x117b c0x0000 (---------------) + I sayama - 0x0024c005, // n0x117c c0x0000 (---------------) + I shiki - 0x002a7788, // n0x117d c0x0000 (---------------) + I shiraoka - 0x002d8084, // n0x117e c0x0000 (---------------) + I soka - 0x002c1c86, // n0x117f c0x0000 (---------------) + I sugito - 0x00261b04, // n0x1180 c0x0000 (---------------) + I toda - 0x00222bc8, // n0x1181 c0x0000 (---------------) + I tokigawa - 0x0038170a, // n0x1182 c0x0000 (---------------) + I tokorozawa - 0x0027abcc, // n0x1183 c0x0000 (---------------) + I tsurugashima - 0x00206185, // n0x1184 c0x0000 (---------------) + I urawa - 0x002f7006, // n0x1185 c0x0000 (---------------) + I warabi - 0x0026ab86, // n0x1186 c0x0000 (---------------) + I yashio - 0x00227b46, // n0x1187 c0x0000 (---------------) + I yokoze - 0x002fedc4, // n0x1188 c0x0000 (---------------) + I yono - 0x0030c585, // n0x1189 c0x0000 (---------------) + I yorii - 0x0027b547, // n0x118a c0x0000 (---------------) + I yoshida - 0x0029be89, // n0x118b c0x0000 (---------------) + I yoshikawa - 0x002a3f47, // n0x118c c0x0000 (---------------) + I yoshimi - 0x00685284, // n0x118d c0x0001 (---------------) ! I city - 0x00685284, // n0x118e c0x0001 (---------------) ! 
I city - 0x00314ec5, // n0x118f c0x0000 (---------------) + I aisho - 0x0022f684, // n0x1190 c0x0000 (---------------) + I gamo - 0x00299c8a, // n0x1191 c0x0000 (---------------) + I higashiomi - 0x00276d86, // n0x1192 c0x0000 (---------------) + I hikone - 0x0034dec4, // n0x1193 c0x0000 (---------------) + I koka - 0x002026c5, // n0x1194 c0x0000 (---------------) + I konan - 0x0033ed85, // n0x1195 c0x0000 (---------------) + I kosei - 0x00301844, // n0x1196 c0x0000 (---------------) + I koto - 0x00282607, // n0x1197 c0x0000 (---------------) + I kusatsu - 0x00206b87, // n0x1198 c0x0000 (---------------) + I maibara - 0x002c8948, // n0x1199 c0x0000 (---------------) + I moriyama - 0x0026f748, // n0x119a c0x0000 (---------------) + I nagahama - 0x0020e5c9, // n0x119b c0x0000 (---------------) + I nishiazai - 0x00259488, // n0x119c c0x0000 (---------------) + I notogawa - 0x00299e4b, // n0x119d c0x0000 (---------------) + I omihachiman - 0x0021c704, // n0x119e c0x0000 (---------------) + I otsu - 0x00300085, // n0x119f c0x0000 (---------------) + I ritto - 0x0027f645, // n0x11a0 c0x0000 (---------------) + I ryuoh - 0x002a81c9, // n0x11a1 c0x0000 (---------------) + I takashima - 0x002fdc09, // n0x11a2 c0x0000 (---------------) + I takatsuki - 0x002f8d48, // n0x11a3 c0x0000 (---------------) + I torahime - 0x00259a88, // n0x11a4 c0x0000 (---------------) + I toyosato - 0x0027dec4, // n0x11a5 c0x0000 (---------------) + I yasu - 0x002a07c5, // n0x11a6 c0x0000 (---------------) + I akagi - 0x00201ec3, // n0x11a7 c0x0000 (---------------) + I ama - 0x0021c6c5, // n0x11a8 c0x0000 (---------------) + I gotsu - 0x002a3cc6, // n0x11a9 c0x0000 (---------------) + I hamada - 0x00293acc, // n0x11aa c0x0000 (---------------) + I higashiizumo - 0x00218d46, // n0x11ab c0x0000 (---------------) + I hikawa - 0x0024c046, // n0x11ac c0x0000 (---------------) + I hikimi - 0x00293c85, // n0x11ad c0x0000 (---------------) + I izumo - 0x0031f788, // n0x11ae c0x0000 (---------------) + I 
kakinoki - 0x002b41c6, // n0x11af c0x0000 (---------------) + I masuda - 0x0038a786, // n0x11b0 c0x0000 (---------------) + I matsue - 0x0022b406, // n0x11b1 c0x0000 (---------------) + I misato - 0x0021d5cc, // n0x11b2 c0x0000 (---------------) + I nishinoshima - 0x002b1184, // n0x11b3 c0x0000 (---------------) + I ohda - 0x002e6e8a, // n0x11b4 c0x0000 (---------------) + I okinoshima - 0x003a0a08, // n0x11b5 c0x0000 (---------------) + I okuizumo - 0x00293907, // n0x11b6 c0x0000 (---------------) + I shimane - 0x00248606, // n0x11b7 c0x0000 (---------------) + I tamayu - 0x00292a47, // n0x11b8 c0x0000 (---------------) + I tsuwano - 0x002e0ac5, // n0x11b9 c0x0000 (---------------) + I unnan - 0x00329706, // n0x11ba c0x0000 (---------------) + I yakumo - 0x00350a46, // n0x11bb c0x0000 (---------------) + I yasugi - 0x00374547, // n0x11bc c0x0000 (---------------) + I yatsuka - 0x0022be04, // n0x11bd c0x0000 (---------------) + I arai - 0x00269045, // n0x11be c0x0000 (---------------) + I atami - 0x00276884, // n0x11bf c0x0000 (---------------) + I fuji - 0x00291887, // n0x11c0 c0x0000 (---------------) + I fujieda - 0x00276ac8, // n0x11c1 c0x0000 (---------------) + I fujikawa - 0x0027764a, // n0x11c2 c0x0000 (---------------) + I fujinomiya - 0x0027ee47, // n0x11c3 c0x0000 (---------------) + I fukuroi - 0x0023c487, // n0x11c4 c0x0000 (---------------) + I gotemba - 0x002c4607, // n0x11c5 c0x0000 (---------------) + I haibara - 0x00357a49, // n0x11c6 c0x0000 (---------------) + I hamamatsu - 0x00293aca, // n0x11c7 c0x0000 (---------------) + I higashiizu - 0x0021fd03, // n0x11c8 c0x0000 (---------------) + I ito - 0x0022bd05, // n0x11c9 c0x0000 (---------------) + I iwata - 0x00212ec3, // n0x11ca c0x0000 (---------------) + I izu - 0x002f5089, // n0x11cb c0x0000 (---------------) + I izunokuni - 0x002b55c8, // n0x11cc c0x0000 (---------------) + I kakegawa - 0x002a7907, // n0x11cd c0x0000 (---------------) + I kannami - 0x002e99c9, // n0x11ce c0x0000 
(---------------) + I kawanehon - 0x00218dc6, // n0x11cf c0x0000 (---------------) + I kawazu - 0x003a2808, // n0x11d0 c0x0000 (---------------) + I kikugawa - 0x002de3c5, // n0x11d1 c0x0000 (---------------) + I kosai - 0x00328f4a, // n0x11d2 c0x0000 (---------------) + I makinohara - 0x002cfdc9, // n0x11d3 c0x0000 (---------------) + I matsuzaki - 0x0026a689, // n0x11d4 c0x0000 (---------------) + I minamiizu - 0x002c1587, // n0x11d5 c0x0000 (---------------) + I mishima - 0x002a8489, // n0x11d6 c0x0000 (---------------) + I morimachi - 0x00212d88, // n0x11d7 c0x0000 (---------------) + I nishiizu - 0x002eb686, // n0x11d8 c0x0000 (---------------) + I numazu - 0x002f71c8, // n0x11d9 c0x0000 (---------------) + I omaezaki - 0x00211247, // n0x11da c0x0000 (---------------) + I shimada - 0x0021fe47, // n0x11db c0x0000 (---------------) + I shimizu - 0x002c8707, // n0x11dc c0x0000 (---------------) + I shimoda - 0x002fe1c8, // n0x11dd c0x0000 (---------------) + I shizuoka - 0x002eb386, // n0x11de c0x0000 (---------------) + I susono - 0x00244545, // n0x11df c0x0000 (---------------) + I yaizu - 0x0027b547, // n0x11e0 c0x0000 (---------------) + I yoshida - 0x00294fc8, // n0x11e1 c0x0000 (---------------) + I ashikaga - 0x003472c4, // n0x11e2 c0x0000 (---------------) + I bato - 0x00282d84, // n0x11e3 c0x0000 (---------------) + I haga - 0x002f1987, // n0x11e4 c0x0000 (---------------) + I ichikai - 0x002acc87, // n0x11e5 c0x0000 (---------------) + I iwafune - 0x002c064a, // n0x11e6 c0x0000 (---------------) + I kaminokawa - 0x002eb606, // n0x11e7 c0x0000 (---------------) + I kanuma - 0x002fa80a, // n0x11e8 c0x0000 (---------------) + I karasuyama - 0x002ba787, // n0x11e9 c0x0000 (---------------) + I kuroiso - 0x0032ed87, // n0x11ea c0x0000 (---------------) + I mashiko - 0x00240cc4, // n0x11eb c0x0000 (---------------) + I mibu - 0x00256bc4, // n0x11ec c0x0000 (---------------) + I moka - 0x0022c146, // n0x11ed c0x0000 (---------------) + I motegi - 0x00313b04, 
// n0x11ee c0x0000 (---------------) + I nasu - 0x00313b0c, // n0x11ef c0x0000 (---------------) + I nasushiobara - 0x003a3a85, // n0x11f0 c0x0000 (---------------) + I nikko - 0x00214889, // n0x11f1 c0x0000 (---------------) + I nishikata - 0x00278504, // n0x11f2 c0x0000 (---------------) + I nogi - 0x0029f145, // n0x11f3 c0x0000 (---------------) + I ohira - 0x002770c8, // n0x11f4 c0x0000 (---------------) + I ohtawara - 0x0024df85, // n0x11f5 c0x0000 (---------------) + I oyama - 0x00306846, // n0x11f6 c0x0000 (---------------) + I sakura - 0x00205e84, // n0x11f7 c0x0000 (---------------) + I sano - 0x00263b0a, // n0x11f8 c0x0000 (---------------) + I shimotsuke - 0x002992c6, // n0x11f9 c0x0000 (---------------) + I shioya - 0x0025668a, // n0x11fa c0x0000 (---------------) + I takanezawa - 0x00347347, // n0x11fb c0x0000 (---------------) + I tochigi - 0x0022e485, // n0x11fc c0x0000 (---------------) + I tsuga - 0x0021be85, // n0x11fd c0x0000 (---------------) + I ujiie - 0x002258ca, // n0x11fe c0x0000 (---------------) + I utsunomiya - 0x002a0245, // n0x11ff c0x0000 (---------------) + I yaita - 0x0029ed46, // n0x1200 c0x0000 (---------------) + I aizumi - 0x00202844, // n0x1201 c0x0000 (---------------) + I anan - 0x002aec06, // n0x1202 c0x0000 (---------------) + I ichiba - 0x00228645, // n0x1203 c0x0000 (---------------) + I itano - 0x002234c6, // n0x1204 c0x0000 (---------------) + I kainan - 0x002f1d8c, // n0x1205 c0x0000 (---------------) + I komatsushima - 0x002c98ca, // n0x1206 c0x0000 (---------------) + I matsushige - 0x0027fa04, // n0x1207 c0x0000 (---------------) + I mima - 0x00220086, // n0x1208 c0x0000 (---------------) + I minami - 0x0029be07, // n0x1209 c0x0000 (---------------) + I miyoshi - 0x002cc304, // n0x120a c0x0000 (---------------) + I mugi - 0x002af708, // n0x120b c0x0000 (---------------) + I nakagawa - 0x00381606, // n0x120c c0x0000 (---------------) + I naruto - 0x00252cc9, // n0x120d c0x0000 (---------------) + I sanagochi - 
0x002aab09, // n0x120e c0x0000 (---------------) + I shishikui - 0x0037ea09, // n0x120f c0x0000 (---------------) + I tokushima - 0x00369846, // n0x1210 c0x0000 (---------------) + I wajiki - 0x00211346, // n0x1211 c0x0000 (---------------) + I adachi - 0x002f7307, // n0x1212 c0x0000 (---------------) + I akiruno - 0x003128c8, // n0x1213 c0x0000 (---------------) + I akishima - 0x00211149, // n0x1214 c0x0000 (---------------) + I aogashima - 0x003807c7, // n0x1215 c0x0000 (---------------) + I arakawa - 0x002b6946, // n0x1216 c0x0000 (---------------) + I bunkyo - 0x002ffd07, // n0x1217 c0x0000 (---------------) + I chiyoda - 0x002db245, // n0x1218 c0x0000 (---------------) + I chofu - 0x0032efc4, // n0x1219 c0x0000 (---------------) + I chuo - 0x002f6ec7, // n0x121a c0x0000 (---------------) + I edogawa - 0x002bed45, // n0x121b c0x0000 (---------------) + I fuchu - 0x00288845, // n0x121c c0x0000 (---------------) + I fussa - 0x002fc547, // n0x121d c0x0000 (---------------) + I hachijo - 0x0024d188, // n0x121e c0x0000 (---------------) + I hachioji - 0x0037e4c6, // n0x121f c0x0000 (---------------) + I hamura - 0x0029634d, // n0x1220 c0x0000 (---------------) + I higashikurume - 0x00297bcf, // n0x1221 c0x0000 (---------------) + I higashimurayama - 0x0029d20d, // n0x1222 c0x0000 (---------------) + I higashiyamato - 0x0020c2c4, // n0x1223 c0x0000 (---------------) + I hino - 0x0023b446, // n0x1224 c0x0000 (---------------) + I hinode - 0x002d1108, // n0x1225 c0x0000 (---------------) + I hinohara - 0x0030ef45, // n0x1226 c0x0000 (---------------) + I inagi - 0x0027d888, // n0x1227 c0x0000 (---------------) + I itabashi - 0x0021034a, // n0x1228 c0x0000 (---------------) + I katsushika - 0x00203404, // n0x1229 c0x0000 (---------------) + I kita - 0x002f2146, // n0x122a c0x0000 (---------------) + I kiyose - 0x00208b87, // n0x122b c0x0000 (---------------) + I kodaira - 0x002171c7, // n0x122c c0x0000 (---------------) + I koganei - 0x002a09c9, // n0x122d c0x0000 
(---------------) + I kokubunji - 0x002f7185, // n0x122e c0x0000 (---------------) + I komae - 0x00301844, // n0x122f c0x0000 (---------------) + I koto - 0x00331f8a, // n0x1230 c0x0000 (---------------) + I kouzushima - 0x002b57c9, // n0x1231 c0x0000 (---------------) + I kunitachi - 0x002a8587, // n0x1232 c0x0000 (---------------) + I machida - 0x00296606, // n0x1233 c0x0000 (---------------) + I meguro - 0x0034dcc6, // n0x1234 c0x0000 (---------------) + I minato - 0x002a0706, // n0x1235 c0x0000 (---------------) + I mitaka - 0x0035b606, // n0x1236 c0x0000 (---------------) + I mizuho - 0x002cfa8f, // n0x1237 c0x0000 (---------------) + I musashimurayama - 0x002d0fc9, // n0x1238 c0x0000 (---------------) + I musashino - 0x00313846, // n0x1239 c0x0000 (---------------) + I nakano - 0x0035d406, // n0x123a c0x0000 (---------------) + I nerima - 0x0038de49, // n0x123b c0x0000 (---------------) + I ogasawara - 0x003035c7, // n0x123c c0x0000 (---------------) + I okutama - 0x00210043, // n0x123d c0x0000 (---------------) + I ome - 0x002035c6, // n0x123e c0x0000 (---------------) + I oshima - 0x002010c3, // n0x123f c0x0000 (---------------) + I ota - 0x00245d88, // n0x1240 c0x0000 (---------------) + I setagaya - 0x002ffb47, // n0x1241 c0x0000 (---------------) + I shibuya - 0x0029f389, // n0x1242 c0x0000 (---------------) + I shinagawa - 0x0027e608, // n0x1243 c0x0000 (---------------) + I shinjuku - 0x003747c8, // n0x1244 c0x0000 (---------------) + I suginami - 0x00216546, // n0x1245 c0x0000 (---------------) + I sumida - 0x00223bc9, // n0x1246 c0x0000 (---------------) + I tachikawa - 0x00287f85, // n0x1247 c0x0000 (---------------) + I taito - 0x00248604, // n0x1248 c0x0000 (---------------) + I tama - 0x00247cc7, // n0x1249 c0x0000 (---------------) + I toshima - 0x0035d785, // n0x124a c0x0000 (---------------) + I chizu - 0x0020c2c4, // n0x124b c0x0000 (---------------) + I hino - 0x00253148, // n0x124c c0x0000 (---------------) + I kawahara - 0x00219544, // 
n0x124d c0x0000 (---------------) + I koge - 0x00301847, // n0x124e c0x0000 (---------------) + I kotoura - 0x0036ce86, // n0x124f c0x0000 (---------------) + I misasa - 0x002e7cc5, // n0x1250 c0x0000 (---------------) + I nanbu - 0x00268048, // n0x1251 c0x0000 (---------------) + I nichinan - 0x0034db8b, // n0x1252 c0x0000 (---------------) + I sakaiminato - 0x002fb607, // n0x1253 c0x0000 (---------------) + I tottori - 0x00225b46, // n0x1254 c0x0000 (---------------) + I wakasa - 0x002c2b44, // n0x1255 c0x0000 (---------------) + I yazu - 0x0022fac6, // n0x1256 c0x0000 (---------------) + I yonago - 0x002c1345, // n0x1257 c0x0000 (---------------) + I asahi - 0x002bed45, // n0x1258 c0x0000 (---------------) + I fuchu - 0x0027e049, // n0x1259 c0x0000 (---------------) + I fukumitsu - 0x00282209, // n0x125a c0x0000 (---------------) + I funahashi - 0x0021fe84, // n0x125b c0x0000 (---------------) + I himi - 0x0021fec5, // n0x125c c0x0000 (---------------) + I imizu - 0x002200c5, // n0x125d c0x0000 (---------------) + I inami - 0x00328dc6, // n0x125e c0x0000 (---------------) + I johana - 0x002f1888, // n0x125f c0x0000 (---------------) + I kamiichi - 0x002b9e06, // n0x1260 c0x0000 (---------------) + I kurobe - 0x0032be0b, // n0x1261 c0x0000 (---------------) + I nakaniikawa - 0x0030100a, // n0x1262 c0x0000 (---------------) + I namerikawa - 0x002f4dc5, // n0x1263 c0x0000 (---------------) + I nanto - 0x0028bac6, // n0x1264 c0x0000 (---------------) + I nyuzen - 0x002f3785, // n0x1265 c0x0000 (---------------) + I oyabe - 0x00216cc5, // n0x1266 c0x0000 (---------------) + I taira - 0x0028f4c7, // n0x1267 c0x0000 (---------------) + I takaoka - 0x00201d88, // n0x1268 c0x0000 (---------------) + I tateyama - 0x00259504, // n0x1269 c0x0000 (---------------) + I toga - 0x002b06c6, // n0x126a c0x0000 (---------------) + I tonami - 0x0028f146, // n0x126b c0x0000 (---------------) + I toyama - 0x00212f47, // n0x126c c0x0000 (---------------) + I unazuki - 0x00300684, // 
n0x126d c0x0000 (---------------) + I uozu - 0x0027c046, // n0x126e c0x0000 (---------------) + I yamada - 0x0023f485, // n0x126f c0x0000 (---------------) + I arida - 0x0023f489, // n0x1270 c0x0000 (---------------) + I aridagawa - 0x00211544, // n0x1271 c0x0000 (---------------) + I gobo - 0x002857c9, // n0x1272 c0x0000 (---------------) + I hashimoto - 0x00279686, // n0x1273 c0x0000 (---------------) + I hidaka - 0x002bbb88, // n0x1274 c0x0000 (---------------) + I hirogawa - 0x002200c5, // n0x1275 c0x0000 (---------------) + I inami - 0x002dc045, // n0x1276 c0x0000 (---------------) + I iwade - 0x002234c6, // n0x1277 c0x0000 (---------------) + I kainan - 0x0026a8c9, // n0x1278 c0x0000 (---------------) + I kamitonda - 0x002159c9, // n0x1279 c0x0000 (---------------) + I katsuragi - 0x0024c0c6, // n0x127a c0x0000 (---------------) + I kimino - 0x00267288, // n0x127b c0x0000 (---------------) + I kinokawa - 0x00257b88, // n0x127c c0x0000 (---------------) + I kitayama - 0x002f3744, // n0x127d c0x0000 (---------------) + I koya - 0x0035bb04, // n0x127e c0x0000 (---------------) + I koza - 0x0035bb08, // n0x127f c0x0000 (---------------) + I kozagawa - 0x0031d888, // n0x1280 c0x0000 (---------------) + I kudoyama - 0x00367109, // n0x1281 c0x0000 (---------------) + I kushimoto - 0x002a3c46, // n0x1282 c0x0000 (---------------) + I mihama - 0x0022b406, // n0x1283 c0x0000 (---------------) + I misato - 0x0031af4d, // n0x1284 c0x0000 (---------------) + I nachikatsuura - 0x00356786, // n0x1285 c0x0000 (---------------) + I shingu - 0x0029a9c9, // n0x1286 c0x0000 (---------------) + I shirahama - 0x00201545, // n0x1287 c0x0000 (---------------) + I taiji - 0x0030acc6, // n0x1288 c0x0000 (---------------) + I tanabe - 0x00223d88, // n0x1289 c0x0000 (---------------) + I wakayama - 0x003187c5, // n0x128a c0x0000 (---------------) + I yuasa - 0x002ad1c4, // n0x128b c0x0000 (---------------) + I yura - 0x002c1345, // n0x128c c0x0000 (---------------) + I asahi - 
0x00281888, // n0x128d c0x0000 (---------------) + I funagata - 0x00299a49, // n0x128e c0x0000 (---------------) + I higashine - 0x00276944, // n0x128f c0x0000 (---------------) + I iide - 0x00352406, // n0x1290 c0x0000 (---------------) + I kahoku - 0x0024de4a, // n0x1291 c0x0000 (---------------) + I kaminoyama - 0x002ca0c8, // n0x1292 c0x0000 (---------------) + I kaneyama - 0x002c07c9, // n0x1293 c0x0000 (---------------) + I kawanishi - 0x002932ca, // n0x1294 c0x0000 (---------------) + I mamurogawa - 0x002e9946, // n0x1295 c0x0000 (---------------) + I mikawa - 0x00297d88, // n0x1296 c0x0000 (---------------) + I murayama - 0x002d32c5, // n0x1297 c0x0000 (---------------) + I nagai - 0x002cbd88, // n0x1298 c0x0000 (---------------) + I nakayama - 0x002b4445, // n0x1299 c0x0000 (---------------) + I nanyo - 0x00218c89, // n0x129a c0x0000 (---------------) + I nishikawa - 0x00360809, // n0x129b c0x0000 (---------------) + I obanazawa - 0x0020cbc2, // n0x129c c0x0000 (---------------) + I oe - 0x002a6545, // n0x129d c0x0000 (---------------) + I oguni - 0x0026c446, // n0x129e c0x0000 (---------------) + I ohkura - 0x002795c7, // n0x129f c0x0000 (---------------) + I oishida - 0x002324c5, // n0x12a0 c0x0000 (---------------) + I sagae - 0x002f7d46, // n0x12a1 c0x0000 (---------------) + I sakata - 0x00318888, // n0x12a2 c0x0000 (---------------) + I sakegawa - 0x00279e06, // n0x12a3 c0x0000 (---------------) + I shinjo - 0x002a9209, // n0x12a4 c0x0000 (---------------) + I shirataka - 0x00277f46, // n0x12a5 c0x0000 (---------------) + I shonai - 0x00281a08, // n0x12a6 c0x0000 (---------------) + I takahata - 0x002a8c85, // n0x12a7 c0x0000 (---------------) + I tendo - 0x00266a46, // n0x12a8 c0x0000 (---------------) + I tozawa - 0x0030fcc8, // n0x12a9 c0x0000 (---------------) + I tsuruoka - 0x00280908, // n0x12aa c0x0000 (---------------) + I yamagata - 0x0037f8c8, // n0x12ab c0x0000 (---------------) + I yamanobe - 0x00249148, // n0x12ac c0x0000 
(---------------) + I yonezawa - 0x00209144, // n0x12ad c0x0000 (---------------) + I yuza - 0x0022c583, // n0x12ae c0x0000 (---------------) + I abu - 0x002a9444, // n0x12af c0x0000 (---------------) + I hagi - 0x0022af86, // n0x12b0 c0x0000 (---------------) + I hikari - 0x002db284, // n0x12b1 c0x0000 (---------------) + I hofu - 0x002a3a07, // n0x12b2 c0x0000 (---------------) + I iwakuni - 0x0038a689, // n0x12b3 c0x0000 (---------------) + I kudamatsu - 0x002c2345, // n0x12b4 c0x0000 (---------------) + I mitou - 0x00208586, // n0x12b5 c0x0000 (---------------) + I nagato - 0x002035c6, // n0x12b6 c0x0000 (---------------) + I oshima - 0x0025c0cb, // n0x12b7 c0x0000 (---------------) + I shimonoseki - 0x002f4d06, // n0x12b8 c0x0000 (---------------) + I shunan - 0x0031db86, // n0x12b9 c0x0000 (---------------) + I tabuse - 0x0022b508, // n0x12ba c0x0000 (---------------) + I tokuyama - 0x0025a486, // n0x12bb c0x0000 (---------------) + I toyota - 0x002a7203, // n0x12bc c0x0000 (---------------) + I ube - 0x00204483, // n0x12bd c0x0000 (---------------) + I yuu - 0x0032efc4, // n0x12be c0x0000 (---------------) + I chuo - 0x00237385, // n0x12bf c0x0000 (---------------) + I doshi - 0x002ad347, // n0x12c0 c0x0000 (---------------) + I fuefuki - 0x00276ac8, // n0x12c1 c0x0000 (---------------) + I fujikawa - 0x00276acf, // n0x12c2 c0x0000 (---------------) + I fujikawaguchiko - 0x0027b44b, // n0x12c3 c0x0000 (---------------) + I fujiyoshida - 0x002f1688, // n0x12c4 c0x0000 (---------------) + I hayakawa - 0x00352486, // n0x12c5 c0x0000 (---------------) + I hokuto - 0x0026180e, // n0x12c6 c0x0000 (---------------) + I ichikawamisato - 0x002234c3, // n0x12c7 c0x0000 (---------------) + I kai - 0x0023ff84, // n0x12c8 c0x0000 (---------------) + I kofu - 0x002f4c85, // n0x12c9 c0x0000 (---------------) + I koshu - 0x002ff886, // n0x12ca c0x0000 (---------------) + I kosuge - 0x0028d64b, // n0x12cb c0x0000 (---------------) + I minami-alps - 0x00290806, // n0x12cc 
c0x0000 (---------------) + I minobu - 0x002154c9, // n0x12cd c0x0000 (---------------) + I nakamichi - 0x002e7cc5, // n0x12ce c0x0000 (---------------) + I nanbu - 0x0037d108, // n0x12cf c0x0000 (---------------) + I narusawa - 0x0020aa88, // n0x12d0 c0x0000 (---------------) + I nirasaki - 0x0021588c, // n0x12d1 c0x0000 (---------------) + I nishikatsura - 0x0029df86, // n0x12d2 c0x0000 (---------------) + I oshino - 0x0021c706, // n0x12d3 c0x0000 (---------------) + I otsuki - 0x002b25c5, // n0x12d4 c0x0000 (---------------) + I showa - 0x00288c88, // n0x12d5 c0x0000 (---------------) + I tabayama - 0x0027abc5, // n0x12d6 c0x0000 (---------------) + I tsuru - 0x003831c8, // n0x12d7 c0x0000 (---------------) + I uenohara - 0x0029d64a, // n0x12d8 c0x0000 (---------------) + I yamanakako - 0x002a1649, // n0x12d9 c0x0000 (---------------) + I yamanashi - 0x00685284, // n0x12da c0x0001 (---------------) ! I city - 0x2d20a442, // n0x12db c0x00b4 (n0x12dc-n0x12dd) o I co - 0x000ff148, // n0x12dc c0x0000 (---------------) + blogspot - 0x00234803, // n0x12dd c0x0000 (---------------) + I com - 0x0023a1c3, // n0x12de c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x12df c0x0000 (---------------) + I gov - 0x002119c3, // n0x12e0 c0x0000 (---------------) + I mil - 0x002207c3, // n0x12e1 c0x0000 (---------------) + I net - 0x00225403, // n0x12e2 c0x0000 (---------------) + I org - 0x0032bd03, // n0x12e3 c0x0000 (---------------) + I biz - 0x00234803, // n0x12e4 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x12e5 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x12e6 c0x0000 (---------------) + I gov - 0x00201804, // n0x12e7 c0x0000 (---------------) + I info - 0x002207c3, // n0x12e8 c0x0000 (---------------) + I net - 0x00225403, // n0x12e9 c0x0000 (---------------) + I org - 0x002357c3, // n0x12ea c0x0000 (---------------) + I ass - 0x002ceb44, // n0x12eb c0x0000 (---------------) + I asso - 0x00234803, // n0x12ec c0x0000 (---------------) + I com - 
0x0023d404, // n0x12ed c0x0000 (---------------) + I coop - 0x0023a1c3, // n0x12ee c0x0000 (---------------) + I edu - 0x0023ef84, // n0x12ef c0x0000 (---------------) + I gouv - 0x0027c5c3, // n0x12f0 c0x0000 (---------------) + I gov - 0x00232787, // n0x12f1 c0x0000 (---------------) + I medecin - 0x002119c3, // n0x12f2 c0x0000 (---------------) + I mil - 0x00201343, // n0x12f3 c0x0000 (---------------) + I nom - 0x0035cfc8, // n0x12f4 c0x0000 (---------------) + I notaires - 0x00225403, // n0x12f5 c0x0000 (---------------) + I org - 0x002f648b, // n0x12f6 c0x0000 (---------------) + I pharmaciens - 0x002e13c3, // n0x12f7 c0x0000 (---------------) + I prd - 0x002470c6, // n0x12f8 c0x0000 (---------------) + I presse - 0x00200c42, // n0x12f9 c0x0000 (---------------) + I tm - 0x0023040b, // n0x12fa c0x0000 (---------------) + I veterinaire - 0x0023a1c3, // n0x12fb c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x12fc c0x0000 (---------------) + I gov - 0x002207c3, // n0x12fd c0x0000 (---------------) + I net - 0x00225403, // n0x12fe c0x0000 (---------------) + I org - 0x00234803, // n0x12ff c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1300 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1301 c0x0000 (---------------) + I gov - 0x00225403, // n0x1302 c0x0000 (---------------) + I org - 0x0022d4c3, // n0x1303 c0x0000 (---------------) + I rep - 0x00204883, // n0x1304 c0x0000 (---------------) + I tra - 0x00200342, // n0x1305 c0x0000 (---------------) + I ac - 0x000ff148, // n0x1306 c0x0000 (---------------) + blogspot - 0x002b8945, // n0x1307 c0x0000 (---------------) + I busan - 0x00319448, // n0x1308 c0x0000 (---------------) + I chungbuk - 0x0032d3c8, // n0x1309 c0x0000 (---------------) + I chungnam - 0x0020a442, // n0x130a c0x0000 (---------------) + I co - 0x00254545, // n0x130b c0x0000 (---------------) + I daegu - 0x003293c7, // n0x130c c0x0000 (---------------) + I daejeon - 0x00200082, // n0x130d c0x0000 (---------------) + I es - 
0x00215707, // n0x130e c0x0000 (---------------) + I gangwon - 0x0020ec82, // n0x130f c0x0000 (---------------) + I go - 0x00234cc7, // n0x1310 c0x0000 (---------------) + I gwangju - 0x002f4a89, // n0x1311 c0x0000 (---------------) + I gyeongbuk - 0x002d2e88, // n0x1312 c0x0000 (---------------) + I gyeonggi - 0x0038c809, // n0x1313 c0x0000 (---------------) + I gyeongnam - 0x00235402, // n0x1314 c0x0000 (---------------) + I hs - 0x00265007, // n0x1315 c0x0000 (---------------) + I incheon - 0x0024be44, // n0x1316 c0x0000 (---------------) + I jeju - 0x00329487, // n0x1317 c0x0000 (---------------) + I jeonbuk - 0x00300f07, // n0x1318 c0x0000 (---------------) + I jeonnam - 0x002b8102, // n0x1319 c0x0000 (---------------) + I kg - 0x002119c3, // n0x131a c0x0000 (---------------) + I mil - 0x0020df02, // n0x131b c0x0000 (---------------) + I ms - 0x002030c2, // n0x131c c0x0000 (---------------) + I ne - 0x00200d82, // n0x131d c0x0000 (---------------) + I or - 0x00200582, // n0x131e c0x0000 (---------------) + I pe - 0x00206842, // n0x131f c0x0000 (---------------) + I re - 0x00217f42, // n0x1320 c0x0000 (---------------) + I sc - 0x0028a505, // n0x1321 c0x0000 (---------------) + I seoul - 0x0024e3c5, // n0x1322 c0x0000 (---------------) + I ulsan - 0x00234803, // n0x1323 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1324 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1325 c0x0000 (---------------) + I gov - 0x002207c3, // n0x1326 c0x0000 (---------------) + I net - 0x00225403, // n0x1327 c0x0000 (---------------) + I org - 0x00234803, // n0x1328 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1329 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x132a c0x0000 (---------------) + I gov - 0x002119c3, // n0x132b c0x0000 (---------------) + I mil - 0x002207c3, // n0x132c c0x0000 (---------------) + I net - 0x00225403, // n0x132d c0x0000 (---------------) + I org - 0x00000141, // n0x132e c0x0000 (---------------) + c - 0x00234803, // n0x132f 
c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1330 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1331 c0x0000 (---------------) + I gov - 0x00201804, // n0x1332 c0x0000 (---------------) + I info - 0x002014c3, // n0x1333 c0x0000 (---------------) + I int - 0x002207c3, // n0x1334 c0x0000 (---------------) + I net - 0x00225403, // n0x1335 c0x0000 (---------------) + I org - 0x00221183, // n0x1336 c0x0000 (---------------) + I per - 0x00234803, // n0x1337 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1338 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1339 c0x0000 (---------------) + I gov - 0x002207c3, // n0x133a c0x0000 (---------------) + I net - 0x00225403, // n0x133b c0x0000 (---------------) + I org - 0x0020a442, // n0x133c c0x0000 (---------------) + I co - 0x00234803, // n0x133d c0x0000 (---------------) + I com - 0x0023a1c3, // n0x133e c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x133f c0x0000 (---------------) + I gov - 0x002207c3, // n0x1340 c0x0000 (---------------) + I net - 0x00225403, // n0x1341 c0x0000 (---------------) + I org - 0x000ff148, // n0x1342 c0x0000 (---------------) + blogspot - 0x00200342, // n0x1343 c0x0000 (---------------) + I ac - 0x002bd384, // n0x1344 c0x0000 (---------------) + I assn - 0x00234803, // n0x1345 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1346 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1347 c0x0000 (---------------) + I gov - 0x00305303, // n0x1348 c0x0000 (---------------) + I grp - 0x00235ec5, // n0x1349 c0x0000 (---------------) + I hotel - 0x002014c3, // n0x134a c0x0000 (---------------) + I int - 0x0030dc03, // n0x134b c0x0000 (---------------) + I ltd - 0x002207c3, // n0x134c c0x0000 (---------------) + I net - 0x00230743, // n0x134d c0x0000 (---------------) + I ngo - 0x00225403, // n0x134e c0x0000 (---------------) + I org - 0x00217f43, // n0x134f c0x0000 (---------------) + I sch - 0x002738c3, // n0x1350 c0x0000 (---------------) + I soc - 0x00221cc3, 
// n0x1351 c0x0000 (---------------) + I web - 0x00234803, // n0x1352 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1353 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1354 c0x0000 (---------------) + I gov - 0x002207c3, // n0x1355 c0x0000 (---------------) + I net - 0x00225403, // n0x1356 c0x0000 (---------------) + I org - 0x0020a442, // n0x1357 c0x0000 (---------------) + I co - 0x00225403, // n0x1358 c0x0000 (---------------) + I org - 0x000ff148, // n0x1359 c0x0000 (---------------) + blogspot - 0x0027c5c3, // n0x135a c0x0000 (---------------) + I gov - 0x000ff148, // n0x135b c0x0000 (---------------) + blogspot - 0x002d0a83, // n0x135c c0x0000 (---------------) + I asn - 0x00234803, // n0x135d c0x0000 (---------------) + I com - 0x00237d44, // n0x135e c0x0000 (---------------) + I conf - 0x0023a1c3, // n0x135f c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1360 c0x0000 (---------------) + I gov - 0x0020ae82, // n0x1361 c0x0000 (---------------) + I id - 0x002119c3, // n0x1362 c0x0000 (---------------) + I mil - 0x002207c3, // n0x1363 c0x0000 (---------------) + I net - 0x00225403, // n0x1364 c0x0000 (---------------) + I org - 0x00234803, // n0x1365 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1366 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1367 c0x0000 (---------------) + I gov - 0x0020ae82, // n0x1368 c0x0000 (---------------) + I id - 0x002127c3, // n0x1369 c0x0000 (---------------) + I med - 0x002207c3, // n0x136a c0x0000 (---------------) + I net - 0x00225403, // n0x136b c0x0000 (---------------) + I org - 0x002db1c3, // n0x136c c0x0000 (---------------) + I plc - 0x00217f43, // n0x136d c0x0000 (---------------) + I sch - 0x00200342, // n0x136e c0x0000 (---------------) + I ac - 0x0020a442, // n0x136f c0x0000 (---------------) + I co - 0x0027c5c3, // n0x1370 c0x0000 (---------------) + I gov - 0x002207c3, // n0x1371 c0x0000 (---------------) + I net - 0x00225403, // n0x1372 c0x0000 (---------------) + I org - 
0x002470c5, // n0x1373 c0x0000 (---------------) + I press - 0x002ceb44, // n0x1374 c0x0000 (---------------) + I asso - 0x00200c42, // n0x1375 c0x0000 (---------------) + I tm - 0x000ff148, // n0x1376 c0x0000 (---------------) + blogspot - 0x00200342, // n0x1377 c0x0000 (---------------) + I ac - 0x0020a442, // n0x1378 c0x0000 (---------------) + I co - 0x0019a48b, // n0x1379 c0x0000 (---------------) + diskstation - 0x00108d47, // n0x137a c0x0000 (---------------) + dscloud - 0x0023a1c3, // n0x137b c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x137c c0x0000 (---------------) + I gov - 0x001226c4, // n0x137d c0x0000 (---------------) + i234 - 0x00248b43, // n0x137e c0x0000 (---------------) + I its - 0x00108cc4, // n0x137f c0x0000 (---------------) + myds - 0x002207c3, // n0x1380 c0x0000 (---------------) + I net - 0x00225403, // n0x1381 c0x0000 (---------------) + I org - 0x002e1e84, // n0x1382 c0x0000 (---------------) + I priv - 0x000f4908, // n0x1383 c0x0000 (---------------) + synology - 0x0020a442, // n0x1384 c0x0000 (---------------) + I co - 0x00234803, // n0x1385 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1386 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1387 c0x0000 (---------------) + I gov - 0x002119c3, // n0x1388 c0x0000 (---------------) + I mil - 0x00201343, // n0x1389 c0x0000 (---------------) + I nom - 0x00225403, // n0x138a c0x0000 (---------------) + I org - 0x002e13c3, // n0x138b c0x0000 (---------------) + I prd - 0x00200c42, // n0x138c c0x0000 (---------------) + I tm - 0x000ff148, // n0x138d c0x0000 (---------------) + blogspot - 0x00234803, // n0x138e c0x0000 (---------------) + I com - 0x0023a1c3, // n0x138f c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1390 c0x0000 (---------------) + I gov - 0x00201643, // n0x1391 c0x0000 (---------------) + I inf - 0x00207d04, // n0x1392 c0x0000 (---------------) + I name - 0x002207c3, // n0x1393 c0x0000 (---------------) + I net - 0x00225403, // n0x1394 c0x0000 
(---------------) + I org - 0x00234803, // n0x1395 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1396 c0x0000 (---------------) + I edu - 0x0023ef84, // n0x1397 c0x0000 (---------------) + I gouv - 0x0027c5c3, // n0x1398 c0x0000 (---------------) + I gov - 0x002207c3, // n0x1399 c0x0000 (---------------) + I net - 0x00225403, // n0x139a c0x0000 (---------------) + I org - 0x002470c6, // n0x139b c0x0000 (---------------) + I presse - 0x0023a1c3, // n0x139c c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x139d c0x0000 (---------------) + I gov - 0x0016c103, // n0x139e c0x0000 (---------------) + nyc - 0x00225403, // n0x139f c0x0000 (---------------) + I org - 0x00234803, // n0x13a0 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x13a1 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x13a2 c0x0000 (---------------) + I gov - 0x002207c3, // n0x13a3 c0x0000 (---------------) + I net - 0x00225403, // n0x13a4 c0x0000 (---------------) + I org - 0x00108d47, // n0x13a5 c0x0000 (---------------) + dscloud - 0x000ff148, // n0x13a6 c0x0000 (---------------) + blogspot - 0x0027c5c3, // n0x13a7 c0x0000 (---------------) + I gov - 0x00234803, // n0x13a8 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x13a9 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x13aa c0x0000 (---------------) + I gov - 0x002207c3, // n0x13ab c0x0000 (---------------) + I net - 0x00225403, // n0x13ac c0x0000 (---------------) + I org - 0x35634803, // n0x13ad c0x00d5 (n0x13b1-n0x13b2) + I com - 0x0023a1c3, // n0x13ae c0x0000 (---------------) + I edu - 0x002207c3, // n0x13af c0x0000 (---------------) + I net - 0x00225403, // n0x13b0 c0x0000 (---------------) + I org - 0x000ff148, // n0x13b1 c0x0000 (---------------) + blogspot - 0x00200342, // n0x13b2 c0x0000 (---------------) + I ac - 0x0020a442, // n0x13b3 c0x0000 (---------------) + I co - 0x00234803, // n0x13b4 c0x0000 (---------------) + I com - 0x0027c5c3, // n0x13b5 c0x0000 (---------------) + I gov - 0x002207c3, // 
n0x13b6 c0x0000 (---------------) + I net - 0x00200d82, // n0x13b7 c0x0000 (---------------) + I or - 0x00225403, // n0x13b8 c0x0000 (---------------) + I org - 0x00308b87, // n0x13b9 c0x0000 (---------------) + I academy - 0x0026ff8b, // n0x13ba c0x0000 (---------------) + I agriculture - 0x00207383, // n0x13bb c0x0000 (---------------) + I air - 0x0023ac08, // n0x13bc c0x0000 (---------------) + I airguard - 0x00312647, // n0x13bd c0x0000 (---------------) + I alabama - 0x00278846, // n0x13be c0x0000 (---------------) + I alaska - 0x00366045, // n0x13bf c0x0000 (---------------) + I amber - 0x002b4e89, // n0x13c0 c0x0000 (---------------) + I ambulance - 0x00246e48, // n0x13c1 c0x0000 (---------------) + I american - 0x00270949, // n0x13c2 c0x0000 (---------------) + I americana - 0x00270950, // n0x13c3 c0x0000 (---------------) + I americanantiques - 0x003570cb, // n0x13c4 c0x0000 (---------------) + I americanart - 0x002b4cc9, // n0x13c5 c0x0000 (---------------) + I amsterdam - 0x00203f43, // n0x13c6 c0x0000 (---------------) + I and - 0x00322089, // n0x13c7 c0x0000 (---------------) + I annefrank - 0x00238e06, // n0x13c8 c0x0000 (---------------) + I anthro - 0x00238e0c, // n0x13c9 c0x0000 (---------------) + I anthropology - 0x00270b48, // n0x13ca c0x0000 (---------------) + I antiques - 0x003a0f08, // n0x13cb c0x0000 (---------------) + I aquarium - 0x0024a989, // n0x13cc c0x0000 (---------------) + I arboretum - 0x0029e74e, // n0x13cd c0x0000 (---------------) + I archaeological - 0x0036f34b, // n0x13ce c0x0000 (---------------) + I archaeology - 0x0030b34c, // n0x13cf c0x0000 (---------------) + I architecture - 0x00201d03, // n0x13d0 c0x0000 (---------------) + I art - 0x00333ecc, // n0x13d1 c0x0000 (---------------) + I artanddesign - 0x00206e49, // n0x13d2 c0x0000 (---------------) + I artcenter - 0x0020a307, // n0x13d3 c0x0000 (---------------) + I artdeco - 0x0023a10c, // n0x13d4 c0x0000 (---------------) + I arteducation - 0x0039178a, // n0x13d5 
c0x0000 (---------------) + I artgallery - 0x0024a4c4, // n0x13d6 c0x0000 (---------------) + I arts - 0x0037fb8d, // n0x13d7 c0x0000 (---------------) + I artsandcrafts - 0x00333d88, // n0x13d8 c0x0000 (---------------) + I asmatart - 0x0038008d, // n0x13d9 c0x0000 (---------------) + I assassination - 0x00246846, // n0x13da c0x0000 (---------------) + I assisi - 0x002ceb4b, // n0x13db c0x0000 (---------------) + I association - 0x0022fc89, // n0x13dc c0x0000 (---------------) + I astronomy - 0x00224787, // n0x13dd c0x0000 (---------------) + I atlanta - 0x002e3806, // n0x13de c0x0000 (---------------) + I austin - 0x00307009, // n0x13df c0x0000 (---------------) + I australia - 0x00325c0a, // n0x13e0 c0x0000 (---------------) + I automotive - 0x003590c8, // n0x13e1 c0x0000 (---------------) + I aviation - 0x002e0e04, // n0x13e2 c0x0000 (---------------) + I axis - 0x00275e47, // n0x13e3 c0x0000 (---------------) + I badajoz - 0x002a18c7, // n0x13e4 c0x0000 (---------------) + I baghdad - 0x00310d44, // n0x13e5 c0x0000 (---------------) + I bahn - 0x0022f304, // n0x13e6 c0x0000 (---------------) + I bale - 0x0026e389, // n0x13e7 c0x0000 (---------------) + I baltimore - 0x002c3889, // n0x13e8 c0x0000 (---------------) + I barcelona - 0x0022b948, // n0x13e9 c0x0000 (---------------) + I baseball - 0x00210d45, // n0x13ea c0x0000 (---------------) + I basel - 0x00385405, // n0x13eb c0x0000 (---------------) + I baths - 0x0020b946, // n0x13ec c0x0000 (---------------) + I bauern - 0x0037fa49, // n0x13ed c0x0000 (---------------) + I beauxarts - 0x002105cd, // n0x13ee c0x0000 (---------------) + I beeldengeluid - 0x0030adc8, // n0x13ef c0x0000 (---------------) + I bellevue - 0x0020b847, // n0x13f0 c0x0000 (---------------) + I bergbau - 0x003660c8, // n0x13f1 c0x0000 (---------------) + I berkeley - 0x00356486, // n0x13f2 c0x0000 (---------------) + I berlin - 0x0038fdc4, // n0x13f3 c0x0000 (---------------) + I bern - 0x00355ec5, // n0x13f4 c0x0000 (---------------) 
+ I bible - 0x00202d86, // n0x13f5 c0x0000 (---------------) + I bilbao - 0x00204704, // n0x13f6 c0x0000 (---------------) + I bill - 0x00206d47, // n0x13f7 c0x0000 (---------------) + I birdart - 0x00208d4a, // n0x13f8 c0x0000 (---------------) + I birthplace - 0x002147c4, // n0x13f9 c0x0000 (---------------) + I bonn - 0x00219146, // n0x13fa c0x0000 (---------------) + I boston - 0x0021a6c9, // n0x13fb c0x0000 (---------------) + I botanical - 0x0021a6cf, // n0x13fc c0x0000 (---------------) + I botanicalgarden - 0x0021ac8d, // n0x13fd c0x0000 (---------------) + I botanicgarden - 0x0021b406, // n0x13fe c0x0000 (---------------) + I botany - 0x0021ce50, // n0x13ff c0x0000 (---------------) + I brandywinevalley - 0x0021d246, // n0x1400 c0x0000 (---------------) + I brasil - 0x0021ea87, // n0x1401 c0x0000 (---------------) + I bristol - 0x0021ee07, // n0x1402 c0x0000 (---------------) + I british - 0x0021ee0f, // n0x1403 c0x0000 (---------------) + I britishcolumbia - 0x00220409, // n0x1404 c0x0000 (---------------) + I broadcast - 0x00222dc6, // n0x1405 c0x0000 (---------------) + I brunel - 0x002255c7, // n0x1406 c0x0000 (---------------) + I brussel - 0x002255c8, // n0x1407 c0x0000 (---------------) + I brussels - 0x00226489, // n0x1408 c0x0000 (---------------) + I bruxelles - 0x00290508, // n0x1409 c0x0000 (---------------) + I building - 0x002d9207, // n0x140a c0x0000 (---------------) + I burghof - 0x0020b103, // n0x140b c0x0000 (---------------) + I bus - 0x00234686, // n0x140c c0x0000 (---------------) + I bushey - 0x00200e08, // n0x140d c0x0000 (---------------) + I cadaques - 0x0029ea0a, // n0x140e c0x0000 (---------------) + I california - 0x00221d89, // n0x140f c0x0000 (---------------) + I cambridge - 0x0020ad83, // n0x1410 c0x0000 (---------------) + I can - 0x003292c6, // n0x1411 c0x0000 (---------------) + I canada - 0x002b050a, // n0x1412 c0x0000 (---------------) + I capebreton - 0x00364447, // n0x1413 c0x0000 (---------------) + I carrier - 
0x0020a14a, // n0x1414 c0x0000 (---------------) + I cartoonart - 0x002139ce, // n0x1415 c0x0000 (---------------) + I casadelamoneda - 0x00220546, // n0x1416 c0x0000 (---------------) + I castle - 0x002a6747, // n0x1417 c0x0000 (---------------) + I castres - 0x0020e906, // n0x1418 c0x0000 (---------------) + I celtic - 0x00206f06, // n0x1419 c0x0000 (---------------) + I center - 0x00371b8b, // n0x141a c0x0000 (---------------) + I chattanooga - 0x0026284a, // n0x141b c0x0000 (---------------) + I cheltenham - 0x00378acd, // n0x141c c0x0000 (---------------) + I chesapeakebay - 0x00211407, // n0x141d c0x0000 (---------------) + I chicago - 0x00273948, // n0x141e c0x0000 (---------------) + I children - 0x00273949, // n0x141f c0x0000 (---------------) + I childrens - 0x0027394f, // n0x1420 c0x0000 (---------------) + I childrensgarden - 0x0023340c, // n0x1421 c0x0000 (---------------) + I chiropractic - 0x002b9609, // n0x1422 c0x0000 (---------------) + I chocolate - 0x00375e0e, // n0x1423 c0x0000 (---------------) + I christiansburg - 0x0023288a, // n0x1424 c0x0000 (---------------) + I cincinnati - 0x002d3b46, // n0x1425 c0x0000 (---------------) + I cinema - 0x0033fb06, // n0x1426 c0x0000 (---------------) + I circus - 0x00362ccc, // n0x1427 c0x0000 (---------------) + I civilisation - 0x0036658c, // n0x1428 c0x0000 (---------------) + I civilization - 0x0036c188, // n0x1429 c0x0000 (---------------) + I civilwar - 0x00385587, // n0x142a c0x0000 (---------------) + I clinton - 0x002afa85, // n0x142b c0x0000 (---------------) + I clock - 0x0038d684, // n0x142c c0x0000 (---------------) + I coal - 0x00382a4e, // n0x142d c0x0000 (---------------) + I coastaldefence - 0x00325e84, // n0x142e c0x0000 (---------------) + I cody - 0x002315c7, // n0x142f c0x0000 (---------------) + I coldwar - 0x00261d8a, // n0x1430 c0x0000 (---------------) + I collection - 0x002339d4, // n0x1431 c0x0000 (---------------) + I colonialwilliamsburg - 0x002340cf, // n0x1432 c0x0000 
(---------------) + I coloradoplateau - 0x0021efc8, // n0x1433 c0x0000 (---------------) + I columbia - 0x00234548, // n0x1434 c0x0000 (---------------) + I columbus - 0x0035eb0d, // n0x1435 c0x0000 (---------------) + I communication - 0x0035eb0e, // n0x1436 c0x0000 (---------------) + I communications - 0x00234809, // n0x1437 c0x0000 (---------------) + I community - 0x00236508, // n0x1438 c0x0000 (---------------) + I computer - 0x0023650f, // n0x1439 c0x0000 (---------------) + I computerhistory - 0x00239e0c, // n0x143a c0x0000 (---------------) + I contemporary - 0x00239e0f, // n0x143b c0x0000 (---------------) + I contemporaryart - 0x0023b187, // n0x143c c0x0000 (---------------) + I convent - 0x0023db8a, // n0x143d c0x0000 (---------------) + I copenhagen - 0x0021c14b, // n0x143e c0x0000 (---------------) + I corporation - 0x00240248, // n0x143f c0x0000 (---------------) + I corvette - 0x00241907, // n0x1440 c0x0000 (---------------) + I costume - 0x00339a0d, // n0x1441 c0x0000 (---------------) + I countryestate - 0x00227446, // n0x1442 c0x0000 (---------------) + I county - 0x0037fd46, // n0x1443 c0x0000 (---------------) + I crafts - 0x00242e49, // n0x1444 c0x0000 (---------------) + I cranbrook - 0x00323748, // n0x1445 c0x0000 (---------------) + I creation - 0x00247448, // n0x1446 c0x0000 (---------------) + I cultural - 0x0024744e, // n0x1447 c0x0000 (---------------) + I culturalcenter - 0x00270087, // n0x1448 c0x0000 (---------------) + I culture - 0x00253bc5, // n0x1449 c0x0000 (---------------) + I cyber - 0x00248ec5, // n0x144a c0x0000 (---------------) + I cymru - 0x00210144, // n0x144b c0x0000 (---------------) + I dali - 0x00278b06, // n0x144c c0x0000 (---------------) + I dallas - 0x0022b848, // n0x144d c0x0000 (---------------) + I database - 0x00314403, // n0x144e c0x0000 (---------------) + I ddr - 0x0025bd8e, // n0x144f c0x0000 (---------------) + I decorativearts - 0x00339dc8, // n0x1450 c0x0000 (---------------) + I delaware - 
0x00279a0b, // n0x1451 c0x0000 (---------------) + I delmenhorst - 0x0035f507, // n0x1452 c0x0000 (---------------) + I denmark - 0x002730c5, // n0x1453 c0x0000 (---------------) + I depot - 0x00226286, // n0x1454 c0x0000 (---------------) + I design - 0x002a9dc7, // n0x1455 c0x0000 (---------------) + I detroit - 0x002fb208, // n0x1456 c0x0000 (---------------) + I dinosaur - 0x00227f89, // n0x1457 c0x0000 (---------------) + I discovery - 0x00238485, // n0x1458 c0x0000 (---------------) + I dolls - 0x002873c8, // n0x1459 c0x0000 (---------------) + I donostia - 0x0020b306, // n0x145a c0x0000 (---------------) + I durham - 0x003726ca, // n0x145b c0x0000 (---------------) + I eastafrica - 0x00382949, // n0x145c c0x0000 (---------------) + I eastcoast - 0x0023a1c9, // n0x145d c0x0000 (---------------) + I education - 0x0023a1cb, // n0x145e c0x0000 (---------------) + I educational - 0x0028c1c8, // n0x145f c0x0000 (---------------) + I egyptian - 0x00310c09, // n0x1460 c0x0000 (---------------) + I eisenbahn - 0x00210e06, // n0x1461 c0x0000 (---------------) + I elburg - 0x002e718a, // n0x1462 c0x0000 (---------------) + I elvendrell - 0x0022f88a, // n0x1463 c0x0000 (---------------) + I embroidery - 0x0023dd8c, // n0x1464 c0x0000 (---------------) + I encyclopedic - 0x003961c7, // n0x1465 c0x0000 (---------------) + I england - 0x002d2c8a, // n0x1466 c0x0000 (---------------) + I entomology - 0x0032adcb, // n0x1467 c0x0000 (---------------) + I environment - 0x0032add9, // n0x1468 c0x0000 (---------------) + I environmentalconservation - 0x00337b08, // n0x1469 c0x0000 (---------------) + I epilepsy - 0x00247145, // n0x146a c0x0000 (---------------) + I essex - 0x002c5046, // n0x146b c0x0000 (---------------) + I estate - 0x003160c9, // n0x146c c0x0000 (---------------) + I ethnology - 0x00207dc6, // n0x146d c0x0000 (---------------) + I exeter - 0x0020e38a, // n0x146e c0x0000 (---------------) + I exhibition - 0x0036be46, // n0x146f c0x0000 (---------------) + I 
family - 0x002c51c4, // n0x1470 c0x0000 (---------------) + I farm - 0x002c51cd, // n0x1471 c0x0000 (---------------) + I farmequipment - 0x00312ec7, // n0x1472 c0x0000 (---------------) + I farmers - 0x0039a289, // n0x1473 c0x0000 (---------------) + I farmstead - 0x00364d85, // n0x1474 c0x0000 (---------------) + I field - 0x00365148, // n0x1475 c0x0000 (---------------) + I figueres - 0x00377a89, // n0x1476 c0x0000 (---------------) + I filatelia - 0x00249d04, // n0x1477 c0x0000 (---------------) + I film - 0x0024a3c7, // n0x1478 c0x0000 (---------------) + I fineart - 0x0024a3c8, // n0x1479 c0x0000 (---------------) + I finearts - 0x0024acc7, // n0x147a c0x0000 (---------------) + I finland - 0x00263948, // n0x147b c0x0000 (---------------) + I flanders - 0x00251107, // n0x147c c0x0000 (---------------) + I florida - 0x0033a705, // n0x147d c0x0000 (---------------) + I force - 0x002585cc, // n0x147e c0x0000 (---------------) + I fortmissoula - 0x00259189, // n0x147f c0x0000 (---------------) + I fortworth - 0x002bb04a, // n0x1480 c0x0000 (---------------) + I foundation - 0x00381bc9, // n0x1481 c0x0000 (---------------) + I francaise - 0x00322189, // n0x1482 c0x0000 (---------------) + I frankfurt - 0x0035d1cc, // n0x1483 c0x0000 (---------------) + I franziskaner - 0x002e95cb, // n0x1484 c0x0000 (---------------) + I freemasonry - 0x0025ac48, // n0x1485 c0x0000 (---------------) + I freiburg - 0x0025c488, // n0x1486 c0x0000 (---------------) + I fribourg - 0x0025f844, // n0x1487 c0x0000 (---------------) + I frog - 0x00283148, // n0x1488 c0x0000 (---------------) + I fundacio - 0x00284849, // n0x1489 c0x0000 (---------------) + I furniture - 0x00391847, // n0x148a c0x0000 (---------------) + I gallery - 0x0021a906, // n0x148b c0x0000 (---------------) + I garden - 0x00245507, // n0x148c c0x0000 (---------------) + I gateway - 0x00228209, // n0x148d c0x0000 (---------------) + I geelvinck - 0x00395e0b, // n0x148e c0x0000 (---------------) + I gemological - 
0x00324e07, // n0x148f c0x0000 (---------------) + I geology - 0x0035f147, // n0x1490 c0x0000 (---------------) + I georgia - 0x00278587, // n0x1491 c0x0000 (---------------) + I giessen - 0x00380004, // n0x1492 c0x0000 (---------------) + I glas - 0x00380005, // n0x1493 c0x0000 (---------------) + I glass - 0x002a7ec5, // n0x1494 c0x0000 (---------------) + I gorge - 0x0033620b, // n0x1495 c0x0000 (---------------) + I grandrapids - 0x0038f044, // n0x1496 c0x0000 (---------------) + I graz - 0x00356888, // n0x1497 c0x0000 (---------------) + I guernsey - 0x0028640a, // n0x1498 c0x0000 (---------------) + I halloffame - 0x0020b3c7, // n0x1499 c0x0000 (---------------) + I hamburg - 0x002fba87, // n0x149a c0x0000 (---------------) + I handson - 0x0028ced2, // n0x149b c0x0000 (---------------) + I harvestcelebration - 0x00251fc6, // n0x149c c0x0000 (---------------) + I hawaii - 0x002ad786, // n0x149d c0x0000 (---------------) + I health - 0x0030e00e, // n0x149e c0x0000 (---------------) + I heimatunduhren - 0x00255cc6, // n0x149f c0x0000 (---------------) + I hellas - 0x00203288, // n0x14a0 c0x0000 (---------------) + I helsinki - 0x0029104f, // n0x14a1 c0x0000 (---------------) + I hembygdsforbund - 0x00380448, // n0x14a2 c0x0000 (---------------) + I heritage - 0x0036b608, // n0x14a3 c0x0000 (---------------) + I histoire - 0x002b1b4a, // n0x14a4 c0x0000 (---------------) + I historical - 0x002b1b51, // n0x14a5 c0x0000 (---------------) + I historicalsociety - 0x002a1f8e, // n0x14a6 c0x0000 (---------------) + I historichouses - 0x002559ca, // n0x14a7 c0x0000 (---------------) + I historisch - 0x002559cc, // n0x14a8 c0x0000 (---------------) + I historisches - 0x00236707, // n0x14a9 c0x0000 (---------------) + I history - 0x00236710, // n0x14aa c0x0000 (---------------) + I historyofscience - 0x00202008, // n0x14ab c0x0000 (---------------) + I horology - 0x002a2185, // n0x14ac c0x0000 (---------------) + I house - 0x002aa8ca, // n0x14ad c0x0000 (---------------) 
+ I humanities - 0x0020474c, // n0x14ae c0x0000 (---------------) + I illustration - 0x002b6c8d, // n0x14af c0x0000 (---------------) + I imageandsound - 0x002a0fc6, // n0x14b0 c0x0000 (---------------) + I indian - 0x002a0fc7, // n0x14b1 c0x0000 (---------------) + I indiana - 0x002a0fcc, // n0x14b2 c0x0000 (---------------) + I indianapolis - 0x002edc4c, // n0x14b3 c0x0000 (---------------) + I indianmarket - 0x0030034c, // n0x14b4 c0x0000 (---------------) + I intelligence - 0x0034658b, // n0x14b5 c0x0000 (---------------) + I interactive - 0x00286c04, // n0x14b6 c0x0000 (---------------) + I iraq - 0x0021b2c4, // n0x14b7 c0x0000 (---------------) + I iron - 0x0034d509, // n0x14b8 c0x0000 (---------------) + I isleofman - 0x002cbac7, // n0x14b9 c0x0000 (---------------) + I jamison - 0x00372089, // n0x14ba c0x0000 (---------------) + I jefferson - 0x002800c9, // n0x14bb c0x0000 (---------------) + I jerusalem - 0x0035ff07, // n0x14bc c0x0000 (---------------) + I jewelry - 0x00391606, // n0x14bd c0x0000 (---------------) + I jewish - 0x00391609, // n0x14be c0x0000 (---------------) + I jewishart - 0x00399c83, // n0x14bf c0x0000 (---------------) + I jfk - 0x00279f0a, // n0x14c0 c0x0000 (---------------) + I journalism - 0x0038d8c7, // n0x14c1 c0x0000 (---------------) + I judaica - 0x0027628b, // n0x14c2 c0x0000 (---------------) + I judygarland - 0x0037894a, // n0x14c3 c0x0000 (---------------) + I juedisches - 0x00234e04, // n0x14c4 c0x0000 (---------------) + I juif - 0x00353686, // n0x14c5 c0x0000 (---------------) + I karate - 0x0027f0c9, // n0x14c6 c0x0000 (---------------) + I karikatur - 0x0028c604, // n0x14c7 c0x0000 (---------------) + I kids - 0x003a3b4a, // n0x14c8 c0x0000 (---------------) + I koebenhavn - 0x00225e45, // n0x14c9 c0x0000 (---------------) + I koeln - 0x002b7985, // n0x14ca c0x0000 (---------------) + I kunst - 0x002b798d, // n0x14cb c0x0000 (---------------) + I kunstsammlung - 0x002b7cce, // n0x14cc c0x0000 (---------------) + I 
kunstunddesign - 0x0031c685, // n0x14cd c0x0000 (---------------) + I labor - 0x003872c6, // n0x14ce c0x0000 (---------------) + I labour - 0x00246cc7, // n0x14cf c0x0000 (---------------) + I lajolla - 0x002cdb0a, // n0x14d0 c0x0000 (---------------) + I lancashire - 0x00326186, // n0x14d1 c0x0000 (---------------) + I landes - 0x0035acc4, // n0x14d2 c0x0000 (---------------) + I lans - 0x002da3c7, // n0x14d3 c0x0000 (---------------) + I larsson - 0x0021188b, // n0x14d4 c0x0000 (---------------) + I lewismiller - 0x00356547, // n0x14d5 c0x0000 (---------------) + I lincoln - 0x00204e44, // n0x14d6 c0x0000 (---------------) + I linz - 0x002df106, // n0x14d7 c0x0000 (---------------) + I living - 0x002df10d, // n0x14d8 c0x0000 (---------------) + I livinghistory - 0x00321d0c, // n0x14d9 c0x0000 (---------------) + I localhistory - 0x0030d246, // n0x14da c0x0000 (---------------) + I london - 0x0030afca, // n0x14db c0x0000 (---------------) + I losangeles - 0x0022d3c6, // n0x14dc c0x0000 (---------------) + I louvre - 0x002994c8, // n0x14dd c0x0000 (---------------) + I loyalist - 0x002e8387, // n0x14de c0x0000 (---------------) + I lucerne - 0x0023944a, // n0x14df c0x0000 (---------------) + I luxembourg - 0x002400c6, // n0x14e0 c0x0000 (---------------) + I luzern - 0x00211303, // n0x14e1 c0x0000 (---------------) + I mad - 0x0031e246, // n0x14e2 c0x0000 (---------------) + I madrid - 0x00200c88, // n0x14e3 c0x0000 (---------------) + I mallorca - 0x0029a04a, // n0x14e4 c0x0000 (---------------) + I manchester - 0x0024fe87, // n0x14e5 c0x0000 (---------------) + I mansion - 0x0024fe88, // n0x14e6 c0x0000 (---------------) + I mansions - 0x00265f44, // n0x14e7 c0x0000 (---------------) + I manx - 0x00277b87, // n0x14e8 c0x0000 (---------------) + I marburg - 0x00215e08, // n0x14e9 c0x0000 (---------------) + I maritime - 0x002a2e48, // n0x14ea c0x0000 (---------------) + I maritimo - 0x002521c8, // n0x14eb c0x0000 (---------------) + I maryland - 0x0027fd4a, // 
n0x14ec c0x0000 (---------------) + I marylhurst - 0x00302485, // n0x14ed c0x0000 (---------------) + I media - 0x0023a6c7, // n0x14ee c0x0000 (---------------) + I medical - 0x00255813, // n0x14ef c0x0000 (---------------) + I medizinhistorisches - 0x00257906, // n0x14f0 c0x0000 (---------------) + I meeres - 0x0027c8c8, // n0x14f1 c0x0000 (---------------) + I memorial - 0x00222489, // n0x14f2 c0x0000 (---------------) + I mesaverde - 0x002155c8, // n0x14f3 c0x0000 (---------------) + I michigan - 0x002165cb, // n0x14f4 c0x0000 (---------------) + I midatlantic - 0x002ba588, // n0x14f5 c0x0000 (---------------) + I military - 0x002119c4, // n0x14f6 c0x0000 (---------------) + I mill - 0x0030ca46, // n0x14f7 c0x0000 (---------------) + I miners - 0x003a4fc6, // n0x14f8 c0x0000 (---------------) + I mining - 0x002fd949, // n0x14f9 c0x0000 (---------------) + I minnesota - 0x002c1a07, // n0x14fa c0x0000 (---------------) + I missile - 0x002586c8, // n0x14fb c0x0000 (---------------) + I missoula - 0x003a0b86, // n0x14fc c0x0000 (---------------) + I modern - 0x0022e144, // n0x14fd c0x0000 (---------------) + I moma - 0x002c9605, // n0x14fe c0x0000 (---------------) + I money - 0x002c4448, // n0x14ff c0x0000 (---------------) + I monmouth - 0x002c4b8a, // n0x1500 c0x0000 (---------------) + I monticello - 0x002c4e48, // n0x1501 c0x0000 (---------------) + I montreal - 0x002c9d46, // n0x1502 c0x0000 (---------------) + I moscow - 0x0029a74a, // n0x1503 c0x0000 (---------------) + I motorcycle - 0x002e8fc8, // n0x1504 c0x0000 (---------------) + I muenchen - 0x002cc108, // n0x1505 c0x0000 (---------------) + I muenster - 0x002cd488, // n0x1506 c0x0000 (---------------) + I mulhouse - 0x002ce3c6, // n0x1507 c0x0000 (---------------) + I muncie - 0x002d1306, // n0x1508 c0x0000 (---------------) + I museet - 0x0031118c, // n0x1509 c0x0000 (---------------) + I museumcenter - 0x002d1810, // n0x150a c0x0000 (---------------) + I museumvereniging - 0x00280605, // n0x150b 
c0x0000 (---------------) + I music - 0x00320648, // n0x150c c0x0000 (---------------) + I national - 0x00320650, // n0x150d c0x0000 (---------------) + I nationalfirearms - 0x00380250, // n0x150e c0x0000 (---------------) + I nationalheritage - 0x002707ce, // n0x150f c0x0000 (---------------) + I nativeamerican - 0x00310e0e, // n0x1510 c0x0000 (---------------) + I naturalhistory - 0x00310e14, // n0x1511 c0x0000 (---------------) + I naturalhistorymuseum - 0x0039b1cf, // n0x1512 c0x0000 (---------------) + I naturalsciences - 0x0039b586, // n0x1513 c0x0000 (---------------) + I nature - 0x002e3cd1, // n0x1514 c0x0000 (---------------) + I naturhistorisches - 0x0030f453, // n0x1515 c0x0000 (---------------) + I natuurwetenschappen - 0x0030f8c8, // n0x1516 c0x0000 (---------------) + I naumburg - 0x00227685, // n0x1517 c0x0000 (---------------) + I naval - 0x0024c508, // n0x1518 c0x0000 (---------------) + I nebraska - 0x002c4045, // n0x1519 c0x0000 (---------------) + I neues - 0x002296cc, // n0x151a c0x0000 (---------------) + I newhampshire - 0x003673c9, // n0x151b c0x0000 (---------------) + I newjersey - 0x00231409, // n0x151c c0x0000 (---------------) + I newmexico - 0x00245287, // n0x151d c0x0000 (---------------) + I newport - 0x00222089, // n0x151e c0x0000 (---------------) + I newspaper - 0x00313107, // n0x151f c0x0000 (---------------) + I newyork - 0x0028ef46, // n0x1520 c0x0000 (---------------) + I niepce - 0x00355cc7, // n0x1521 c0x0000 (---------------) + I norfolk - 0x00233805, // n0x1522 c0x0000 (---------------) + I north - 0x002dbb03, // n0x1523 c0x0000 (---------------) + I nrw - 0x003145c9, // n0x1524 c0x0000 (---------------) + I nuernberg - 0x00378e89, // n0x1525 c0x0000 (---------------) + I nuremberg - 0x0036c103, // n0x1526 c0x0000 (---------------) + I nyc - 0x0039f944, // n0x1527 c0x0000 (---------------) + I nyny - 0x0030ce8d, // n0x1528 c0x0000 (---------------) + I oceanographic - 0x00398b0f, // n0x1529 c0x0000 (---------------) + I 
oceanographique - 0x002fc485, // n0x152a c0x0000 (---------------) + I omaha - 0x0031e6c6, // n0x152b c0x0000 (---------------) + I online - 0x00200887, // n0x152c c0x0000 (---------------) + I ontario - 0x00341d87, // n0x152d c0x0000 (---------------) + I openair - 0x00289886, // n0x152e c0x0000 (---------------) + I oregon - 0x0028988b, // n0x152f c0x0000 (---------------) + I oregontrail - 0x002a33c5, // n0x1530 c0x0000 (---------------) + I otago - 0x0039da86, // n0x1531 c0x0000 (---------------) + I oxford - 0x00390047, // n0x1532 c0x0000 (---------------) + I pacific - 0x0026cc49, // n0x1533 c0x0000 (---------------) + I paderborn - 0x00328706, // n0x1534 c0x0000 (---------------) + I palace - 0x00209e45, // n0x1535 c0x0000 (---------------) + I paleo - 0x0030538b, // n0x1536 c0x0000 (---------------) + I palmsprings - 0x0024b286, // n0x1537 c0x0000 (---------------) + I panama - 0x0026a205, // n0x1538 c0x0000 (---------------) + I paris - 0x002b7408, // n0x1539 c0x0000 (---------------) + I pasadena - 0x0034e788, // n0x153a c0x0000 (---------------) + I pharmacy - 0x002d614c, // n0x153b c0x0000 (---------------) + I philadelphia - 0x002d6150, // n0x153c c0x0000 (---------------) + I philadelphiaarea - 0x002d6809, // n0x153d c0x0000 (---------------) + I philately - 0x002d6c47, // n0x153e c0x0000 (---------------) + I phoenix - 0x002d70cb, // n0x153f c0x0000 (---------------) + I photography - 0x002d8446, // n0x1540 c0x0000 (---------------) + I pilots - 0x002d90ca, // n0x1541 c0x0000 (---------------) + I pittsburgh - 0x002d9b8b, // n0x1542 c0x0000 (---------------) + I planetarium - 0x002d9f8a, // n0x1543 c0x0000 (---------------) + I plantation - 0x002da206, // n0x1544 c0x0000 (---------------) + I plants - 0x002db085, // n0x1545 c0x0000 (---------------) + I plaza - 0x00312546, // n0x1546 c0x0000 (---------------) + I portal - 0x00278108, // n0x1547 c0x0000 (---------------) + I portland - 0x0024534a, // n0x1548 c0x0000 (---------------) + I portlligat - 
0x0035e79c, // n0x1549 c0x0000 (---------------) + I posts-and-telecommunications - 0x002e148c, // n0x154a c0x0000 (---------------) + I preservation - 0x002e1788, // n0x154b c0x0000 (---------------) + I presidio - 0x002470c5, // n0x154c c0x0000 (---------------) + I press - 0x002e5347, // n0x154d c0x0000 (---------------) + I project - 0x0029e386, // n0x154e c0x0000 (---------------) + I public - 0x00389ec5, // n0x154f c0x0000 (---------------) + I pubol - 0x0021b6c6, // n0x1550 c0x0000 (---------------) + I quebec - 0x00289a48, // n0x1551 c0x0000 (---------------) + I railroad - 0x002b5a47, // n0x1552 c0x0000 (---------------) + I railway - 0x0029e648, // n0x1553 c0x0000 (---------------) + I research - 0x002a684a, // n0x1554 c0x0000 (---------------) + I resistance - 0x0030738c, // n0x1555 c0x0000 (---------------) + I riodejaneiro - 0x00307609, // n0x1556 c0x0000 (---------------) + I rochester - 0x0039dd87, // n0x1557 c0x0000 (---------------) + I rockart - 0x0022a1c4, // n0x1558 c0x0000 (---------------) + I roma - 0x00251746, // n0x1559 c0x0000 (---------------) + I russia - 0x0036b18a, // n0x155a c0x0000 (---------------) + I saintlouis - 0x002801c5, // n0x155b c0x0000 (---------------) + I salem - 0x00327e0c, // n0x155c c0x0000 (---------------) + I salvadordali - 0x00347588, // n0x155d c0x0000 (---------------) + I salzburg - 0x0023cfc8, // n0x155e c0x0000 (---------------) + I sandiego - 0x0038b04c, // n0x155f c0x0000 (---------------) + I sanfrancisco - 0x0020dbcc, // n0x1560 c0x0000 (---------------) + I santabarbara - 0x0020df49, // n0x1561 c0x0000 (---------------) + I santacruz - 0x0020e187, // n0x1562 c0x0000 (---------------) + I santafe - 0x0023ec4c, // n0x1563 c0x0000 (---------------) + I saskatchewan - 0x002db744, // n0x1564 c0x0000 (---------------) + I satx - 0x0037500a, // n0x1565 c0x0000 (---------------) + I savannahga - 0x0038c24c, // n0x1566 c0x0000 (---------------) + I schlesisches - 0x0026dacb, // n0x1567 c0x0000 (---------------) + 
I schoenbrunn - 0x0039c14b, // n0x1568 c0x0000 (---------------) + I schokoladen - 0x0022ec46, // n0x1569 c0x0000 (---------------) + I school - 0x00236007, // n0x156a c0x0000 (---------------) + I schweiz - 0x00236947, // n0x156b c0x0000 (---------------) + I science - 0x0023694f, // n0x156c c0x0000 (---------------) + I science-fiction - 0x002ee591, // n0x156d c0x0000 (---------------) + I scienceandhistory - 0x003a3612, // n0x156e c0x0000 (---------------) + I scienceandindustry - 0x0023858d, // n0x156f c0x0000 (---------------) + I sciencecenter - 0x0023858e, // n0x1570 c0x0000 (---------------) + I sciencecenters - 0x002388ce, // n0x1571 c0x0000 (---------------) + I sciencehistory - 0x0039b388, // n0x1572 c0x0000 (---------------) + I sciences - 0x0039b392, // n0x1573 c0x0000 (---------------) + I sciencesnaturelles - 0x0038b288, // n0x1574 c0x0000 (---------------) + I scotland - 0x002faf47, // n0x1575 c0x0000 (---------------) + I seaport - 0x0024d8ca, // n0x1576 c0x0000 (---------------) + I settlement - 0x0020f788, // n0x1577 c0x0000 (---------------) + I settlers - 0x00255c85, // n0x1578 c0x0000 (---------------) + I shell - 0x002ecd4a, // n0x1579 c0x0000 (---------------) + I sherbrooke - 0x0021e887, // n0x157a c0x0000 (---------------) + I sibenik - 0x00357404, // n0x157b c0x0000 (---------------) + I silk - 0x002302c3, // n0x157c c0x0000 (---------------) + I ski - 0x00296805, // n0x157d c0x0000 (---------------) + I skole - 0x002b1dc7, // n0x157e c0x0000 (---------------) + I society - 0x002e1207, // n0x157f c0x0000 (---------------) + I sologne - 0x002b6e8e, // n0x1580 c0x0000 (---------------) + I soundandvision - 0x00301c0d, // n0x1581 c0x0000 (---------------) + I southcarolina - 0x00305a09, // n0x1582 c0x0000 (---------------) + I southwest - 0x00206605, // n0x1583 c0x0000 (---------------) + I space - 0x00334903, // n0x1584 c0x0000 (---------------) + I spy - 0x0038c506, // n0x1585 c0x0000 (---------------) + I square - 0x00363505, // n0x1586 
c0x0000 (---------------) + I stadt - 0x00279c48, // n0x1587 c0x0000 (---------------) + I stalbans - 0x00326bc9, // n0x1588 c0x0000 (---------------) + I starnberg - 0x002038c5, // n0x1589 c0x0000 (---------------) + I state - 0x00339c0f, // n0x158a c0x0000 (---------------) + I stateofdelaware - 0x002daec7, // n0x158b c0x0000 (---------------) + I station - 0x00365ec5, // n0x158c c0x0000 (---------------) + I steam - 0x002d4dca, // n0x158d c0x0000 (---------------) + I steiermark - 0x002d5386, // n0x158e c0x0000 (---------------) + I stjohn - 0x00299649, // n0x158f c0x0000 (---------------) + I stockholm - 0x0039080c, // n0x1590 c0x0000 (---------------) + I stpetersburg - 0x002ea249, // n0x1591 c0x0000 (---------------) + I stuttgart - 0x00202246, // n0x1592 c0x0000 (---------------) + I suisse - 0x0028620c, // n0x1593 c0x0000 (---------------) + I surgeonshall - 0x002eaa86, // n0x1594 c0x0000 (---------------) + I surrey - 0x002ed0c8, // n0x1595 c0x0000 (---------------) + I svizzera - 0x002ed2c6, // n0x1596 c0x0000 (---------------) + I sweden - 0x00337c86, // n0x1597 c0x0000 (---------------) + I sydney - 0x0025b504, // n0x1598 c0x0000 (---------------) + I tank - 0x0025afc3, // n0x1599 c0x0000 (---------------) + I tcm - 0x002ab20a, // n0x159a c0x0000 (---------------) + I technology - 0x0022a4d1, // n0x159b c0x0000 (---------------) + I telekommunikation - 0x002b97ca, // n0x159c c0x0000 (---------------) + I television - 0x0034e345, // n0x159d c0x0000 (---------------) + I texas - 0x00325747, // n0x159e c0x0000 (---------------) + I textile - 0x00256087, // n0x159f c0x0000 (---------------) + I theater - 0x00215f04, // n0x15a0 c0x0000 (---------------) + I time - 0x00215f0b, // n0x15a1 c0x0000 (---------------) + I timekeeping - 0x002042c8, // n0x15a2 c0x0000 (---------------) + I topology - 0x002b3ec6, // n0x15a3 c0x0000 (---------------) + I torino - 0x002dbe85, // n0x15a4 c0x0000 (---------------) + I touch - 0x002d7904, // n0x15a5 c0x0000 
(---------------) + I town - 0x00294349, // n0x15a6 c0x0000 (---------------) + I transport - 0x0038e3c4, // n0x15a7 c0x0000 (---------------) + I tree - 0x00342247, // n0x15a8 c0x0000 (---------------) + I trolley - 0x0032cec5, // n0x15a9 c0x0000 (---------------) + I trust - 0x0032cec7, // n0x15aa c0x0000 (---------------) + I trustee - 0x0030e245, // n0x15ab c0x0000 (---------------) + I uhren - 0x0034cf03, // n0x15ac c0x0000 (---------------) + I ulm - 0x002fae08, // n0x15ad c0x0000 (---------------) + I undersea - 0x0030c34a, // n0x15ae c0x0000 (---------------) + I university - 0x00244a43, // n0x15af c0x0000 (---------------) + I usa - 0x002b898a, // n0x15b0 c0x0000 (---------------) + I usantiques - 0x0028ea06, // n0x15b1 c0x0000 (---------------) + I usarts - 0x0033998f, // n0x15b2 c0x0000 (---------------) + I uscountryestate - 0x0033fc09, // n0x15b3 c0x0000 (---------------) + I usculture - 0x0025bd10, // n0x15b4 c0x0000 (---------------) + I usdecorativearts - 0x00269a88, // n0x15b5 c0x0000 (---------------) + I usgarden - 0x002ca5c9, // n0x15b6 c0x0000 (---------------) + I ushistory - 0x0029c447, // n0x15b7 c0x0000 (---------------) + I ushuaia - 0x002df08f, // n0x15b8 c0x0000 (---------------) + I uslivinghistory - 0x002e9d84, // n0x15b9 c0x0000 (---------------) + I utah - 0x0023f004, // n0x15ba c0x0000 (---------------) + I uvic - 0x00215106, // n0x15bb c0x0000 (---------------) + I valley - 0x00237bc6, // n0x15bc c0x0000 (---------------) + I vantaa - 0x0031bf0a, // n0x15bd c0x0000 (---------------) + I versailles - 0x0033b586, // n0x15be c0x0000 (---------------) + I viking - 0x002eee07, // n0x15bf c0x0000 (---------------) + I village - 0x002f7748, // n0x15c0 c0x0000 (---------------) + I virginia - 0x002f7947, // n0x15c1 c0x0000 (---------------) + I virtual - 0x002f7b07, // n0x15c2 c0x0000 (---------------) + I virtuel - 0x00348aca, // n0x15c3 c0x0000 (---------------) + I vlaanderen - 0x002fac4b, // n0x15c4 c0x0000 (---------------) + I 
volkenkunde - 0x0030bfc5, // n0x15c5 c0x0000 (---------------) + I wales - 0x003a3248, // n0x15c6 c0x0000 (---------------) + I wallonie - 0x002316c3, // n0x15c7 c0x0000 (---------------) + I war - 0x0023f7cc, // n0x15c8 c0x0000 (---------------) + I washingtondc - 0x003735cf, // n0x15c9 c0x0000 (---------------) + I watch-and-clock - 0x002af88d, // n0x15ca c0x0000 (---------------) + I watchandclock - 0x0023d707, // n0x15cb c0x0000 (---------------) + I western - 0x00305b49, // n0x15cc c0x0000 (---------------) + I westfalen - 0x002dbb87, // n0x15cd c0x0000 (---------------) + I whaling - 0x003390c8, // n0x15ce c0x0000 (---------------) + I wildlife - 0x00233bcc, // n0x15cf c0x0000 (---------------) + I williamsburg - 0x00284648, // n0x15d0 c0x0000 (---------------) + I windmill - 0x0033ad48, // n0x15d1 c0x0000 (---------------) + I workshop - 0x00315d4e, // n0x15d2 c0x0000 (---------------) + I xn--9dbhblg6di - 0x00323954, // n0x15d3 c0x0000 (---------------) + I xn--comunicaes-v6a2o - 0x00323e64, // n0x15d4 c0x0000 (---------------) + I xn--correios-e-telecomunicaes-ghc29a - 0x0033f2ca, // n0x15d5 c0x0000 (---------------) + I xn--h1aegh - 0x00357d0b, // n0x15d6 c0x0000 (---------------) + I xn--lns-qla - 0x003131c4, // n0x15d7 c0x0000 (---------------) + I york - 0x003131c9, // n0x15d8 c0x0000 (---------------) + I yorkshire - 0x002f21c8, // n0x15d9 c0x0000 (---------------) + I yosemite - 0x00249385, // n0x15da c0x0000 (---------------) + I youth - 0x00313f0a, // n0x15db c0x0000 (---------------) + I zoological - 0x0038c6c7, // n0x15dc c0x0000 (---------------) + I zoology - 0x00232584, // n0x15dd c0x0000 (---------------) + I aero - 0x0032bd03, // n0x15de c0x0000 (---------------) + I biz - 0x00234803, // n0x15df c0x0000 (---------------) + I com - 0x0023d404, // n0x15e0 c0x0000 (---------------) + I coop - 0x0023a1c3, // n0x15e1 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x15e2 c0x0000 (---------------) + I gov - 0x00201804, // n0x15e3 c0x0000 
(---------------) + I info - 0x002014c3, // n0x15e4 c0x0000 (---------------) + I int - 0x002119c3, // n0x15e5 c0x0000 (---------------) + I mil - 0x002d1806, // n0x15e6 c0x0000 (---------------) + I museum - 0x00207d04, // n0x15e7 c0x0000 (---------------) + I name - 0x002207c3, // n0x15e8 c0x0000 (---------------) + I net - 0x00225403, // n0x15e9 c0x0000 (---------------) + I org - 0x002210c3, // n0x15ea c0x0000 (---------------) + I pro - 0x00200342, // n0x15eb c0x0000 (---------------) + I ac - 0x0032bd03, // n0x15ec c0x0000 (---------------) + I biz - 0x0020a442, // n0x15ed c0x0000 (---------------) + I co - 0x00234803, // n0x15ee c0x0000 (---------------) + I com - 0x0023d404, // n0x15ef c0x0000 (---------------) + I coop - 0x0023a1c3, // n0x15f0 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x15f1 c0x0000 (---------------) + I gov - 0x002014c3, // n0x15f2 c0x0000 (---------------) + I int - 0x002d1806, // n0x15f3 c0x0000 (---------------) + I museum - 0x002207c3, // n0x15f4 c0x0000 (---------------) + I net - 0x00225403, // n0x15f5 c0x0000 (---------------) + I org - 0x000ff148, // n0x15f6 c0x0000 (---------------) + blogspot - 0x00234803, // n0x15f7 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x15f8 c0x0000 (---------------) + I edu - 0x00211543, // n0x15f9 c0x0000 (---------------) + I gob - 0x002207c3, // n0x15fa c0x0000 (---------------) + I net - 0x00225403, // n0x15fb c0x0000 (---------------) + I org - 0x000ff148, // n0x15fc c0x0000 (---------------) + blogspot - 0x00234803, // n0x15fd c0x0000 (---------------) + I com - 0x0023a1c3, // n0x15fe c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x15ff c0x0000 (---------------) + I gov - 0x002119c3, // n0x1600 c0x0000 (---------------) + I mil - 0x00207d04, // n0x1601 c0x0000 (---------------) + I name - 0x002207c3, // n0x1602 c0x0000 (---------------) + I net - 0x00225403, // n0x1603 c0x0000 (---------------) + I org - 0x0062b748, // n0x1604 c0x0001 (---------------) ! 
I teledata - 0x00200e02, // n0x1605 c0x0000 (---------------) + I ca - 0x0022c882, // n0x1606 c0x0000 (---------------) + I cc - 0x0020a442, // n0x1607 c0x0000 (---------------) + I co - 0x00234803, // n0x1608 c0x0000 (---------------) + I com - 0x002271c2, // n0x1609 c0x0000 (---------------) + I dr - 0x00201282, // n0x160a c0x0000 (---------------) + I in - 0x00201804, // n0x160b c0x0000 (---------------) + I info - 0x00209504, // n0x160c c0x0000 (---------------) + I mobi - 0x00218482, // n0x160d c0x0000 (---------------) + I mx - 0x00207d04, // n0x160e c0x0000 (---------------) + I name - 0x00200d82, // n0x160f c0x0000 (---------------) + I or - 0x00225403, // n0x1610 c0x0000 (---------------) + I org - 0x002210c3, // n0x1611 c0x0000 (---------------) + I pro - 0x0022ec46, // n0x1612 c0x0000 (---------------) + I school - 0x0020cd02, // n0x1613 c0x0000 (---------------) + I tv - 0x00202202, // n0x1614 c0x0000 (---------------) + I us - 0x00206402, // n0x1615 c0x0000 (---------------) + I ws - 0x37e223c3, // n0x1616 c0x00df (n0x1618-n0x1619) o I her - 0x38216a43, // n0x1617 c0x00e0 (n0x1619-n0x161a) o I his - 0x00056906, // n0x1618 c0x0000 (---------------) + forgot - 0x00056906, // n0x1619 c0x0000 (---------------) + forgot - 0x002ceb44, // n0x161a c0x0000 (---------------) + I asso - 0x0011dd0c, // n0x161b c0x0000 (---------------) + at-band-camp - 0x00080f0c, // n0x161c c0x0000 (---------------) + azure-mobile - 0x000c2b8d, // n0x161d c0x0000 (---------------) + azurewebsites - 0x0002d7c7, // n0x161e c0x0000 (---------------) + blogdns - 0x00020bc8, // n0x161f c0x0000 (---------------) + broke-it - 0x001987ca, // n0x1620 c0x0000 (---------------) + buyshouses - 0x38e50345, // n0x1621 c0x00e3 (n0x1650-n0x1651) o I cdn77 - 0x00050349, // n0x1622 c0x0000 (---------------) + cdn77-ssl - 0x00108dc8, // n0x1623 c0x0000 (---------------) + cloudapp - 0x0019ee8a, // n0x1624 c0x0000 (---------------) + cloudfront - 0x0002e64e, // n0x1625 c0x0000 (---------------) + 
cloudfunctions - 0x001456c8, // n0x1626 c0x0000 (---------------) + dnsalias - 0x0007bbc7, // n0x1627 c0x0000 (---------------) + dnsdojo - 0x0000cb87, // n0x1628 c0x0000 (---------------) + does-it - 0x00168e09, // n0x1629 c0x0000 (---------------) + dontexist - 0x0018bfc7, // n0x162a c0x0000 (---------------) + dsmynas - 0x00197988, // n0x162b c0x0000 (---------------) + dynalias - 0x000e3209, // n0x162c c0x0000 (---------------) + dynathome - 0x000a8ccd, // n0x162d c0x0000 (---------------) + endofinternet - 0x0018be48, // n0x162e c0x0000 (---------------) + familyds - 0x39239c86, // n0x162f c0x00e4 (n0x1651-n0x1653) o I fastly - 0x000610c7, // n0x1630 c0x0000 (---------------) + from-az - 0x00061c47, // n0x1631 c0x0000 (---------------) + from-co - 0x00065a07, // n0x1632 c0x0000 (---------------) + from-la - 0x0006c147, // n0x1633 c0x0000 (---------------) + from-ny - 0x0000b902, // n0x1634 c0x0000 (---------------) + gb - 0x00122447, // n0x1635 c0x0000 (---------------) + gets-it - 0x00062a0c, // n0x1636 c0x0000 (---------------) + ham-radio-op - 0x00147c07, // n0x1637 c0x0000 (---------------) + homeftp - 0x000a4e46, // n0x1638 c0x0000 (---------------) + homeip - 0x000a54c9, // n0x1639 c0x0000 (---------------) + homelinux - 0x000a6ac8, // n0x163a c0x0000 (---------------) + homeunix - 0x0001dc42, // n0x163b c0x0000 (---------------) + hu - 0x00001282, // n0x163c c0x0000 (---------------) + in - 0x00003d4b, // n0x163d c0x0000 (---------------) + in-the-band - 0x0000fa89, // n0x163e c0x0000 (---------------) + is-a-chef - 0x000487c9, // n0x163f c0x0000 (---------------) + is-a-geek - 0x00085d48, // n0x1640 c0x0000 (---------------) + isa-geek - 0x000af2c2, // n0x1641 c0x0000 (---------------) + jp - 0x00151d49, // n0x1642 c0x0000 (---------------) + kicks-ass - 0x0002194d, // n0x1643 c0x0000 (---------------) + office-on-the - 0x000dde07, // n0x1644 c0x0000 (---------------) + podzone - 0x00112b48, // n0x1645 c0x0000 (---------------) + rackmaze - 0x00042a0d, 
// n0x1646 c0x0000 (---------------) + scrapper-site - 0x00002342, // n0x1647 c0x0000 (---------------) + se - 0x00067646, // n0x1648 c0x0000 (---------------) + selfip - 0x00090988, // n0x1649 c0x0000 (---------------) + sells-it - 0x000cd608, // n0x164a c0x0000 (---------------) + servebbs - 0x000747c8, // n0x164b c0x0000 (---------------) + serveftp - 0x00052548, // n0x164c c0x0000 (---------------) + thruhere - 0x00001ac2, // n0x164d c0x0000 (---------------) + uk - 0x00111746, // n0x164e c0x0000 (---------------) + webhop - 0x00000182, // n0x164f c0x0000 (---------------) + za - 0x000006c1, // n0x1650 c0x0000 (---------------) + r - 0x396e2284, // n0x1651 c0x00e5 (n0x1653-n0x1655) o I prod - 0x39a504c3, // n0x1652 c0x00e6 (n0x1655-n0x1658) o I ssl - 0x000001c1, // n0x1653 c0x0000 (---------------) + a - 0x0000bf46, // n0x1654 c0x0000 (---------------) + global - 0x000001c1, // n0x1655 c0x0000 (---------------) + a - 0x00000001, // n0x1656 c0x0000 (---------------) + b - 0x0000bf46, // n0x1657 c0x0000 (---------------) + global - 0x0024a4c4, // n0x1658 c0x0000 (---------------) + I arts - 0x00234803, // n0x1659 c0x0000 (---------------) + I com - 0x0024bac4, // n0x165a c0x0000 (---------------) + I firm - 0x00201804, // n0x165b c0x0000 (---------------) + I info - 0x002207c3, // n0x165c c0x0000 (---------------) + I net - 0x00222345, // n0x165d c0x0000 (---------------) + I other - 0x00221183, // n0x165e c0x0000 (---------------) + I per - 0x00229943, // n0x165f c0x0000 (---------------) + I rec - 0x00363685, // n0x1660 c0x0000 (---------------) + I store - 0x00221cc3, // n0x1661 c0x0000 (---------------) + I web - 0x3a634803, // n0x1662 c0x00e9 (n0x166b-n0x166c) + I com - 0x0023a1c3, // n0x1663 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1664 c0x0000 (---------------) + I gov - 0x002119c3, // n0x1665 c0x0000 (---------------) + I mil - 0x00209504, // n0x1666 c0x0000 (---------------) + I mobi - 0x00207d04, // n0x1667 c0x0000 (---------------) + I 
name - 0x002207c3, // n0x1668 c0x0000 (---------------) + I net - 0x00225403, // n0x1669 c0x0000 (---------------) + I org - 0x00217f43, // n0x166a c0x0000 (---------------) + I sch - 0x000ff148, // n0x166b c0x0000 (---------------) + blogspot - 0x00200342, // n0x166c c0x0000 (---------------) + I ac - 0x0032bd03, // n0x166d c0x0000 (---------------) + I biz - 0x0020a442, // n0x166e c0x0000 (---------------) + I co - 0x00234803, // n0x166f c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1670 c0x0000 (---------------) + I edu - 0x00211543, // n0x1671 c0x0000 (---------------) + I gob - 0x00201282, // n0x1672 c0x0000 (---------------) + I in - 0x00201804, // n0x1673 c0x0000 (---------------) + I info - 0x002014c3, // n0x1674 c0x0000 (---------------) + I int - 0x002119c3, // n0x1675 c0x0000 (---------------) + I mil - 0x002207c3, // n0x1676 c0x0000 (---------------) + I net - 0x00201343, // n0x1677 c0x0000 (---------------) + I nom - 0x00225403, // n0x1678 c0x0000 (---------------) + I org - 0x00221cc3, // n0x1679 c0x0000 (---------------) + I web - 0x000ff148, // n0x167a c0x0000 (---------------) + blogspot - 0x00364982, // n0x167b c0x0000 (---------------) + I bv - 0x0000a442, // n0x167c c0x0000 (---------------) + co - 0x3b6231c2, // n0x167d c0x00ed (n0x1953-n0x1954) + I aa - 0x0038da88, // n0x167e c0x0000 (---------------) + I aarborte - 0x00223646, // n0x167f c0x0000 (---------------) + I aejrie - 0x002be006, // n0x1680 c0x0000 (---------------) + I afjord - 0x00222fc7, // n0x1681 c0x0000 (---------------) + I agdenes - 0x3ba034c2, // n0x1682 c0x00ee (n0x1954-n0x1955) + I ah - 0x3bf2de48, // n0x1683 c0x00ef (n0x1955-n0x1956) o I akershus - 0x0038b74a, // n0x1684 c0x0000 (---------------) + I aknoluokta - 0x00260b48, // n0x1685 c0x0000 (---------------) + I akrehamn - 0x00200cc2, // n0x1686 c0x0000 (---------------) + I al - 0x0038d709, // n0x1687 c0x0000 (---------------) + I alaheadju - 0x0030c007, // n0x1688 c0x0000 (---------------) + I alesund - 
0x0021a886, // n0x1689 c0x0000 (---------------) + I algard - 0x00207889, // n0x168a c0x0000 (---------------) + I alstahaug - 0x0023a804, // n0x168b c0x0000 (---------------) + I alta - 0x002bea86, // n0x168c c0x0000 (---------------) + I alvdal - 0x002be404, // n0x168d c0x0000 (---------------) + I amli - 0x00277284, // n0x168e c0x0000 (---------------) + I amot - 0x00258209, // n0x168f c0x0000 (---------------) + I andasuolo - 0x002b9086, // n0x1690 c0x0000 (---------------) + I andebu - 0x0022a9c5, // n0x1691 c0x0000 (---------------) + I andoy - 0x00263fc5, // n0x1692 c0x0000 (---------------) + I ardal - 0x00235c07, // n0x1693 c0x0000 (---------------) + I aremark - 0x002baa07, // n0x1694 c0x0000 (---------------) + I arendal - 0x003103c4, // n0x1695 c0x0000 (---------------) + I arna - 0x00223206, // n0x1696 c0x0000 (---------------) + I aseral - 0x002e4c85, // n0x1697 c0x0000 (---------------) + I asker - 0x00268dc5, // n0x1698 c0x0000 (---------------) + I askim - 0x002f36c5, // n0x1699 c0x0000 (---------------) + I askoy - 0x003851c7, // n0x169a c0x0000 (---------------) + I askvoll - 0x0035f345, // n0x169b c0x0000 (---------------) + I asnes - 0x00307c49, // n0x169c c0x0000 (---------------) + I audnedaln - 0x0025b7c5, // n0x169d c0x0000 (---------------) + I aukra - 0x002fb344, // n0x169e c0x0000 (---------------) + I aure - 0x003260c7, // n0x169f c0x0000 (---------------) + I aurland - 0x0026ef0e, // n0x16a0 c0x0000 (---------------) + I aurskog-holand - 0x0039ae09, // n0x16a1 c0x0000 (---------------) + I austevoll - 0x0030dec9, // n0x16a2 c0x0000 (---------------) + I austrheim - 0x0032ac06, // n0x16a3 c0x0000 (---------------) + I averoy - 0x002ebf88, // n0x16a4 c0x0000 (---------------) + I badaddja - 0x002aed0b, // n0x16a5 c0x0000 (---------------) + I bahcavuotna - 0x002d3fcc, // n0x16a6 c0x0000 (---------------) + I bahccavuotna - 0x00265c86, // n0x16a7 c0x0000 (---------------) + I baidar - 0x0036f207, // n0x16a8 c0x0000 (---------------) + I 
bajddar - 0x00226f85, // n0x16a9 c0x0000 (---------------) + I balat - 0x0022f30a, // n0x16aa c0x0000 (---------------) + I balestrand - 0x00309b49, // n0x16ab c0x0000 (---------------) + I ballangen - 0x0025b149, // n0x16ac c0x0000 (---------------) + I balsfjord - 0x00399e86, // n0x16ad c0x0000 (---------------) + I bamble - 0x002ec505, // n0x16ae c0x0000 (---------------) + I bardu - 0x0027f805, // n0x16af c0x0000 (---------------) + I barum - 0x00354689, // n0x16b0 c0x0000 (---------------) + I batsfjord - 0x002f384b, // n0x16b1 c0x0000 (---------------) + I bearalvahki - 0x0027b886, // n0x16b2 c0x0000 (---------------) + I beardu - 0x00332446, // n0x16b3 c0x0000 (---------------) + I beiarn - 0x0020b844, // n0x16b4 c0x0000 (---------------) + I berg - 0x0028cb86, // n0x16b5 c0x0000 (---------------) + I bergen - 0x00253c48, // n0x16b6 c0x0000 (---------------) + I berlevag - 0x00200b06, // n0x16b7 c0x0000 (---------------) + I bievat - 0x0036a986, // n0x16b8 c0x0000 (---------------) + I bindal - 0x00207f48, // n0x16b9 c0x0000 (---------------) + I birkenes - 0x00208fc7, // n0x16ba c0x0000 (---------------) + I bjarkoy - 0x002092c9, // n0x16bb c0x0000 (---------------) + I bjerkreim - 0x0020a985, // n0x16bc c0x0000 (---------------) + I bjugn - 0x000ff148, // n0x16bd c0x0000 (---------------) + blogspot - 0x0020cb04, // n0x16be c0x0000 (---------------) + I bodo - 0x002f9a44, // n0x16bf c0x0000 (---------------) + I bokn - 0x00212145, // n0x16c0 c0x0000 (---------------) + I bomlo - 0x00388589, // n0x16c1 c0x0000 (---------------) + I bremanger - 0x00221507, // n0x16c2 c0x0000 (---------------) + I bronnoy - 0x0022150b, // n0x16c3 c0x0000 (---------------) + I bronnoysund - 0x002228ca, // n0x16c4 c0x0000 (---------------) + I brumunddal - 0x00226c85, // n0x16c5 c0x0000 (---------------) + I bryne - 0x3c20b102, // n0x16c6 c0x00f0 (n0x1956-n0x1957) + I bu - 0x00378807, // n0x16c7 c0x0000 (---------------) + I budejju - 0x3c629488, // n0x16c8 c0x00f1 
(n0x1957-n0x1958) o I buskerud - 0x002ba247, // n0x16c9 c0x0000 (---------------) + I bygland - 0x002b93c5, // n0x16ca c0x0000 (---------------) + I bykle - 0x00321b0a, // n0x16cb c0x0000 (---------------) + I cahcesuolo - 0x0000a442, // n0x16cc c0x0000 (---------------) + co - 0x002b120b, // n0x16cd c0x0000 (---------------) + I davvenjarga - 0x00213cca, // n0x16ce c0x0000 (---------------) + I davvesiida - 0x0039dbc6, // n0x16cf c0x0000 (---------------) + I deatnu - 0x002730c3, // n0x16d0 c0x0000 (---------------) + I dep - 0x00231d8d, // n0x16d1 c0x0000 (---------------) + I dielddanuorri - 0x00306a8c, // n0x16d2 c0x0000 (---------------) + I divtasvuodna - 0x0030a58d, // n0x16d3 c0x0000 (---------------) + I divttasvuotna - 0x0035c505, // n0x16d4 c0x0000 (---------------) + I donna - 0x00262405, // n0x16d5 c0x0000 (---------------) + I dovre - 0x00314447, // n0x16d6 c0x0000 (---------------) + I drammen - 0x00354889, // n0x16d7 c0x0000 (---------------) + I drangedal - 0x0038b646, // n0x16d8 c0x0000 (---------------) + I drobak - 0x0033b405, // n0x16d9 c0x0000 (---------------) + I dyroy - 0x0022c9c8, // n0x16da c0x0000 (---------------) + I egersund - 0x0024c803, // n0x16db c0x0000 (---------------) + I eid - 0x0032d6c8, // n0x16dc c0x0000 (---------------) + I eidfjord - 0x0028ca88, // n0x16dd c0x0000 (---------------) + I eidsberg - 0x002bf987, // n0x16de c0x0000 (---------------) + I eidskog - 0x0024c808, // n0x16df c0x0000 (---------------) + I eidsvoll - 0x002005c9, // n0x16e0 c0x0000 (---------------) + I eigersund - 0x0023bf87, // n0x16e1 c0x0000 (---------------) + I elverum - 0x0026fb07, // n0x16e2 c0x0000 (---------------) + I enebakk - 0x002786c8, // n0x16e3 c0x0000 (---------------) + I engerdal - 0x002f9744, // n0x16e4 c0x0000 (---------------) + I etne - 0x002f9747, // n0x16e5 c0x0000 (---------------) + I etnedal - 0x00246748, // n0x16e6 c0x0000 (---------------) + I evenassi - 0x00203006, // n0x16e7 c0x0000 (---------------) + I evenes - 
0x0038accf, // n0x16e8 c0x0000 (---------------) + I evje-og-hornnes - 0x0020fc87, // n0x16e9 c0x0000 (---------------) + I farsund - 0x00245a46, // n0x16ea c0x0000 (---------------) + I fauske - 0x0024bd85, // n0x16eb c0x0000 (---------------) + I fedje - 0x00345f03, // n0x16ec c0x0000 (---------------) + I fet - 0x00345f07, // n0x16ed c0x0000 (---------------) + I fetsund - 0x002353c3, // n0x16ee c0x0000 (---------------) + I fhs - 0x0024ae86, // n0x16ef c0x0000 (---------------) + I finnoy - 0x0024cfc6, // n0x16f0 c0x0000 (---------------) + I fitjar - 0x0024ecc6, // n0x16f1 c0x0000 (---------------) + I fjaler - 0x0028ffc5, // n0x16f2 c0x0000 (---------------) + I fjell - 0x00263943, // n0x16f3 c0x0000 (---------------) + I fla - 0x0037b948, // n0x16f4 c0x0000 (---------------) + I flakstad - 0x00318309, // n0x16f5 c0x0000 (---------------) + I flatanger - 0x00363e8b, // n0x16f6 c0x0000 (---------------) + I flekkefjord - 0x00373c88, // n0x16f7 c0x0000 (---------------) + I flesberg - 0x00250dc5, // n0x16f8 c0x0000 (---------------) + I flora - 0x002518c5, // n0x16f9 c0x0000 (---------------) + I floro - 0x3ca34ec2, // n0x16fa c0x00f2 (n0x1958-n0x1959) + I fm - 0x00355d89, // n0x16fb c0x0000 (---------------) + I folkebibl - 0x00254ac7, // n0x16fc c0x0000 (---------------) + I folldal - 0x0039db05, // n0x16fd c0x0000 (---------------) + I forde - 0x00258107, // n0x16fe c0x0000 (---------------) + I forsand - 0x00259d46, // n0x16ff c0x0000 (---------------) + I fosnes - 0x0035b4c5, // n0x1700 c0x0000 (---------------) + I frana - 0x0036334b, // n0x1701 c0x0000 (---------------) + I fredrikstad - 0x0025ac44, // n0x1702 c0x0000 (---------------) + I frei - 0x00260705, // n0x1703 c0x0000 (---------------) + I frogn - 0x00260847, // n0x1704 c0x0000 (---------------) + I froland - 0x00275846, // n0x1705 c0x0000 (---------------) + I frosta - 0x00275c85, // n0x1706 c0x0000 (---------------) + I froya - 0x00283347, // n0x1707 c0x0000 (---------------) + I fuoisku - 
0x002843c7, // n0x1708 c0x0000 (---------------) + I fuossko - 0x0028e9c4, // n0x1709 c0x0000 (---------------) + I fusa - 0x0028a94a, // n0x170a c0x0000 (---------------) + I fylkesbibl - 0x0028ae08, // n0x170b c0x0000 (---------------) + I fyresdal - 0x002d3349, // n0x170c c0x0000 (---------------) + I gaivuotna - 0x0021ec45, // n0x170d c0x0000 (---------------) + I galsa - 0x002b1446, // n0x170e c0x0000 (---------------) + I gamvik - 0x00253e0a, // n0x170f c0x0000 (---------------) + I gangaviika - 0x00263ec6, // n0x1710 c0x0000 (---------------) + I gaular - 0x00268b07, // n0x1711 c0x0000 (---------------) + I gausdal - 0x002d300d, // n0x1712 c0x0000 (---------------) + I giehtavuoatna - 0x0022c249, // n0x1713 c0x0000 (---------------) + I gildeskal - 0x00334505, // n0x1714 c0x0000 (---------------) + I giske - 0x00230e47, // n0x1715 c0x0000 (---------------) + I gjemnes - 0x0030e788, // n0x1716 c0x0000 (---------------) + I gjerdrum - 0x0030fa88, // n0x1717 c0x0000 (---------------) + I gjerstad - 0x00333947, // n0x1718 c0x0000 (---------------) + I gjesdal - 0x00228fc6, // n0x1719 c0x0000 (---------------) + I gjovik - 0x00210f47, // n0x171a c0x0000 (---------------) + I gloppen - 0x0024bcc3, // n0x171b c0x0000 (---------------) + I gol - 0x00336204, // n0x171c c0x0000 (---------------) + I gran - 0x00359505, // n0x171d c0x0000 (---------------) + I grane - 0x00380d07, // n0x171e c0x0000 (---------------) + I granvin - 0x00384bc9, // n0x171f c0x0000 (---------------) + I gratangen - 0x0021b888, // n0x1720 c0x0000 (---------------) + I grimstad - 0x00268a05, // n0x1721 c0x0000 (---------------) + I grong - 0x0033b6c4, // n0x1722 c0x0000 (---------------) + I grue - 0x00239685, // n0x1723 c0x0000 (---------------) + I gulen - 0x0034cc0d, // n0x1724 c0x0000 (---------------) + I guovdageaidnu - 0x002037c2, // n0x1725 c0x0000 (---------------) + I ha - 0x0036b806, // n0x1726 c0x0000 (---------------) + I habmer - 0x00267586, // n0x1727 c0x0000 (---------------) + 
I hadsel - 0x002a594a, // n0x1728 c0x0000 (---------------) + I hagebostad - 0x0035dd06, // n0x1729 c0x0000 (---------------) + I halden - 0x0036b0c5, // n0x172a c0x0000 (---------------) + I halsa - 0x0026f845, // n0x172b c0x0000 (---------------) + I hamar - 0x0026f847, // n0x172c c0x0000 (---------------) + I hamaroy - 0x0037250c, // n0x172d c0x0000 (---------------) + I hammarfeasta - 0x0027a34a, // n0x172e c0x0000 (---------------) + I hammerfest - 0x0028bc46, // n0x172f c0x0000 (---------------) + I hapmir - 0x002d1205, // n0x1730 c0x0000 (---------------) + I haram - 0x0028c9c6, // n0x1731 c0x0000 (---------------) + I hareid - 0x0028cd07, // n0x1732 c0x0000 (---------------) + I harstad - 0x0028e046, // n0x1733 c0x0000 (---------------) + I hasvik - 0x0028fecc, // n0x1734 c0x0000 (---------------) + I hattfjelldal - 0x002079c9, // n0x1735 c0x0000 (---------------) + I haugesund - 0x3ce37607, // n0x1736 c0x00f3 (n0x1959-n0x195c) o I hedmark - 0x00291405, // n0x1737 c0x0000 (---------------) + I hemne - 0x00291406, // n0x1738 c0x0000 (---------------) + I hemnes - 0x00291a48, // n0x1739 c0x0000 (---------------) + I hemsedal - 0x002b2b45, // n0x173a c0x0000 (---------------) + I herad - 0x002a4205, // n0x173b c0x0000 (---------------) + I hitra - 0x002a4448, // n0x173c c0x0000 (---------------) + I hjartdal - 0x002a464a, // n0x173d c0x0000 (---------------) + I hjelmeland - 0x3d253642, // n0x173e c0x00f4 (n0x195c-n0x195d) + I hl - 0x3d60da82, // n0x173f c0x00f5 (n0x195d-n0x195e) + I hm - 0x00374bc5, // n0x1740 c0x0000 (---------------) + I hobol - 0x002d9303, // n0x1741 c0x0000 (---------------) + I hof - 0x0033b248, // n0x1742 c0x0000 (---------------) + I hokksund - 0x0022ea03, // n0x1743 c0x0000 (---------------) + I hol - 0x002a48c4, // n0x1744 c0x0000 (---------------) + I hole - 0x0029978b, // n0x1745 c0x0000 (---------------) + I holmestrand - 0x002af0c8, // n0x1746 c0x0000 (---------------) + I holtalen - 0x002a75c8, // n0x1747 c0x0000 
(---------------) + I honefoss - 0x3db21789, // n0x1748 c0x00f6 (n0x195e-n0x195f) o I hordaland - 0x002a8749, // n0x1749 c0x0000 (---------------) + I hornindal - 0x002a8bc6, // n0x174a c0x0000 (---------------) + I horten - 0x002a9a48, // n0x174b c0x0000 (---------------) + I hoyanger - 0x002a9c49, // n0x174c c0x0000 (---------------) + I hoylandet - 0x002ab486, // n0x174d c0x0000 (---------------) + I hurdal - 0x002ab605, // n0x174e c0x0000 (---------------) + I hurum - 0x003626c6, // n0x174f c0x0000 (---------------) + I hvaler - 0x002abf09, // n0x1750 c0x0000 (---------------) + I hyllestad - 0x00227e07, // n0x1751 c0x0000 (---------------) + I ibestad - 0x00266906, // n0x1752 c0x0000 (---------------) + I idrett - 0x0037a007, // n0x1753 c0x0000 (---------------) + I inderoy - 0x0038b4c7, // n0x1754 c0x0000 (---------------) + I iveland - 0x00287b84, // n0x1755 c0x0000 (---------------) + I ivgu - 0x3de1d3c9, // n0x1756 c0x00f7 (n0x195f-n0x1960) + I jan-mayen - 0x002c8048, // n0x1757 c0x0000 (---------------) + I jessheim - 0x003555c8, // n0x1758 c0x0000 (---------------) + I jevnaker - 0x00233f07, // n0x1759 c0x0000 (---------------) + I jolster - 0x002c3606, // n0x175a c0x0000 (---------------) + I jondal - 0x002fc689, // n0x175b c0x0000 (---------------) + I jorpeland - 0x002bef47, // n0x175c c0x0000 (---------------) + I kafjord - 0x0024dc4a, // n0x175d c0x0000 (---------------) + I karasjohka - 0x002efc08, // n0x175e c0x0000 (---------------) + I karasjok - 0x00228407, // n0x175f c0x0000 (---------------) + I karlsoy - 0x002288c6, // n0x1760 c0x0000 (---------------) + I karmoy - 0x0026304a, // n0x1761 c0x0000 (---------------) + I kautokeino - 0x0027e448, // n0x1762 c0x0000 (---------------) + I kirkenes - 0x00267005, // n0x1763 c0x0000 (---------------) + I klabu - 0x0022df05, // n0x1764 c0x0000 (---------------) + I klepp - 0x002b38c7, // n0x1765 c0x0000 (---------------) + I kommune - 0x00320bc9, // n0x1766 c0x0000 (---------------) + I kongsberg - 
0x002cb04b, // n0x1767 c0x0000 (---------------) + I kongsvinger - 0x002d8808, // n0x1768 c0x0000 (---------------) + I kopervik - 0x0025b849, // n0x1769 c0x0000 (---------------) + I kraanghke - 0x0024ef47, // n0x176a c0x0000 (---------------) + I kragero - 0x002b220c, // n0x176b c0x0000 (---------------) + I kristiansand - 0x002b270c, // n0x176c c0x0000 (---------------) + I kristiansund - 0x002b2a0a, // n0x176d c0x0000 (---------------) + I krodsherad - 0x002b2c8c, // n0x176e c0x0000 (---------------) + I krokstadelva - 0x002bdf88, // n0x176f c0x0000 (---------------) + I kvafjord - 0x002be188, // n0x1770 c0x0000 (---------------) + I kvalsund - 0x002be384, // n0x1771 c0x0000 (---------------) + I kvam - 0x002bf109, // n0x1772 c0x0000 (---------------) + I kvanangen - 0x002bf349, // n0x1773 c0x0000 (---------------) + I kvinesdal - 0x002bf58a, // n0x1774 c0x0000 (---------------) + I kvinnherad - 0x002bf809, // n0x1775 c0x0000 (---------------) + I kviteseid - 0x002bfb47, // n0x1776 c0x0000 (---------------) + I kvitsoy - 0x003a54cc, // n0x1777 c0x0000 (---------------) + I laakesvuemie - 0x0033a186, // n0x1778 c0x0000 (---------------) + I lahppi - 0x0024a748, // n0x1779 c0x0000 (---------------) + I langevag - 0x00263f86, // n0x177a c0x0000 (---------------) + I lardal - 0x0037aa06, // n0x177b c0x0000 (---------------) + I larvik - 0x00334407, // n0x177c c0x0000 (---------------) + I lavagis - 0x0039b008, // n0x177d c0x0000 (---------------) + I lavangen - 0x0027124b, // n0x177e c0x0000 (---------------) + I leangaviika - 0x002ba107, // n0x177f c0x0000 (---------------) + I lebesby - 0x00257ec9, // n0x1780 c0x0000 (---------------) + I leikanger - 0x00281189, // n0x1781 c0x0000 (---------------) + I leirfjord - 0x0035b987, // n0x1782 c0x0000 (---------------) + I leirvik - 0x002be644, // n0x1783 c0x0000 (---------------) + I leka - 0x00325887, // n0x1784 c0x0000 (---------------) + I leksvik - 0x00328ac6, // n0x1785 c0x0000 (---------------) + I lenvik - 
0x00218106, // n0x1786 c0x0000 (---------------) + I lerdal - 0x0030b185, // n0x1787 c0x0000 (---------------) + I lesja - 0x002d1fc8, // n0x1788 c0x0000 (---------------) + I levanger - 0x002c3f44, // n0x1789 c0x0000 (---------------) + I lier - 0x002c3f46, // n0x178a c0x0000 (---------------) + I lierne - 0x0027a20b, // n0x178b c0x0000 (---------------) + I lillehammer - 0x0032b589, // n0x178c c0x0000 (---------------) + I lillesand - 0x0030dd06, // n0x178d c0x0000 (---------------) + I lindas - 0x00354a89, // n0x178e c0x0000 (---------------) + I lindesnes - 0x00385346, // n0x178f c0x0000 (---------------) + I loabat - 0x002583c8, // n0x1790 c0x0000 (---------------) + I lodingen - 0x002136c3, // n0x1791 c0x0000 (---------------) + I lom - 0x0038ff85, // n0x1792 c0x0000 (---------------) + I loppa - 0x00218249, // n0x1793 c0x0000 (---------------) + I lorenskog - 0x0021e305, // n0x1794 c0x0000 (---------------) + I loten - 0x002e2584, // n0x1795 c0x0000 (---------------) + I lund - 0x002743c6, // n0x1796 c0x0000 (---------------) + I lunner - 0x0022ed85, // n0x1797 c0x0000 (---------------) + I luroy - 0x002ddbc6, // n0x1798 c0x0000 (---------------) + I luster - 0x002fcc47, // n0x1799 c0x0000 (---------------) + I lyngdal - 0x003960c6, // n0x179a c0x0000 (---------------) + I lyngen - 0x00297f0b, // n0x179b c0x0000 (---------------) + I malatvuopmi - 0x002e7087, // n0x179c c0x0000 (---------------) + I malselv - 0x0020cfc6, // n0x179d c0x0000 (---------------) + I malvik - 0x0034d686, // n0x179e c0x0000 (---------------) + I mandal - 0x00235cc6, // n0x179f c0x0000 (---------------) + I marker - 0x00310389, // n0x17a0 c0x0000 (---------------) + I marnardal - 0x00218a4a, // n0x17a1 c0x0000 (---------------) + I masfjorden - 0x00332185, // n0x17a2 c0x0000 (---------------) + I masoy - 0x0021d84d, // n0x17a3 c0x0000 (---------------) + I matta-varjjat - 0x002a4746, // n0x17a4 c0x0000 (---------------) + I meland - 0x00210086, // n0x17a5 c0x0000 (---------------) + 
I meldal - 0x00286606, // n0x17a6 c0x0000 (---------------) + I melhus - 0x00299445, // n0x17a7 c0x0000 (---------------) + I meloy - 0x0032dd87, // n0x17a8 c0x0000 (---------------) + I meraker - 0x0029d047, // n0x17a9 c0x0000 (---------------) + I midsund - 0x002e804e, // n0x17aa c0x0000 (---------------) + I midtre-gauldal - 0x002119c3, // n0x17ab c0x0000 (---------------) + I mil - 0x002c35c9, // n0x17ac c0x0000 (---------------) + I mjondalen - 0x00232dc9, // n0x17ad c0x0000 (---------------) + I mo-i-rana - 0x0022f707, // n0x17ae c0x0000 (---------------) + I moareke - 0x00267c47, // n0x17af c0x0000 (---------------) + I modalen - 0x002a62c5, // n0x17b0 c0x0000 (---------------) + I modum - 0x00329805, // n0x17b1 c0x0000 (---------------) + I molde - 0x3e26e4cf, // n0x17b2 c0x00f8 (n0x1960-n0x1962) o I more-og-romsdal - 0x002ca807, // n0x17b3 c0x0000 (---------------) + I mosjoen - 0x002ca9c8, // n0x17b4 c0x0000 (---------------) + I moskenes - 0x002cad04, // n0x17b5 c0x0000 (---------------) + I moss - 0x002caf06, // n0x17b6 c0x0000 (---------------) + I mosvik - 0x3e648f42, // n0x17b7 c0x00f9 (n0x1962-n0x1963) + I mr - 0x002ce646, // n0x17b8 c0x0000 (---------------) + I muosat - 0x002d1806, // n0x17b9 c0x0000 (---------------) + I museum - 0x0027158e, // n0x17ba c0x0000 (---------------) + I naamesjevuemie - 0x0032d50a, // n0x17bb c0x0000 (---------------) + I namdalseid - 0x002b7586, // n0x17bc c0x0000 (---------------) + I namsos - 0x0021708a, // n0x17bd c0x0000 (---------------) + I namsskogan - 0x002c5f09, // n0x17be c0x0000 (---------------) + I nannestad - 0x0031ce45, // n0x17bf c0x0000 (---------------) + I naroy - 0x00386c08, // n0x17c0 c0x0000 (---------------) + I narviika - 0x003a15c6, // n0x17c1 c0x0000 (---------------) + I narvik - 0x0032b3c8, // n0x17c2 c0x0000 (---------------) + I naustdal - 0x0039a948, // n0x17c3 c0x0000 (---------------) + I navuotna - 0x00329c4b, // n0x17c4 c0x0000 (---------------) + I nedre-eiker - 0x002230c5, // 
n0x17c5 c0x0000 (---------------) + I nesna - 0x0035f3c8, // n0x17c6 c0x0000 (---------------) + I nesodden - 0x0020808c, // n0x17c7 c0x0000 (---------------) + I nesoddtangen - 0x002b9287, // n0x17c8 c0x0000 (---------------) + I nesseby - 0x0024d806, // n0x17c9 c0x0000 (---------------) + I nesset - 0x0022d208, // n0x17ca c0x0000 (---------------) + I nissedal - 0x002789c8, // n0x17cb c0x0000 (---------------) + I nittedal - 0x3ea473c2, // n0x17cc c0x00fa (n0x1963-n0x1964) + I nl - 0x002be84b, // n0x17cd c0x0000 (---------------) + I nord-aurdal - 0x00397dc9, // n0x17ce c0x0000 (---------------) + I nord-fron - 0x00345ac9, // n0x17cf c0x0000 (---------------) + I nord-odal - 0x00334287, // n0x17d0 c0x0000 (---------------) + I norddal - 0x00230c48, // n0x17d1 c0x0000 (---------------) + I nordkapp - 0x3ef284c8, // n0x17d2 c0x00fb (n0x1964-n0x1968) o I nordland - 0x0022710b, // n0x17d3 c0x0000 (---------------) + I nordre-land - 0x00285bc9, // n0x17d4 c0x0000 (---------------) + I nordreisa - 0x00211c8d, // n0x17d5 c0x0000 (---------------) + I nore-og-uvdal - 0x00313948, // n0x17d6 c0x0000 (---------------) + I notodden - 0x00330848, // n0x17d7 c0x0000 (---------------) + I notteroy - 0x3f2008c2, // n0x17d8 c0x00fc (n0x1968-n0x1969) + I nt - 0x00200a04, // n0x17d9 c0x0000 (---------------) + I odda - 0x3f60a482, // n0x17da c0x00fd (n0x1969-n0x196a) + I of - 0x002efd86, // n0x17db c0x0000 (---------------) + I oksnes - 0x3fa020c2, // n0x17dc c0x00fe (n0x196a-n0x196b) + I ol - 0x0022e18a, // n0x17dd c0x0000 (---------------) + I omasvuotna - 0x0033aec6, // n0x17de c0x0000 (---------------) + I oppdal - 0x0021dec8, // n0x17df c0x0000 (---------------) + I oppegard - 0x00254e88, // n0x17e0 c0x0000 (---------------) + I orkanger - 0x002ec286, // n0x17e1 c0x0000 (---------------) + I orkdal - 0x0033aa46, // n0x17e2 c0x0000 (---------------) + I orland - 0x002e7546, // n0x17e3 c0x0000 (---------------) + I orskog - 0x00279bc5, // n0x17e4 c0x0000 (---------------) + I 
orsta - 0x00240ac4, // n0x17e5 c0x0000 (---------------) + I osen - 0x3fec6c84, // n0x17e6 c0x00ff (n0x196b-n0x196c) + I oslo - 0x00336e46, // n0x17e7 c0x0000 (---------------) + I osoyro - 0x002408c7, // n0x17e8 c0x0000 (---------------) + I osteroy - 0x4038a2c7, // n0x17e9 c0x0100 (n0x196c-n0x196d) o I ostfold - 0x002d00cb, // n0x17ea c0x0000 (---------------) + I ostre-toten - 0x0026f2c9, // n0x17eb c0x0000 (---------------) + I overhalla - 0x0026244a, // n0x17ec c0x0000 (---------------) + I ovre-eiker - 0x0031e4c4, // n0x17ed c0x0000 (---------------) + I oyer - 0x0026f988, // n0x17ee c0x0000 (---------------) + I oygarden - 0x002666cd, // n0x17ef c0x0000 (---------------) + I oystre-slidre - 0x002e0489, // n0x17f0 c0x0000 (---------------) + I porsanger - 0x002e06c8, // n0x17f1 c0x0000 (---------------) + I porsangu - 0x002e0949, // n0x17f2 c0x0000 (---------------) + I porsgrunn - 0x002e1e84, // n0x17f3 c0x0000 (---------------) + I priv - 0x00207784, // n0x17f4 c0x0000 (---------------) + I rade - 0x0027ed05, // n0x17f5 c0x0000 (---------------) + I radoy - 0x0027604b, // n0x17f6 c0x0000 (---------------) + I rahkkeravju - 0x002af046, // n0x17f7 c0x0000 (---------------) + I raholt - 0x00336c85, // n0x17f8 c0x0000 (---------------) + I raisa - 0x00356049, // n0x17f9 c0x0000 (---------------) + I rakkestad - 0x002232c8, // n0x17fa c0x0000 (---------------) + I ralingen - 0x00232f04, // n0x17fb c0x0000 (---------------) + I rana - 0x0022f489, // n0x17fc c0x0000 (---------------) + I randaberg - 0x002532c5, // n0x17fd c0x0000 (---------------) + I rauma - 0x002baa48, // n0x17fe c0x0000 (---------------) + I rendalen - 0x002701c7, // n0x17ff c0x0000 (---------------) + I rennebu - 0x0030e2c8, // n0x1800 c0x0000 (---------------) + I rennesoy - 0x0027f2c6, // n0x1801 c0x0000 (---------------) + I rindal - 0x003786c7, // n0x1802 c0x0000 (---------------) + I ringebu - 0x00290e09, // n0x1803 c0x0000 (---------------) + I ringerike - 0x00305509, // n0x1804 c0x0000 
(---------------) + I ringsaker - 0x0026a285, // n0x1805 c0x0000 (---------------) + I risor - 0x00232405, // n0x1806 c0x0000 (---------------) + I rissa - 0x40621382, // n0x1807 c0x0101 (n0x196d-n0x196e) + I rl - 0x002fb104, // n0x1808 c0x0000 (---------------) + I roan - 0x0029cac5, // n0x1809 c0x0000 (---------------) + I rodoy - 0x002d2a46, // n0x180a c0x0000 (---------------) + I rollag - 0x0031f645, // n0x180b c0x0000 (---------------) + I romsa - 0x00251987, // n0x180c c0x0000 (---------------) + I romskog - 0x00296705, // n0x180d c0x0000 (---------------) + I roros - 0x00275884, // n0x180e c0x0000 (---------------) + I rost - 0x0032acc6, // n0x180f c0x0000 (---------------) + I royken - 0x0033b487, // n0x1810 c0x0000 (---------------) + I royrvik - 0x00248f86, // n0x1811 c0x0000 (---------------) + I ruovat - 0x00228145, // n0x1812 c0x0000 (---------------) + I rygge - 0x0030cb88, // n0x1813 c0x0000 (---------------) + I salangen - 0x00223845, // n0x1814 c0x0000 (---------------) + I salat - 0x0030db87, // n0x1815 c0x0000 (---------------) + I saltdal - 0x00354c89, // n0x1816 c0x0000 (---------------) + I samnanger - 0x0032b6ca, // n0x1817 c0x0000 (---------------) + I sandefjord - 0x0034c0c7, // n0x1818 c0x0000 (---------------) + I sandnes - 0x0034c0cc, // n0x1819 c0x0000 (---------------) + I sandnessjoen - 0x0022a986, // n0x181a c0x0000 (---------------) + I sandoy - 0x00225289, // n0x181b c0x0000 (---------------) + I sarpsborg - 0x003a2b05, // n0x181c c0x0000 (---------------) + I sauda - 0x00352848, // n0x181d c0x0000 (---------------) + I sauherad - 0x00210dc3, // n0x181e c0x0000 (---------------) + I sel - 0x00210dc5, // n0x181f c0x0000 (---------------) + I selbu - 0x003379c5, // n0x1820 c0x0000 (---------------) + I selje - 0x0023d247, // n0x1821 c0x0000 (---------------) + I seljord - 0x40a0ea82, // n0x1822 c0x0102 (n0x196e-n0x196f) + I sf - 0x0023cb47, // n0x1823 c0x0000 (---------------) + I siellak - 0x002c1006, // n0x1824 c0x0000 
(---------------) + I sigdal - 0x0021d306, // n0x1825 c0x0000 (---------------) + I siljan - 0x002cab86, // n0x1826 c0x0000 (---------------) + I sirdal - 0x00278906, // n0x1827 c0x0000 (---------------) + I skanit - 0x0030a3c8, // n0x1828 c0x0000 (---------------) + I skanland - 0x0024c645, // n0x1829 c0x0000 (---------------) + I skaun - 0x00245b07, // n0x182a c0x0000 (---------------) + I skedsmo - 0x00245b0d, // n0x182b c0x0000 (---------------) + I skedsmokorset - 0x002302c3, // n0x182c c0x0000 (---------------) + I ski - 0x00309385, // n0x182d c0x0000 (---------------) + I skien - 0x002d4b87, // n0x182e c0x0000 (---------------) + I skierva - 0x002302c8, // n0x182f c0x0000 (---------------) + I skiptvet - 0x002d4745, // n0x1830 c0x0000 (---------------) + I skjak - 0x002269c8, // n0x1831 c0x0000 (---------------) + I skjervoy - 0x00371f86, // n0x1832 c0x0000 (---------------) + I skodje - 0x00250507, // n0x1833 c0x0000 (---------------) + I slattum - 0x002c2505, // n0x1834 c0x0000 (---------------) + I smola - 0x00223146, // n0x1835 c0x0000 (---------------) + I snaase - 0x0035ee45, // n0x1836 c0x0000 (---------------) + I snasa - 0x002bd40a, // n0x1837 c0x0000 (---------------) + I snillfjord - 0x002f6706, // n0x1838 c0x0000 (---------------) + I snoasa - 0x00235107, // n0x1839 c0x0000 (---------------) + I sogndal - 0x002ac785, // n0x183a c0x0000 (---------------) + I sogne - 0x002d8587, // n0x183b c0x0000 (---------------) + I sokndal - 0x002da344, // n0x183c c0x0000 (---------------) + I sola - 0x002e2506, // n0x183d c0x0000 (---------------) + I solund - 0x002f8645, // n0x183e c0x0000 (---------------) + I somna - 0x002b8e8b, // n0x183f c0x0000 (---------------) + I sondre-land - 0x00328949, // n0x1840 c0x0000 (---------------) + I songdalen - 0x002ae08a, // n0x1841 c0x0000 (---------------) + I sor-aurdal - 0x0026a308, // n0x1842 c0x0000 (---------------) + I sor-fron - 0x002efec8, // n0x1843 c0x0000 (---------------) + I sor-odal - 0x0031c14c, // 
n0x1844 c0x0000 (---------------) + I sor-varanger - 0x002f0d87, // n0x1845 c0x0000 (---------------) + I sorfold - 0x002f1488, // n0x1846 c0x0000 (---------------) + I sorreisa - 0x002fbf08, // n0x1847 c0x0000 (---------------) + I sortland - 0x002fd845, // n0x1848 c0x0000 (---------------) + I sorum - 0x002bfdca, // n0x1849 c0x0000 (---------------) + I spjelkavik - 0x00334909, // n0x184a c0x0000 (---------------) + I spydeberg - 0x40e02c02, // n0x184b c0x0103 (n0x196f-n0x1970) + I st - 0x00202c06, // n0x184c c0x0000 (---------------) + I stange - 0x002038c4, // n0x184d c0x0000 (---------------) + I stat - 0x002dee09, // n0x184e c0x0000 (---------------) + I stathelle - 0x0024ea89, // n0x184f c0x0000 (---------------) + I stavanger - 0x00212c07, // n0x1850 c0x0000 (---------------) + I stavern - 0x00249b47, // n0x1851 c0x0000 (---------------) + I steigen - 0x0027ff49, // n0x1852 c0x0000 (---------------) + I steinkjer - 0x0037f408, // n0x1853 c0x0000 (---------------) + I stjordal - 0x0037f40f, // n0x1854 c0x0000 (---------------) + I stjordalshalsen - 0x0026b986, // n0x1855 c0x0000 (---------------) + I stokke - 0x002410cb, // n0x1856 c0x0000 (---------------) + I stor-elvdal - 0x0035a285, // n0x1857 c0x0000 (---------------) + I stord - 0x0035a287, // n0x1858 c0x0000 (---------------) + I stordal - 0x003796c9, // n0x1859 c0x0000 (---------------) + I storfjord - 0x0022f406, // n0x185a c0x0000 (---------------) + I strand - 0x0022f407, // n0x185b c0x0000 (---------------) + I stranda - 0x003a3985, // n0x185c c0x0000 (---------------) + I stryn - 0x00238384, // n0x185d c0x0000 (---------------) + I sula - 0x002b0306, // n0x185e c0x0000 (---------------) + I suldal - 0x00200704, // n0x185f c0x0000 (---------------) + I sund - 0x00308247, // n0x1860 c0x0000 (---------------) + I sunndal - 0x002ea888, // n0x1861 c0x0000 (---------------) + I surnadal - 0x412ec408, // n0x1862 c0x0104 (n0x1970-n0x1971) + I svalbard - 0x002eca05, // n0x1863 c0x0000 (---------------) + 
I sveio - 0x002ecb47, // n0x1864 c0x0000 (---------------) + I svelvik - 0x00366d89, // n0x1865 c0x0000 (---------------) + I sykkylven - 0x00202804, // n0x1866 c0x0000 (---------------) + I tana - 0x00202808, // n0x1867 c0x0000 (---------------) + I tananger - 0x41662e88, // n0x1868 c0x0105 (n0x1971-n0x1973) o I telemark - 0x00215f04, // n0x1869 c0x0000 (---------------) + I time - 0x00239288, // n0x186a c0x0000 (---------------) + I tingvoll - 0x002e38c4, // n0x186b c0x0000 (---------------) + I tinn - 0x00224e09, // n0x186c c0x0000 (---------------) + I tjeldsund - 0x0027c805, // n0x186d c0x0000 (---------------) + I tjome - 0x41a00c42, // n0x186e c0x0106 (n0x1973-n0x1974) + I tm - 0x0026b9c5, // n0x186f c0x0000 (---------------) + I tokke - 0x0021eb85, // n0x1870 c0x0000 (---------------) + I tolga - 0x0035cac8, // n0x1871 c0x0000 (---------------) + I tonsberg - 0x0023afc7, // n0x1872 c0x0000 (---------------) + I torsken - 0x41e04882, // n0x1873 c0x0107 (n0x1974-n0x1975) + I tr - 0x002cbcc5, // n0x1874 c0x0000 (---------------) + I trana - 0x00273e86, // n0x1875 c0x0000 (---------------) + I tranby - 0x00290b46, // n0x1876 c0x0000 (---------------) + I tranoy - 0x002fb0c8, // n0x1877 c0x0000 (---------------) + I troandin - 0x002ff308, // n0x1878 c0x0000 (---------------) + I trogstad - 0x0031f606, // n0x1879 c0x0000 (---------------) + I tromsa - 0x00326806, // n0x187a c0x0000 (---------------) + I tromso - 0x0027ce49, // n0x187b c0x0000 (---------------) + I trondheim - 0x00357346, // n0x187c c0x0000 (---------------) + I trysil - 0x0028c70b, // n0x187d c0x0000 (---------------) + I tvedestrand - 0x0024d605, // n0x187e c0x0000 (---------------) + I tydal - 0x0020f6c6, // n0x187f c0x0000 (---------------) + I tynset - 0x0039fb48, // n0x1880 c0x0000 (---------------) + I tysfjord - 0x002349c6, // n0x1881 c0x0000 (---------------) + I tysnes - 0x00236f46, // n0x1882 c0x0000 (---------------) + I tysvar - 0x0021454a, // n0x1883 c0x0000 (---------------) + I 
ullensaker - 0x0028a5ca, // n0x1884 c0x0000 (---------------) + I ullensvang - 0x0028bf05, // n0x1885 c0x0000 (---------------) + I ulvik - 0x002ca307, // n0x1886 c0x0000 (---------------) + I unjarga - 0x003447c6, // n0x1887 c0x0000 (---------------) + I utsira - 0x42200bc2, // n0x1888 c0x0108 (n0x1975-n0x1976) + I va - 0x002d4cc7, // n0x1889 c0x0000 (---------------) + I vaapste - 0x00273805, // n0x188a c0x0000 (---------------) + I vadso - 0x00253d84, // n0x188b c0x0000 (---------------) + I vaga - 0x00253d85, // n0x188c c0x0000 (---------------) + I vagan - 0x0031e3c6, // n0x188d c0x0000 (---------------) + I vagsoy - 0x00331187, // n0x188e c0x0000 (---------------) + I vaksdal - 0x00212a45, // n0x188f c0x0000 (---------------) + I valle - 0x0024eb44, // n0x1890 c0x0000 (---------------) + I vang - 0x0024f408, // n0x1891 c0x0000 (---------------) + I vanylven - 0x00237005, // n0x1892 c0x0000 (---------------) + I vardo - 0x0028b147, // n0x1893 c0x0000 (---------------) + I varggat - 0x002920c5, // n0x1894 c0x0000 (---------------) + I varoy - 0x00213485, // n0x1895 c0x0000 (---------------) + I vefsn - 0x00268d04, // n0x1896 c0x0000 (---------------) + I vega - 0x003467c9, // n0x1897 c0x0000 (---------------) + I vegarshei - 0x002e4ac8, // n0x1898 c0x0000 (---------------) + I vennesla - 0x003711c6, // n0x1899 c0x0000 (---------------) + I verdal - 0x00344dc6, // n0x189a c0x0000 (---------------) + I verran - 0x00215c06, // n0x189b c0x0000 (---------------) + I vestby - 0x4279c608, // n0x189c c0x0109 (n0x1976-n0x1977) o I vestfold - 0x002f0c07, // n0x189d c0x0000 (---------------) + I vestnes - 0x002f0f4d, // n0x189e c0x0000 (---------------) + I vestre-slidre - 0x002f23cc, // n0x189f c0x0000 (---------------) + I vestre-toten - 0x002f29c9, // n0x18a0 c0x0000 (---------------) + I vestvagoy - 0x002f2c09, // n0x18a1 c0x0000 (---------------) + I vevelstad - 0x42b504c2, // n0x18a2 c0x010a (n0x1977-n0x1978) + I vf - 0x00399a43, // n0x18a3 c0x0000 (---------------) 
+ I vgs - 0x0020d083, // n0x18a4 c0x0000 (---------------) + I vik - 0x00328b85, // n0x18a5 c0x0000 (---------------) + I vikna - 0x00380e0a, // n0x18a6 c0x0000 (---------------) + I vindafjord - 0x0031f4c6, // n0x18a7 c0x0000 (---------------) + I voagat - 0x002f9dc5, // n0x18a8 c0x0000 (---------------) + I volda - 0x002fd184, // n0x18a9 c0x0000 (---------------) + I voss - 0x002fd18b, // n0x18aa c0x0000 (---------------) + I vossevangen - 0x00316acc, // n0x18ab c0x0000 (---------------) + I xn--andy-ira - 0x0031730c, // n0x18ac c0x0000 (---------------) + I xn--asky-ira - 0x00317615, // n0x18ad c0x0000 (---------------) + I xn--aurskog-hland-jnb - 0x0031854d, // n0x18ae c0x0000 (---------------) + I xn--avery-yua - 0x0031978f, // n0x18af c0x0000 (---------------) + I xn--bdddj-mrabd - 0x00319b52, // n0x18b0 c0x0000 (---------------) + I xn--bearalvhki-y4a - 0x00319fcf, // n0x18b1 c0x0000 (---------------) + I xn--berlevg-jxa - 0x0031a392, // n0x18b2 c0x0000 (---------------) + I xn--bhcavuotna-s4a - 0x0031a813, // n0x18b3 c0x0000 (---------------) + I xn--bhccavuotna-k7a - 0x0031accd, // n0x18b4 c0x0000 (---------------) + I xn--bidr-5nac - 0x0031b28d, // n0x18b5 c0x0000 (---------------) + I xn--bievt-0qa - 0x0031b60e, // n0x18b6 c0x0000 (---------------) + I xn--bjarky-fya - 0x0031bace, // n0x18b7 c0x0000 (---------------) + I xn--bjddar-pta - 0x0031c44c, // n0x18b8 c0x0000 (---------------) + I xn--blt-elab - 0x0031c7cc, // n0x18b9 c0x0000 (---------------) + I xn--bmlo-gra - 0x0031cc0b, // n0x18ba c0x0000 (---------------) + I xn--bod-2na - 0x0031cf8e, // n0x18bb c0x0000 (---------------) + I xn--brnny-wuac - 0x0031e9d2, // n0x18bc c0x0000 (---------------) + I xn--brnnysund-m8ac - 0x0031f28c, // n0x18bd c0x0000 (---------------) + I xn--brum-voa - 0x0031f9d0, // n0x18be c0x0000 (---------------) + I xn--btsfjord-9za - 0x0032e212, // n0x18bf c0x0000 (---------------) + I xn--davvenjrga-y4a - 0x0032f0cc, // n0x18c0 c0x0000 (---------------) + I xn--dnna-gra - 
0x0032f78d, // n0x18c1 c0x0000 (---------------) + I xn--drbak-wua - 0x0032facc, // n0x18c2 c0x0000 (---------------) + I xn--dyry-ira - 0x00331611, // n0x18c3 c0x0000 (---------------) + I xn--eveni-0qa01ga - 0x003325cd, // n0x18c4 c0x0000 (---------------) + I xn--finny-yua - 0x00334e4d, // n0x18c5 c0x0000 (---------------) + I xn--fjord-lra - 0x0033544a, // n0x18c6 c0x0000 (---------------) + I xn--fl-zia - 0x003356cc, // n0x18c7 c0x0000 (---------------) + I xn--flor-jra - 0x00335fcc, // n0x18c8 c0x0000 (---------------) + I xn--frde-gra - 0x0033698c, // n0x18c9 c0x0000 (---------------) + I xn--frna-woa - 0x0033720c, // n0x18ca c0x0000 (---------------) + I xn--frya-hra - 0x0033b7d3, // n0x18cb c0x0000 (---------------) + I xn--ggaviika-8ya47h - 0x0033bdd0, // n0x18cc c0x0000 (---------------) + I xn--gildeskl-g0a - 0x0033c1d0, // n0x18cd c0x0000 (---------------) + I xn--givuotna-8ya - 0x0033d14d, // n0x18ce c0x0000 (---------------) + I xn--gjvik-wua - 0x0033d74c, // n0x18cf c0x0000 (---------------) + I xn--gls-elac - 0x0033e809, // n0x18d0 c0x0000 (---------------) + I xn--h-2fa - 0x0034018d, // n0x18d1 c0x0000 (---------------) + I xn--hbmer-xqa - 0x003404d3, // n0x18d2 c0x0000 (---------------) + I xn--hcesuolo-7ya35b - 0x003410d1, // n0x18d3 c0x0000 (---------------) + I xn--hgebostad-g3a - 0x00341513, // n0x18d4 c0x0000 (---------------) + I xn--hmmrfeasta-s4ac - 0x0034240f, // n0x18d5 c0x0000 (---------------) + I xn--hnefoss-q1a - 0x003427cc, // n0x18d6 c0x0000 (---------------) + I xn--hobl-ira - 0x00342acf, // n0x18d7 c0x0000 (---------------) + I xn--holtlen-hxa - 0x00342e8d, // n0x18d8 c0x0000 (---------------) + I xn--hpmir-xqa - 0x0034348f, // n0x18d9 c0x0000 (---------------) + I xn--hyanger-q1a - 0x00343850, // n0x18da c0x0000 (---------------) + I xn--hylandet-54a - 0x003442ce, // n0x18db c0x0000 (---------------) + I xn--indery-fya - 0x0034860e, // n0x18dc c0x0000 (---------------) + I xn--jlster-bya - 0x00348d50, // n0x18dd c0x0000 
(---------------) + I xn--jrpeland-54a - 0x00349e8d, // n0x18de c0x0000 (---------------) + I xn--karmy-yua - 0x0034a80e, // n0x18df c0x0000 (---------------) + I xn--kfjord-iua - 0x0034ab8c, // n0x18e0 c0x0000 (---------------) + I xn--klbu-woa - 0x0034bb53, // n0x18e1 c0x0000 (---------------) + I xn--koluokta-7ya57h - 0x0034d80e, // n0x18e2 c0x0000 (---------------) + I xn--krager-gya - 0x0034ea50, // n0x18e3 c0x0000 (---------------) + I xn--kranghke-b0a - 0x0034ee51, // n0x18e4 c0x0000 (---------------) + I xn--krdsherad-m8a - 0x0034f28f, // n0x18e5 c0x0000 (---------------) + I xn--krehamn-dxa - 0x0034f653, // n0x18e6 c0x0000 (---------------) + I xn--krjohka-hwab49j - 0x0035004d, // n0x18e7 c0x0000 (---------------) + I xn--ksnes-uua - 0x0035038f, // n0x18e8 c0x0000 (---------------) + I xn--kvfjord-nxa - 0x0035074e, // n0x18e9 c0x0000 (---------------) + I xn--kvitsy-fya - 0x00350f90, // n0x18ea c0x0000 (---------------) + I xn--kvnangen-k0a - 0x00351389, // n0x18eb c0x0000 (---------------) + I xn--l-1fa - 0x003531d0, // n0x18ec c0x0000 (---------------) + I xn--laheadju-7ya - 0x0035380f, // n0x18ed c0x0000 (---------------) + I xn--langevg-jxa - 0x00353e8f, // n0x18ee c0x0000 (---------------) + I xn--ldingen-q1a - 0x00354252, // n0x18ef c0x0000 (---------------) + I xn--leagaviika-52b - 0x00354ece, // n0x18f0 c0x0000 (---------------) + I xn--lesund-hua - 0x003557cd, // n0x18f1 c0x0000 (---------------) + I xn--lgrd-poac - 0x00356a8d, // n0x18f2 c0x0000 (---------------) + I xn--lhppi-xqa - 0x00356dcd, // n0x18f3 c0x0000 (---------------) + I xn--linds-pra - 0x0035828d, // n0x18f4 c0x0000 (---------------) + I xn--loabt-0qa - 0x003585cd, // n0x18f5 c0x0000 (---------------) + I xn--lrdal-sra - 0x00358910, // n0x18f6 c0x0000 (---------------) + I xn--lrenskog-54a - 0x00358d0b, // n0x18f7 c0x0000 (---------------) + I xn--lt-liac - 0x003592cc, // n0x18f8 c0x0000 (---------------) + I xn--lten-gra - 0x0035964c, // n0x18f9 c0x0000 (---------------) + I 
xn--lury-ira - 0x0035994c, // n0x18fa c0x0000 (---------------) + I xn--mely-ira - 0x00359c4e, // n0x18fb c0x0000 (---------------) + I xn--merker-kua - 0x00365650, // n0x18fc c0x0000 (---------------) + I xn--mjndalen-64a - 0x00367612, // n0x18fd c0x0000 (---------------) + I xn--mlatvuopmi-s4a - 0x00367a8b, // n0x18fe c0x0000 (---------------) + I xn--mli-tla - 0x0036820e, // n0x18ff c0x0000 (---------------) + I xn--mlselv-iua - 0x0036858e, // n0x1900 c0x0000 (---------------) + I xn--moreke-jua - 0x0036928e, // n0x1901 c0x0000 (---------------) + I xn--mosjen-eya - 0x003699cb, // n0x1902 c0x0000 (---------------) + I xn--mot-tla - 0x42f69f96, // n0x1903 c0x010b (n0x1978-n0x197a) o I xn--mre-og-romsdal-qqb - 0x0036adcd, // n0x1904 c0x0000 (---------------) + I xn--msy-ula0h - 0x0036b994, // n0x1905 c0x0000 (---------------) + I xn--mtta-vrjjat-k7af - 0x0036c90d, // n0x1906 c0x0000 (---------------) + I xn--muost-0qa - 0x0036ed55, // n0x1907 c0x0000 (---------------) + I xn--nmesjevuemie-tcba - 0x0037014d, // n0x1908 c0x0000 (---------------) + I xn--nry-yla5g - 0x00370acf, // n0x1909 c0x0000 (---------------) + I xn--nttery-byae - 0x0037134f, // n0x190a c0x0000 (---------------) + I xn--nvuotna-hwa - 0x00373e8f, // n0x190b c0x0000 (---------------) + I xn--oppegrd-ixa - 0x0037424e, // n0x190c c0x0000 (---------------) + I xn--ostery-fya - 0x0037528d, // n0x190d c0x0000 (---------------) + I xn--osyro-wua - 0x00377691, // n0x190e c0x0000 (---------------) + I xn--porsgu-sta26f - 0x0037a1cc, // n0x190f c0x0000 (---------------) + I xn--rady-ira - 0x0037a4cc, // n0x1910 c0x0000 (---------------) + I xn--rdal-poa - 0x0037a7cb, // n0x1911 c0x0000 (---------------) + I xn--rde-ula - 0x0037ad8c, // n0x1912 c0x0000 (---------------) + I xn--rdy-0nab - 0x0037b14f, // n0x1913 c0x0000 (---------------) + I xn--rennesy-v1a - 0x0037b512, // n0x1914 c0x0000 (---------------) + I xn--rhkkervju-01af - 0x0037becd, // n0x1915 c0x0000 (---------------) + I xn--rholt-mra - 
0x0037ce8c, // n0x1916 c0x0000 (---------------) + I xn--risa-5na - 0x0037d30c, // n0x1917 c0x0000 (---------------) + I xn--risr-ira - 0x0037d60d, // n0x1918 c0x0000 (---------------) + I xn--rland-uua - 0x0037d94f, // n0x1919 c0x0000 (---------------) + I xn--rlingen-mxa - 0x0037dd0e, // n0x191a c0x0000 (---------------) + I xn--rmskog-bya - 0x00380acc, // n0x191b c0x0000 (---------------) + I xn--rros-gra - 0x0038108d, // n0x191c c0x0000 (---------------) + I xn--rskog-uua - 0x003813cb, // n0x191d c0x0000 (---------------) + I xn--rst-0na - 0x0038198c, // n0x191e c0x0000 (---------------) + I xn--rsta-fra - 0x00381f0d, // n0x191f c0x0000 (---------------) + I xn--ryken-vua - 0x0038224e, // n0x1920 c0x0000 (---------------) + I xn--ryrvik-bya - 0x003826c9, // n0x1921 c0x0000 (---------------) + I xn--s-1fa - 0x003833d3, // n0x1922 c0x0000 (---------------) + I xn--sandnessjen-ogb - 0x0038404d, // n0x1923 c0x0000 (---------------) + I xn--sandy-yua - 0x0038438d, // n0x1924 c0x0000 (---------------) + I xn--seral-lra - 0x0038498c, // n0x1925 c0x0000 (---------------) + I xn--sgne-gra - 0x00384e0e, // n0x1926 c0x0000 (---------------) + I xn--skierv-uta - 0x00385bcf, // n0x1927 c0x0000 (---------------) + I xn--skjervy-v1a - 0x00385f8c, // n0x1928 c0x0000 (---------------) + I xn--skjk-soa - 0x0038628d, // n0x1929 c0x0000 (---------------) + I xn--sknit-yqa - 0x003865cf, // n0x192a c0x0000 (---------------) + I xn--sknland-fxa - 0x0038698c, // n0x192b c0x0000 (---------------) + I xn--slat-5na - 0x0038708c, // n0x192c c0x0000 (---------------) + I xn--slt-elab - 0x0038744c, // n0x192d c0x0000 (---------------) + I xn--smla-hra - 0x0038774c, // n0x192e c0x0000 (---------------) + I xn--smna-gra - 0x00387e0d, // n0x192f c0x0000 (---------------) + I xn--snase-nra - 0x00388152, // n0x1930 c0x0000 (---------------) + I xn--sndre-land-0cb - 0x003887cc, // n0x1931 c0x0000 (---------------) + I xn--snes-poa - 0x00388acc, // n0x1932 c0x0000 (---------------) + I 
xn--snsa-roa - 0x00388dd1, // n0x1933 c0x0000 (---------------) + I xn--sr-aurdal-l8a - 0x0038920f, // n0x1934 c0x0000 (---------------) + I xn--sr-fron-q1a - 0x003895cf, // n0x1935 c0x0000 (---------------) + I xn--sr-odal-q1a - 0x00389993, // n0x1936 c0x0000 (---------------) + I xn--sr-varanger-ggb - 0x0038e4ce, // n0x1937 c0x0000 (---------------) + I xn--srfold-bya - 0x0038ea4f, // n0x1938 c0x0000 (---------------) + I xn--srreisa-q1a - 0x0038ee0c, // n0x1939 c0x0000 (---------------) + I xn--srum-gra - 0x4338f14e, // n0x193a c0x010c (n0x197a-n0x197b) o I xn--stfold-9xa - 0x0038f4cf, // n0x193b c0x0000 (---------------) + I xn--stjrdal-s1a - 0x0038f896, // n0x193c c0x0000 (---------------) + I xn--stjrdalshalsen-sqb - 0x00390392, // n0x193d c0x0000 (---------------) + I xn--stre-toten-zcb - 0x00391a0c, // n0x193e c0x0000 (---------------) + I xn--tjme-hra - 0x003921cf, // n0x193f c0x0000 (---------------) + I xn--tnsberg-q1a - 0x0039284d, // n0x1940 c0x0000 (---------------) + I xn--trany-yua - 0x00392b8f, // n0x1941 c0x0000 (---------------) + I xn--trgstad-r1a - 0x00392f4c, // n0x1942 c0x0000 (---------------) + I xn--trna-woa - 0x0039324d, // n0x1943 c0x0000 (---------------) + I xn--troms-zua - 0x0039358d, // n0x1944 c0x0000 (---------------) + I xn--tysvr-vra - 0x0039518e, // n0x1945 c0x0000 (---------------) + I xn--unjrga-rta - 0x003968cc, // n0x1946 c0x0000 (---------------) + I xn--vads-jra - 0x00396bcc, // n0x1947 c0x0000 (---------------) + I xn--vard-jra - 0x00396ed0, // n0x1948 c0x0000 (---------------) + I xn--vegrshei-c0a - 0x00398ed1, // n0x1949 c0x0000 (---------------) + I xn--vestvgy-ixa6o - 0x0039930b, // n0x194a c0x0000 (---------------) + I xn--vg-yiab - 0x0039964c, // n0x194b c0x0000 (---------------) + I xn--vgan-qoa - 0x0039994e, // n0x194c c0x0000 (---------------) + I xn--vgsy-qoa0j - 0x0039cb11, // n0x194d c0x0000 (---------------) + I xn--vre-eiker-k8a - 0x0039cf4e, // n0x194e c0x0000 (---------------) + I xn--vrggt-xqad - 
0x0039d2cd, // n0x194f c0x0000 (---------------) + I xn--vry-yla5g - 0x003a138b, // n0x1950 c0x0000 (---------------) + I xn--yer-zna - 0x003a1c8f, // n0x1951 c0x0000 (---------------) + I xn--ygarden-p1a - 0x003a2c54, // n0x1952 c0x0000 (---------------) + I xn--ystre-slidre-ujb - 0x00296a42, // n0x1953 c0x0000 (---------------) + I gs - 0x00296a42, // n0x1954 c0x0000 (---------------) + I gs - 0x002030c3, // n0x1955 c0x0000 (---------------) + I nes - 0x00296a42, // n0x1956 c0x0000 (---------------) + I gs - 0x002030c3, // n0x1957 c0x0000 (---------------) + I nes - 0x00296a42, // n0x1958 c0x0000 (---------------) + I gs - 0x002035c2, // n0x1959 c0x0000 (---------------) + I os - 0x00362705, // n0x195a c0x0000 (---------------) + I valer - 0x0039c80c, // n0x195b c0x0000 (---------------) + I xn--vler-qoa - 0x00296a42, // n0x195c c0x0000 (---------------) + I gs - 0x00296a42, // n0x195d c0x0000 (---------------) + I gs - 0x002035c2, // n0x195e c0x0000 (---------------) + I os - 0x00296a42, // n0x195f c0x0000 (---------------) + I gs - 0x00291ec5, // n0x1960 c0x0000 (---------------) + I heroy - 0x0032b6c5, // n0x1961 c0x0000 (---------------) + I sande - 0x00296a42, // n0x1962 c0x0000 (---------------) + I gs - 0x00296a42, // n0x1963 c0x0000 (---------------) + I gs - 0x0020cb02, // n0x1964 c0x0000 (---------------) + I bo - 0x00291ec5, // n0x1965 c0x0000 (---------------) + I heroy - 0x00318a89, // n0x1966 c0x0000 (---------------) + I xn--b-5ga - 0x00340dcc, // n0x1967 c0x0000 (---------------) + I xn--hery-ira - 0x00296a42, // n0x1968 c0x0000 (---------------) + I gs - 0x00296a42, // n0x1969 c0x0000 (---------------) + I gs - 0x00296a42, // n0x196a c0x0000 (---------------) + I gs - 0x00296a42, // n0x196b c0x0000 (---------------) + I gs - 0x00362705, // n0x196c c0x0000 (---------------) + I valer - 0x00296a42, // n0x196d c0x0000 (---------------) + I gs - 0x00296a42, // n0x196e c0x0000 (---------------) + I gs - 0x00296a42, // n0x196f c0x0000 (---------------) 
+ I gs - 0x00296a42, // n0x1970 c0x0000 (---------------) + I gs - 0x0020cb02, // n0x1971 c0x0000 (---------------) + I bo - 0x00318a89, // n0x1972 c0x0000 (---------------) + I xn--b-5ga - 0x00296a42, // n0x1973 c0x0000 (---------------) + I gs - 0x00296a42, // n0x1974 c0x0000 (---------------) + I gs - 0x00296a42, // n0x1975 c0x0000 (---------------) + I gs - 0x0032b6c5, // n0x1976 c0x0000 (---------------) + I sande - 0x00296a42, // n0x1977 c0x0000 (---------------) + I gs - 0x0032b6c5, // n0x1978 c0x0000 (---------------) + I sande - 0x00340dcc, // n0x1979 c0x0000 (---------------) + I xn--hery-ira - 0x0039c80c, // n0x197a c0x0000 (---------------) + I xn--vler-qoa - 0x0032bd03, // n0x197b c0x0000 (---------------) + I biz - 0x00234803, // n0x197c c0x0000 (---------------) + I com - 0x0023a1c3, // n0x197d c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x197e c0x0000 (---------------) + I gov - 0x00201804, // n0x197f c0x0000 (---------------) + I info - 0x002207c3, // n0x1980 c0x0000 (---------------) + I net - 0x00225403, // n0x1981 c0x0000 (---------------) + I org - 0x00112f88, // n0x1982 c0x0000 (---------------) + merseine - 0x000a8ac4, // n0x1983 c0x0000 (---------------) + mine - 0x000f95c8, // n0x1984 c0x0000 (---------------) + shacknet - 0x00200342, // n0x1985 c0x0000 (---------------) + I ac - 0x4420a442, // n0x1986 c0x0110 (n0x1995-n0x1996) + I co - 0x00244d03, // n0x1987 c0x0000 (---------------) + I cri - 0x00248904, // n0x1988 c0x0000 (---------------) + I geek - 0x002082c3, // n0x1989 c0x0000 (---------------) + I gen - 0x00344c84, // n0x198a c0x0000 (---------------) + I govt - 0x002ad786, // n0x198b c0x0000 (---------------) + I health - 0x0020b5c3, // n0x198c c0x0000 (---------------) + I iwi - 0x002ebac4, // n0x198d c0x0000 (---------------) + I kiwi - 0x0026d145, // n0x198e c0x0000 (---------------) + I maori - 0x002119c3, // n0x198f c0x0000 (---------------) + I mil - 0x002207c3, // n0x1990 c0x0000 (---------------) + I net - 
0x00225403, // n0x1991 c0x0000 (---------------) + I org - 0x0027498a, // n0x1992 c0x0000 (---------------) + I parliament - 0x0022ec46, // n0x1993 c0x0000 (---------------) + I school - 0x0036890c, // n0x1994 c0x0000 (---------------) + I xn--mori-qsa - 0x000ff148, // n0x1995 c0x0000 (---------------) + blogspot - 0x0020a442, // n0x1996 c0x0000 (---------------) + I co - 0x00234803, // n0x1997 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1998 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1999 c0x0000 (---------------) + I gov - 0x002127c3, // n0x199a c0x0000 (---------------) + I med - 0x002d1806, // n0x199b c0x0000 (---------------) + I museum - 0x002207c3, // n0x199c c0x0000 (---------------) + I net - 0x00225403, // n0x199d c0x0000 (---------------) + I org - 0x002210c3, // n0x199e c0x0000 (---------------) + I pro - 0x000075c2, // n0x199f c0x0000 (---------------) + ae - 0x0002d7c7, // n0x19a0 c0x0000 (---------------) + blogdns - 0x000d6608, // n0x19a1 c0x0000 (---------------) + blogsite - 0x0000d88e, // n0x19a2 c0x0000 (---------------) + bmoattachments - 0x00089292, // n0x19a3 c0x0000 (---------------) + boldlygoingnowhere - 0x44e50345, // n0x19a4 c0x0113 (n0x19dc-n0x19de) o I cdn77 - 0x4532344c, // n0x19a5 c0x0114 (n0x19de-n0x19df) o I cdn77-secure - 0x001456c8, // n0x19a6 c0x0000 (---------------) + dnsalias - 0x0007bbc7, // n0x19a7 c0x0000 (---------------) + dnsdojo - 0x000141cb, // n0x19a8 c0x0000 (---------------) + doesntexist - 0x00168e09, // n0x19a9 c0x0000 (---------------) + dontexist - 0x001455c7, // n0x19aa c0x0000 (---------------) + doomdns - 0x0018bfc7, // n0x19ab c0x0000 (---------------) + dsmynas - 0x0007bac7, // n0x19ac c0x0000 (---------------) + duckdns - 0x0000fe06, // n0x19ad c0x0000 (---------------) + dvrdns - 0x00197988, // n0x19ae c0x0000 (---------------) + dynalias - 0x45812586, // n0x19af c0x0116 (n0x19e0-n0x19e2) + dyndns - 0x000a8ccd, // n0x19b0 c0x0000 (---------------) + endofinternet - 0x00109450, // 
n0x19b1 c0x0000 (---------------) + endoftheinternet - 0x45c07602, // n0x19b2 c0x0117 (n0x19e2-n0x1a19) + eu - 0x0018be48, // n0x19b3 c0x0000 (---------------) + familyds - 0x00066487, // n0x19b4 c0x0000 (---------------) + from-me - 0x00097009, // n0x19b5 c0x0000 (---------------) + game-host - 0x000569c6, // n0x19b6 c0x0000 (---------------) + gotdns - 0x00009a82, // n0x19b7 c0x0000 (---------------) + hk - 0x0014e14a, // n0x19b8 c0x0000 (---------------) + hobby-site - 0x00012747, // n0x19b9 c0x0000 (---------------) + homedns - 0x00147c07, // n0x19ba c0x0000 (---------------) + homeftp - 0x000a54c9, // n0x19bb c0x0000 (---------------) + homelinux - 0x000a6ac8, // n0x19bc c0x0000 (---------------) + homeunix - 0x000e0e8e, // n0x19bd c0x0000 (---------------) + is-a-bruinsfan - 0x0000ac4e, // n0x19be c0x0000 (---------------) + is-a-candidate - 0x0000e7cf, // n0x19bf c0x0000 (---------------) + is-a-celticsfan - 0x0000fa89, // n0x19c0 c0x0000 (---------------) + is-a-chef - 0x000487c9, // n0x19c1 c0x0000 (---------------) + is-a-geek - 0x0005fc8b, // n0x19c2 c0x0000 (---------------) + is-a-knight - 0x0007d2cf, // n0x19c3 c0x0000 (---------------) + is-a-linux-user - 0x001469cc, // n0x19c4 c0x0000 (---------------) + is-a-patsfan - 0x000a520b, // n0x19c5 c0x0000 (---------------) + is-a-soxfan - 0x000baf88, // n0x19c6 c0x0000 (---------------) + is-found - 0x000cffc7, // n0x19c7 c0x0000 (---------------) + is-lost - 0x000f8a88, // n0x19c8 c0x0000 (---------------) + is-saved - 0x000ebd8b, // n0x19c9 c0x0000 (---------------) + is-very-bad - 0x000eebcc, // n0x19ca c0x0000 (---------------) + is-very-evil - 0x000fb78c, // n0x19cb c0x0000 (---------------) + is-very-good - 0x00122a0c, // n0x19cc c0x0000 (---------------) + is-very-nice - 0x0013ddcd, // n0x19cd c0x0000 (---------------) + is-very-sweet - 0x00085d48, // n0x19ce c0x0000 (---------------) + isa-geek - 0x00151d49, // n0x19cf c0x0000 (---------------) + kicks-ass - 0x001a10cb, // n0x19d0 c0x0000 
(---------------) + misconfused - 0x000dde07, // n0x19d1 c0x0000 (---------------) + podzone - 0x000d648a, // n0x19d2 c0x0000 (---------------) + readmyblog - 0x00067646, // n0x19d3 c0x0000 (---------------) + selfip - 0x00098e8d, // n0x19d4 c0x0000 (---------------) + sellsyourhome - 0x000cd608, // n0x19d5 c0x0000 (---------------) + servebbs - 0x000747c8, // n0x19d6 c0x0000 (---------------) + serveftp - 0x00170e89, // n0x19d7 c0x0000 (---------------) + servegame - 0x000e9f4c, // n0x19d8 c0x0000 (---------------) + stuff-4-sale - 0x00002202, // n0x19d9 c0x0000 (---------------) + us - 0x00111746, // n0x19da c0x0000 (---------------) + webhop - 0x00000182, // n0x19db c0x0000 (---------------) + za - 0x00000141, // n0x19dc c0x0000 (---------------) + c - 0x00038883, // n0x19dd c0x0000 (---------------) + rsc - 0x4577e786, // n0x19de c0x0115 (n0x19df-n0x19e0) o I origin - 0x000504c3, // n0x19df c0x0000 (---------------) + ssl - 0x0000ec82, // n0x19e0 c0x0000 (---------------) + go - 0x00012744, // n0x19e1 c0x0000 (---------------) + home - 0x00000cc2, // n0x19e2 c0x0000 (---------------) + al - 0x000ceb44, // n0x19e3 c0x0000 (---------------) + asso - 0x00000482, // n0x19e4 c0x0000 (---------------) + at - 0x00006142, // n0x19e5 c0x0000 (---------------) + au - 0x00003c42, // n0x19e6 c0x0000 (---------------) + be - 0x00103f42, // n0x19e7 c0x0000 (---------------) + bg - 0x00000e02, // n0x19e8 c0x0000 (---------------) + ca - 0x00050342, // n0x19e9 c0x0000 (---------------) + cd - 0x00000382, // n0x19ea c0x0000 (---------------) + ch - 0x0001b802, // n0x19eb c0x0000 (---------------) + cn - 0x0003de02, // n0x19ec c0x0000 (---------------) + cy - 0x00000142, // n0x19ed c0x0000 (---------------) + cz - 0x00007802, // n0x19ee c0x0000 (---------------) + de - 0x00030d02, // n0x19ef c0x0000 (---------------) + dk - 0x0003a1c3, // n0x19f0 c0x0000 (---------------) + edu - 0x0000a542, // n0x19f1 c0x0000 (---------------) + ee - 0x00000082, // n0x19f2 c0x0000 
(---------------) + es - 0x000016c2, // n0x19f3 c0x0000 (---------------) + fi - 0x00041542, // n0x19f4 c0x0000 (---------------) + fr - 0x00009d42, // n0x19f5 c0x0000 (---------------) + gr - 0x00037242, // n0x19f6 c0x0000 (---------------) + hr - 0x0001dc42, // n0x19f7 c0x0000 (---------------) + hu - 0x00000042, // n0x19f8 c0x0000 (---------------) + ie - 0x00002dc2, // n0x19f9 c0x0000 (---------------) + il - 0x00001282, // n0x19fa c0x0000 (---------------) + in - 0x000014c3, // n0x19fb c0x0000 (---------------) + int - 0x000022c2, // n0x19fc c0x0000 (---------------) + is - 0x00001782, // n0x19fd c0x0000 (---------------) + it - 0x000af2c2, // n0x19fe c0x0000 (---------------) + jp - 0x000093c2, // n0x19ff c0x0000 (---------------) + kr - 0x0000e982, // n0x1a00 c0x0000 (---------------) + lt - 0x000047c2, // n0x1a01 c0x0000 (---------------) + lu - 0x0000d042, // n0x1a02 c0x0000 (---------------) + lv - 0x00019ac2, // n0x1a03 c0x0000 (---------------) + mc - 0x00005d82, // n0x1a04 c0x0000 (---------------) + me - 0x00165b42, // n0x1a05 c0x0000 (---------------) + mk - 0x00007682, // n0x1a06 c0x0000 (---------------) + mt - 0x0002c482, // n0x1a07 c0x0000 (---------------) + my - 0x000207c3, // n0x1a08 c0x0000 (---------------) + net - 0x00002902, // n0x1a09 c0x0000 (---------------) + ng - 0x000473c2, // n0x1a0a c0x0000 (---------------) + nl - 0x00001342, // n0x1a0b c0x0000 (---------------) + no - 0x00004ec2, // n0x1a0c c0x0000 (---------------) + nz - 0x0006a205, // n0x1a0d c0x0000 (---------------) + paris - 0x00004bc2, // n0x1a0e c0x0000 (---------------) + pl - 0x00030382, // n0x1a0f c0x0000 (---------------) + pt - 0x00042d83, // n0x1a10 c0x0000 (---------------) + q-a - 0x00002082, // n0x1a11 c0x0000 (---------------) + ro - 0x0000e0c2, // n0x1a12 c0x0000 (---------------) + ru - 0x00002342, // n0x1a13 c0x0000 (---------------) + se - 0x00003342, // n0x1a14 c0x0000 (---------------) + si - 0x0000d642, // n0x1a15 c0x0000 (---------------) + sk - 
0x00004882, // n0x1a16 c0x0000 (---------------) + tr - 0x00001ac2, // n0x1a17 c0x0000 (---------------) + uk - 0x00002202, // n0x1a18 c0x0000 (---------------) + us - 0x00216dc3, // n0x1a19 c0x0000 (---------------) + I abo - 0x00200342, // n0x1a1a c0x0000 (---------------) + I ac - 0x00234803, // n0x1a1b c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1a1c c0x0000 (---------------) + I edu - 0x00211543, // n0x1a1d c0x0000 (---------------) + I gob - 0x0020c9c3, // n0x1a1e c0x0000 (---------------) + I ing - 0x002127c3, // n0x1a1f c0x0000 (---------------) + I med - 0x002207c3, // n0x1a20 c0x0000 (---------------) + I net - 0x00201343, // n0x1a21 c0x0000 (---------------) + I nom - 0x00225403, // n0x1a22 c0x0000 (---------------) + I org - 0x00291e03, // n0x1a23 c0x0000 (---------------) + I sld - 0x000ff148, // n0x1a24 c0x0000 (---------------) + blogspot - 0x00234803, // n0x1a25 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1a26 c0x0000 (---------------) + I edu - 0x00211543, // n0x1a27 c0x0000 (---------------) + I gob - 0x002119c3, // n0x1a28 c0x0000 (---------------) + I mil - 0x002207c3, // n0x1a29 c0x0000 (---------------) + I net - 0x00201343, // n0x1a2a c0x0000 (---------------) + I nom - 0x00225403, // n0x1a2b c0x0000 (---------------) + I org - 0x00234803, // n0x1a2c c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1a2d c0x0000 (---------------) + I edu - 0x00225403, // n0x1a2e c0x0000 (---------------) + I org - 0x00234803, // n0x1a2f c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1a30 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1a31 c0x0000 (---------------) + I gov - 0x00200041, // n0x1a32 c0x0000 (---------------) + I i - 0x002119c3, // n0x1a33 c0x0000 (---------------) + I mil - 0x002207c3, // n0x1a34 c0x0000 (---------------) + I net - 0x00230743, // n0x1a35 c0x0000 (---------------) + I ngo - 0x00225403, // n0x1a36 c0x0000 (---------------) + I org - 0x0032bd03, // n0x1a37 c0x0000 (---------------) + I biz - 
0x00234803, // n0x1a38 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1a39 c0x0000 (---------------) + I edu - 0x00286583, // n0x1a3a c0x0000 (---------------) + I fam - 0x00211543, // n0x1a3b c0x0000 (---------------) + I gob - 0x0023d143, // n0x1a3c c0x0000 (---------------) + I gok - 0x0024cb83, // n0x1a3d c0x0000 (---------------) + I gon - 0x002a3483, // n0x1a3e c0x0000 (---------------) + I gop - 0x0024bf83, // n0x1a3f c0x0000 (---------------) + I gos - 0x0027c5c3, // n0x1a40 c0x0000 (---------------) + I gov - 0x00201804, // n0x1a41 c0x0000 (---------------) + I info - 0x002207c3, // n0x1a42 c0x0000 (---------------) + I net - 0x00225403, // n0x1a43 c0x0000 (---------------) + I org - 0x00221cc3, // n0x1a44 c0x0000 (---------------) + I web - 0x00310784, // n0x1a45 c0x0000 (---------------) + I agro - 0x0022be83, // n0x1a46 c0x0000 (---------------) + I aid - 0x00001d03, // n0x1a47 c0x0000 (---------------) + art - 0x00200c03, // n0x1a48 c0x0000 (---------------) + I atm - 0x00254308, // n0x1a49 c0x0000 (---------------) + I augustow - 0x00263084, // n0x1a4a c0x0000 (---------------) + I auto - 0x0022430a, // n0x1a4b c0x0000 (---------------) + I babia-gora - 0x00203c46, // n0x1a4c c0x0000 (---------------) + I bedzin - 0x00397847, // n0x1a4d c0x0000 (---------------) + I beskidy - 0x0021f10a, // n0x1a4e c0x0000 (---------------) + I bialowieza - 0x0026b849, // n0x1a4f c0x0000 (---------------) + I bialystok - 0x003a3107, // n0x1a50 c0x0000 (---------------) + I bielawa - 0x0020000a, // n0x1a51 c0x0000 (---------------) + I bieszczady - 0x0032bd03, // n0x1a52 c0x0000 (---------------) + I biz - 0x00374c4b, // n0x1a53 c0x0000 (---------------) + I boleslawiec - 0x002d38c9, // n0x1a54 c0x0000 (---------------) + I bydgoszcz - 0x00215d05, // n0x1a55 c0x0000 (---------------) + I bytom - 0x002ce487, // n0x1a56 c0x0000 (---------------) + I cieszyn - 0x0000a442, // n0x1a57 c0x0000 (---------------) + co - 0x00234803, // n0x1a58 c0x0000 (---------------) + 
I com - 0x00229247, // n0x1a59 c0x0000 (---------------) + I czeladz - 0x0027cb85, // n0x1a5a c0x0000 (---------------) + I czest - 0x002be509, // n0x1a5b c0x0000 (---------------) + I dlugoleka - 0x0023a1c3, // n0x1a5c c0x0000 (---------------) + I edu - 0x00222ec6, // n0x1a5d c0x0000 (---------------) + I elblag - 0x002bd0c3, // n0x1a5e c0x0000 (---------------) + I elk - 0x000c1083, // n0x1a5f c0x0000 (---------------) + gda - 0x000fc206, // n0x1a60 c0x0000 (---------------) + gdansk - 0x001147c6, // n0x1a61 c0x0000 (---------------) + gdynia - 0x0000b547, // n0x1a62 c0x0000 (---------------) + gliwice - 0x00210946, // n0x1a63 c0x0000 (---------------) + I glogow - 0x00215405, // n0x1a64 c0x0000 (---------------) + I gmina - 0x00334147, // n0x1a65 c0x0000 (---------------) + I gniezno - 0x00337047, // n0x1a66 c0x0000 (---------------) + I gorlice - 0x47a7c5c3, // n0x1a67 c0x011e (n0x1aea-n0x1b19) + I gov - 0x0032f307, // n0x1a68 c0x0000 (---------------) + I grajewo - 0x0035b883, // n0x1a69 c0x0000 (---------------) + I gsm - 0x0030bf05, // n0x1a6a c0x0000 (---------------) + I ilawa - 0x00201804, // n0x1a6b c0x0000 (---------------) + I info - 0x002694c8, // n0x1a6c c0x0000 (---------------) + I jaworzno - 0x002ae78c, // n0x1a6d c0x0000 (---------------) + I jelenia-gora - 0x002ace45, // n0x1a6e c0x0000 (---------------) + I jgora - 0x00328246, // n0x1a6f c0x0000 (---------------) + I kalisz - 0x00229107, // n0x1a70 c0x0000 (---------------) + I karpacz - 0x0039de47, // n0x1a71 c0x0000 (---------------) + I kartuzy - 0x0020d107, // n0x1a72 c0x0000 (---------------) + I kaszuby - 0x0020d688, // n0x1a73 c0x0000 (---------------) + I katowice - 0x002afb8f, // n0x1a74 c0x0000 (---------------) + I kazimierz-dolny - 0x00230b85, // n0x1a75 c0x0000 (---------------) + I kepno - 0x00244e07, // n0x1a76 c0x0000 (---------------) + I ketrzyn - 0x00208a47, // n0x1a77 c0x0000 (---------------) + I klodzko - 0x002a4a0a, // n0x1a78 c0x0000 (---------------) + I kobierzyce - 
0x0028c009, // n0x1a79 c0x0000 (---------------) + I kolobrzeg - 0x002cb985, // n0x1a7a c0x0000 (---------------) + I konin - 0x002cd90a, // n0x1a7b c0x0000 (---------------) + I konskowola - 0x00111606, // n0x1a7c c0x0000 (---------------) + krakow - 0x002bd145, // n0x1a7d c0x0000 (---------------) + I kutno - 0x00367cc4, // n0x1a7e c0x0000 (---------------) + I lapy - 0x00399f86, // n0x1a7f c0x0000 (---------------) + I lebork - 0x0032dbc7, // n0x1a80 c0x0000 (---------------) + I legnica - 0x00231b87, // n0x1a81 c0x0000 (---------------) + I lezajsk - 0x00333ac8, // n0x1a82 c0x0000 (---------------) + I limanowa - 0x002136c5, // n0x1a83 c0x0000 (---------------) + I lomza - 0x0027ca86, // n0x1a84 c0x0000 (---------------) + I lowicz - 0x0036a905, // n0x1a85 c0x0000 (---------------) + I lubin - 0x0033b005, // n0x1a86 c0x0000 (---------------) + I lukow - 0x0021e244, // n0x1a87 c0x0000 (---------------) + I mail - 0x002ec187, // n0x1a88 c0x0000 (---------------) + I malbork - 0x0030a20a, // n0x1a89 c0x0000 (---------------) + I malopolska - 0x00206308, // n0x1a8a c0x0000 (---------------) + I mazowsze - 0x002eb706, // n0x1a8b c0x0000 (---------------) + I mazury - 0x000127c3, // n0x1a8c c0x0000 (---------------) + med - 0x00302485, // n0x1a8d c0x0000 (---------------) + I media - 0x00231846, // n0x1a8e c0x0000 (---------------) + I miasta - 0x003a5706, // n0x1a8f c0x0000 (---------------) + I mielec - 0x00271846, // n0x1a90 c0x0000 (---------------) + I mielno - 0x002119c3, // n0x1a91 c0x0000 (---------------) + I mil - 0x0037c147, // n0x1a92 c0x0000 (---------------) + I mragowo - 0x002089c5, // n0x1a93 c0x0000 (---------------) + I naklo - 0x002207c3, // n0x1a94 c0x0000 (---------------) + I net - 0x003a338d, // n0x1a95 c0x0000 (---------------) + I nieruchomosci - 0x00201343, // n0x1a96 c0x0000 (---------------) + I nom - 0x00333bc8, // n0x1a97 c0x0000 (---------------) + I nowaruda - 0x0039f9c4, // n0x1a98 c0x0000 (---------------) + I nysa - 0x00275b45, // 
n0x1a99 c0x0000 (---------------) + I olawa - 0x002a4906, // n0x1a9a c0x0000 (---------------) + I olecko - 0x0023c806, // n0x1a9b c0x0000 (---------------) + I olkusz - 0x0020f5c7, // n0x1a9c c0x0000 (---------------) + I olsztyn - 0x0023d487, // n0x1a9d c0x0000 (---------------) + I opoczno - 0x00246445, // n0x1a9e c0x0000 (---------------) + I opole - 0x00225403, // n0x1a9f c0x0000 (---------------) + I org - 0x00321447, // n0x1aa0 c0x0000 (---------------) + I ostroda - 0x002c9f09, // n0x1aa1 c0x0000 (---------------) + I ostroleka - 0x00209f49, // n0x1aa2 c0x0000 (---------------) + I ostrowiec - 0x0020bc8a, // n0x1aa3 c0x0000 (---------------) + I ostrowwlkp - 0x00248042, // n0x1aa4 c0x0000 (---------------) + I pc - 0x0030bec4, // n0x1aa5 c0x0000 (---------------) + I pila - 0x002d8fc4, // n0x1aa6 c0x0000 (---------------) + I pisz - 0x00211747, // n0x1aa7 c0x0000 (---------------) + I podhale - 0x0023ca08, // n0x1aa8 c0x0000 (---------------) + I podlasie - 0x002de889, // n0x1aa9 c0x0000 (---------------) + I polkowice - 0x00309249, // n0x1aaa c0x0000 (---------------) + I pomorskie - 0x002df447, // n0x1aab c0x0000 (---------------) + I pomorze - 0x002534c6, // n0x1aac c0x0000 (---------------) + I powiat - 0x000e0c06, // n0x1aad c0x0000 (---------------) + poznan - 0x002e1e84, // n0x1aae c0x0000 (---------------) + I priv - 0x002e200a, // n0x1aaf c0x0000 (---------------) + I prochowice - 0x002e6788, // n0x1ab0 c0x0000 (---------------) + I pruszkow - 0x002e7409, // n0x1ab1 c0x0000 (---------------) + I przeworsk - 0x002961c6, // n0x1ab2 c0x0000 (---------------) + I pulawy - 0x0033ca45, // n0x1ab3 c0x0000 (---------------) + I radom - 0x002061c8, // n0x1ab4 c0x0000 (---------------) + I rawa-maz - 0x002c4f4a, // n0x1ab5 c0x0000 (---------------) + I realestate - 0x00286d83, // n0x1ab6 c0x0000 (---------------) + I rel - 0x003522c6, // n0x1ab7 c0x0000 (---------------) + I rybnik - 0x002df547, // n0x1ab8 c0x0000 (---------------) + I rzeszow - 0x00205e85, 
// n0x1ab9 c0x0000 (---------------) + I sanok - 0x00222785, // n0x1aba c0x0000 (---------------) + I sejny - 0x002471c3, // n0x1abb c0x0000 (---------------) + I sex - 0x0033ae44, // n0x1abc c0x0000 (---------------) + I shop - 0x0022dec5, // n0x1abd c0x0000 (---------------) + I sklep - 0x002844c7, // n0x1abe c0x0000 (---------------) + I skoczow - 0x002e4c05, // n0x1abf c0x0000 (---------------) + I slask - 0x002cd7c6, // n0x1ac0 c0x0000 (---------------) + I slupsk - 0x000ef545, // n0x1ac1 c0x0000 (---------------) + sopot - 0x0021c883, // n0x1ac2 c0x0000 (---------------) + I sos - 0x002b7649, // n0x1ac3 c0x0000 (---------------) + I sosnowiec - 0x0027590c, // n0x1ac4 c0x0000 (---------------) + I stalowa-wola - 0x0029f98c, // n0x1ac5 c0x0000 (---------------) + I starachowice - 0x002cb548, // n0x1ac6 c0x0000 (---------------) + I stargard - 0x0025fb07, // n0x1ac7 c0x0000 (---------------) + I suwalki - 0x002ed448, // n0x1ac8 c0x0000 (---------------) + I swidnica - 0x002eda4a, // n0x1ac9 c0x0000 (---------------) + I swiebodzin - 0x002ee3cb, // n0x1aca c0x0000 (---------------) + I swinoujscie - 0x002d3a08, // n0x1acb c0x0000 (---------------) + I szczecin - 0x00328348, // n0x1acc c0x0000 (---------------) + I szczytno - 0x00292e86, // n0x1acd c0x0000 (---------------) + I szkola - 0x003225c5, // n0x1ace c0x0000 (---------------) + I targi - 0x00228d8a, // n0x1acf c0x0000 (---------------) + I tarnobrzeg - 0x00220d85, // n0x1ad0 c0x0000 (---------------) + I tgory - 0x00200c42, // n0x1ad1 c0x0000 (---------------) + I tm - 0x002c23c7, // n0x1ad2 c0x0000 (---------------) + I tourism - 0x0029b886, // n0x1ad3 c0x0000 (---------------) + I travel - 0x00352d05, // n0x1ad4 c0x0000 (---------------) + I turek - 0x002efa49, // n0x1ad5 c0x0000 (---------------) + I turystyka - 0x0030b6c5, // n0x1ad6 c0x0000 (---------------) + I tychy - 0x00286705, // n0x1ad7 c0x0000 (---------------) + I ustka - 0x0030bac9, // n0x1ad8 c0x0000 (---------------) + I walbrzych - 
0x002316c6, // n0x1ad9 c0x0000 (---------------) + I warmia - 0x0023f648, // n0x1ada c0x0000 (---------------) + I warszawa - 0x00258e43, // n0x1adb c0x0000 (---------------) + I waw - 0x0020f1c6, // n0x1adc c0x0000 (---------------) + I wegrow - 0x00274306, // n0x1add c0x0000 (---------------) + I wielun - 0x002ff685, // n0x1ade c0x0000 (---------------) + I wlocl - 0x002ff689, // n0x1adf c0x0000 (---------------) + I wloclawek - 0x002b3249, // n0x1ae0 c0x0000 (---------------) + I wodzislaw - 0x00248387, // n0x1ae1 c0x0000 (---------------) + I wolomin - 0x000ff504, // n0x1ae2 c0x0000 (---------------) + wroc - 0x002ff507, // n0x1ae3 c0x0000 (---------------) + I wroclaw - 0x00309149, // n0x1ae4 c0x0000 (---------------) + I zachpomor - 0x0021f305, // n0x1ae5 c0x0000 (---------------) + I zagan - 0x0013a408, // n0x1ae6 c0x0000 (---------------) + zakopane - 0x003336c5, // n0x1ae7 c0x0000 (---------------) + I zarow - 0x00220885, // n0x1ae8 c0x0000 (---------------) + I zgora - 0x0022c689, // n0x1ae9 c0x0000 (---------------) + I zgorzelec - 0x00211702, // n0x1aea c0x0000 (---------------) + I ap - 0x0022bc84, // n0x1aeb c0x0000 (---------------) + I griw - 0x00202442, // n0x1aec c0x0000 (---------------) + I ic - 0x002022c2, // n0x1aed c0x0000 (---------------) + I is - 0x0026e245, // n0x1aee c0x0000 (---------------) + I kmpsp - 0x002cefc8, // n0x1aef c0x0000 (---------------) + I konsulat - 0x0036eac5, // n0x1af0 c0x0000 (---------------) + I kppsp - 0x002bfd03, // n0x1af1 c0x0000 (---------------) + I kwp - 0x002bfd05, // n0x1af2 c0x0000 (---------------) + I kwpsp - 0x002ce843, // n0x1af3 c0x0000 (---------------) + I mup - 0x0020f182, // n0x1af4 c0x0000 (---------------) + I mw - 0x00264644, // n0x1af5 c0x0000 (---------------) + I oirm - 0x002e8f43, // n0x1af6 c0x0000 (---------------) + I oum - 0x00206642, // n0x1af7 c0x0000 (---------------) + I pa - 0x002e2c04, // n0x1af8 c0x0000 (---------------) + I pinb - 0x002d95c3, // n0x1af9 c0x0000 
(---------------) + I piw - 0x00200842, // n0x1afa c0x0000 (---------------) + I po - 0x0026e2c3, // n0x1afb c0x0000 (---------------) + I psp - 0x0028d884, // n0x1afc c0x0000 (---------------) + I psse - 0x002b7383, // n0x1afd c0x0000 (---------------) + I pup - 0x00234c44, // n0x1afe c0x0000 (---------------) + I rzgw - 0x00200fc2, // n0x1aff c0x0000 (---------------) + I sa - 0x0026dec3, // n0x1b00 c0x0000 (---------------) + I sdn - 0x00217183, // n0x1b01 c0x0000 (---------------) + I sko - 0x00208102, // n0x1b02 c0x0000 (---------------) + I so - 0x00334d82, // n0x1b03 c0x0000 (---------------) + I sr - 0x002b3089, // n0x1b04 c0x0000 (---------------) + I starostwo - 0x002054c2, // n0x1b05 c0x0000 (---------------) + I ug - 0x00289e04, // n0x1b06 c0x0000 (---------------) + I ugim - 0x00206042, // n0x1b07 c0x0000 (---------------) + I um - 0x00206044, // n0x1b08 c0x0000 (---------------) + I umig - 0x00253484, // n0x1b09 c0x0000 (---------------) + I upow - 0x002e5b84, // n0x1b0a c0x0000 (---------------) + I uppo - 0x00202202, // n0x1b0b c0x0000 (---------------) + I us - 0x00243082, // n0x1b0c c0x0000 (---------------) + I uw - 0x0020e103, // n0x1b0d c0x0000 (---------------) + I uzs - 0x002ee043, // n0x1b0e c0x0000 (---------------) + I wif - 0x00244404, // n0x1b0f c0x0000 (---------------) + I wiih - 0x0025b084, // n0x1b10 c0x0000 (---------------) + I winb - 0x002c9e84, // n0x1b11 c0x0000 (---------------) + I wios - 0x002cb884, // n0x1b12 c0x0000 (---------------) + I witd - 0x002feb83, // n0x1b13 c0x0000 (---------------) + I wiw - 0x002f43c3, // n0x1b14 c0x0000 (---------------) + I wsa - 0x00311584, // n0x1b15 c0x0000 (---------------) + I wskr - 0x00300644, // n0x1b16 c0x0000 (---------------) + I wuoz - 0x00300946, // n0x1b17 c0x0000 (---------------) + I wzmiuw - 0x00261242, // n0x1b18 c0x0000 (---------------) + I zp - 0x0020a442, // n0x1b19 c0x0000 (---------------) + I co - 0x0023a1c3, // n0x1b1a c0x0000 (---------------) + I edu - 0x0027c5c3, 
// n0x1b1b c0x0000 (---------------) + I gov - 0x002207c3, // n0x1b1c c0x0000 (---------------) + I net - 0x00225403, // n0x1b1d c0x0000 (---------------) + I org - 0x00200342, // n0x1b1e c0x0000 (---------------) + I ac - 0x0032bd03, // n0x1b1f c0x0000 (---------------) + I biz - 0x00234803, // n0x1b20 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1b21 c0x0000 (---------------) + I edu - 0x00202bc3, // n0x1b22 c0x0000 (---------------) + I est - 0x0027c5c3, // n0x1b23 c0x0000 (---------------) + I gov - 0x00201804, // n0x1b24 c0x0000 (---------------) + I info - 0x002b3344, // n0x1b25 c0x0000 (---------------) + I isla - 0x00207d04, // n0x1b26 c0x0000 (---------------) + I name - 0x002207c3, // n0x1b27 c0x0000 (---------------) + I net - 0x00225403, // n0x1b28 c0x0000 (---------------) + I org - 0x002210c3, // n0x1b29 c0x0000 (---------------) + I pro - 0x002e4604, // n0x1b2a c0x0000 (---------------) + I prof - 0x002b6603, // n0x1b2b c0x0000 (---------------) + I aca - 0x00205803, // n0x1b2c c0x0000 (---------------) + I bar - 0x00214d43, // n0x1b2d c0x0000 (---------------) + I cpa - 0x00210703, // n0x1b2e c0x0000 (---------------) + I eng - 0x002b2143, // n0x1b2f c0x0000 (---------------) + I jur - 0x0026d383, // n0x1b30 c0x0000 (---------------) + I law - 0x002127c3, // n0x1b31 c0x0000 (---------------) + I med - 0x00234803, // n0x1b32 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1b33 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1b34 c0x0000 (---------------) + I gov - 0x002207c3, // n0x1b35 c0x0000 (---------------) + I net - 0x00225403, // n0x1b36 c0x0000 (---------------) + I org - 0x002dca83, // n0x1b37 c0x0000 (---------------) + I plo - 0x00236dc3, // n0x1b38 c0x0000 (---------------) + I sec - 0x000ff148, // n0x1b39 c0x0000 (---------------) + blogspot - 0x00234803, // n0x1b3a c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1b3b c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1b3c c0x0000 (---------------) + I gov - 
0x002014c3, // n0x1b3d c0x0000 (---------------) + I int - 0x002207c3, // n0x1b3e c0x0000 (---------------) + I net - 0x00242144, // n0x1b3f c0x0000 (---------------) + I nome - 0x00225403, // n0x1b40 c0x0000 (---------------) + I org - 0x0029e384, // n0x1b41 c0x0000 (---------------) + I publ - 0x002b9f05, // n0x1b42 c0x0000 (---------------) + I belau - 0x0020a442, // n0x1b43 c0x0000 (---------------) + I co - 0x00202ac2, // n0x1b44 c0x0000 (---------------) + I ed - 0x0020ec82, // n0x1b45 c0x0000 (---------------) + I go - 0x002030c2, // n0x1b46 c0x0000 (---------------) + I ne - 0x00200d82, // n0x1b47 c0x0000 (---------------) + I or - 0x00234803, // n0x1b48 c0x0000 (---------------) + I com - 0x0023d404, // n0x1b49 c0x0000 (---------------) + I coop - 0x0023a1c3, // n0x1b4a c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1b4b c0x0000 (---------------) + I gov - 0x002119c3, // n0x1b4c c0x0000 (---------------) + I mil - 0x002207c3, // n0x1b4d c0x0000 (---------------) + I net - 0x00225403, // n0x1b4e c0x0000 (---------------) + I org - 0x000ff148, // n0x1b4f c0x0000 (---------------) + blogspot - 0x00234803, // n0x1b50 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1b51 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1b52 c0x0000 (---------------) + I gov - 0x002119c3, // n0x1b53 c0x0000 (---------------) + I mil - 0x00207d04, // n0x1b54 c0x0000 (---------------) + I name - 0x002207c3, // n0x1b55 c0x0000 (---------------) + I net - 0x00225403, // n0x1b56 c0x0000 (---------------) + I org - 0x00217f43, // n0x1b57 c0x0000 (---------------) + I sch - 0x002ceb44, // n0x1b58 c0x0000 (---------------) + I asso - 0x000ff148, // n0x1b59 c0x0000 (---------------) + blogspot - 0x00234803, // n0x1b5a c0x0000 (---------------) + I com - 0x00201343, // n0x1b5b c0x0000 (---------------) + I nom - 0x0024a4c4, // n0x1b5c c0x0000 (---------------) + I arts - 0x000ff148, // n0x1b5d c0x0000 (---------------) + blogspot - 0x00234803, // n0x1b5e c0x0000 
(---------------) + I com - 0x0024bac4, // n0x1b5f c0x0000 (---------------) + I firm - 0x00201804, // n0x1b60 c0x0000 (---------------) + I info - 0x00201343, // n0x1b61 c0x0000 (---------------) + I nom - 0x002008c2, // n0x1b62 c0x0000 (---------------) + I nt - 0x00225403, // n0x1b63 c0x0000 (---------------) + I org - 0x00229943, // n0x1b64 c0x0000 (---------------) + I rec - 0x00363685, // n0x1b65 c0x0000 (---------------) + I store - 0x00200c42, // n0x1b66 c0x0000 (---------------) + I tm - 0x00300783, // n0x1b67 c0x0000 (---------------) + I www - 0x00200342, // n0x1b68 c0x0000 (---------------) + I ac - 0x000ff148, // n0x1b69 c0x0000 (---------------) + blogspot - 0x0020a442, // n0x1b6a c0x0000 (---------------) + I co - 0x0023a1c3, // n0x1b6b c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1b6c c0x0000 (---------------) + I gov - 0x00201282, // n0x1b6d c0x0000 (---------------) + I in - 0x00225403, // n0x1b6e c0x0000 (---------------) + I org - 0x00200342, // n0x1b6f c0x0000 (---------------) + I ac - 0x002001c7, // n0x1b70 c0x0000 (---------------) + I adygeya - 0x00283ec5, // n0x1b71 c0x0000 (---------------) + I altai - 0x00293304, // n0x1b72 c0x0000 (---------------) + I amur - 0x00379e86, // n0x1b73 c0x0000 (---------------) + I amursk - 0x0023770b, // n0x1b74 c0x0000 (---------------) + I arkhangelsk - 0x00258bc9, // n0x1b75 c0x0000 (---------------) + I astrakhan - 0x00328186, // n0x1b76 c0x0000 (---------------) + I baikal - 0x0032aa09, // n0x1b77 c0x0000 (---------------) + I bashkiria - 0x002d5ec8, // n0x1b78 c0x0000 (---------------) + I belgorod - 0x00206d43, // n0x1b79 c0x0000 (---------------) + I bir - 0x000ff148, // n0x1b7a c0x0000 (---------------) + blogspot - 0x00226887, // n0x1b7b c0x0000 (---------------) + I bryansk - 0x0034e508, // n0x1b7c c0x0000 (---------------) + I buryatia - 0x00314b43, // n0x1b7d c0x0000 (---------------) + I cbg - 0x00262844, // n0x1b7e c0x0000 (---------------) + I chel - 0x002685cb, // n0x1b7f c0x0000 
(---------------) + I chelyabinsk - 0x002af505, // n0x1b80 c0x0000 (---------------) + I chita - 0x002bedc8, // n0x1b81 c0x0000 (---------------) + I chukotka - 0x00338549, // n0x1b82 c0x0000 (---------------) + I chuvashia - 0x0025b003, // n0x1b83 c0x0000 (---------------) + I cmw - 0x00234803, // n0x1b84 c0x0000 (---------------) + I com - 0x00202b08, // n0x1b85 c0x0000 (---------------) + I dagestan - 0x002ec5c7, // n0x1b86 c0x0000 (---------------) + I dudinka - 0x00312d06, // n0x1b87 c0x0000 (---------------) + I e-burg - 0x0023a1c3, // n0x1b88 c0x0000 (---------------) + I edu - 0x00382887, // n0x1b89 c0x0000 (---------------) + I fareast - 0x0027c5c3, // n0x1b8a c0x0000 (---------------) + I gov - 0x002dc906, // n0x1b8b c0x0000 (---------------) + I grozny - 0x002014c3, // n0x1b8c c0x0000 (---------------) + I int - 0x0022dd87, // n0x1b8d c0x0000 (---------------) + I irkutsk - 0x0022ffc7, // n0x1b8e c0x0000 (---------------) + I ivanovo - 0x00383887, // n0x1b8f c0x0000 (---------------) + I izhevsk - 0x002ec105, // n0x1b90 c0x0000 (---------------) + I jamal - 0x00209003, // n0x1b91 c0x0000 (---------------) + I jar - 0x002099cb, // n0x1b92 c0x0000 (---------------) + I joshkar-ola - 0x0032c6c8, // n0x1b93 c0x0000 (---------------) + I k-uralsk - 0x0022c3c8, // n0x1b94 c0x0000 (---------------) + I kalmykia - 0x0024f186, // n0x1b95 c0x0000 (---------------) + I kaluga - 0x00219a49, // n0x1b96 c0x0000 (---------------) + I kamchatka - 0x0030ec87, // n0x1b97 c0x0000 (---------------) + I karelia - 0x002fa105, // n0x1b98 c0x0000 (---------------) + I kazan - 0x00375dc4, // n0x1b99 c0x0000 (---------------) + I kchr - 0x0026ba88, // n0x1b9a c0x0000 (---------------) + I kemerovo - 0x0023548a, // n0x1b9b c0x0000 (---------------) + I khabarovsk - 0x002356c9, // n0x1b9c c0x0000 (---------------) + I khakassia - 0x0024f383, // n0x1b9d c0x0000 (---------------) + I khv - 0x0027eb45, // n0x1b9e c0x0000 (---------------) + I kirov - 0x00285f03, // n0x1b9f c0x0000 
(---------------) + I kms - 0x002a2406, // n0x1ba0 c0x0000 (---------------) + I koenig - 0x002d89c4, // n0x1ba1 c0x0000 (---------------) + I komi - 0x002fc348, // n0x1ba2 c0x0000 (---------------) + I kostroma - 0x00383a0b, // n0x1ba3 c0x0000 (---------------) + I krasnoyarsk - 0x0033f585, // n0x1ba4 c0x0000 (---------------) + I kuban - 0x002b9c86, // n0x1ba5 c0x0000 (---------------) + I kurgan - 0x002bb845, // n0x1ba6 c0x0000 (---------------) + I kursk - 0x002bbd88, // n0x1ba7 c0x0000 (---------------) + I kustanai - 0x002bd287, // n0x1ba8 c0x0000 (---------------) + I kuzbass - 0x002101c7, // n0x1ba9 c0x0000 (---------------) + I lipetsk - 0x00223f07, // n0x1baa c0x0000 (---------------) + I magadan - 0x00215e04, // n0x1bab c0x0000 (---------------) + I mari - 0x0021f5c7, // n0x1bac c0x0000 (---------------) + I mari-el - 0x0027ae46, // n0x1bad c0x0000 (---------------) + I marine - 0x002119c3, // n0x1bae c0x0000 (---------------) + I mil - 0x002c7e48, // n0x1baf c0x0000 (---------------) + I mordovia - 0x00251a03, // n0x1bb0 c0x0000 (---------------) + I msk - 0x002cee08, // n0x1bb1 c0x0000 (---------------) + I murmansk - 0x002d5505, // n0x1bb2 c0x0000 (---------------) + I mytis - 0x0039aac8, // n0x1bb3 c0x0000 (---------------) + I nakhodka - 0x0023a3c7, // n0x1bb4 c0x0000 (---------------) + I nalchik - 0x002207c3, // n0x1bb5 c0x0000 (---------------) + I net - 0x00392103, // n0x1bb6 c0x0000 (---------------) + I nkz - 0x0028b084, // n0x1bb7 c0x0000 (---------------) + I nnov - 0x00371e47, // n0x1bb8 c0x0000 (---------------) + I norilsk - 0x00208343, // n0x1bb9 c0x0000 (---------------) + I nov - 0x0023008b, // n0x1bba c0x0000 (---------------) + I novosibirsk - 0x0020d603, // n0x1bbb c0x0000 (---------------) + I nsk - 0x002519c4, // n0x1bbc c0x0000 (---------------) + I omsk - 0x00363708, // n0x1bbd c0x0000 (---------------) + I orenburg - 0x00225403, // n0x1bbe c0x0000 (---------------) + I org - 0x002d9905, // n0x1bbf c0x0000 (---------------) + I 
oryol - 0x002967c5, // n0x1bc0 c0x0000 (---------------) + I oskol - 0x002088c6, // n0x1bc1 c0x0000 (---------------) + I palana - 0x00211045, // n0x1bc2 c0x0000 (---------------) + I penza - 0x002d58c4, // n0x1bc3 c0x0000 (---------------) + I perm - 0x00210c02, // n0x1bc4 c0x0000 (---------------) + I pp - 0x002e76c3, // n0x1bc5 c0x0000 (---------------) + I ptz - 0x00367d4a, // n0x1bc6 c0x0000 (---------------) + I pyatigorsk - 0x0038fe43, // n0x1bc7 c0x0000 (---------------) + I rnd - 0x002d4589, // n0x1bc8 c0x0000 (---------------) + I rubtsovsk - 0x00321f86, // n0x1bc9 c0x0000 (---------------) + I ryazan - 0x0020d448, // n0x1bca c0x0000 (---------------) + I sakhalin - 0x0028d3c6, // n0x1bcb c0x0000 (---------------) + I samara - 0x002128c7, // n0x1bcc c0x0000 (---------------) + I saratov - 0x00320a08, // n0x1bcd c0x0000 (---------------) + I simbirsk - 0x002d2308, // n0x1bce c0x0000 (---------------) + I smolensk - 0x0034e983, // n0x1bcf c0x0000 (---------------) + I snz - 0x0026e303, // n0x1bd0 c0x0000 (---------------) + I spb - 0x00217bc9, // n0x1bd1 c0x0000 (---------------) + I stavropol - 0x0028c6c3, // n0x1bd2 c0x0000 (---------------) + I stv - 0x003446c6, // n0x1bd3 c0x0000 (---------------) + I surgut - 0x00283b46, // n0x1bd4 c0x0000 (---------------) + I syzran - 0x0031bdc6, // n0x1bd5 c0x0000 (---------------) + I tambov - 0x0036a549, // n0x1bd6 c0x0000 (---------------) + I tatarstan - 0x002fef84, // n0x1bd7 c0x0000 (---------------) + I test - 0x00206a03, // n0x1bd8 c0x0000 (---------------) + I tom - 0x0032c5c5, // n0x1bd9 c0x0000 (---------------) + I tomsk - 0x002f4789, // n0x1bda c0x0000 (---------------) + I tsaritsyn - 0x002102c3, // n0x1bdb c0x0000 (---------------) + I tsk - 0x0035ac44, // n0x1bdc c0x0000 (---------------) + I tula - 0x002f0744, // n0x1bdd c0x0000 (---------------) + I tuva - 0x0020cd04, // n0x1bde c0x0000 (---------------) + I tver - 0x00227546, // n0x1bdf c0x0000 (---------------) + I tyumen - 0x00204503, // n0x1be0 
c0x0000 (---------------) + I udm - 0x00204508, // n0x1be1 c0x0000 (---------------) + I udmurtia - 0x00258808, // n0x1be2 c0x0000 (---------------) + I ulan-ude - 0x0033ec46, // n0x1be3 c0x0000 (---------------) + I vdonsk - 0x002f9f0b, // n0x1be4 c0x0000 (---------------) + I vladikavkaz - 0x002fa248, // n0x1be5 c0x0000 (---------------) + I vladimir - 0x002fa44b, // n0x1be6 c0x0000 (---------------) + I vladivostok - 0x002fc989, // n0x1be7 c0x0000 (---------------) + I volgograd - 0x002fc107, // n0x1be8 c0x0000 (---------------) + I vologda - 0x002fce08, // n0x1be9 c0x0000 (---------------) + I voronezh - 0x002fe5c3, // n0x1bea c0x0000 (---------------) + I vrn - 0x0038a506, // n0x1beb c0x0000 (---------------) + I vyatka - 0x0020a7c7, // n0x1bec c0x0000 (---------------) + I yakutia - 0x00297e85, // n0x1bed c0x0000 (---------------) + I yamal - 0x00348909, // n0x1bee c0x0000 (---------------) + I yaroslavl - 0x0030e48d, // n0x1bef c0x0000 (---------------) + I yekaterinburg - 0x0020d291, // n0x1bf0 c0x0000 (---------------) + I yuzhno-sakhalinsk - 0x00236185, // n0x1bf1 c0x0000 (---------------) + I zgrad - 0x00200342, // n0x1bf2 c0x0000 (---------------) + I ac - 0x0020a442, // n0x1bf3 c0x0000 (---------------) + I co - 0x00234803, // n0x1bf4 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1bf5 c0x0000 (---------------) + I edu - 0x0023ef84, // n0x1bf6 c0x0000 (---------------) + I gouv - 0x0027c5c3, // n0x1bf7 c0x0000 (---------------) + I gov - 0x002014c3, // n0x1bf8 c0x0000 (---------------) + I int - 0x002119c3, // n0x1bf9 c0x0000 (---------------) + I mil - 0x002207c3, // n0x1bfa c0x0000 (---------------) + I net - 0x00234803, // n0x1bfb c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1bfc c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1bfd c0x0000 (---------------) + I gov - 0x002127c3, // n0x1bfe c0x0000 (---------------) + I med - 0x002207c3, // n0x1bff c0x0000 (---------------) + I net - 0x00225403, // n0x1c00 c0x0000 
(---------------) + I org - 0x0029e383, // n0x1c01 c0x0000 (---------------) + I pub - 0x00217f43, // n0x1c02 c0x0000 (---------------) + I sch - 0x00234803, // n0x1c03 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1c04 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1c05 c0x0000 (---------------) + I gov - 0x002207c3, // n0x1c06 c0x0000 (---------------) + I net - 0x00225403, // n0x1c07 c0x0000 (---------------) + I org - 0x00234803, // n0x1c08 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1c09 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1c0a c0x0000 (---------------) + I gov - 0x002207c3, // n0x1c0b c0x0000 (---------------) + I net - 0x00225403, // n0x1c0c c0x0000 (---------------) + I org - 0x00234803, // n0x1c0d c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1c0e c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1c0f c0x0000 (---------------) + I gov - 0x00201804, // n0x1c10 c0x0000 (---------------) + I info - 0x002127c3, // n0x1c11 c0x0000 (---------------) + I med - 0x002207c3, // n0x1c12 c0x0000 (---------------) + I net - 0x00225403, // n0x1c13 c0x0000 (---------------) + I org - 0x0020cd02, // n0x1c14 c0x0000 (---------------) + I tv - 0x002001c1, // n0x1c15 c0x0000 (---------------) + I a - 0x00200342, // n0x1c16 c0x0000 (---------------) + I ac - 0x00200001, // n0x1c17 c0x0000 (---------------) + I b - 0x00319882, // n0x1c18 c0x0000 (---------------) + I bd - 0x000ff148, // n0x1c19 c0x0000 (---------------) + blogspot - 0x0021ce45, // n0x1c1a c0x0000 (---------------) + I brand - 0x00200141, // n0x1c1b c0x0000 (---------------) + I c - 0x00034803, // n0x1c1c c0x0000 (---------------) + com - 0x00200201, // n0x1c1d c0x0000 (---------------) + I d - 0x00200081, // n0x1c1e c0x0000 (---------------) + I e - 0x002016c1, // n0x1c1f c0x0000 (---------------) + I f - 0x002353c2, // n0x1c20 c0x0000 (---------------) + I fh - 0x002353c4, // n0x1c21 c0x0000 (---------------) + I fhsk - 0x00362683, // n0x1c22 c0x0000 
(---------------) + I fhv - 0x00200281, // n0x1c23 c0x0000 (---------------) + I g - 0x002003c1, // n0x1c24 c0x0000 (---------------) + I h - 0x00200041, // n0x1c25 c0x0000 (---------------) + I i - 0x00201b01, // n0x1c26 c0x0000 (---------------) + I k - 0x00399d07, // n0x1c27 c0x0000 (---------------) + I komforb - 0x00383c8f, // n0x1c28 c0x0000 (---------------) + I kommunalforbund - 0x002da9c6, // n0x1c29 c0x0000 (---------------) + I komvux - 0x00200d01, // n0x1c2a c0x0000 (---------------) + I l - 0x00265b46, // n0x1c2b c0x0000 (---------------) + I lanbib - 0x00200441, // n0x1c2c c0x0000 (---------------) + I m - 0x00200781, // n0x1c2d c0x0000 (---------------) + I n - 0x002e398e, // n0x1c2e c0x0000 (---------------) + I naturbruksgymn - 0x00200881, // n0x1c2f c0x0000 (---------------) + I o - 0x00225403, // n0x1c30 c0x0000 (---------------) + I org - 0x00200581, // n0x1c31 c0x0000 (---------------) + I p - 0x00298805, // n0x1c32 c0x0000 (---------------) + I parti - 0x00210c02, // n0x1c33 c0x0000 (---------------) + I pp - 0x002470c5, // n0x1c34 c0x0000 (---------------) + I press - 0x002006c1, // n0x1c35 c0x0000 (---------------) + I r - 0x002000c1, // n0x1c36 c0x0000 (---------------) + I s - 0x002004c1, // n0x1c37 c0x0000 (---------------) + I t - 0x00200c42, // n0x1c38 c0x0000 (---------------) + I tm - 0x00200741, // n0x1c39 c0x0000 (---------------) + I u - 0x00201c01, // n0x1c3a c0x0000 (---------------) + I w - 0x00206501, // n0x1c3b c0x0000 (---------------) + I x - 0x00200241, // n0x1c3c c0x0000 (---------------) + I y - 0x00200101, // n0x1c3d c0x0000 (---------------) + I z - 0x000ff148, // n0x1c3e c0x0000 (---------------) + blogspot - 0x00234803, // n0x1c3f c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1c40 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1c41 c0x0000 (---------------) + I gov - 0x002207c3, // n0x1c42 c0x0000 (---------------) + I net - 0x00225403, // n0x1c43 c0x0000 (---------------) + I org - 0x00221183, // 
n0x1c44 c0x0000 (---------------) + I per - 0x00234803, // n0x1c45 c0x0000 (---------------) + I com - 0x0027c5c3, // n0x1c46 c0x0000 (---------------) + I gov - 0x0008dcc8, // n0x1c47 c0x0000 (---------------) + hashbang - 0x002119c3, // n0x1c48 c0x0000 (---------------) + I mil - 0x002207c3, // n0x1c49 c0x0000 (---------------) + I net - 0x00225403, // n0x1c4a c0x0000 (---------------) + I org - 0x014da588, // n0x1c4b c0x0005 (---------------)* o platform - 0x000ff148, // n0x1c4c c0x0000 (---------------) + blogspot - 0x000ff148, // n0x1c4d c0x0000 (---------------) + blogspot - 0x00234803, // n0x1c4e c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1c4f c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1c50 c0x0000 (---------------) + I gov - 0x002207c3, // n0x1c51 c0x0000 (---------------) + I net - 0x00225403, // n0x1c52 c0x0000 (---------------) + I org - 0x00201d03, // n0x1c53 c0x0000 (---------------) + I art - 0x000ff148, // n0x1c54 c0x0000 (---------------) + blogspot - 0x00234803, // n0x1c55 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1c56 c0x0000 (---------------) + I edu - 0x0023ef84, // n0x1c57 c0x0000 (---------------) + I gouv - 0x00225403, // n0x1c58 c0x0000 (---------------) + I org - 0x00294b45, // n0x1c59 c0x0000 (---------------) + I perso - 0x0030c344, // n0x1c5a c0x0000 (---------------) + I univ - 0x00234803, // n0x1c5b c0x0000 (---------------) + I com - 0x002207c3, // n0x1c5c c0x0000 (---------------) + I net - 0x00225403, // n0x1c5d c0x0000 (---------------) + I org - 0x0020a442, // n0x1c5e c0x0000 (---------------) + I co - 0x00234803, // n0x1c5f c0x0000 (---------------) + I com - 0x002382c9, // n0x1c60 c0x0000 (---------------) + I consulado - 0x0023a1c3, // n0x1c61 c0x0000 (---------------) + I edu - 0x0023c549, // n0x1c62 c0x0000 (---------------) + I embaixada - 0x0027c5c3, // n0x1c63 c0x0000 (---------------) + I gov - 0x002119c3, // n0x1c64 c0x0000 (---------------) + I mil - 0x002207c3, // n0x1c65 c0x0000 
(---------------) + I net - 0x00225403, // n0x1c66 c0x0000 (---------------) + I org - 0x002e1c88, // n0x1c67 c0x0000 (---------------) + I principe - 0x0020ff47, // n0x1c68 c0x0000 (---------------) + I saotome - 0x00363685, // n0x1c69 c0x0000 (---------------) + I store - 0x002001c7, // n0x1c6a c0x0000 (---------------) + I adygeya - 0x0023770b, // n0x1c6b c0x0000 (---------------) + I arkhangelsk - 0x0020c008, // n0x1c6c c0x0000 (---------------) + I balashov - 0x0032aa09, // n0x1c6d c0x0000 (---------------) + I bashkiria - 0x00226887, // n0x1c6e c0x0000 (---------------) + I bryansk - 0x00202b08, // n0x1c6f c0x0000 (---------------) + I dagestan - 0x002dc906, // n0x1c70 c0x0000 (---------------) + I grozny - 0x0022ffc7, // n0x1c71 c0x0000 (---------------) + I ivanovo - 0x0022c3c8, // n0x1c72 c0x0000 (---------------) + I kalmykia - 0x0024f186, // n0x1c73 c0x0000 (---------------) + I kaluga - 0x0030ec87, // n0x1c74 c0x0000 (---------------) + I karelia - 0x002356c9, // n0x1c75 c0x0000 (---------------) + I khakassia - 0x0037ab49, // n0x1c76 c0x0000 (---------------) + I krasnodar - 0x002b9c86, // n0x1c77 c0x0000 (---------------) + I kurgan - 0x002bab85, // n0x1c78 c0x0000 (---------------) + I lenug - 0x002c7e48, // n0x1c79 c0x0000 (---------------) + I mordovia - 0x00251a03, // n0x1c7a c0x0000 (---------------) + I msk - 0x002cee08, // n0x1c7b c0x0000 (---------------) + I murmansk - 0x0023a3c7, // n0x1c7c c0x0000 (---------------) + I nalchik - 0x00208343, // n0x1c7d c0x0000 (---------------) + I nov - 0x00228747, // n0x1c7e c0x0000 (---------------) + I obninsk - 0x00211045, // n0x1c7f c0x0000 (---------------) + I penza - 0x002de208, // n0x1c80 c0x0000 (---------------) + I pokrovsk - 0x002738c5, // n0x1c81 c0x0000 (---------------) + I sochi - 0x0026e303, // n0x1c82 c0x0000 (---------------) + I spb - 0x00352589, // n0x1c83 c0x0000 (---------------) + I togliatti - 0x002a9e47, // n0x1c84 c0x0000 (---------------) + I troitsk - 0x0035ac44, // n0x1c85 
c0x0000 (---------------) + I tula - 0x002f0744, // n0x1c86 c0x0000 (---------------) + I tuva - 0x002f9f0b, // n0x1c87 c0x0000 (---------------) + I vladikavkaz - 0x002fa248, // n0x1c88 c0x0000 (---------------) + I vladimir - 0x002fc107, // n0x1c89 c0x0000 (---------------) + I vologda - 0x00234803, // n0x1c8a c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1c8b c0x0000 (---------------) + I edu - 0x00211543, // n0x1c8c c0x0000 (---------------) + I gob - 0x00225403, // n0x1c8d c0x0000 (---------------) + I org - 0x00239b03, // n0x1c8e c0x0000 (---------------) + I red - 0x0027c5c3, // n0x1c8f c0x0000 (---------------) + I gov - 0x00234803, // n0x1c90 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1c91 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1c92 c0x0000 (---------------) + I gov - 0x002119c3, // n0x1c93 c0x0000 (---------------) + I mil - 0x002207c3, // n0x1c94 c0x0000 (---------------) + I net - 0x00225403, // n0x1c95 c0x0000 (---------------) + I org - 0x00200342, // n0x1c96 c0x0000 (---------------) + I ac - 0x0020a442, // n0x1c97 c0x0000 (---------------) + I co - 0x00225403, // n0x1c98 c0x0000 (---------------) + I org - 0x000ff148, // n0x1c99 c0x0000 (---------------) + blogspot - 0x00200342, // n0x1c9a c0x0000 (---------------) + I ac - 0x0020a442, // n0x1c9b c0x0000 (---------------) + I co - 0x0020ec82, // n0x1c9c c0x0000 (---------------) + I go - 0x00201282, // n0x1c9d c0x0000 (---------------) + I in - 0x00206082, // n0x1c9e c0x0000 (---------------) + I mi - 0x002207c3, // n0x1c9f c0x0000 (---------------) + I net - 0x00200d82, // n0x1ca0 c0x0000 (---------------) + I or - 0x00200342, // n0x1ca1 c0x0000 (---------------) + I ac - 0x0032bd03, // n0x1ca2 c0x0000 (---------------) + I biz - 0x0020a442, // n0x1ca3 c0x0000 (---------------) + I co - 0x00234803, // n0x1ca4 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1ca5 c0x0000 (---------------) + I edu - 0x0020ec82, // n0x1ca6 c0x0000 (---------------) + I go - 
0x0027c5c3, // n0x1ca7 c0x0000 (---------------) + I gov - 0x002014c3, // n0x1ca8 c0x0000 (---------------) + I int - 0x002119c3, // n0x1ca9 c0x0000 (---------------) + I mil - 0x00207d04, // n0x1caa c0x0000 (---------------) + I name - 0x002207c3, // n0x1cab c0x0000 (---------------) + I net - 0x0021a7c3, // n0x1cac c0x0000 (---------------) + I nic - 0x00225403, // n0x1cad c0x0000 (---------------) + I org - 0x002fef84, // n0x1cae c0x0000 (---------------) + I test - 0x00221cc3, // n0x1caf c0x0000 (---------------) + I web - 0x0027c5c3, // n0x1cb0 c0x0000 (---------------) + I gov - 0x0020a442, // n0x1cb1 c0x0000 (---------------) + I co - 0x00234803, // n0x1cb2 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1cb3 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1cb4 c0x0000 (---------------) + I gov - 0x002119c3, // n0x1cb5 c0x0000 (---------------) + I mil - 0x002207c3, // n0x1cb6 c0x0000 (---------------) + I net - 0x00201343, // n0x1cb7 c0x0000 (---------------) + I nom - 0x00225403, // n0x1cb8 c0x0000 (---------------) + I org - 0x00391ec7, // n0x1cb9 c0x0000 (---------------) + I agrinet - 0x00234803, // n0x1cba c0x0000 (---------------) + I com - 0x00222647, // n0x1cbb c0x0000 (---------------) + I defense - 0x0026e986, // n0x1cbc c0x0000 (---------------) + I edunet - 0x00214603, // n0x1cbd c0x0000 (---------------) + I ens - 0x002016c3, // n0x1cbe c0x0000 (---------------) + I fin - 0x0027c5c3, // n0x1cbf c0x0000 (---------------) + I gov - 0x0021e783, // n0x1cc0 c0x0000 (---------------) + I ind - 0x00201804, // n0x1cc1 c0x0000 (---------------) + I info - 0x0036b204, // n0x1cc2 c0x0000 (---------------) + I intl - 0x002da746, // n0x1cc3 c0x0000 (---------------) + I mincom - 0x0021a003, // n0x1cc4 c0x0000 (---------------) + I nat - 0x002207c3, // n0x1cc5 c0x0000 (---------------) + I net - 0x00225403, // n0x1cc6 c0x0000 (---------------) + I org - 0x00294b45, // n0x1cc7 c0x0000 (---------------) + I perso - 0x0020ba44, // n0x1cc8 c0x0000 
(---------------) + I rnrt - 0x00356943, // n0x1cc9 c0x0000 (---------------) + I rns - 0x00378e43, // n0x1cca c0x0000 (---------------) + I rnu - 0x002c23c7, // n0x1ccb c0x0000 (---------------) + I tourism - 0x00270145, // n0x1ccc c0x0000 (---------------) + I turen - 0x00234803, // n0x1ccd c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1cce c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1ccf c0x0000 (---------------) + I gov - 0x002119c3, // n0x1cd0 c0x0000 (---------------) + I mil - 0x002207c3, // n0x1cd1 c0x0000 (---------------) + I net - 0x00225403, // n0x1cd2 c0x0000 (---------------) + I org - 0x00208482, // n0x1cd3 c0x0000 (---------------) + I av - 0x002cd743, // n0x1cd4 c0x0000 (---------------) + I bbs - 0x00285a83, // n0x1cd5 c0x0000 (---------------) + I bel - 0x0032bd03, // n0x1cd6 c0x0000 (---------------) + I biz - 0x51a34803, // n0x1cd7 c0x0146 (n0x1ce8-n0x1ce9) + I com - 0x002271c2, // n0x1cd8 c0x0000 (---------------) + I dr - 0x0023a1c3, // n0x1cd9 c0x0000 (---------------) + I edu - 0x002082c3, // n0x1cda c0x0000 (---------------) + I gen - 0x0027c5c3, // n0x1cdb c0x0000 (---------------) + I gov - 0x00201804, // n0x1cdc c0x0000 (---------------) + I info - 0x0032c883, // n0x1cdd c0x0000 (---------------) + I k12 - 0x00230b83, // n0x1cde c0x0000 (---------------) + I kep - 0x002119c3, // n0x1cdf c0x0000 (---------------) + I mil - 0x00207d04, // n0x1ce0 c0x0000 (---------------) + I name - 0x51e04d42, // n0x1ce1 c0x0147 (n0x1ce9-n0x1cea) + I nc - 0x002207c3, // n0x1ce2 c0x0000 (---------------) + I net - 0x00225403, // n0x1ce3 c0x0000 (---------------) + I org - 0x00204343, // n0x1ce4 c0x0000 (---------------) + I pol - 0x0022a4c3, // n0x1ce5 c0x0000 (---------------) + I tel - 0x0020cd02, // n0x1ce6 c0x0000 (---------------) + I tv - 0x00221cc3, // n0x1ce7 c0x0000 (---------------) + I web - 0x000ff148, // n0x1ce8 c0x0000 (---------------) + blogspot - 0x0027c5c3, // n0x1ce9 c0x0000 (---------------) + I gov - 0x00232584, // 
n0x1cea c0x0000 (---------------) + I aero - 0x0032bd03, // n0x1ceb c0x0000 (---------------) + I biz - 0x0020a442, // n0x1cec c0x0000 (---------------) + I co - 0x00234803, // n0x1ced c0x0000 (---------------) + I com - 0x0023d404, // n0x1cee c0x0000 (---------------) + I coop - 0x0023a1c3, // n0x1cef c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1cf0 c0x0000 (---------------) + I gov - 0x00201804, // n0x1cf1 c0x0000 (---------------) + I info - 0x002014c3, // n0x1cf2 c0x0000 (---------------) + I int - 0x002c3c44, // n0x1cf3 c0x0000 (---------------) + I jobs - 0x00209504, // n0x1cf4 c0x0000 (---------------) + I mobi - 0x002d1806, // n0x1cf5 c0x0000 (---------------) + I museum - 0x00207d04, // n0x1cf6 c0x0000 (---------------) + I name - 0x002207c3, // n0x1cf7 c0x0000 (---------------) + I net - 0x00225403, // n0x1cf8 c0x0000 (---------------) + I org - 0x002210c3, // n0x1cf9 c0x0000 (---------------) + I pro - 0x0029b886, // n0x1cfa c0x0000 (---------------) + I travel - 0x0005518b, // n0x1cfb c0x0000 (---------------) + better-than - 0x00012586, // n0x1cfc c0x0000 (---------------) + dyndns - 0x00021b0a, // n0x1cfd c0x0000 (---------------) + on-the-web - 0x000fe68a, // n0x1cfe c0x0000 (---------------) + worse-than - 0x000ff148, // n0x1cff c0x0000 (---------------) + blogspot - 0x00232684, // n0x1d00 c0x0000 (---------------) + I club - 0x00234803, // n0x1d01 c0x0000 (---------------) + I com - 0x0032bcc4, // n0x1d02 c0x0000 (---------------) + I ebiz - 0x0023a1c3, // n0x1d03 c0x0000 (---------------) + I edu - 0x00297004, // n0x1d04 c0x0000 (---------------) + I game - 0x0027c5c3, // n0x1d05 c0x0000 (---------------) + I gov - 0x0031e343, // n0x1d06 c0x0000 (---------------) + I idv - 0x002119c3, // n0x1d07 c0x0000 (---------------) + I mil - 0x002207c3, // n0x1d08 c0x0000 (---------------) + I net - 0x00225403, // n0x1d09 c0x0000 (---------------) + I org - 0x0032708b, // n0x1d0a c0x0000 (---------------) + I xn--czrw28b - 0x003938ca, // n0x1d0b 
c0x0000 (---------------) + I xn--uc0atv - 0x003a484c, // n0x1d0c c0x0000 (---------------) + I xn--zf0ao64a - 0x00200342, // n0x1d0d c0x0000 (---------------) + I ac - 0x0020a442, // n0x1d0e c0x0000 (---------------) + I co - 0x0020ec82, // n0x1d0f c0x0000 (---------------) + I go - 0x00235ec5, // n0x1d10 c0x0000 (---------------) + I hotel - 0x00201804, // n0x1d11 c0x0000 (---------------) + I info - 0x00205d82, // n0x1d12 c0x0000 (---------------) + I me - 0x002119c3, // n0x1d13 c0x0000 (---------------) + I mil - 0x00209504, // n0x1d14 c0x0000 (---------------) + I mobi - 0x002030c2, // n0x1d15 c0x0000 (---------------) + I ne - 0x00200d82, // n0x1d16 c0x0000 (---------------) + I or - 0x00217f42, // n0x1d17 c0x0000 (---------------) + I sc - 0x0020cd02, // n0x1d18 c0x0000 (---------------) + I tv - 0x0012bd03, // n0x1d19 c0x0000 (---------------) + biz - 0x002aaf09, // n0x1d1a c0x0000 (---------------) + I cherkassy - 0x002839c8, // n0x1d1b c0x0000 (---------------) + I cherkasy - 0x0027c449, // n0x1d1c c0x0000 (---------------) + I chernigov - 0x002879c9, // n0x1d1d c0x0000 (---------------) + I chernihiv - 0x00372a4a, // n0x1d1e c0x0000 (---------------) + I chernivtsi - 0x00366b8a, // n0x1d1f c0x0000 (---------------) + I chernovtsy - 0x0020a682, // n0x1d20 c0x0000 (---------------) + I ck - 0x0021b802, // n0x1d21 c0x0000 (---------------) + I cn - 0x0000a442, // n0x1d22 c0x0000 (---------------) + co - 0x00234803, // n0x1d23 c0x0000 (---------------) + I com - 0x00207442, // n0x1d24 c0x0000 (---------------) + I cr - 0x00244fc6, // n0x1d25 c0x0000 (---------------) + I crimea - 0x00353d02, // n0x1d26 c0x0000 (---------------) + I cv - 0x0020fec2, // n0x1d27 c0x0000 (---------------) + I dn - 0x002d488e, // n0x1d28 c0x0000 (---------------) + I dnepropetrovsk - 0x0026df0e, // n0x1d29 c0x0000 (---------------) + I dnipropetrovsk - 0x0027c2c7, // n0x1d2a c0x0000 (---------------) + I dominic - 0x0030d307, // n0x1d2b c0x0000 (---------------) + I donetsk - 
0x002e1442, // n0x1d2c c0x0000 (---------------) + I dp - 0x0023a1c3, // n0x1d2d c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1d2e c0x0000 (---------------) + I gov - 0x00201a42, // n0x1d2f c0x0000 (---------------) + I if - 0x00201282, // n0x1d30 c0x0000 (---------------) + I in - 0x002413cf, // n0x1d31 c0x0000 (---------------) + I ivano-frankivsk - 0x0020d4c2, // n0x1d32 c0x0000 (---------------) + I kh - 0x0023fb07, // n0x1d33 c0x0000 (---------------) + I kharkiv - 0x00240547, // n0x1d34 c0x0000 (---------------) + I kharkov - 0x00241747, // n0x1d35 c0x0000 (---------------) + I kherson - 0x002489cc, // n0x1d36 c0x0000 (---------------) + I khmelnitskiy - 0x0024964c, // n0x1d37 c0x0000 (---------------) + I khmelnytskyi - 0x00202f84, // n0x1d38 c0x0000 (---------------) + I kiev - 0x0027eb4a, // n0x1d39 c0x0000 (---------------) + I kirovograd - 0x0022ef82, // n0x1d3a c0x0000 (---------------) + I km - 0x002093c2, // n0x1d3b c0x0000 (---------------) + I kr - 0x002b3a84, // n0x1d3c c0x0000 (---------------) + I krym - 0x00265302, // n0x1d3d c0x0000 (---------------) + I ks - 0x002bdf82, // n0x1d3e c0x0000 (---------------) + I kv - 0x00249884, // n0x1d3f c0x0000 (---------------) + I kyiv - 0x0021a8c2, // n0x1d40 c0x0000 (---------------) + I lg - 0x0020e982, // n0x1d41 c0x0000 (---------------) + I lt - 0x0024f207, // n0x1d42 c0x0000 (---------------) + I lugansk - 0x00235285, // n0x1d43 c0x0000 (---------------) + I lutsk - 0x0020d042, // n0x1d44 c0x0000 (---------------) + I lv - 0x00241344, // n0x1d45 c0x0000 (---------------) + I lviv - 0x00365b42, // n0x1d46 c0x0000 (---------------) + I mk - 0x0022fe48, // n0x1d47 c0x0000 (---------------) + I mykolaiv - 0x002207c3, // n0x1d48 c0x0000 (---------------) + I net - 0x003a3d88, // n0x1d49 c0x0000 (---------------) + I nikolaev - 0x00200a02, // n0x1d4a c0x0000 (---------------) + I od - 0x0023b505, // n0x1d4b c0x0000 (---------------) + I odesa - 0x0036fa06, // n0x1d4c c0x0000 (---------------) + I 
odessa - 0x00225403, // n0x1d4d c0x0000 (---------------) + I org - 0x00204bc2, // n0x1d4e c0x0000 (---------------) + I pl - 0x002deac7, // n0x1d4f c0x0000 (---------------) + I poltava - 0x00010c02, // n0x1d50 c0x0000 (---------------) + pp - 0x002e1ec5, // n0x1d51 c0x0000 (---------------) + I rivne - 0x00386f45, // n0x1d52 c0x0000 (---------------) + I rovno - 0x002023c2, // n0x1d53 c0x0000 (---------------) + I rv - 0x00225382, // n0x1d54 c0x0000 (---------------) + I sb - 0x0020418a, // n0x1d55 c0x0000 (---------------) + I sebastopol - 0x002462ca, // n0x1d56 c0x0000 (---------------) + I sevastopol - 0x00211982, // n0x1d57 c0x0000 (---------------) + I sm - 0x00357884, // n0x1d58 c0x0000 (---------------) + I sumy - 0x00201182, // n0x1d59 c0x0000 (---------------) + I te - 0x0030bd88, // n0x1d5a c0x0000 (---------------) + I ternopil - 0x00209182, // n0x1d5b c0x0000 (---------------) + I uz - 0x0029c988, // n0x1d5c c0x0000 (---------------) + I uzhgorod - 0x002f5447, // n0x1d5d c0x0000 (---------------) + I vinnica - 0x002f6009, // n0x1d5e c0x0000 (---------------) + I vinnytsia - 0x002e1f42, // n0x1d5f c0x0000 (---------------) + I vn - 0x002fcbc5, // n0x1d60 c0x0000 (---------------) + I volyn - 0x00283e85, // n0x1d61 c0x0000 (---------------) + I yalta - 0x002c5bcb, // n0x1d62 c0x0000 (---------------) + I zaporizhzhe - 0x002c660c, // n0x1d63 c0x0000 (---------------) + I zaporizhzhia - 0x0022dc08, // n0x1d64 c0x0000 (---------------) + I zhitomir - 0x002fcf88, // n0x1d65 c0x0000 (---------------) + I zhytomyr - 0x00261242, // n0x1d66 c0x0000 (---------------) + I zp - 0x0020f682, // n0x1d67 c0x0000 (---------------) + I zt - 0x00200342, // n0x1d68 c0x0000 (---------------) + I ac - 0x000ff148, // n0x1d69 c0x0000 (---------------) + blogspot - 0x0020a442, // n0x1d6a c0x0000 (---------------) + I co - 0x00234803, // n0x1d6b c0x0000 (---------------) + I com - 0x0020ec82, // n0x1d6c c0x0000 (---------------) + I go - 0x002030c2, // n0x1d6d c0x0000 
(---------------) + I ne - 0x00200d82, // n0x1d6e c0x0000 (---------------) + I or - 0x00225403, // n0x1d6f c0x0000 (---------------) + I org - 0x00217f42, // n0x1d70 c0x0000 (---------------) + I sc - 0x00200342, // n0x1d71 c0x0000 (---------------) + I ac - 0x53e0a442, // n0x1d72 c0x014f (n0x1d7c-n0x1d7d) + I co - 0x5427c5c3, // n0x1d73 c0x0150 (n0x1d7d-n0x1d7e) + I gov - 0x0030dc03, // n0x1d74 c0x0000 (---------------) + I ltd - 0x00205d82, // n0x1d75 c0x0000 (---------------) + I me - 0x002207c3, // n0x1d76 c0x0000 (---------------) + I net - 0x0036a743, // n0x1d77 c0x0000 (---------------) + I nhs - 0x00225403, // n0x1d78 c0x0000 (---------------) + I org - 0x002db1c3, // n0x1d79 c0x0000 (---------------) + I plc - 0x00217d46, // n0x1d7a c0x0000 (---------------) + I police - 0x01617f43, // n0x1d7b c0x0005 (---------------)* o I sch - 0x000ff148, // n0x1d7c c0x0000 (---------------) + blogspot - 0x00002347, // n0x1d7d c0x0000 (---------------) + service - 0x54a02f42, // n0x1d7e c0x0152 (n0x1dbd-n0x1dc0) + I ak - 0x54e00cc2, // n0x1d7f c0x0153 (n0x1dc0-n0x1dc3) + I al - 0x55200942, // n0x1d80 c0x0154 (n0x1dc3-n0x1dc6) + I ar - 0x55603b02, // n0x1d81 c0x0155 (n0x1dc6-n0x1dc9) + I as - 0x55a06342, // n0x1d82 c0x0156 (n0x1dc9-n0x1dcc) + I az - 0x55e00e02, // n0x1d83 c0x0157 (n0x1dcc-n0x1dcf) + I ca - 0x5620a442, // n0x1d84 c0x0158 (n0x1dcf-n0x1dd2) + I co - 0x56628d42, // n0x1d85 c0x0159 (n0x1dd2-n0x1dd5) + I ct - 0x56a20502, // n0x1d86 c0x015a (n0x1dd5-n0x1dd8) + I dc - 0x56e07802, // n0x1d87 c0x015b (n0x1dd8-n0x1ddb) + I de - 0x0026df03, // n0x1d88 c0x0000 (---------------) + I dni - 0x0020e2c3, // n0x1d89 c0x0000 (---------------) + I fed - 0x572180c2, // n0x1d8a c0x015c (n0x1ddb-n0x1dde) + I fl - 0x57601b82, // n0x1d8b c0x015d (n0x1dde-n0x1de1) + I ga - 0x57a0c442, // n0x1d8c c0x015e (n0x1de1-n0x1de4) + I gu - 0x57e003c2, // n0x1d8d c0x015f (n0x1de4-n0x1de6) + I hi - 0x58201982, // n0x1d8e c0x0160 (n0x1de6-n0x1de9) + I ia - 0x5860ae82, // n0x1d8f c0x0161 
(n0x1de9-n0x1dec) + I id - 0x58a02dc2, // n0x1d90 c0x0162 (n0x1dec-n0x1def) + I il - 0x58e01282, // n0x1d91 c0x0163 (n0x1def-n0x1df2) + I in - 0x001680c5, // n0x1d92 c0x0000 (---------------) + is-by - 0x00223803, // n0x1d93 c0x0000 (---------------) + I isa - 0x0028c604, // n0x1d94 c0x0000 (---------------) + I kids - 0x59265302, // n0x1d95 c0x0164 (n0x1df2-n0x1df5) + I ks - 0x59637982, // n0x1d96 c0x0165 (n0x1df5-n0x1df8) + I ky - 0x59a08942, // n0x1d97 c0x0166 (n0x1df8-n0x1dfb) + I la - 0x0007820b, // n0x1d98 c0x0000 (---------------) + land-4-sale - 0x59e00442, // n0x1d99 c0x0167 (n0x1dfb-n0x1dfe) + I ma - 0x5a64bb82, // n0x1d9a c0x0169 (n0x1e01-n0x1e04) + I md - 0x5aa05d82, // n0x1d9b c0x016a (n0x1e04-n0x1e07) + I me - 0x5ae06082, // n0x1d9c c0x016b (n0x1e07-n0x1e0a) + I mi - 0x5b220782, // n0x1d9d c0x016c (n0x1e0a-n0x1e0d) + I mn - 0x5b609502, // n0x1d9e c0x016d (n0x1e0d-n0x1e10) + I mo - 0x5ba0df02, // n0x1d9f c0x016e (n0x1e10-n0x1e13) + I ms - 0x5be07682, // n0x1da0 c0x016f (n0x1e13-n0x1e16) + I mt - 0x5c204d42, // n0x1da1 c0x0170 (n0x1e16-n0x1e19) + I nc - 0x5c600782, // n0x1da2 c0x0171 (n0x1e19-n0x1e1b) + I nd - 0x5ca030c2, // n0x1da3 c0x0172 (n0x1e1b-n0x1e1e) + I ne - 0x5ce3dc82, // n0x1da4 c0x0173 (n0x1e1e-n0x1e21) + I nh - 0x5d201042, // n0x1da5 c0x0174 (n0x1e21-n0x1e24) + I nj - 0x5d63d882, // n0x1da6 c0x0175 (n0x1e24-n0x1e27) + I nm - 0x002f66c3, // n0x1da7 c0x0000 (---------------) + I nsn - 0x5da05c42, // n0x1da8 c0x0176 (n0x1e27-n0x1e2a) + I nv - 0x5de1b502, // n0x1da9 c0x0177 (n0x1e2a-n0x1e2d) + I ny - 0x5e203242, // n0x1daa c0x0178 (n0x1e2d-n0x1e30) + I oh - 0x5e605382, // n0x1dab c0x0179 (n0x1e30-n0x1e33) + I ok - 0x5ea00d82, // n0x1dac c0x017a (n0x1e33-n0x1e36) + I or - 0x5ee06642, // n0x1dad c0x017b (n0x1e36-n0x1e39) + I pa - 0x5f207082, // n0x1dae c0x017c (n0x1e39-n0x1e3c) + I pr - 0x5f600982, // n0x1daf c0x017d (n0x1e3c-n0x1e3f) + I ri - 0x5fa17f42, // n0x1db0 c0x017e (n0x1e3f-n0x1e42) + I sc - 0x5fe5bd42, // n0x1db1 c0x017f 
(n0x1e42-n0x1e44) + I sd - 0x000e9f4c, // n0x1db2 c0x0000 (---------------) + stuff-4-sale - 0x6022e342, // n0x1db3 c0x0180 (n0x1e44-n0x1e47) + I tn - 0x606723c2, // n0x1db4 c0x0181 (n0x1e47-n0x1e4a) + I tx - 0x60a05a02, // n0x1db5 c0x0182 (n0x1e4a-n0x1e4d) + I ut - 0x60e00bc2, // n0x1db6 c0x0183 (n0x1e4d-n0x1e50) + I va - 0x61202402, // n0x1db7 c0x0184 (n0x1e50-n0x1e53) + I vi - 0x61673e42, // n0x1db8 c0x0185 (n0x1e53-n0x1e56) + I vt - 0x61a01c02, // n0x1db9 c0x0186 (n0x1e56-n0x1e59) + I wa - 0x61e0a082, // n0x1dba c0x0187 (n0x1e59-n0x1e5c) + I wi - 0x62274682, // n0x1dbb c0x0188 (n0x1e5c-n0x1e5d) + I wv - 0x6266d402, // n0x1dbc c0x0189 (n0x1e5d-n0x1e60) + I wy - 0x0022c882, // n0x1dbd c0x0000 (---------------) + I cc - 0x0032c883, // n0x1dbe c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1dbf c0x0000 (---------------) + I lib - 0x0022c882, // n0x1dc0 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1dc1 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1dc2 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1dc3 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1dc4 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1dc5 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1dc6 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1dc7 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1dc8 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1dc9 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1dca c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1dcb c0x0000 (---------------) + I lib - 0x0022c882, // n0x1dcc c0x0000 (---------------) + I cc - 0x0032c883, // n0x1dcd c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1dce c0x0000 (---------------) + I lib - 0x0022c882, // n0x1dcf c0x0000 (---------------) + I cc - 0x0032c883, // n0x1dd0 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1dd1 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1dd2 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1dd3 c0x0000 
(---------------) + I k12 - 0x0026d7c3, // n0x1dd4 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1dd5 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1dd6 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1dd7 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1dd8 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1dd9 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1dda c0x0000 (---------------) + I lib - 0x0022c882, // n0x1ddb c0x0000 (---------------) + I cc - 0x0032c883, // n0x1ddc c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1ddd c0x0000 (---------------) + I lib - 0x0022c882, // n0x1dde c0x0000 (---------------) + I cc - 0x0032c883, // n0x1ddf c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1de0 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1de1 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1de2 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1de3 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1de4 c0x0000 (---------------) + I cc - 0x0026d7c3, // n0x1de5 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1de6 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1de7 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1de8 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1de9 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1dea c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1deb c0x0000 (---------------) + I lib - 0x0022c882, // n0x1dec c0x0000 (---------------) + I cc - 0x0032c883, // n0x1ded c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1dee c0x0000 (---------------) + I lib - 0x0022c882, // n0x1def c0x0000 (---------------) + I cc - 0x0032c883, // n0x1df0 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1df1 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1df2 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1df3 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1df4 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1df5 c0x0000 
(---------------) + I cc - 0x0032c883, // n0x1df6 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1df7 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1df8 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1df9 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1dfa c0x0000 (---------------) + I lib - 0x0022c882, // n0x1dfb c0x0000 (---------------) + I cc - 0x5a32c883, // n0x1dfc c0x0168 (n0x1dfe-n0x1e01) + I k12 - 0x0026d7c3, // n0x1dfd c0x0000 (---------------) + I lib - 0x003050c4, // n0x1dfe c0x0000 (---------------) + I chtr - 0x002838c6, // n0x1dff c0x0000 (---------------) + I paroch - 0x002e7783, // n0x1e00 c0x0000 (---------------) + I pvt - 0x0022c882, // n0x1e01 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e02 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e03 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e04 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e05 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e06 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e07 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e08 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e09 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e0a c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e0b c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e0c c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e0d c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e0e c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e0f c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e10 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e11 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e12 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e13 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e14 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e15 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e16 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e17 c0x0000 
(---------------) + I k12 - 0x0026d7c3, // n0x1e18 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e19 c0x0000 (---------------) + I cc - 0x0026d7c3, // n0x1e1a c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e1b c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e1c c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e1d c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e1e c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e1f c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e20 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e21 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e22 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e23 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e24 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e25 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e26 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e27 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e28 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e29 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e2a c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e2b c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e2c c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e2d c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e2e c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e2f c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e30 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e31 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e32 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e33 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e34 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e35 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e36 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e37 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e38 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e39 c0x0000 
(---------------) + I cc - 0x0032c883, // n0x1e3a c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e3b c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e3c c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e3d c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e3e c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e3f c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e40 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e41 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e42 c0x0000 (---------------) + I cc - 0x0026d7c3, // n0x1e43 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e44 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e45 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e46 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e47 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e48 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e49 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e4a c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e4b c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e4c c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e4d c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e4e c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e4f c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e50 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e51 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e52 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e53 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e54 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e55 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e56 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e57 c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e58 c0x0000 (---------------) + I lib - 0x0022c882, // n0x1e59 c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e5a c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e5b c0x0000 
(---------------) + I lib - 0x0022c882, // n0x1e5c c0x0000 (---------------) + I cc - 0x0022c882, // n0x1e5d c0x0000 (---------------) + I cc - 0x0032c883, // n0x1e5e c0x0000 (---------------) + I k12 - 0x0026d7c3, // n0x1e5f c0x0000 (---------------) + I lib - 0x62e34803, // n0x1e60 c0x018b (n0x1e66-n0x1e67) + I com - 0x0023a1c3, // n0x1e61 c0x0000 (---------------) + I edu - 0x00254603, // n0x1e62 c0x0000 (---------------) + I gub - 0x002119c3, // n0x1e63 c0x0000 (---------------) + I mil - 0x002207c3, // n0x1e64 c0x0000 (---------------) + I net - 0x00225403, // n0x1e65 c0x0000 (---------------) + I org - 0x000ff148, // n0x1e66 c0x0000 (---------------) + blogspot - 0x0020a442, // n0x1e67 c0x0000 (---------------) + I co - 0x00234803, // n0x1e68 c0x0000 (---------------) + I com - 0x002207c3, // n0x1e69 c0x0000 (---------------) + I net - 0x00225403, // n0x1e6a c0x0000 (---------------) + I org - 0x00234803, // n0x1e6b c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1e6c c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1e6d c0x0000 (---------------) + I gov - 0x002119c3, // n0x1e6e c0x0000 (---------------) + I mil - 0x002207c3, // n0x1e6f c0x0000 (---------------) + I net - 0x00225403, // n0x1e70 c0x0000 (---------------) + I org - 0x0024a4c4, // n0x1e71 c0x0000 (---------------) + I arts - 0x0020a442, // n0x1e72 c0x0000 (---------------) + I co - 0x00234803, // n0x1e73 c0x0000 (---------------) + I com - 0x00334603, // n0x1e74 c0x0000 (---------------) + I e12 - 0x0023a1c3, // n0x1e75 c0x0000 (---------------) + I edu - 0x0024bac4, // n0x1e76 c0x0000 (---------------) + I firm - 0x00211543, // n0x1e77 c0x0000 (---------------) + I gob - 0x0027c5c3, // n0x1e78 c0x0000 (---------------) + I gov - 0x00201804, // n0x1e79 c0x0000 (---------------) + I info - 0x002014c3, // n0x1e7a c0x0000 (---------------) + I int - 0x002119c3, // n0x1e7b c0x0000 (---------------) + I mil - 0x002207c3, // n0x1e7c c0x0000 (---------------) + I net - 0x00225403, // n0x1e7d 
c0x0000 (---------------) + I org - 0x00229943, // n0x1e7e c0x0000 (---------------) + I rec - 0x00363685, // n0x1e7f c0x0000 (---------------) + I store - 0x00242cc3, // n0x1e80 c0x0000 (---------------) + I tec - 0x00221cc3, // n0x1e81 c0x0000 (---------------) + I web - 0x0020a442, // n0x1e82 c0x0000 (---------------) + I co - 0x00234803, // n0x1e83 c0x0000 (---------------) + I com - 0x0032c883, // n0x1e84 c0x0000 (---------------) + I k12 - 0x002207c3, // n0x1e85 c0x0000 (---------------) + I net - 0x00225403, // n0x1e86 c0x0000 (---------------) + I org - 0x00200342, // n0x1e87 c0x0000 (---------------) + I ac - 0x0032bd03, // n0x1e88 c0x0000 (---------------) + I biz - 0x000ff148, // n0x1e89 c0x0000 (---------------) + blogspot - 0x00234803, // n0x1e8a c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1e8b c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1e8c c0x0000 (---------------) + I gov - 0x002ad786, // n0x1e8d c0x0000 (---------------) + I health - 0x00201804, // n0x1e8e c0x0000 (---------------) + I info - 0x002014c3, // n0x1e8f c0x0000 (---------------) + I int - 0x00207d04, // n0x1e90 c0x0000 (---------------) + I name - 0x002207c3, // n0x1e91 c0x0000 (---------------) + I net - 0x00225403, // n0x1e92 c0x0000 (---------------) + I org - 0x002210c3, // n0x1e93 c0x0000 (---------------) + I pro - 0x00234803, // n0x1e94 c0x0000 (---------------) + I com - 0x0023a1c3, // n0x1e95 c0x0000 (---------------) + I edu - 0x002207c3, // n0x1e96 c0x0000 (---------------) + I net - 0x00225403, // n0x1e97 c0x0000 (---------------) + I org - 0x00234803, // n0x1e98 c0x0000 (---------------) + I com - 0x00012586, // n0x1e99 c0x0000 (---------------) + dyndns - 0x0023a1c3, // n0x1e9a c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1e9b c0x0000 (---------------) + I gov - 0x000d21c6, // n0x1e9c c0x0000 (---------------) + mypets - 0x002207c3, // n0x1e9d c0x0000 (---------------) + I net - 0x00225403, // n0x1e9e c0x0000 (---------------) + I org - 
0x00307ac8, // n0x1e9f c0x0000 (---------------) + I xn--80au - 0x00315709, // n0x1ea0 c0x0000 (---------------) + I xn--90azh - 0x0031fdc9, // n0x1ea1 c0x0000 (---------------) + I xn--c1avg - 0x0032cd08, // n0x1ea2 c0x0000 (---------------) + I xn--d1at - 0x003719c8, // n0x1ea3 c0x0000 (---------------) + I xn--o1ac - 0x003719c9, // n0x1ea4 c0x0000 (---------------) + I xn--o1ach - 0x00200342, // n0x1ea5 c0x0000 (---------------) + I ac - 0x0026ff85, // n0x1ea6 c0x0000 (---------------) + I agric - 0x0023a803, // n0x1ea7 c0x0000 (---------------) + I alt - 0x6560a442, // n0x1ea8 c0x0195 (n0x1eb6-n0x1eb7) + I co - 0x0023a1c3, // n0x1ea9 c0x0000 (---------------) + I edu - 0x0027c5c3, // n0x1eaa c0x0000 (---------------) + I gov - 0x00379087, // n0x1eab c0x0000 (---------------) + I grondar - 0x0026d383, // n0x1eac c0x0000 (---------------) + I law - 0x002119c3, // n0x1ead c0x0000 (---------------) + I mil - 0x002207c3, // n0x1eae c0x0000 (---------------) + I net - 0x00230743, // n0x1eaf c0x0000 (---------------) + I ngo - 0x0020e5c3, // n0x1eb0 c0x0000 (---------------) + I nis - 0x00201343, // n0x1eb1 c0x0000 (---------------) + I nom - 0x00225403, // n0x1eb2 c0x0000 (---------------) + I org - 0x0022ec46, // n0x1eb3 c0x0000 (---------------) + I school - 0x00200c42, // n0x1eb4 c0x0000 (---------------) + I tm - 0x00221cc3, // n0x1eb5 c0x0000 (---------------) + I web - 0x000ff148, // n0x1eb6 c0x0000 (---------------) + blogspot + 0x31a403, + 0x284944, + 0x2dd106, + 0x3706c3, + 0x3706c6, + 0x398706, + 0x3a8103, + 0x2fe244, + 0x38e987, + 0x2dcd48, + 0x1a05702, + 0x316e87, + 0x35c789, + 0x2abb0a, + 0x2abb0b, + 0x22f383, + 0x287506, + 0x232dc5, + 0x1e021c2, + 0x2161c4, + 0x238743, + 0x26fc45, + 0x2214902, + 0x347743, + 0x266f744, + 0x33ddc5, + 0x2a04702, + 0x376b4e, + 0x24c4c3, + 0x38ae46, + 0x2e00142, + 0x2dd287, + 0x236f46, + 0x3209282, + 0x229d83, + 0x24d9c4, + 0x325e86, + 0x26c588, + 0x2761c6, + 0x2011c4, + 0x3600242, + 0x3335c9, + 0x20a1c7, + 0x351e86, + 
0x330c89, + 0x298308, + 0x26e904, + 0x241ec6, + 0x222a46, + 0x3a022c2, + 0x26480f, + 0x20948e, + 0x211d04, + 0x2c2b85, + 0x2fe145, + 0x39e189, + 0x23c409, + 0x349a87, + 0x20fa86, + 0x275a83, + 0x3e02a82, + 0x315503, + 0x34e24a, + 0x20f903, + 0x2af985, + 0x284202, + 0x284209, + 0x4200ec2, + 0x212484, + 0x2b9686, + 0x2f3645, + 0x3552c4, + 0x4a05644, + 0x2030c3, + 0x232344, + 0x4e00c02, + 0x268d44, + 0x52ef6c4, + 0x25ef4a, + 0x5603dc2, + 0x2ba587, + 0x2f3b08, + 0x6208142, + 0x311687, + 0x2bf204, + 0x2bf207, + 0x36e0c5, + 0x34ffc7, + 0x349846, + 0x24f3c4, + 0x38c105, + 0x29e447, + 0x72001c2, + 0x26e503, + 0x200b82, + 0x200b83, + 0x760de02, + 0x2102c5, + 0x7a02a42, + 0x350e04, + 0x2734c5, + 0x211c47, + 0x26bcce, + 0x2b9184, + 0x245544, + 0x202f03, + 0x281d49, + 0x31ee0b, + 0x2e9a88, + 0x379948, + 0x3a9908, + 0x22ae48, + 0x330aca, + 0x34fec7, + 0x318186, + 0x7e87002, + 0x35e203, + 0x367e43, + 0x36f4c4, + 0x3a8143, + 0x3250c3, + 0x1720b82, + 0x8202502, + 0x27a8c5, + 0x296206, + 0x2d1b84, + 0x375487, + 0x2e1886, + 0x331f84, + 0x39d3c7, + 0x203bc3, + 0x86c54c2, + 0x8b0f242, + 0x8e16742, + 0x216746, + 0x9200002, + 0x3523c5, + 0x3220c3, + 0x200604, + 0x2e8f84, + 0x2e8f85, + 0x206b43, + 0x978d2c3, + 0x9a0bb42, + 0x289e05, + 0x289e0b, + 0x31e686, + 0x20cb4b, + 0x221344, + 0x20d949, + 0x20e9c4, + 0x9e0ec02, + 0x20f143, + 0x20f403, + 0x16105c2, + 0x268183, + 0x2105ca, + 0xa20b382, + 0x216445, + 0x29224a, + 0x2d7744, + 0x283783, + 0x26cfc4, + 0x212543, + 0x212544, + 0x212547, + 0x2140c5, + 0x2147c5, + 0x214f46, + 0x2157c6, + 0x216a03, + 0x21ae88, + 0x210043, + 0xa601c02, + 0x243448, + 0x213ccb, + 0x220148, + 0x220d86, + 0x221847, + 0x225348, + 0xb642b42, + 0xbabf3c2, + 0x326788, + 0x35e4c7, + 0x246085, + 0x357f48, + 0x2bd408, + 0x34dd83, + 0x22a1c4, + 0x36f502, + 0xbe2bc82, + 0xc238482, + 0xca2e802, + 0x22e803, + 0xce01ec2, + 0x2fe203, + 0x2f1e84, + 0x201ec3, + 0x26e8c4, + 0x201ecb, + 0x213c03, + 0x2de946, + 0x239f84, + 0x29034e, + 0x371145, + 0x38af48, + 0x31ffc7, + 0x31ffca, + 
0x229743, + 0x22f147, + 0x31efc5, + 0x22f8c4, + 0x265b06, + 0x265b07, + 0x2c11c4, + 0x2f7a87, + 0x313d44, + 0x26c004, + 0x26c006, + 0x387184, + 0x3510c6, + 0x203f83, + 0x35e288, + 0x203f88, + 0x245503, + 0x268143, + 0x399a04, + 0x39e003, + 0xd219f02, + 0xd6d6a42, + 0x20bac3, + 0x207146, + 0x241fc3, + 0x377cc4, + 0xdaee982, + 0x3af843, + 0x3507c3, + 0x217a02, + 0xde04142, + 0x2c1946, + 0x233ac7, + 0x2e8945, + 0x37de04, + 0x28c505, + 0x268907, + 0x267805, + 0x2b8649, + 0x2cefc6, + 0x2daa88, + 0x2e8846, + 0xe21a1c2, + 0x32ca08, + 0x2f1c46, + 0x21a1c5, + 0x2f6d87, + 0x309984, + 0x309985, + 0x276384, + 0x276388, + 0xe60cc02, + 0xea09882, + 0x3103c6, + 0x3b8988, + 0x334385, + 0x337306, + 0x342f08, + 0x344a88, + 0xee09885, + 0xf2142c4, + 0x3b0787, + 0xf60e5c2, + 0xfa1b102, + 0x10a099c2, + 0x2b9785, + 0x2a2645, + 0x2fef86, + 0x3b2547, + 0x380747, + 0x112a84c3, + 0x2a84c7, + 0x31eb08, + 0x376ec9, + 0x376d07, + 0x384d07, + 0x3a8ec8, + 0x3ad4c6, + 0x22f3c6, + 0x23000c, + 0x23120a, + 0x231687, + 0x232c8b, + 0x233907, + 0x23390e, + 0x234cc4, + 0x235ac4, + 0x237a47, + 0x3690c7, + 0x23b206, + 0x23b207, + 0x23b4c7, + 0x19604682, + 0x23c886, + 0x23c88a, + 0x23ce8b, + 0x23dbc7, + 0x23ed45, + 0x23f083, + 0x240586, + 0x240587, + 0x38eb43, + 0x19a0c442, + 0x240f4a, + 0x19f5d882, + 0x1a2a5e02, + 0x1a643142, + 0x1aa2cd82, + 0x244bc5, + 0x245304, + 0x1b205742, + 0x268dc5, + 0x23d483, + 0x20eac5, + 0x22ad44, + 0x206804, + 0x314046, + 0x25e206, + 0x28a003, + 0x238284, + 0x3a6803, + 0x1b600dc2, + 0x391c04, + 0x391c06, + 0x3b0d05, + 0x205e06, + 0x2f6e88, + 0x266e84, + 0x27ed08, + 0x2426c5, + 0x228308, + 0x29ff86, + 0x237587, + 0x22e204, + 0x22e206, + 0x33f443, + 0x383ec3, + 0x223d08, + 0x318dc4, + 0x348747, + 0x23e6c6, + 0x2d6389, + 0x250348, + 0x26cd08, + 0x26d084, + 0x351443, + 0x225e02, + 0x1c60f882, + 0x1ca10e82, + 0x3a7403, + 0x1ce04a42, + 0x38eac4, + 0x2862c6, + 0x26e605, + 0x21ba03, + 0x232884, + 0x2b14c7, + 0x33da03, + 0x231a88, + 0x208545, + 0x36e803, + 0x273445, + 0x273584, + 
0x2f6a86, + 0x209ec4, + 0x211346, + 0x211b86, + 0x3916c4, + 0x213b43, + 0x1d205882, + 0x247345, + 0x221c03, + 0x1d61b0c2, + 0x22ffc3, + 0x209bc5, + 0x232403, + 0x232409, + 0x1da05f02, + 0x1e205e42, + 0x2893c5, + 0x218786, + 0x2d1746, + 0x2b0a88, + 0x2b0a8b, + 0x20718b, + 0x2e8b45, + 0x2db145, + 0x2c6309, + 0x1600302, + 0x391888, + 0x20dc44, + 0x1ea007c2, + 0x3a7883, + 0x1f2c6086, + 0x20ae88, + 0x1f601402, + 0x2344c8, + 0x1fa2bb82, + 0x3b92ca, + 0x1feccc43, + 0x3ac1c6, + 0x3af408, + 0x3ac008, + 0x31d006, + 0x36bc07, + 0x264a07, + 0x3349ca, + 0x2d77c4, + 0x3474c4, + 0x35c1c9, + 0x20794385, + 0x209686, + 0x20e1c3, + 0x24a044, + 0x20a02644, + 0x202647, + 0x212fc7, + 0x22a584, + 0x285445, + 0x2ff048, + 0x366747, + 0x370f07, + 0x20e18342, + 0x327704, + 0x292b48, + 0x245bc4, + 0x247784, + 0x248085, + 0x2481c7, + 0x223589, + 0x248fc4, + 0x249709, + 0x249948, + 0x249dc4, + 0x249dc7, + 0x2124aa83, + 0x24ad47, + 0x1609d02, + 0x16ad202, + 0x24bec6, + 0x24c507, + 0x24cd44, + 0x24e6c7, + 0x24fa47, + 0x24fdc3, + 0x248902, + 0x229642, + 0x250a03, + 0x250a04, + 0x250a0b, + 0x379a48, + 0x256804, + 0x2523c5, + 0x254007, + 0x2555c5, + 0x2bc00a, + 0x256743, + 0x2160fc82, + 0x226e84, + 0x258d89, + 0x25c343, + 0x25c407, + 0x24a849, + 0x282688, + 0x204743, + 0x278fc7, + 0x279709, + 0x268ac3, + 0x2810c4, + 0x283c89, + 0x2880c6, + 0x289683, + 0x200182, + 0x21f983, + 0x3a8a87, + 0x21f985, + 0x379746, + 0x256e84, + 0x302e85, + 0x2e4403, + 0x216c46, + 0x20db42, + 0x395144, + 0x221402, + 0x221403, + 0x21a00782, + 0x247303, + 0x215c44, + 0x215c47, + 0x200906, + 0x202602, + 0x21e025c2, + 0x2dca84, + 0x22235e82, + 0x22600b02, + 0x2d4f84, + 0x2d4f85, + 0x2b6dc5, + 0x390e06, + 0x22a05d42, + 0x205d45, + 0x20cf05, + 0x20ae03, + 0x210986, + 0x2126c5, + 0x2166c2, + 0x343605, + 0x2166c4, + 0x221ec3, + 0x227343, + 0x22e0c642, + 0x2d4987, + 0x3669c4, + 0x3669c9, + 0x249f44, + 0x291d43, + 0x2f6609, + 0x367508, + 0x232a24c4, + 0x2a24c6, + 0x21c303, + 0x247bc3, + 0x2e9dc3, + 0x236eb382, + 0x368cc2, + 
0x23a05e82, + 0x323cc8, + 0x32a388, + 0x398e46, + 0x2e27c5, + 0x22efc5, + 0x352ec7, + 0x21d205, + 0x228782, + 0x23e38182, + 0x1603002, + 0x2416c8, + 0x32c945, + 0x2e3404, + 0x2ebac5, + 0x23f407, + 0x3207c4, + 0x240e42, + 0x24200582, + 0x338984, + 0x212cc7, + 0x28a2c7, + 0x34ff84, + 0x292203, + 0x245444, + 0x245448, + 0x22f706, + 0x26598a, + 0x223444, + 0x292588, + 0x288504, + 0x221946, + 0x294684, + 0x2b9a86, + 0x366c89, + 0x25da47, + 0x3375c3, + 0x24667e42, + 0x267e43, + 0x20ee02, + 0x24a11ec2, + 0x3085c6, + 0x365c88, + 0x2a4087, + 0x3a3f49, + 0x291c49, + 0x2a5045, + 0x2a6049, + 0x2a6805, + 0x2a6949, + 0x2a8005, + 0x2a9108, + 0x21fb84, + 0x24e890c7, + 0x2a9303, + 0x2a9307, + 0x3850c6, + 0x2a9b87, + 0x2a1085, + 0x2935c3, + 0x2521ae02, + 0x3b40c4, + 0x2562ce82, + 0x258203, + 0x25a17f42, + 0x36d586, + 0x2f3a85, + 0x2ac207, + 0x26cc43, + 0x325044, + 0x20e903, + 0x33e783, + 0x25e02bc2, + 0x266015c2, + 0x398804, + 0x2488c3, + 0x243c85, + 0x26a029c2, + 0x27206482, + 0x2b4506, + 0x318f04, + 0x2e3004, + 0x2e300a, + 0x27a01fc2, + 0x37204a, + 0x3756c8, + 0x27fb1384, + 0x20ad83, + 0x201fc3, + 0x3a9a49, + 0x217649, + 0x285246, + 0x28244183, + 0x3292c5, + 0x30180d, + 0x375886, + 0x3bac8b, + 0x28602e82, + 0x22c1c8, + 0x29206e82, + 0x29606fc2, + 0x2ae585, + 0x29a03942, + 0x258447, + 0x21c907, + 0x21e003, + 0x2306c8, + 0x29e06502, + 0x312684, + 0x212943, + 0x351d45, + 0x34db83, + 0x2f3546, + 0x205904, + 0x268103, + 0x2ae9c3, + 0x2a205fc2, + 0x2e8ac4, + 0x35f6c5, + 0x39f1c7, + 0x275643, + 0x2ad883, + 0x2ae083, + 0x160fec2, + 0x2ae143, + 0x2ae943, + 0x2a605102, + 0x282104, + 0x25e406, + 0x342643, + 0x2aec43, + 0x2aaafd42, + 0x2afd48, + 0x2b0004, + 0x36c246, + 0x2b0387, + 0x249c46, + 0x28e2c4, + 0x38600682, + 0x384f8b, + 0x2fb08e, + 0x21930f, + 0x2985c3, + 0x38ebbbc2, + 0x1600f42, + 0x39201582, + 0x28f403, + 0x2fdec3, + 0x233706, + 0x277c46, + 0x3afd87, + 0x3328c4, + 0x396188c2, + 0x39a08882, + 0x348345, + 0x2e6047, + 0x3b5746, + 0x39e27282, + 0x227284, + 0x2b3ac3, + 0x3a20be02, + 
0x3a759ec3, + 0x2b4c44, + 0x2be409, + 0x16c3ac2, + 0x3aa03a82, + 0x203a85, + 0x3aec3d42, + 0x3b203202, + 0x346947, + 0x239689, + 0x35ca0b, + 0x2647c5, + 0x2c4849, + 0x2e8246, + 0x31e6c7, + 0x3b608484, + 0x3199c9, + 0x373487, + 0x20ab47, + 0x20a383, + 0x20a386, + 0x3b68c7, + 0x206a43, + 0x2565c6, + 0x3be02a02, + 0x3c232682, + 0x385803, + 0x324c45, + 0x350f47, + 0x250086, + 0x21f905, + 0x277d44, + 0x2c9fc5, + 0x2f2684, + 0x3c6040c2, + 0x331107, + 0x2dbd44, + 0x217544, + 0x21754d, + 0x257509, + 0x3a4448, + 0x253944, + 0x3abc45, + 0x206447, + 0x2144c4, + 0x2e1947, + 0x21c485, + 0x3caa4604, + 0x2d92c5, + 0x25b004, + 0x24bb86, + 0x3b2345, + 0x3ce250c2, + 0x283844, + 0x283845, + 0x36fa46, + 0x20c3c5, + 0x30c304, + 0x2c5dc3, + 0x2053c6, + 0x358505, + 0x2bb485, + 0x3b2444, + 0x2234c3, + 0x2234cc, + 0x3d288a02, + 0x3d6010c2, + 0x3da00282, + 0x206343, + 0x206344, + 0x3de04bc2, + 0x2f9688, + 0x379805, + 0x235684, + 0x23b086, + 0x3e201f42, + 0x3e609782, + 0x3ea00e82, + 0x306b85, + 0x391586, + 0x211084, + 0x3263c6, + 0x2ba346, + 0x219943, + 0x3ef0de0a, + 0x247b05, + 0x2c8e83, + 0x223186, + 0x300fc9, + 0x223187, + 0x297788, + 0x2981c9, + 0x224348, + 0x229486, + 0x20bf03, + 0x3f2a8542, + 0x385683, + 0x385689, + 0x332448, + 0x3f649a02, + 0x3fa02342, + 0x227f83, + 0x2da905, + 0x251ec4, + 0x2c0909, + 0x22cb84, + 0x266348, + 0x202343, + 0x202344, + 0x278b03, + 0x2187c8, + 0x217487, + 0x4020b102, + 0x274082, + 0x351905, + 0x266689, + 0x209703, + 0x27b184, + 0x329284, + 0x2064c3, + 0x27c3ca, + 0x40752bc2, + 0x40a83802, + 0x2c5443, + 0x3739c3, + 0x1602302, + 0x38ac03, + 0x40e0f242, + 0x4120ec42, + 0x41610444, + 0x210446, + 0x383b06, + 0x26ad44, + 0x36c643, + 0x38bcc3, + 0x226883, + 0x23d206, + 0x2cb8c5, + 0x2c5a07, + 0x31e589, + 0x2ca645, + 0x2cb806, + 0x2cbd88, + 0x2cbf86, + 0x236a04, + 0x29944b, + 0x2ceac3, + 0x2ceac5, + 0x2cec08, + 0x228502, + 0x346c42, + 0x41a44c42, + 0x41e0e602, + 0x218903, + 0x422675c2, + 0x2675c3, + 0x2cef04, + 0x2cf5c3, + 0x42a115c2, + 0x42ed43c6, + 0x2a7306, + 
0x43207902, + 0x4360f442, + 0x43a27382, + 0x43e02c82, + 0x4422dd02, + 0x44602d02, + 0x234703, + 0x390685, + 0x319606, + 0x44a11cc4, + 0x3b0b0a, + 0x32fe86, + 0x2e8d84, + 0x281d03, + 0x45604642, + 0x200c82, + 0x25fd03, + 0x45a05503, + 0x2c7b87, + 0x3b2247, + 0x47250b07, + 0x312d87, + 0x227b03, + 0x227b0a, + 0x236b84, + 0x23e5c4, + 0x23e5ca, + 0x213f05, + 0x47609642, + 0x24e683, + 0x47a008c2, + 0x21c2c3, + 0x267e03, + 0x48203342, + 0x2a8444, + 0x21de84, + 0x3b9505, + 0x305005, + 0x2e1ac6, + 0x2e1e46, + 0x48608442, + 0x48a033c2, + 0x3185c5, + 0x2a7012, + 0x2511c6, + 0x220803, + 0x30a746, + 0x220805, + 0x1610602, + 0x50e120c2, + 0x353e83, + 0x2120c3, + 0x2441c3, + 0x512023c2, + 0x376e43, + 0x5160b482, + 0x210483, + 0x282148, + 0x25e983, + 0x25e986, + 0x3a2987, + 0x306806, + 0x30680b, + 0x2e8cc7, + 0x3b3ec4, + 0x51e04ec2, + 0x379685, + 0x522054c3, + 0x2a6e03, + 0x326c05, + 0x329983, + 0x52729986, + 0x391a0a, + 0x26a9c3, + 0x204584, + 0x3b88c6, + 0x21a5c6, + 0x52a00983, + 0x324f07, + 0x285147, + 0x29b0c5, + 0x2318c6, + 0x224a83, + 0x54a10bc3, + 0x54e056c2, + 0x328144, + 0x22a2cc, + 0x236149, + 0x2414c7, + 0x249245, + 0x262a84, + 0x273cc8, + 0x278305, + 0x55284a05, + 0x28c609, + 0x351f43, + 0x2a5d84, + 0x556013c2, + 0x2013c3, + 0x55a94142, + 0x2a4386, + 0x160f982, + 0x55e06e02, + 0x306a88, + 0x2be603, + 0x2d9207, + 0x2e4d05, + 0x2dd685, + 0x32840b, + 0x2dd686, + 0x328606, + 0x2ffac6, + 0x262c84, + 0x3042c6, + 0x2e3508, + 0x23a043, + 0x250dc3, + 0x250dc4, + 0x2e4484, + 0x2e4a07, + 0x2e5ec5, + 0x562e6002, + 0x5660ba02, + 0x20ba05, + 0x2e83c4, + 0x2e83cb, + 0x2e8e88, + 0x228f44, + 0x2272c2, + 0x56e28ec2, + 0x23b903, + 0x2e9344, + 0x2e9605, + 0x2ea047, + 0x2eb604, + 0x2e8b84, + 0x57201302, + 0x360cc9, + 0x2ec405, + 0x264a85, + 0x2ecf85, + 0x57601303, + 0x2ee0c4, + 0x2ee0cb, + 0x2ee644, + 0x2ef3cb, + 0x2ef7c5, + 0x21944a, + 0x2f0048, + 0x2f024a, + 0x2f0ac3, + 0x2f0aca, + 0x57a01742, + 0x57e2d4c2, + 0x21aa03, + 0x582f1bc2, + 0x2f1bc3, + 0x5875c402, + 0x58b22842, + 0x2f2504, + 
0x21afc6, + 0x326105, + 0x2f4503, + 0x31a9c6, + 0x204405, + 0x25e704, + 0x58e05ec2, + 0x2c9244, + 0x2c5f8a, + 0x22d787, + 0x2f38c6, + 0x380b07, + 0x22a403, + 0x283e48, + 0x37f48b, + 0x3736c5, + 0x333ec5, + 0x333ec6, + 0x390884, + 0x3aa248, + 0x222943, + 0x222944, + 0x222947, + 0x38e446, + 0x352686, + 0x29018a, + 0x246604, + 0x24660a, + 0x59282846, + 0x282847, + 0x252447, + 0x270844, + 0x270849, + 0x25e0c5, + 0x235e0b, + 0x2e81c3, + 0x211503, + 0x22f003, + 0x22fac4, + 0x59600482, + 0x25d4c6, + 0x293345, + 0x30a985, + 0x24f6c6, + 0x3395c4, + 0x59a02782, + 0x23f0c4, + 0x59e01c42, + 0x2b9f05, + 0x21ad84, + 0x21bec3, + 0x5a612102, + 0x212103, + 0x23ba46, + 0x5aa03082, + 0x27f488, + 0x223004, + 0x223006, + 0x374246, + 0x2540c4, + 0x205345, + 0x2141c8, + 0x216547, + 0x219687, + 0x21968f, + 0x292a46, + 0x22cf03, + 0x22cf04, + 0x310504, + 0x20d003, + 0x221a84, + 0x240944, + 0x5ae42b02, + 0x289d43, + 0x242b03, + 0x5b209842, + 0x229f83, + 0x38eb83, + 0x21484a, + 0x358107, + 0x2efc0c, + 0x2efec6, + 0x30a146, + 0x248547, + 0x5b64c687, + 0x24f809, + 0x243584, + 0x24fbc4, + 0x5ba18942, + 0x5be027c2, + 0x290546, + 0x324d04, + 0x2d6bc6, + 0x2a5148, + 0x3b8dc4, + 0x258486, + 0x2d1705, + 0x265c88, + 0x207383, + 0x273705, + 0x273e83, + 0x264b83, + 0x264b84, + 0x2759c3, + 0x5c2ec082, + 0x5c602e02, + 0x2e8089, + 0x278205, + 0x278404, + 0x27a9c5, + 0x20dd44, + 0x2e0d07, + 0x343bc5, + 0x250cc4, + 0x250cc8, + 0x2d5086, + 0x2d7984, + 0x2d8e88, + 0x2dbb87, + 0x5ca03902, + 0x2e36c4, + 0x20d0c4, + 0x20ad47, + 0x5ce2b804, + 0x2ccf42, + 0x5d201102, + 0x201543, + 0x203984, + 0x2aa283, + 0x374e05, + 0x5d61e182, + 0x2eb285, + 0x202c42, + 0x34d5c5, + 0x365e45, + 0x5da00c42, + 0x350744, + 0x5de00d02, + 0x2387c6, + 0x29a146, + 0x2667c8, + 0x2bfa08, + 0x36d504, + 0x36d6c5, + 0x3610c9, + 0x2db1c4, + 0x3919c4, + 0x205183, + 0x5e222705, + 0x2c3b87, + 0x2a2744, + 0x341e8d, + 0x361782, + 0x361783, + 0x364503, + 0x5e600802, + 0x388305, + 0x25f9c7, + 0x205b44, + 0x312e47, + 0x2983c9, + 0x2c60c9, + 0x2519c7, + 
0x273b03, + 0x273b08, + 0x2ed249, + 0x24e187, + 0x373605, + 0x39e086, + 0x39fb86, + 0x3a3c05, + 0x257605, + 0x5ea02d82, + 0x36ce45, + 0x2b2908, + 0x2c1706, + 0x5eeb7487, + 0x2efa04, + 0x2aa987, + 0x2f62c6, + 0x5f230982, + 0x36f746, + 0x2f83ca, + 0x2f8e85, + 0x5f6de402, + 0x5fa36542, + 0x3b6c06, + 0x2a1e88, + 0x5fe8a487, + 0x60234e42, + 0x2255c3, + 0x311d86, + 0x225044, + 0x3a2846, + 0x390b06, + 0x26ff0a, + 0x331c05, + 0x367ec6, + 0x3759c3, + 0x3759c4, + 0x207102, + 0x309943, + 0x60606382, + 0x2f0f83, + 0x3722c4, + 0x2a1fc4, + 0x2a1fca, + 0x229543, + 0x276288, + 0x22954a, + 0x27b447, + 0x2fcd86, + 0x238684, + 0x290bc2, + 0x2a2e82, + 0x60a04002, + 0x245403, + 0x252207, + 0x31ac87, + 0x2848c4, + 0x26f8c7, + 0x2ea146, + 0x216847, + 0x35e604, + 0x242a05, + 0x2b7985, + 0x60e0fe82, + 0x20fe86, + 0x218283, + 0x220502, + 0x220506, + 0x61203e02, + 0x6160b0c2, + 0x3ba785, + 0x61a21c82, + 0x61e03b42, + 0x33b5c5, + 0x393105, + 0x367f85, + 0x267303, + 0x286385, + 0x2dd747, + 0x307bc5, + 0x306185, + 0x38b044, + 0x3204c6, + 0x23e804, + 0x62201442, + 0x62f630c5, + 0x2ebe07, + 0x2d6dc8, + 0x25fe86, + 0x25fe8d, + 0x260709, + 0x260712, + 0x32f345, + 0x3339c3, + 0x6320a9c2, + 0x309444, + 0x375903, + 0x360fc5, + 0x2fa085, + 0x63612982, + 0x36e843, + 0x63a50b82, + 0x642bf542, + 0x6460fb42, + 0x353805, + 0x37ac43, + 0x37a4c8, + 0x64a07842, + 0x64e000c2, + 0x2a8406, + 0x33b80a, + 0x21bf03, + 0x20c343, + 0x2ee3c3, + 0x65a02dc2, + 0x73e35482, + 0x74601c82, + 0x201682, + 0x36f549, + 0x2c2f04, + 0x2309c8, + 0x74af4542, + 0x74e08602, + 0x2ef605, + 0x2330c8, + 0x282288, + 0x2f858c, + 0x22d543, + 0x25a9c2, + 0x75201f82, + 0x2caac6, + 0x2fdc05, + 0x26d343, + 0x23cc46, + 0x2fdd46, + 0x201f83, + 0x2ff883, + 0x300786, + 0x3013c4, + 0x295586, + 0x2cec85, + 0x30164a, + 0x2eebc4, + 0x302304, + 0x30370a, + 0x7566b082, + 0x337745, + 0x30478a, + 0x305285, + 0x305b44, + 0x305c46, + 0x305dc4, + 0x218dc6, + 0x75a6dac2, + 0x2f3206, + 0x2f3dc5, + 0x3ab6c7, + 0x200206, + 0x248744, + 0x2d5e07, + 0x30dd46, + 
0x2b8a45, + 0x381947, + 0x39eb47, + 0x39eb4e, + 0x25ed06, + 0x2e1805, + 0x27dec7, + 0x282b43, + 0x3b2f87, + 0x20f5c5, + 0x212144, + 0x212f82, + 0x3addc7, + 0x332944, + 0x377404, + 0x273f0b, + 0x21d5c3, + 0x2b6987, + 0x21d5c4, + 0x2cc0c7, + 0x228bc3, + 0x33678d, + 0x388b48, + 0x21d044, + 0x250bc5, + 0x307d05, + 0x308143, + 0x75e22f02, + 0x309903, + 0x309fc3, + 0x210004, + 0x279805, + 0x218307, + 0x375a46, + 0x372003, + 0x23ab4b, + 0x26ba4b, + 0x2a654b, + 0x2de44a, + 0x30254b, + 0x31be8b, + 0x356b8c, + 0x378d11, + 0x3b654a, + 0x3ba10b, + 0x30ad8b, + 0x30b34a, + 0x30b88a, + 0x30cb4e, + 0x30d18b, + 0x30d44a, + 0x30ef11, + 0x30f34a, + 0x30f84b, + 0x30fd8e, + 0x31078c, + 0x310c4b, + 0x310f0e, + 0x31128c, + 0x31474a, + 0x31698c, + 0x76316c8a, + 0x317489, + 0x31af4a, + 0x31b1ca, + 0x31b44b, + 0x31f60e, + 0x31f991, + 0x328b89, + 0x328dca, + 0x3295cb, + 0x32a84a, + 0x32b316, + 0x32e14b, + 0x32f10a, + 0x32f50a, + 0x33084b, + 0x333449, + 0x337109, + 0x337d4d, + 0x33870b, + 0x33978b, + 0x33a14b, + 0x33a609, + 0x33ac4e, + 0x33b30a, + 0x33fc8a, + 0x33ffca, + 0x340b8b, + 0x3413cb, + 0x34168d, + 0x342c0d, + 0x343290, + 0x34374b, + 0x34408c, + 0x34480b, + 0x34644b, + 0x34798b, + 0x34c00b, + 0x34ca8f, + 0x34ce4b, + 0x34d94a, + 0x34e689, + 0x34f409, + 0x34f8cb, + 0x34fb8e, + 0x35434b, + 0x35574f, + 0x35864b, + 0x35890b, + 0x358bcb, + 0x3590ca, + 0x35c609, + 0x35f34f, + 0x36424c, + 0x36488c, + 0x364d0e, + 0x3653cf, + 0x36578e, + 0x365fd0, + 0x3663cf, + 0x366f4e, + 0x36770c, + 0x367a12, + 0x3689d1, + 0x36988e, + 0x36a04e, + 0x36a58e, + 0x36a90f, + 0x36acce, + 0x36b053, + 0x36b511, + 0x36b94e, + 0x36bdcc, + 0x36d913, + 0x36e210, + 0x36ea8c, + 0x36ed8c, + 0x36f24b, + 0x3703ce, + 0x370c8b, + 0x3715cb, + 0x37258c, + 0x37814a, + 0x37850c, + 0x37880c, + 0x378b09, + 0x37bb8b, + 0x37be48, + 0x37c049, + 0x37c04f, + 0x37d98b, + 0x7677eb8a, + 0x381fcc, + 0x383189, + 0x383608, + 0x38380b, + 0x383c8b, + 0x38480a, + 0x384a8b, + 0x38540c, + 0x386008, + 0x388d4b, + 0x38b44b, + 0x39484b, + 0x3958cb, + 
0x39e6cb, + 0x39e989, + 0x39eecd, + 0x3a464a, + 0x3a5597, + 0x3a6bd8, + 0x3a96c9, + 0x3ab30b, + 0x3ac814, + 0x3acd0b, + 0x3ad28a, + 0x3aea0a, + 0x3aec8b, + 0x3b4250, + 0x3b4651, + 0x3b4d0a, + 0x3b5b4d, + 0x3b624d, + 0x3ba3cb, + 0x3bbd46, + 0x20ff83, + 0x76b80483, + 0x22cdc6, + 0x247645, + 0x27a007, + 0x31bd46, + 0x1656682, + 0x2ad9c9, + 0x31a7c4, + 0x2dacc8, + 0x232b43, + 0x309387, + 0x234f42, + 0x2ac243, + 0x76e07b02, + 0x2c7406, + 0x2c9884, + 0x369f44, + 0x390143, + 0x390145, + 0x776c3d82, + 0x77aa6cc4, + 0x270787, + 0x77e4a282, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x204e83, + 0x205702, + 0x16d208, + 0x2099c2, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x214843, + 0x324556, + 0x325793, + 0x26f749, + 0x3b0688, + 0x379509, + 0x304906, + 0x3389d0, + 0x254b53, + 0x38e508, + 0x28ea47, + 0x36c747, + 0x284d0a, + 0x372349, + 0x38d849, + 0x28decb, + 0x349846, + 0x379b4a, + 0x220d86, + 0x31a3c3, + 0x2d48c5, + 0x35e288, + 0x23888d, + 0x2b984c, + 0x2de0c7, + 0x30b00d, + 0x2142c4, + 0x22fd8a, + 0x230d4a, + 0x23120a, + 0x2099c7, + 0x23af07, + 0x23d844, + 0x22e206, + 0x20c144, + 0x2b4148, + 0x22cbc9, + 0x2b0a86, + 0x2b0a88, + 0x2422cd, + 0x2c6309, + 0x3ac008, + 0x264a07, + 0x2f1f0a, + 0x24c506, + 0x2580c7, + 0x2cc3c4, + 0x23f287, + 0x309c0a, + 0x3ae54e, + 0x21d205, + 0x3b4a4b, + 0x331a09, + 0x217649, + 0x21c747, + 0x2a34ca, + 0x20ac87, + 0x2fb1c9, + 0x38f0c8, + 0x3533cb, + 0x2da905, + 0x3a430a, + 0x266e09, + 0x26d2ca, + 0x2ca6cb, + 0x23f18b, + 0x28dc55, + 0x2e3b85, + 0x264a85, + 0x2ee0ca, + 0x3945ca, + 0x331787, + 0x21da83, + 0x2904c8, + 0x2d2c4a, + 0x223006, + 0x24dfc9, + 0x265c88, + 0x2d7984, + 0x2aa289, + 0x2bfa08, + 0x29fec7, + 0x3630c6, + 0x2ebe07, + 0x289a47, + 0x23d005, + 0x21d04c, + 0x250bc5, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2099c2, + 0x2a84c3, + 0x205503, + 0x204e83, + 0x200983, + 0x2a84c3, + 0x205503, + 0x25e983, + 0x200983, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 
0x205503, + 0x200983, + 0x16d208, + 0x2099c2, + 0x2006c2, + 0x231442, + 0x206502, + 0x200542, + 0x2decc2, + 0x46a84c3, + 0x232403, + 0x2163c3, + 0x2e9dc3, + 0x244183, + 0x209703, + 0x2d47c6, + 0x205503, + 0x200983, + 0x233183, + 0x16d208, + 0x31ae44, + 0x202107, + 0x392403, + 0x2ae584, + 0x22e043, + 0x21c7c3, + 0x2e9dc3, + 0x16fc07, + 0x205702, + 0x18d2c3, + 0x5a099c2, + 0x88f4d, + 0x8928d, + 0x231442, + 0x1b1384, + 0x200442, + 0x5fb1288, + 0xed844, + 0x16d208, + 0x1411d82, + 0x15054c6, + 0x231783, + 0x200c03, + 0x66a84c3, + 0x22fd84, + 0x6a32403, + 0x6ee9dc3, + 0x202bc2, + 0x3b1384, + 0x205503, + 0x2f78c3, + 0x203ec2, + 0x200983, + 0x21b5c2, + 0x2f2443, + 0x203082, + 0x211643, + 0x265d43, + 0x200202, + 0x16d208, + 0x231783, + 0x2f78c3, + 0x203ec2, + 0x2f2443, + 0x203082, + 0x211643, + 0x265d43, + 0x200202, + 0x2f2443, + 0x203082, + 0x211643, + 0x265d43, + 0x200202, + 0x2a84c3, + 0x38d2c3, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x3b1384, + 0x244183, + 0x209703, + 0x211cc4, + 0x205503, + 0x200983, + 0x20f942, + 0x201303, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x38d2c3, + 0x2099c2, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x3b1384, + 0x205503, + 0x200983, + 0x373605, + 0x212982, + 0x205702, + 0x16d208, + 0x1456108, + 0x2e9dc3, + 0x2274c1, + 0x202901, + 0x202941, + 0x23ad81, + 0x23ad01, + 0x30aec1, + 0x23aec1, + 0x2275c1, + 0x2eea41, + 0x30afc1, + 0x200141, + 0x200001, + 0x129845, + 0x16d208, + 0x201ec1, + 0x200701, + 0x200301, + 0x200081, + 0x200181, + 0x200401, + 0x200041, + 0x201181, + 0x200101, + 0x200281, + 0x200e81, + 0x2008c1, + 0x200441, + 0x201301, + 0x206ec1, + 0x200341, + 0x200801, + 0x2002c1, + 0x2000c1, + 0x201501, + 0x200201, + 0x200bc1, + 0x2005c1, + 0x201cc1, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2099c2, + 0x2a84c3, + 0x232403, + 0x200442, + 0x200983, + 0x16fc07, + 0x9807, + 0x1cdc6, + 0x13ef8a, + 0x88648, + 0x51d48, + 0x52107, + 0x191106, + 0xd8c05, + 0x192345, + 0x5d306, + 0x125c86, 
+ 0x25ef44, + 0x311547, + 0x16d208, + 0x2d5f04, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2163c3, + 0x2e9dc3, + 0x244183, + 0x209703, + 0x205503, + 0x200983, + 0x212982, + 0x2c5983, + 0x2bb143, + 0x32c243, + 0x2022c2, + 0x25d183, + 0x2030c3, + 0x204903, + 0x200001, + 0x2dc745, + 0x206b43, + 0x221344, + 0x26cc83, + 0x318ec3, + 0x21b103, + 0x35ff43, + 0xaaa84c3, + 0x235ac4, + 0x23dbc3, + 0x21cc43, + 0x21b0c3, + 0x22ffc3, + 0x232403, + 0x232143, + 0x2459c3, + 0x2a2703, + 0x318e43, + 0x2344c3, + 0x202643, + 0x24ce44, + 0x24e347, + 0x248902, + 0x250943, + 0x256303, + 0x273ac3, + 0x390f43, + 0x2025c3, + 0xaee9dc3, + 0x20bec3, + 0x2143c3, + 0x24a5c3, + 0x328085, + 0x209d43, + 0x2fa383, + 0xb21f903, + 0x365f03, + 0x20d543, + 0x227f83, + 0x209703, + 0x228502, + 0x27d2c3, + 0x205503, + 0x1604e83, + 0x224a43, + 0x209a43, + 0x204a03, + 0x200983, + 0x35fe83, + 0x20f943, + 0x201303, + 0x2efe83, + 0x2ff903, + 0x2f2603, + 0x204405, + 0x23e743, + 0x285346, + 0x2f2643, + 0x36cf43, + 0x3759c4, + 0x2d9083, + 0x2284c3, + 0x267ec3, + 0x233183, + 0x212982, + 0x22d543, + 0x3024c3, + 0x304144, + 0x377404, + 0x20ce83, + 0x16d208, + 0x205702, + 0x200242, + 0x2022c2, + 0x201702, + 0x202a42, + 0x206c02, + 0x245482, + 0x2007c2, + 0x20d882, + 0x200e82, + 0x20b102, + 0x20e602, + 0x2675c2, + 0x2056c2, + 0x2decc2, + 0x2013c2, + 0x2069c2, + 0x201302, + 0x2172c2, + 0x202482, + 0x200482, + 0x219382, + 0x202782, + 0x209842, + 0x2027c2, + 0x222702, + 0x203b42, + 0x5702, + 0x242, + 0x22c2, + 0x1702, + 0x2a42, + 0x6c02, + 0x45482, + 0x7c2, + 0xd882, + 0xe82, + 0xb102, + 0xe602, + 0x675c2, + 0x56c2, + 0xdecc2, + 0x13c2, + 0x69c2, + 0x1302, + 0x172c2, + 0x2482, + 0x482, + 0x19382, + 0x2782, + 0x9842, + 0x27c2, + 0x22702, + 0x3b42, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2099c2, + 0x200983, + 0xc6a84c3, + 0x2e9dc3, + 0x209703, + 0x21a2c2, + 0x16d208, + 0x2a84c3, + 0x232403, + 
0x2e9dc3, + 0x205503, + 0x200983, + 0x7b02, + 0x201bc2, + 0x153f3c5, + 0x25ed82, + 0x16d208, + 0x99c2, + 0x20c182, + 0x208d02, + 0x2024c2, + 0x209642, + 0x208442, + 0x192345, + 0x2038c2, + 0x203ec2, + 0x2023c2, + 0x204dc2, + 0x2013c2, + 0x385502, + 0x201102, + 0x236582, + 0x16fc07, + 0x1b270d, + 0xd8c89, + 0x56e8b, + 0xdd608, + 0x53dc9, + 0xfacc6, + 0x2e9dc3, + 0x16d208, + 0x16d208, + 0x52e06, + 0x1a78c7, + 0x205702, + 0x25ef44, + 0x2099c2, + 0x2a84c3, + 0x2006c2, + 0x232403, + 0x20d882, + 0x2d5f04, + 0x244183, + 0x249a02, + 0x205503, + 0x200442, + 0x200983, + 0x264a86, + 0x31ba0f, + 0x70a403, + 0x16d208, + 0x2099c2, + 0x2163c3, + 0x2e9dc3, + 0x209703, + 0x1526f4b, + 0xd9888, + 0x142b68a, + 0x14fa807, + 0xda405, + 0x16fc07, + 0x2099c2, + 0x2a84c3, + 0x2e9dc3, + 0x205503, + 0x205702, + 0x20c202, + 0x20bb42, + 0xfea84c3, + 0x23c042, + 0x232403, + 0x209d02, + 0x221402, + 0x2e9dc3, + 0x228782, + 0x251442, + 0x2a6c82, + 0x200f82, + 0x28d742, + 0x203442, + 0x202e42, + 0x267e42, + 0x24ecc2, + 0x211ec2, + 0x2ad882, + 0x2eab02, + 0x2182c2, + 0x2ad342, + 0x209703, + 0x20ec42, + 0x205503, + 0x200e42, + 0x281702, + 0x200983, + 0x25d202, + 0x209842, + 0x218942, + 0x202e02, + 0x200c42, + 0x2de402, + 0x20fe82, + 0x250b82, + 0x220642, + 0x30d44a, + 0x34d94a, + 0x37fc4a, + 0x3bbec2, + 0x202cc2, + 0x2058c2, + 0x1026e389, + 0x1072510a, + 0x1594ac7, + 0x1410843, + 0x24d50, + 0x50642, + 0x2030c4, + 0x10ea84c3, + 0x232403, + 0x249944, + 0x2e9dc3, + 0x3b1384, + 0x244183, + 0x209703, + 0x205503, + 0xdc105, + 0x204e83, + 0x200983, + 0x23e743, + 0x25ed03, + 0x16d208, + 0x1591084, + 0x18ff45, + 0x1a768a, + 0x116902, + 0x18ae46, + 0xaf551, + 0x1166e389, + 0x18ffc8, + 0x13f9c8, + 0xff387, + 0xec2, + 0x12984b, + 0x1a5b0a, + 0x21347, + 0x16d208, + 0x108f08, + 0xe4c7, + 0x17818f4b, + 0x1b887, + 0x1c02, + 0x6c707, + 0x1a1ca, + 0x13f6cf, + 0x988f, + 0x1b102, + 0x99c2, + 0xa2648, + 0x19e30a, + 0x1320c8, + 0xdc2, + 0x13f44f, + 0x9e18b, + 0x68bc8, + 0x38f47, + 0x388a, + 0x304cb, + 0x4efc9, + 0x11dd07, 
+ 0xfc34c, + 0x2c07, + 0x19b40a, + 0xd4ac8, + 0x1a3cce, + 0x1cdce, + 0x2118b, + 0x26ccb, + 0x27d4b, + 0x2c009, + 0x2da0b, + 0x5e7cd, + 0x85acb, + 0xdfc8d, + 0xe000d, + 0xe164a, + 0x17724b, + 0x1ae0cb, + 0x31c45, + 0x1424d50, + 0x12618f, + 0x1268cf, + 0xe2c0d, + 0x1b8f90, + 0x2bb82, + 0x17fb0388, + 0x9688, + 0x182ee705, + 0x48fcb, + 0x117090, + 0x4fdc8, + 0x26e8a, + 0x56b49, + 0x5cb47, + 0x5ce87, + 0x5d047, + 0x5f507, + 0x60587, + 0x60b87, + 0x61387, + 0x617c7, + 0x61cc7, + 0x61fc7, + 0x62fc7, + 0x63187, + 0x63347, + 0x63507, + 0x63807, + 0x64007, + 0x64c87, + 0x65407, + 0x66547, + 0x66b07, + 0x66cc7, + 0x67047, + 0x67487, + 0x67687, + 0x67947, + 0x67b07, + 0x67cc7, + 0x67f87, + 0x68247, + 0x68f07, + 0x69607, + 0x698c7, + 0x6a047, + 0x6a207, + 0x6a607, + 0x6aec7, + 0x6b147, + 0x6b547, + 0x6b707, + 0x6b8c7, + 0x70587, + 0x71387, + 0x718c7, + 0x71e47, + 0x72007, + 0x72387, + 0x728c7, + 0xdb42, + 0xbbb0a, + 0xffb87, + 0x184cfa0b, + 0x14cfa16, + 0x17e91, + 0x1082ca, + 0xa24ca, + 0x52e06, + 0xd0f8b, + 0x5e82, + 0x2f711, + 0x157789, + 0x942c9, + 0x67e42, + 0x9f54a, + 0xa4909, + 0xa504f, + 0xa5a8e, + 0xa6388, + 0x17f42, + 0x18ef09, + 0x17f08e, + 0xf80cc, + 0xdf20f, + 0x198f4e, + 0xc84c, + 0x11809, + 0x13491, + 0x222c8, + 0x24512, + 0x281cd, + 0x2e0cd, + 0x8618b, + 0xbadd5, + 0xbb9c9, + 0xe268a, + 0x120689, + 0x160310, + 0x39a0b, + 0x4480f, + 0x5648b, + 0x58a8c, + 0x70f90, + 0x7beca, + 0x7d18d, + 0x80d4e, + 0x86cca, + 0x8720c, + 0x89714, + 0x157411, + 0x1a200b, + 0x9004f, + 0x9320d, + 0x9a00e, + 0x9fd8c, + 0xa1acc, + 0xaae8b, + 0xab18e, + 0xab990, + 0x154c0b, + 0x1160cd, + 0x10e80f, + 0x17e50c, + 0xb090e, + 0xb2391, + 0xb3ecc, + 0xc00c7, + 0xc064d, + 0xc0fcc, + 0xc1dd0, + 0x102c8d, + 0x12bc87, + 0xc7750, + 0xd3748, + 0xd51cb, + 0x12aa8f, + 0x17e248, + 0x1084cd, + 0x14d550, + 0x18ba60c6, + 0xaff43, + 0xbe02, + 0x11e309, + 0x5394a, + 0x104186, + 0x18cd9009, + 0x11d43, + 0xd6191, + 0xd65c9, + 0xd7607, + 0xaf6cb, + 0xde6d0, + 0xdeb8c, + 0xdf6c5, + 0x18f248, + 0x19f94a, + 
0x111947, + 0x33c2, + 0x124a4a, + 0x127549, + 0x35b4a, + 0x8a3cf, + 0x3edcb, + 0x12814c, + 0x169b92, + 0xaea45, + 0x166aca, + 0x192ece45, + 0x18020c, + 0x122843, + 0x185502, + 0xf2bca, + 0x14f3fcc, + 0x1b1a48, + 0xdfe48, + 0x16fb87, + 0x1c42, + 0x3082, + 0x3f590, + 0x27c2, + 0x1ad58f, + 0x5d306, + 0x77ece, + 0xe598b, + 0x86ec8, + 0xd1a49, + 0x17d152, + 0x1abecd, + 0x55b08, + 0x56d49, + 0x572cd, + 0x57b89, + 0x5c58b, + 0x5d848, + 0x61ac8, + 0x628c8, + 0x62b49, + 0x62d4a, + 0x6398c, + 0xe3cca, + 0xff947, + 0x2270d, + 0xf4b4b, + 0x11a5cc, + 0x18b050, + 0xc2, + 0x7a14d, + 0x2dc2, + 0x35482, + 0xff88a, + 0x1081ca, + 0x10928b, + 0x1ae28c, + 0x108c8e, + 0x100cd, + 0x1b3908, + 0x7b02, + 0x11b5ec4e, + 0x1227020e, + 0x12a83a0a, + 0x1336864e, + 0x13b143ce, + 0x1432ee0c, + 0x1594ac7, + 0x1594ac9, + 0x1410843, + 0x14b3054c, + 0x15333209, + 0x15b49dc9, + 0x50642, + 0x18fb51, + 0x70151, + 0x8394d, + 0x17acd1, + 0x114311, + 0x12ed4f, + 0x13048f, + 0x13314c, + 0x149d0c, + 0x1a688d, + 0x1bb815, + 0x5064c, + 0x11f0cc, + 0xe9c50, + 0x11d44c, + 0x12a54c, + 0x15e999, + 0x168399, + 0x16fd99, + 0x175d54, + 0x181ad4, + 0x19b7d4, + 0x19d714, + 0x1ac314, + 0x16250709, + 0x1699ba89, + 0x1731f189, + 0x11e224c9, + 0x50642, + 0x126224c9, + 0x50642, + 0x15e98a, + 0x50642, + 0x12e224c9, + 0x50642, + 0x15e98a, + 0x50642, + 0x136224c9, + 0x50642, + 0x13e224c9, + 0x50642, + 0x146224c9, + 0x50642, + 0x15e98a, + 0x50642, + 0x14e224c9, + 0x50642, + 0x15e98a, + 0x50642, + 0x156224c9, + 0x50642, + 0x15e224c9, + 0x50642, + 0x15e98a, + 0x50642, + 0x166224c9, + 0x50642, + 0x16e224c9, + 0x50642, + 0x176224c9, + 0x50642, + 0x15e98a, + 0x50642, + 0xaf545, + 0x1a5b04, + 0x2bb84, + 0x1aa404, + 0x1a75c4, + 0xc484, + 0x13fc4, + 0x58f44, + 0xff384, + 0x14ab3c3, + 0x143e603, + 0xfb244, + 0x1547c03, + 0x2bb82, + 0x100c3, + 0x205702, + 0x2099c2, + 0x2006c2, + 0x218342, + 0x20d882, + 0x200442, + 0x203082, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x24a5c3, + 0x205503, + 0x200983, + 0x16d208, + 0x2a84c3, + 0x232403, + 
0x205503, + 0x200983, + 0x3fc3, + 0x2e9dc3, + 0x205702, + 0x38d2c3, + 0x1aea84c3, + 0x3b8e47, + 0x2e9dc3, + 0x206343, + 0x211cc4, + 0x205503, + 0x200983, + 0x255cca, + 0x264a85, + 0x201303, + 0x20b0c2, + 0x16d208, + 0x16d208, + 0x99c2, + 0x11fd02, + 0x6c845, + 0x129845, + 0x16d208, + 0x1b887, + 0xa84c3, + 0x1ba38e47, + 0x13ee06, + 0x1bd49c05, + 0x11de07, + 0x66ca, + 0x3748, + 0x65c7, + 0x56948, + 0x28d87, + 0x2c6cf, + 0x30b87, + 0x3b806, + 0x117090, + 0x12330f, + 0x104204, + 0x1c11dece, + 0xa8b4c, + 0x4f14a, + 0x9a2c7, + 0x112b8a, + 0x18f409, + 0xbf34a, + 0x5414a, + 0x104186, + 0x9a38a, + 0x8350a, + 0xe47c9, + 0xd5a48, + 0xd5d46, + 0xd9a8d, + 0xb3c45, + 0x1a78c7, + 0x5d6c7, + 0xd9394, + 0xf938b, + 0x68a0a, + 0xa2d0d, + 0x1cdc3, + 0x1cdc3, + 0x1cdc6, + 0x1cdc3, + 0x18d2c3, + 0x16d208, + 0x99c2, + 0x49944, + 0x887c3, + 0x173605, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2030c3, + 0x2a84c3, + 0x232403, + 0x2163c3, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x294483, + 0x25ed03, + 0x2030c3, + 0x25ef44, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2082c3, + 0x2a84c3, + 0x232403, + 0x218343, + 0x2163c3, + 0x2e9dc3, + 0x3b1384, + 0x353903, + 0x227f83, + 0x209703, + 0x205503, + 0x200983, + 0x201303, + 0x311dc3, + 0x1dea84c3, + 0x232403, + 0x246383, + 0x2e9dc3, + 0x20a203, + 0x227f83, + 0x200983, + 0x2072c3, + 0x33bac4, + 0x16d208, + 0x1e6a84c3, + 0x232403, + 0x2a6443, + 0x2e9dc3, + 0x209703, + 0x211cc4, + 0x205503, + 0x200983, + 0x21db03, + 0x16d208, + 0x1eea84c3, + 0x232403, + 0x2163c3, + 0x204e83, + 0x200983, + 0x16d208, + 0x1594ac7, + 0x38d2c3, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x3b1384, + 0x211cc4, + 0x205503, + 0x200983, + 0x129845, + 0x16fc07, + 0xd95cb, + 0xd69c4, + 0xb3c45, + 0x1456108, + 0xa6a8d, + 0x20284a05, + 0x18004, + 0x169c3, + 0x186345, + 0x349a05, + 0x16d208, + 0x1cdc2, + 0x336c3, + 0xf1446, + 0x319ec8, + 0x313bc7, + 0x25ef44, + 0x3b2c86, + 0x3bb6c6, + 0x16d208, + 0x30ce43, + 0x33e589, + 0x237295, + 
0x3729f, + 0x2a84c3, + 0x31d012, + 0xefac6, + 0x10a045, + 0x26e8a, + 0x56b49, + 0x31cdcf, + 0x2d5f04, + 0x20b145, + 0x2fa150, + 0x3b0887, + 0x204e83, + 0x28b148, + 0x125bc6, + 0x2ae1ca, + 0x256044, + 0x2ec883, + 0x264a86, + 0x20b0c2, + 0x22d54b, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x2f1743, + 0x2099c2, + 0x2cd83, + 0x205503, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x206343, + 0x221f03, + 0x200983, + 0x2099c2, + 0x2a84c3, + 0x232403, + 0x205503, + 0x200983, + 0x205702, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x9885, + 0x25ef44, + 0x2a84c3, + 0x232403, + 0x210444, + 0x205503, + 0x200983, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2163c3, + 0x2143c3, + 0x209703, + 0x205503, + 0x200983, + 0x2099c2, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x391683, + 0x63643, + 0x6343, + 0x205503, + 0x200983, + 0x30d44a, + 0x32b0c9, + 0x346b0b, + 0x34708a, + 0x34d94a, + 0x35d74b, + 0x371e0a, + 0x37814a, + 0x37fc4a, + 0x37fecb, + 0x39f689, + 0x3a140a, + 0x3a178b, + 0x3acfcb, + 0x3b9eca, + 0x2a84c3, + 0x232403, + 0x2163c3, + 0x209703, + 0x205503, + 0x200983, + 0x4589, + 0x16d208, + 0x2a84c3, + 0x25cb44, + 0x207ac2, + 0x211cc4, + 0x26fc45, + 0x2030c3, + 0x25ef44, + 0x2a84c3, + 0x235ac4, + 0x232403, + 0x249944, + 0x2d5f04, + 0x3b1384, + 0x227f83, + 0x205503, + 0x200983, + 0x27a305, + 0x2082c3, + 0x201303, + 0x22ed03, + 0x250cc4, + 0x390fc4, + 0x34ae45, + 0x16d208, + 0x302044, + 0x3510c6, + 0x276384, + 0x2099c2, + 0x371007, + 0x24c0c7, + 0x247784, + 0x2555c5, + 0x302e85, + 0x2a9305, + 0x3b1384, + 0x3b8ac8, + 0x239486, + 0x30c188, + 0x24ed05, + 0x2da905, + 0x236b84, + 0x200983, + 0x2ed844, + 0x35c946, + 0x264b83, + 0x250cc4, + 0x256005, + 0x32d104, + 0x334944, + 0x20b0c2, + 0x2425c6, + 0x3962c6, + 0x2fdc05, + 0x205702, + 0x38d2c3, + 
0x262099c2, + 0x2333c4, + 0x20d882, + 0x209703, + 0x202c82, + 0x205503, + 0x200442, + 0x214843, + 0x25ed03, + 0x16d208, + 0x16d208, + 0x2e9dc3, + 0x205702, + 0x26e099c2, + 0x2e9dc3, + 0x245b43, + 0x353903, + 0x327344, + 0x205503, + 0x200983, + 0x16d208, + 0x205702, + 0x276099c2, + 0x2a84c3, + 0x205503, + 0x200983, + 0x482, + 0x20a9c2, + 0x212982, + 0x206343, + 0x2e87c3, + 0x205702, + 0x129845, + 0x16d208, + 0x16fc07, + 0x2099c2, + 0x232403, + 0x249944, + 0x2032c3, + 0x2e9dc3, + 0x2143c3, + 0x209703, + 0x205503, + 0x216b03, + 0x200983, + 0x21da83, + 0x118fd3, + 0x11c954, + 0x16fc07, + 0x13b46, + 0x53b4b, + 0x1cdc6, + 0x51b87, + 0x11ab09, + 0xe6d4a, + 0x8850d, + 0x1b240c, + 0x1ada8a, + 0x192345, + 0x6708, + 0x5d306, + 0x125c86, + 0x22bb82, + 0xff14c, + 0x1a5cc7, + 0x22e51, + 0x2a84c3, + 0x568c5, + 0x77848, + 0x9e04, + 0x288347c6, + 0x17e86, + 0x8cb46, + 0x8da0a, + 0xac543, + 0x28e54b04, + 0x11aac5, + 0xde283, + 0xdc105, + 0xd104c, + 0xf04c8, + 0xb5708, + 0x9e009, + 0x134b08, + 0x141e046, + 0xda40a, + 0x82b48, + 0xf4648, + 0xff384, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x205702, + 0x2099c2, + 0x2e9dc3, + 0x202bc2, + 0x205503, + 0x200983, + 0x214843, + 0x3653cf, + 0x36578e, + 0x16d208, + 0x2a84c3, + 0x42f87, + 0x232403, + 0x2e9dc3, + 0x244183, + 0x205503, + 0x200983, + 0x201bc3, + 0x201bc7, + 0x200142, + 0x32c249, + 0x200242, + 0x23f88b, + 0x297b8a, + 0x2a2a49, + 0x200882, + 0x391206, + 0x34ed15, + 0x23f9d5, + 0x246993, + 0x23ff53, + 0x202a82, + 0x205ac5, + 0x3b364c, + 0x27160b, + 0x2726c5, + 0x201702, + 0x284202, + 0x386fc6, + 0x200ec2, + 0x3695c6, + 0x2d4c4d, + 0x27ef4c, + 0x224dc4, + 0x203dc2, + 0x205942, + 0x2248c8, + 0x202a42, + 0x312fc6, + 0x2ba844, + 0x34eed5, + 0x246b13, + 0x210783, + 0x32fa0a, + 0x3bb147, + 0x3094c9, + 0x37b887, + 0x30f242, + 0x200002, + 0x3aef06, + 0x20cb42, + 0x16d208, + 0x2105c2, + 0x20b382, + 0x274e87, + 0x20f687, + 0x21b585, + 0x201c02, + 0x21da47, + 0x21dc08, + 0x242b42, + 0x2bf3c2, + 0x22e802, + 
0x201ec2, + 0x237b88, + 0x201ec3, + 0x2b5308, + 0x2cf1cd, + 0x213c03, + 0x327988, + 0x239f8f, + 0x23a34e, + 0x25edca, + 0x229751, + 0x229bd0, + 0x2bcdcd, + 0x2bd10c, + 0x311c47, + 0x32fb87, + 0x3b2d49, + 0x224ec2, + 0x206c02, + 0x25340c, + 0x25370b, + 0x204142, + 0x2ab046, + 0x21a1c2, + 0x209882, + 0x21b102, + 0x2099c2, + 0x383a84, + 0x238bc7, + 0x204682, + 0x23d147, + 0x23e487, + 0x20e142, + 0x2301c2, + 0x242e45, + 0x205742, + 0x362e0e, + 0x2ebb8d, + 0x232403, + 0x2be90e, + 0x2e064d, + 0x37eac3, + 0x200e02, + 0x21fec4, + 0x2454c2, + 0x2175c2, + 0x358e45, + 0x364b47, + 0x383382, + 0x218342, + 0x249547, + 0x24d288, + 0x248902, + 0x2aeac6, + 0x25328c, + 0x2535cb, + 0x20fc82, + 0x25924f, + 0x259610, + 0x259a0f, + 0x259dd5, + 0x25a314, + 0x25a80e, + 0x25ab8e, + 0x25af0f, + 0x25b2ce, + 0x25b654, + 0x25bb53, + 0x25c00d, + 0x272a89, + 0x2895c3, + 0x200782, + 0x22b0c5, + 0x207f86, + 0x20d882, + 0x21f507, + 0x2e9dc3, + 0x205e82, + 0x362a08, + 0x229991, + 0x229dd0, + 0x206482, + 0x288d87, + 0x203942, + 0x214607, + 0x20be02, + 0x319cc9, + 0x386f87, + 0x27aac8, + 0x234606, + 0x2e86c3, + 0x32a105, + 0x232682, + 0x202082, + 0x3af305, + 0x380685, + 0x2040c2, + 0x24c543, + 0x32d187, + 0x223787, + 0x200502, + 0x254684, + 0x223b83, + 0x223b89, + 0x22c548, + 0x200282, + 0x204bc2, + 0x3105c7, + 0x31ff05, + 0x2a5348, + 0x219947, + 0x200e83, + 0x28c446, + 0x2bcc4d, + 0x2bcfcc, + 0x2b45c6, + 0x208d02, + 0x2a8542, + 0x202342, + 0x239e0f, + 0x23a20e, + 0x302f07, + 0x203d02, + 0x2bf745, + 0x2bf746, + 0x20f242, + 0x20ec42, + 0x221f06, + 0x214543, + 0x214546, + 0x2c6985, + 0x2c698d, + 0x2c6f55, + 0x2c814c, + 0x2c95cd, + 0x2c9992, + 0x20e602, + 0x2675c2, + 0x202d02, + 0x240806, + 0x2f7f86, + 0x2033c2, + 0x208006, + 0x2023c2, + 0x38b785, + 0x200542, + 0x2ebc89, + 0x31554c, + 0x31588b, + 0x200442, + 0x24e748, + 0x203b02, + 0x2056c2, + 0x26a346, + 0x222445, + 0x226747, + 0x257d85, + 0x29e405, + 0x243002, + 0x2067c2, + 0x2013c2, + 0x2df507, + 0x380c0d, + 0x380f8c, + 0x22f087, + 0x20f982, + 
0x2069c2, + 0x241248, + 0x31e488, + 0x2e3988, + 0x308484, + 0x2ab407, + 0x2e90c3, + 0x228ec2, + 0x2082c2, + 0x2eb3c9, + 0x3a40c7, + 0x201302, + 0x26a745, + 0x22d4c2, + 0x21aa02, + 0x2f9f03, + 0x2f9f06, + 0x2f1742, + 0x2f23c2, + 0x201a42, + 0x202f86, + 0x21fe07, + 0x213bc2, + 0x205ec2, + 0x2b514f, + 0x2be74d, + 0x3872ce, + 0x2e04cc, + 0x2009c2, + 0x207302, + 0x234445, + 0x30ba46, + 0x2018c2, + 0x202482, + 0x200482, + 0x2198c4, + 0x2cf044, + 0x2d0e86, + 0x203082, + 0x36cac7, + 0x203083, + 0x285d48, + 0x34e488, + 0x239887, + 0x240706, + 0x203902, + 0x234b03, + 0x234b07, + 0x273946, + 0x2dee45, + 0x308808, + 0x200d02, + 0x331207, + 0x222702, + 0x361782, + 0x20cfc2, + 0x2c6749, + 0x230982, + 0x200842, + 0x22f303, + 0x331c87, + 0x2002c2, + 0x3156cc, + 0x3159cb, + 0x2b4646, + 0x2de1c5, + 0x221c82, + 0x203b42, + 0x2b7bc6, + 0x260dc3, + 0x38c187, + 0x236102, + 0x201442, + 0x34eb95, + 0x23fb95, + 0x246853, + 0x2400d3, + 0x2585c7, + 0x271a48, + 0x271a50, + 0x28d2cf, + 0x297953, + 0x2a2812, + 0x32be10, + 0x2d544f, + 0x35f7d2, + 0x30c3d1, + 0x2b7613, + 0x2c6512, + 0x2cff4f, + 0x2d2e8e, + 0x2d3f52, + 0x2d71d1, + 0x2d7c8f, + 0x30440e, + 0x2f0691, + 0x2f17d0, + 0x2f2752, + 0x2fc711, + 0x364586, + 0x36d3c7, + 0x372187, + 0x203142, + 0x27d8c5, + 0x3933c7, + 0x212982, + 0x209942, + 0x228a85, + 0x21e743, + 0x34b0c6, + 0x380dcd, + 0x38110c, + 0x201682, + 0x3b34cb, + 0x2714ca, + 0x20598a, + 0x2b6449, + 0x2ea64b, + 0x219a8d, + 0x2fa5cc, + 0x25180a, + 0x22090c, + 0x26908b, + 0x27250c, + 0x29474b, + 0x3154c3, + 0x36cfc6, + 0x3a98c2, + 0x2f4542, + 0x20a743, + 0x208602, + 0x21fe83, + 0x2366c6, + 0x259f87, + 0x2c7fc6, + 0x39e4c8, + 0x31e188, + 0x2ce146, + 0x201f82, + 0x2fd5cd, + 0x2fd90c, + 0x2d5fc7, + 0x301f07, + 0x213b82, + 0x201502, + 0x234a82, + 0x24d642, + 0x2099c2, + 0x205503, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x211cc4, + 0x205503, + 0x200983, + 0x214843, + 0x205702, + 0x2021c2, + 0x2ae8fdc5, + 0x2b247e45, + 0x2b717806, + 0x16d208, + 0x2baaee05, + 0x2099c2, + 
0x2006c2, + 0x2bfb3ac5, + 0x2c27bdc5, + 0x2c67c9c7, + 0x2ca86a09, + 0x2ce3bc44, + 0x20d882, + 0x205e82, + 0x2d24b5c5, + 0x2d68f849, + 0x2db1db88, + 0x2deab805, + 0x2e300187, + 0x2e61ed48, + 0x2eae5d85, + 0x2ee00106, + 0x2f337809, + 0x2f6b5a48, + 0x2fac0488, + 0x2fe9704a, + 0x302732c4, + 0x306d13c5, + 0x30abc9c8, + 0x30e03a85, + 0x20cec2, + 0x31248a43, + 0x316a1686, + 0x31b60148, + 0x31eb94c6, + 0x32281f08, + 0x32719606, + 0x32adef04, + 0x200c82, + 0x32f2cb87, + 0x332a75c4, + 0x336756c7, + 0x33ba2987, + 0x200442, + 0x33e9b0c5, + 0x34334f84, + 0x346cd907, + 0x34a5f187, + 0x34e80886, + 0x3527c585, + 0x356959c7, + 0x35ad0b48, + 0x35e2b447, + 0x363164c9, + 0x36793105, + 0x36b31dc7, + 0x36e8f546, + 0x37391408, + 0x2273cd, + 0x279909, + 0x28174b, + 0x2a4b0b, + 0x34058b, + 0x2ffe8b, + 0x30bc4b, + 0x30bf0b, + 0x30c809, + 0x30d6cb, + 0x30d98b, + 0x30e48b, + 0x30f5ca, + 0x30fb0a, + 0x31010c, + 0x314d8b, + 0x31670a, + 0x32904a, + 0x33404e, + 0x33568e, + 0x335a0a, + 0x33808a, + 0x338dcb, + 0x33908b, + 0x339e8b, + 0x354ecb, + 0x3554ca, + 0x35618b, + 0x35644a, + 0x3566ca, + 0x35694a, + 0x372b0b, + 0x37914b, + 0x37c74e, + 0x37cacb, + 0x38454b, + 0x385acb, + 0x38900a, + 0x389289, + 0x3894ca, + 0x38a94a, + 0x3a00cb, + 0x3a1a4b, + 0x3a22ca, + 0x3a48cb, + 0x3a8c4b, + 0x3b990b, + 0x3767e648, + 0x37a87c89, + 0x37e9de89, + 0x382dacc8, + 0x342505, + 0x217083, + 0x21c6c4, + 0x220005, + 0x23b986, + 0x25da05, + 0x2864c4, + 0x21f408, + 0x308005, + 0x291784, + 0x203447, + 0x29cf8a, + 0x3712ca, + 0x338547, + 0x3af9c7, + 0x2f8f07, + 0x264e87, + 0x2f60c5, + 0x33bb86, + 0x2bb847, + 0x2b4904, + 0x2e4646, + 0x2e4546, + 0x3b9585, + 0x26d1c4, + 0x3519c6, + 0x29bf47, + 0x285746, + 0x2e3247, + 0x25e443, + 0x2b1c06, + 0x2328c5, + 0x27cac7, + 0x2641ca, + 0x260e44, + 0x217c08, + 0x2abd89, + 0x2cd247, + 0x336286, + 0x24e9c8, + 0x2b9c09, + 0x309684, + 0x366944, + 0x244245, + 0x2bb548, + 0x2c4b07, + 0x2a9709, + 0x364688, + 0x345e86, + 0x3204c6, + 0x298048, + 0x359646, + 0x247e45, + 0x280946, + 0x275ec8, + 
0x24da46, + 0x2525cb, + 0x298646, + 0x29994d, + 0x3a6005, + 0x2a7486, + 0x208b45, + 0x2f9bc9, + 0x2f9a87, + 0x37a208, + 0x266986, + 0x298bc9, + 0x3793c6, + 0x264145, + 0x268686, + 0x2cae46, + 0x2cb3c9, + 0x3530c6, + 0x339487, + 0x26ad85, + 0x202ac3, + 0x252745, + 0x299c07, + 0x33c6c6, + 0x3a5f09, + 0x317806, + 0x280b86, + 0x210c49, + 0x280349, + 0x29fc07, + 0x282f88, + 0x28c989, + 0x27d548, + 0x378386, + 0x2d5805, + 0x2418ca, + 0x280c06, + 0x3b7986, + 0x2c8985, + 0x265808, + 0x223307, + 0x22f50a, + 0x249e46, + 0x279d45, + 0x37aa46, + 0x21ac47, + 0x336147, + 0x21bbc5, + 0x264305, + 0x357dc6, + 0x2ac5c6, + 0x34dc06, + 0x2b3204, + 0x27f689, + 0x288b46, + 0x2dd38a, + 0x21b388, + 0x3078c8, + 0x3712ca, + 0x20b445, + 0x29be85, + 0x350b88, + 0x2b2c88, + 0x27b5c7, + 0x258946, + 0x322388, + 0x2fdec7, + 0x27dc48, + 0x2b3846, + 0x281408, + 0x294f06, + 0x24ee87, + 0x299ec6, + 0x3519c6, + 0x3778ca, + 0x2bd8c6, + 0x2d5809, + 0x26dbc6, + 0x2af14a, + 0x2def09, + 0x2fb486, + 0x2b4b04, + 0x22b18d, + 0x287f07, + 0x326cc6, + 0x2c0345, + 0x379445, + 0x374246, + 0x2cd749, + 0x2b1647, + 0x277306, + 0x2cc246, + 0x286549, + 0x247d84, + 0x3482c4, + 0x352cc8, + 0x236a86, + 0x26a808, + 0x2e41c8, + 0x312747, + 0x3b7549, + 0x34de07, + 0x2aecca, + 0x2e1f8f, + 0x23188a, + 0x234245, + 0x276105, + 0x216e85, + 0x2ba787, + 0x21a803, + 0x283188, + 0x396786, + 0x396889, + 0x2b87c6, + 0x3b5207, + 0x298989, + 0x37a108, + 0x2c8a47, + 0x30a343, + 0x342585, + 0x21a785, + 0x2b304b, + 0x203b44, + 0x2c2084, + 0x274646, + 0x30abc7, + 0x382bca, + 0x248ac7, + 0x311e87, + 0x27bdc5, + 0x200645, + 0x2eef89, + 0x3519c6, + 0x24894d, + 0x353305, + 0x2b1383, + 0x205043, + 0x26f685, + 0x345c45, + 0x24e9c8, + 0x2790c7, + 0x348046, + 0x29db06, + 0x229105, + 0x2326c7, + 0x312247, + 0x239347, + 0x2d144a, + 0x2b1cc8, + 0x2b3204, + 0x24d7c7, + 0x27acc7, + 0x339306, + 0x262107, + 0x2dc4c8, + 0x2e6f08, + 0x268506, + 0x303008, + 0x2c87c4, + 0x2bb846, + 0x2353c6, + 0x33bfc6, + 0x2ba986, + 0x286004, + 0x264f46, + 0x2bf5c6, + 
0x297546, + 0x247846, + 0x204f06, + 0x26e2c6, + 0x347f48, + 0x2b0748, + 0x2d1c88, + 0x25dc08, + 0x350b06, + 0x20dcc5, + 0x315ec6, + 0x2ab885, + 0x388447, + 0x215305, + 0x2125c3, + 0x211585, + 0x344cc4, + 0x205045, + 0x203b03, + 0x33a447, + 0x354648, + 0x2e3306, + 0x2c218d, + 0x2760c6, + 0x296ac5, + 0x2b7843, + 0x2bc389, + 0x247f06, + 0x28e7c6, + 0x29f4c4, + 0x231807, + 0x233606, + 0x2b1905, + 0x203cc3, + 0x3abd84, + 0x27ae86, + 0x2354c4, + 0x2da048, + 0x38ba89, + 0x215589, + 0x29f2ca, + 0x2a070d, + 0x313447, + 0x2b9186, + 0x206804, + 0x286a09, + 0x284688, + 0x287b06, + 0x33f286, + 0x262107, + 0x2b6b46, + 0x226346, + 0x26d606, + 0x3a2a0a, + 0x21ed48, + 0x2bacc5, + 0x262549, + 0x27e14a, + 0x2f5d08, + 0x29b908, + 0x295f08, + 0x2a7acc, + 0x30e705, + 0x29dd88, + 0x2e6586, + 0x37a386, + 0x3b50c7, + 0x2489c5, + 0x280ac5, + 0x215449, + 0x20e247, + 0x396845, + 0x227887, + 0x205043, + 0x2c5045, + 0x20ef48, + 0x252ac7, + 0x29b7c9, + 0x2d7985, + 0x2fa984, + 0x2a03c8, + 0x32ccc7, + 0x2c8c08, + 0x38d688, + 0x354b05, + 0x3a3946, + 0x278cc6, + 0x244609, + 0x2b01c7, + 0x2ac006, + 0x313787, + 0x210103, + 0x23bc44, + 0x2a1785, + 0x232804, + 0x3833c4, + 0x27fdc7, + 0x26c147, + 0x22e704, + 0x29b610, + 0x3b3c47, + 0x200645, + 0x24c20c, + 0x20a8c4, + 0x2c1488, + 0x24ed89, + 0x35acc6, + 0x334c48, + 0x215244, + 0x36c4c8, + 0x22fb06, + 0x2accc8, + 0x29c506, + 0x2bec0b, + 0x202ac5, + 0x2c8748, + 0x215ac4, + 0x38beca, + 0x29b7c9, + 0x245f06, + 0x216f48, + 0x256385, + 0x2b0f44, + 0x2c1386, + 0x239208, + 0x27e648, + 0x322c06, + 0x3a9ec4, + 0x241846, + 0x34de87, + 0x2755c7, + 0x26210f, + 0x207347, + 0x2fb547, + 0x3709c5, + 0x353e05, + 0x29f8c9, + 0x2dd046, + 0x27cc05, + 0x280647, + 0x2e0bc8, + 0x297645, + 0x299ec6, + 0x21b1c8, + 0x2b94ca, + 0x2db4c8, + 0x28ac87, + 0x2e23c6, + 0x262506, + 0x21a5c3, + 0x216a43, + 0x27e309, + 0x28c809, + 0x2c1286, + 0x2d7985, + 0x33bd48, + 0x216f48, + 0x3597c8, + 0x26d68b, + 0x2c23c7, + 0x30a589, + 0x262388, + 0x343084, + 0x3514c8, + 0x28cd89, + 0x2ac305, + 
0x2ba687, + 0x23bcc5, + 0x27e548, + 0x28fc4b, + 0x295710, + 0x2a6dc5, + 0x215a0c, + 0x348205, + 0x27be43, + 0x2a8f86, + 0x2be6c4, + 0x335086, + 0x29bf47, + 0x21b244, + 0x240b88, + 0x28304d, + 0x302945, + 0x29b104, + 0x2243c4, + 0x276949, + 0x2a11c8, + 0x317687, + 0x22fb88, + 0x27f748, + 0x277605, + 0x209287, + 0x277587, + 0x33e347, + 0x264309, + 0x233489, + 0x214c46, + 0x2bd306, + 0x262346, + 0x37f785, + 0x3a7184, + 0x200006, + 0x200386, + 0x277648, + 0x21a90b, + 0x260d07, + 0x206804, + 0x353646, + 0x2fe447, + 0x26dec5, + 0x391d05, + 0x219644, + 0x233406, + 0x200088, + 0x286a09, + 0x2510c6, + 0x284048, + 0x2b19c6, + 0x345248, + 0x306dcc, + 0x2774c6, + 0x29678d, + 0x296c0b, + 0x339545, + 0x312387, + 0x3531c6, + 0x336008, + 0x214cc9, + 0x2d0588, + 0x200645, + 0x277987, + 0x27d648, + 0x349649, + 0x28e946, + 0x250fca, + 0x335d88, + 0x2d03cb, + 0x39818c, + 0x36c5c8, + 0x27a7c6, + 0x208c88, + 0x3b77c7, + 0x32cf49, + 0x28f74d, + 0x299dc6, + 0x27b808, + 0x2b0609, + 0x2bda48, + 0x281508, + 0x2bfe0c, + 0x2c0b47, + 0x2c1887, + 0x264145, + 0x2ad587, + 0x2e0a88, + 0x2c1406, + 0x2556cc, + 0x2ef888, + 0x2ccb88, + 0x25dec6, + 0x21a507, + 0x214e44, + 0x25dc08, + 0x22200c, + 0x2ce24c, + 0x2342c5, + 0x2d0d47, + 0x3a9e46, + 0x21a486, + 0x2f9d88, + 0x3af904, + 0x28574b, + 0x36cc0b, + 0x2e23c6, + 0x282ec7, + 0x37a805, + 0x269a05, + 0x285886, + 0x256345, + 0x203b05, + 0x2cc9c7, + 0x274c49, + 0x2ac784, + 0x2fbb45, + 0x2e4bc5, + 0x2d9dc8, + 0x329d05, + 0x2b72c9, + 0x2ae5c7, + 0x2ae5cb, + 0x381306, + 0x347c89, + 0x26d108, + 0x276545, + 0x33e448, + 0x2334c8, + 0x245747, + 0x3776c7, + 0x27fe49, + 0x2acc07, + 0x28a989, + 0x2aa70c, + 0x3163c8, + 0x2b2ac9, + 0x2b3d47, + 0x27f809, + 0x26c287, + 0x398288, + 0x3b7705, + 0x2bb7c6, + 0x2c0388, + 0x308a88, + 0x27e009, + 0x203b47, + 0x269ac5, + 0x222b09, + 0x2bd6c6, + 0x28f544, + 0x30e1c6, + 0x35ffc8, + 0x232ac7, + 0x21ab08, + 0x3030c9, + 0x3a3707, + 0x29d146, + 0x312444, + 0x211609, + 0x209108, + 0x25dd87, + 0x27eb46, + 0x21a846, + 0x3b7904, + 
0x2241c6, + 0x204fc3, + 0x3b1649, + 0x202a86, + 0x303345, + 0x29db06, + 0x26cac5, + 0x27dac8, + 0x36c307, + 0x381646, + 0x3b3b06, + 0x3078c8, + 0x29fa47, + 0x299e05, + 0x29b408, + 0x3a1e48, + 0x335d88, + 0x3480c5, + 0x2bb846, + 0x215349, + 0x244484, + 0x26c94b, + 0x22604b, + 0x2babc9, + 0x205043, + 0x254485, + 0x2214c6, + 0x385208, + 0x2e1f04, + 0x2e3306, + 0x2d1589, + 0x2ca445, + 0x2cc906, + 0x32ccc6, + 0x216f44, + 0x2a764a, + 0x303288, + 0x308a86, + 0x3b8645, + 0x37a687, + 0x2e0fc7, + 0x3a3944, + 0x226287, + 0x2aecc4, + 0x33bf46, + 0x2096c3, + 0x264305, + 0x32ad45, + 0x207588, + 0x24d985, + 0x277209, + 0x25da47, + 0x25da4b, + 0x2a148c, + 0x2a224a, + 0x300187, + 0x203503, + 0x3afc08, + 0x348285, + 0x2976c5, + 0x205104, + 0x398186, + 0x24ed86, + 0x224207, + 0x33448b, + 0x286004, + 0x2e6684, + 0x21f044, + 0x2cafc6, + 0x21b244, + 0x2bb648, + 0x342445, + 0x21ba45, + 0x359707, + 0x312489, + 0x345c45, + 0x37424a, + 0x26ac89, + 0x2996ca, + 0x3a2b49, + 0x33fec4, + 0x2cc305, + 0x2b6c48, + 0x2cd9cb, + 0x244245, + 0x2f2fc6, + 0x213e84, + 0x277746, + 0x3a3589, + 0x353707, + 0x3179c8, + 0x2a0a86, + 0x34de07, + 0x27e648, + 0x3747c6, + 0x375604, + 0x365ac7, + 0x357305, + 0x367287, + 0x200104, + 0x353146, + 0x2f4308, + 0x296dc8, + 0x2e6047, + 0x274fc8, + 0x294fc5, + 0x204e84, + 0x3711c8, + 0x2750c4, + 0x216e05, + 0x2f5fc4, + 0x2fdfc7, + 0x288c07, + 0x27f948, + 0x2c8d86, + 0x24d905, + 0x277008, + 0x2db6c8, + 0x29f209, + 0x226346, + 0x22f588, + 0x38bd4a, + 0x26df48, + 0x2e5d85, + 0x20b306, + 0x26ab48, + 0x277a4a, + 0x210f87, + 0x284c45, + 0x292708, + 0x2ade04, + 0x265886, + 0x2c1c08, + 0x204f06, + 0x38e7c8, + 0x28f187, + 0x203346, + 0x2b4b04, + 0x284fc7, + 0x2b0d84, + 0x3a3547, + 0x28e60d, + 0x27b645, + 0x2cd54b, + 0x29c606, + 0x24e848, + 0x240b44, + 0x350d06, + 0x27ae86, + 0x208fc7, + 0x29644d, + 0x243cc7, + 0x2b12c8, + 0x269b85, + 0x278648, + 0x2c4a86, + 0x295048, + 0x228086, + 0x33d987, + 0x300449, + 0x343ac7, + 0x287dc8, + 0x2706c5, + 0x21b608, + 0x21a3c5, + 0x3a4245, + 
0x3a2dc5, + 0x234543, + 0x2809c4, + 0x262545, + 0x337809, + 0x27ea46, + 0x2dc5c8, + 0x377485, + 0x2b2e87, + 0x2a78ca, + 0x2cc849, + 0x2cad4a, + 0x2d1d08, + 0x2276cc, + 0x2806cd, + 0x2fc003, + 0x38e6c8, + 0x3abd45, + 0x2b9286, + 0x379f86, + 0x2e58c5, + 0x313889, + 0x33cc45, + 0x277008, + 0x2552c6, + 0x347806, + 0x2a0289, + 0x393947, + 0x28ff06, + 0x2a7848, + 0x33bec8, + 0x2daec7, + 0x2ace4e, + 0x2c4cc5, + 0x349545, + 0x204e08, + 0x21fcc7, + 0x21a882, + 0x2bf984, + 0x334f8a, + 0x25de48, + 0x2fe546, + 0x298ac8, + 0x278cc6, + 0x332608, + 0x2ac008, + 0x3a4204, + 0x2b33c5, + 0x676384, + 0x676384, + 0x676384, + 0x202b43, + 0x21a6c6, + 0x2774c6, + 0x29cb0c, + 0x203383, + 0x27e146, + 0x2151c4, + 0x247e88, + 0x2d13c5, + 0x335086, + 0x2bcac8, + 0x2d2bc6, + 0x3815c6, + 0x245d08, + 0x2a1807, + 0x2ac9c9, + 0x2f214a, + 0x22b484, + 0x215305, + 0x2a96c5, + 0x247c06, + 0x313486, + 0x29d546, + 0x2f5546, + 0x2acb04, + 0x2acb0b, + 0x231804, + 0x29ccc5, + 0x2aad85, + 0x312806, + 0x3a6308, + 0x280587, + 0x317784, + 0x236203, + 0x2ad905, + 0x306047, + 0x28048b, + 0x207487, + 0x2bc9c8, + 0x2e62c7, + 0x370b06, + 0x279bc8, + 0x2a820b, + 0x21ff46, + 0x212309, + 0x2a8385, + 0x30a343, + 0x2cc906, + 0x28f088, + 0x213403, + 0x24f403, + 0x27e646, + 0x278cc6, + 0x35d10a, + 0x27a805, + 0x27accb, + 0x29da4b, + 0x23ef83, + 0x202843, + 0x2aec44, + 0x278a87, + 0x28f104, + 0x244504, + 0x2e6404, + 0x26e248, + 0x3b8588, + 0x3baf89, + 0x393188, + 0x2b9dc7, + 0x247846, + 0x2dc20f, + 0x2c4e06, + 0x2d1344, + 0x3b83ca, + 0x305f47, + 0x3b9606, + 0x28f589, + 0x3baf05, + 0x2076c5, + 0x3bb046, + 0x21b743, + 0x2ade49, + 0x21eec6, + 0x3afa89, + 0x382bc6, + 0x264305, + 0x2346c5, + 0x207343, + 0x278bc8, + 0x20d787, + 0x396784, + 0x247d08, + 0x2e1244, + 0x2f1006, + 0x2a8f86, + 0x23c346, + 0x2c8609, + 0x297645, + 0x3519c6, + 0x2582c9, + 0x2c41c6, + 0x26e2c6, + 0x387886, + 0x2160c5, + 0x2f5fc6, + 0x33d984, + 0x3b7705, + 0x2c0384, + 0x2b2246, + 0x3532c4, + 0x203c43, + 0x284745, + 0x2331c8, + 0x25e607, + 0x2b8209, + 
0x284b48, + 0x297e11, + 0x32cd4a, + 0x2e2307, + 0x2e7246, + 0x2151c4, + 0x2c0488, + 0x22e448, + 0x297fca, + 0x2b708d, + 0x268686, + 0x245e06, + 0x285086, + 0x21ba47, + 0x2b1385, + 0x3912c7, + 0x247dc5, + 0x2ae704, + 0x2a6206, + 0x224047, + 0x2adb4d, + 0x26aa87, + 0x21f308, + 0x277309, + 0x20b206, + 0x28e8c5, + 0x22cb04, + 0x3600c6, + 0x3a3846, + 0x25dfc6, + 0x299348, + 0x215f83, + 0x208fc3, + 0x352105, + 0x277dc6, + 0x2abfc5, + 0x2a0c88, + 0x29c10a, + 0x282084, + 0x247e88, + 0x295f08, + 0x312647, + 0x377549, + 0x2bc6c8, + 0x286a87, + 0x2587c6, + 0x204f0a, + 0x360148, + 0x2f98c9, + 0x2a1288, + 0x221609, + 0x2e7107, + 0x2f2f05, + 0x26d886, + 0x2c1288, + 0x27e7c8, + 0x296088, + 0x2e24c8, + 0x29ccc5, + 0x208a84, + 0x20d488, + 0x23e2c4, + 0x3a2944, + 0x264305, + 0x2917c7, + 0x312249, + 0x208dc7, + 0x210cc5, + 0x274846, + 0x34f606, + 0x212444, + 0x2a05c6, + 0x24d744, + 0x278546, + 0x312006, + 0x213246, + 0x200645, + 0x2a0b47, + 0x203503, + 0x2079c9, + 0x3076c8, + 0x247d04, + 0x28690d, + 0x296ec8, + 0x2e3788, + 0x2f9846, + 0x300549, + 0x2cc849, + 0x3a3285, + 0x29c20a, + 0x27cf4a, + 0x29d74c, + 0x29d8c6, + 0x275446, + 0x2c4f86, + 0x2b4749, + 0x2b94c6, + 0x29fa86, + 0x33cd06, + 0x25dc08, + 0x274fc6, + 0x2ce80b, + 0x291945, + 0x21ba45, + 0x2756c5, + 0x352a46, + 0x204ec3, + 0x23c2c6, + 0x26aa07, + 0x2c0345, + 0x320585, + 0x379445, + 0x318446, + 0x31da84, + 0x31da86, + 0x292f49, + 0x3528cc, + 0x2ae448, + 0x239184, + 0x2f5c06, + 0x29c706, + 0x28f088, + 0x216f48, + 0x3527c9, + 0x37a687, + 0x2367c9, + 0x24cfc6, + 0x22e904, + 0x20ea44, + 0x280144, + 0x27e648, + 0x31208a, + 0x345bc6, + 0x353cc7, + 0x362c47, + 0x347d85, + 0x2a9684, + 0x28cd46, + 0x2b13c6, + 0x2336c3, + 0x307507, + 0x38d588, + 0x3a33ca, + 0x2cbb88, + 0x281f08, + 0x353305, + 0x339645, + 0x260e05, + 0x348146, + 0x3ad906, + 0x26c085, + 0x3b1889, + 0x2a948c, + 0x260ec7, + 0x298048, + 0x2e5c05, + 0x676384, + 0x320944, + 0x252c04, + 0x22df86, + 0x29eb0e, + 0x207747, + 0x21bc45, + 0x24440c, + 0x2e1107, + 0x223fc7, + 
0x225109, + 0x217cc9, + 0x284c45, + 0x3076c8, + 0x215349, + 0x335c45, + 0x2c0288, + 0x2c2586, + 0x371446, + 0x2def04, + 0x2553c8, + 0x20b3c3, + 0x2af8c4, + 0x2ad985, + 0x3bab07, + 0x21c245, + 0x38bc09, + 0x28b30d, + 0x2a33c6, + 0x225fc4, + 0x2588c8, + 0x274a8a, + 0x2611c7, + 0x235d45, + 0x23b403, + 0x29dc0e, + 0x278ccc, + 0x2f5e07, + 0x29ecc7, + 0x200143, + 0x2b9505, + 0x252c05, + 0x298e88, + 0x295d49, + 0x239086, + 0x28f104, + 0x2e2246, + 0x27b5cb, + 0x2cc5cc, + 0x366d87, + 0x2d0305, + 0x3a1d48, + 0x2dac85, + 0x3b83c7, + 0x32cb87, + 0x247585, + 0x204ec3, + 0x26e584, + 0x21c685, + 0x2ac685, + 0x2ac686, + 0x292008, + 0x224047, + 0x37a286, + 0x26c486, + 0x3a2d06, + 0x268789, + 0x209387, + 0x25e286, + 0x2cc746, + 0x2731c6, + 0x2a7585, + 0x3b2b46, + 0x380145, + 0x329d88, + 0x29114b, + 0x28c346, + 0x362c84, + 0x2b4389, + 0x25da44, + 0x2c2508, + 0x30e2c7, + 0x281404, + 0x2bbd88, + 0x2c1684, + 0x2a75c4, + 0x286845, + 0x302986, + 0x26e187, + 0x203043, + 0x29d205, + 0x323284, + 0x349586, + 0x3a3308, + 0x38d2c5, + 0x290e09, + 0x222d05, + 0x2dbf88, + 0x215087, + 0x388588, + 0x2b8047, + 0x2fb609, + 0x264dc6, + 0x32bb46, + 0x28cac4, + 0x258705, + 0x2fce4c, + 0x2756c7, + 0x275fc7, + 0x362b08, + 0x2a33c6, + 0x26a944, + 0x328004, + 0x27fcc9, + 0x2c5086, + 0x298a07, + 0x208c04, + 0x23da46, + 0x33b785, + 0x2c88c7, + 0x2ce786, + 0x250e89, + 0x27cd87, + 0x262107, + 0x2a0106, + 0x23d985, + 0x27c548, + 0x21ed48, + 0x247a46, + 0x38d305, + 0x390586, + 0x2034c3, + 0x298d09, + 0x29d2ce, + 0x2b7d48, + 0x2e1348, + 0x24784b, + 0x291046, + 0x313104, + 0x2802c4, + 0x29d3ca, + 0x215907, + 0x25e345, + 0x212309, + 0x2bf685, + 0x3a2987, + 0x245c84, + 0x287087, + 0x2e40c8, + 0x2cd306, + 0x27b989, + 0x2bc7ca, + 0x215886, + 0x296a06, + 0x2aad05, + 0x37d085, + 0x282d07, + 0x244e48, + 0x33b6c8, + 0x3a4206, + 0x234745, + 0x31320e, + 0x2b3204, + 0x2479c5, + 0x2741c9, + 0x2dce48, + 0x28abc6, + 0x29af0c, + 0x29bd10, + 0x29e74f, + 0x29f7c8, + 0x300187, + 0x200645, + 0x262545, + 0x26e009, + 0x292909, + 
0x241946, + 0x2442c7, + 0x2d0cc5, + 0x337b09, + 0x339386, + 0x2b930d, + 0x280009, + 0x244504, + 0x2b7ac8, + 0x20d549, + 0x345d86, + 0x274945, + 0x32bb46, + 0x317889, + 0x2f3c48, + 0x20dcc5, + 0x2553c4, + 0x29b0cb, + 0x345c45, + 0x29b206, + 0x280a06, + 0x265e46, + 0x276d4b, + 0x290f09, + 0x26c3c5, + 0x388347, + 0x32ccc6, + 0x334dc6, + 0x252988, + 0x302a89, + 0x21f0cc, + 0x305e48, + 0x309e46, + 0x322c03, + 0x2ba886, + 0x276b85, + 0x27b008, + 0x234146, + 0x2c8b08, + 0x248b45, + 0x279305, + 0x32eb08, + 0x332787, + 0x379ec7, + 0x224207, + 0x334c48, + 0x3002c8, + 0x2ad486, + 0x2b2087, + 0x23bb07, + 0x276a4a, + 0x201e03, + 0x352a46, + 0x2392c5, + 0x334f84, + 0x277309, + 0x2fb584, + 0x25e684, + 0x29c584, + 0x29eccb, + 0x20d6c7, + 0x313445, + 0x294cc8, + 0x274846, + 0x274848, + 0x27a746, + 0x28b085, + 0x28b645, + 0x28d886, + 0x28ee48, + 0x28f4c8, + 0x2774c6, + 0x294b0f, + 0x2987d0, + 0x3a6005, + 0x203503, + 0x22e9c5, + 0x30a4c8, + 0x292809, + 0x335d88, + 0x268608, + 0x2b8d48, + 0x20d787, + 0x274509, + 0x2c8d08, + 0x265304, + 0x29c408, + 0x2d9e89, + 0x2b27c7, + 0x299d44, + 0x208e88, + 0x2a090a, + 0x2e77c6, + 0x268686, + 0x226209, + 0x29bf47, + 0x2cba08, + 0x204848, + 0x2ddd88, + 0x35cc45, + 0x37e005, + 0x21ba45, + 0x252bc5, + 0x3b5987, + 0x204ec5, + 0x2c0345, + 0x313686, + 0x335cc7, + 0x2cd907, + 0x2a0c06, + 0x2d2245, + 0x29b206, + 0x27ba85, + 0x2b58c8, + 0x2f4284, + 0x2c4246, + 0x33b5c4, + 0x2b0f48, + 0x2c434a, + 0x2790cc, + 0x334685, + 0x21bb06, + 0x21f286, + 0x351fc6, + 0x309ec4, + 0x33ba45, + 0x27a587, + 0x29bfc9, + 0x2cb4c7, + 0x676384, + 0x676384, + 0x317605, + 0x37b944, + 0x29a8ca, + 0x2746c6, + 0x279e04, + 0x3b9585, + 0x37e405, + 0x2b12c4, + 0x280647, + 0x222c87, + 0x2cafc8, + 0x33de88, + 0x20dcc9, + 0x29cd88, + 0x29aa8b, + 0x2318c4, + 0x366885, + 0x27cc85, + 0x224189, + 0x302a89, + 0x2b4288, + 0x30e048, + 0x2d6604, + 0x29c745, + 0x217083, + 0x247bc5, + 0x351a46, + 0x295b8c, + 0x208b06, + 0x36c3c6, + 0x28ae45, + 0x3184c8, + 0x2b7ec6, + 0x2e73c6, + 0x268686, + 
0x22920c, + 0x25e184, + 0x3a2e4a, + 0x28ad88, + 0x2959c7, + 0x323186, + 0x239147, + 0x2ec145, + 0x27eb46, + 0x34d406, + 0x35b847, + 0x25e6c4, + 0x2fe0c5, + 0x2741c4, + 0x2ae787, + 0x274408, + 0x2752ca, + 0x27d4c7, + 0x303407, + 0x300107, + 0x2dadc9, + 0x295b8a, + 0x21f083, + 0x25e5c5, + 0x213283, + 0x2e6449, + 0x33dc08, + 0x3709c7, + 0x335e89, + 0x21ee46, + 0x2b88c8, + 0x33a3c5, + 0x2db7ca, + 0x2d3549, + 0x2683c9, + 0x3b50c7, + 0x22e549, + 0x213148, + 0x35ba06, + 0x21bcc8, + 0x2160c7, + 0x2acc07, + 0x26ac87, + 0x2d0b48, + 0x2f5a86, + 0x2a06c5, + 0x27a587, + 0x296508, + 0x33b544, + 0x2dd244, + 0x28fe07, + 0x2ac387, + 0x2151ca, + 0x35b986, + 0x38c74a, + 0x2bf8c7, + 0x2b2fc7, + 0x246004, + 0x28aa44, + 0x2ce686, + 0x202d04, + 0x202d0c, + 0x3aff05, + 0x216d89, + 0x2d4f04, + 0x2b1385, + 0x274a08, + 0x279fc5, + 0x374246, + 0x223ec4, + 0x293c4a, + 0x2b00c6, + 0x29ba8a, + 0x22b447, + 0x21ac45, + 0x21b745, + 0x347dca, + 0x28efc5, + 0x26dfc6, + 0x23e2c4, + 0x2aedc6, + 0x282dc5, + 0x234206, + 0x2e604c, + 0x2cb14a, + 0x2587c4, + 0x247846, + 0x29bf47, + 0x2cf984, + 0x25dc08, + 0x393006, + 0x313089, + 0x2c7549, + 0x3164c9, + 0x26cb06, + 0x2161c6, + 0x21be07, + 0x3b17c8, + 0x215fc9, + 0x20d6c7, + 0x294e46, + 0x34de87, + 0x284f45, + 0x2b3204, + 0x21b9c7, + 0x23bcc5, + 0x286785, + 0x226987, + 0x247448, + 0x3a1cc6, + 0x29738d, + 0x29908f, + 0x29da4d, + 0x210d04, + 0x2332c6, + 0x2d3c08, + 0x33ccc5, + 0x276c08, + 0x24560a, + 0x244504, + 0x27bb46, + 0x26f3c7, + 0x286007, + 0x2a18c9, + 0x21bc85, + 0x2b12c4, + 0x2b330a, + 0x2bc289, + 0x22e647, + 0x265706, + 0x345d86, + 0x29c686, + 0x365b86, + 0x2d320f, + 0x2d3ac9, + 0x274fc6, + 0x22e346, + 0x31a809, + 0x2b2187, + 0x217443, + 0x229386, + 0x216a43, + 0x2e5788, + 0x34dcc7, + 0x29f9c9, + 0x2a8e08, + 0x37a008, + 0x203c86, + 0x208a49, + 0x242785, + 0x2b2244, + 0x2a99c7, + 0x2b47c5, + 0x210d04, + 0x313508, + 0x215bc4, + 0x2b1ec7, + 0x3545c6, + 0x357e85, + 0x2a1288, + 0x345c4b, + 0x331dc7, + 0x348046, + 0x2c4e84, + 0x319586, + 0x264305, + 
0x23bcc5, + 0x27c2c9, + 0x280249, + 0x2acc44, + 0x2acc85, + 0x247885, + 0x2db646, + 0x3077c8, + 0x2bf046, + 0x38d3cb, + 0x35ab4a, + 0x2b0e85, + 0x28b6c6, + 0x396485, + 0x2cf485, + 0x2a54c7, + 0x352cc8, + 0x2367c4, + 0x25f806, + 0x28f546, + 0x213307, + 0x30a304, + 0x27ae86, + 0x237cc5, + 0x237cc9, + 0x2163c4, + 0x2a9809, + 0x2774c6, + 0x2c0c08, + 0x247885, + 0x362d45, + 0x234206, + 0x21efc9, + 0x217cc9, + 0x36c446, + 0x2dcf48, + 0x244508, + 0x396444, + 0x2b3644, + 0x2b3648, + 0x326dc8, + 0x2368c9, + 0x3519c6, + 0x268686, + 0x32224d, + 0x2e3306, + 0x306c89, + 0x315fc5, + 0x3bb046, + 0x391408, + 0x31d9c5, + 0x23bb44, + 0x264305, + 0x27fb48, + 0x29a689, + 0x274284, + 0x353146, + 0x279e8a, + 0x2f5d08, + 0x215349, + 0x38174a, + 0x335e06, + 0x299248, + 0x3b8185, + 0x2e0908, + 0x2b8145, + 0x21ed09, + 0x36a349, + 0x20d8c2, + 0x2a8385, + 0x269746, + 0x277407, + 0x3b05c5, + 0x308986, + 0x301448, + 0x2a33c6, + 0x2b6b09, + 0x2760c6, + 0x252808, + 0x2a89c5, + 0x23ebc6, + 0x33da88, + 0x27e648, + 0x2e7008, + 0x345f08, + 0x3b2b44, + 0x22a183, + 0x2b6d44, + 0x27d6c6, + 0x284f84, + 0x2e1287, + 0x2e72c9, + 0x2c45c5, + 0x204846, + 0x229386, + 0x291e4b, + 0x2b0dc6, + 0x3b8cc6, + 0x2c8488, + 0x3204c6, + 0x21aa43, + 0x3af743, + 0x2b3204, + 0x22f485, + 0x2b1807, + 0x274408, + 0x27440f, + 0x27a48b, + 0x3075c8, + 0x3531c6, + 0x3078ce, + 0x2319c3, + 0x2b1784, + 0x2b0d45, + 0x2b1146, + 0x28ce4b, + 0x291886, + 0x21b249, + 0x357e85, + 0x3899c8, + 0x20c688, + 0x217b8c, + 0x29ed06, + 0x247c06, + 0x2d7985, + 0x287b88, + 0x2790c5, + 0x343088, + 0x29b28a, + 0x29de89, + 0x676384, + 0x38a099c2, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x38d2c3, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x3b1384, + 0x205503, + 0x200983, + 0x20cf83, + 0x25ef44, + 0x2a84c3, + 0x235ac4, + 0x232403, + 0x2d5f04, + 0x2e9dc3, + 0x3b0887, + 0x209703, + 0x204e83, + 0x28b148, + 0x200983, + 0x2ae1cb, + 0x2ec883, + 0x264a86, + 0x20b0c2, + 0x22d54b, + 0x232403, + 0x2e9dc3, + 0x205503, + 
0x200983, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x200983, + 0x26be43, + 0x204783, + 0x205702, + 0x16d208, + 0x325f45, + 0x23bd48, + 0x2df7c8, + 0x2099c2, + 0x37ab45, + 0x38c347, + 0x2007c2, + 0x240d87, + 0x20d882, + 0x248707, + 0x32c589, + 0x3b7d48, + 0x2ddc09, + 0x23e202, + 0x263647, + 0x36c1c4, + 0x38c407, + 0x35aa47, + 0x2bbbc2, + 0x209703, + 0x20e602, + 0x200c82, + 0x200442, + 0x2013c2, + 0x205ec2, + 0x209842, + 0x2a80c5, + 0x320885, + 0x99c2, + 0x32403, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x12083, + 0x1ec1, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x3b1384, + 0x244183, + 0x205503, + 0x200983, + 0x219503, + 0x3b819d06, + 0x13f443, + 0x7df85, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2099c2, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x4a82, + 0x16d208, + 0x44e04, + 0xdb085, + 0x205702, + 0x26f544, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x2358c3, + 0x2a9305, + 0x244183, + 0x206343, + 0x205503, + 0x21c2c3, + 0x200983, + 0x214843, + 0x2387c3, + 0x25ed03, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2099c2, + 0x200983, + 0x16d208, + 0x2e9dc3, + 0x16d208, + 0x200c03, + 0x2a84c3, + 0x22fd84, + 0x232403, + 0x2e9dc3, + 0x202bc2, + 0x209703, + 0x205503, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x202bc2, + 0x227f83, + 0x205503, + 0x200983, + 0x2e87c3, + 0x214843, + 0x205702, + 0x2099c2, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x264a85, + 0xe4886, + 0x25ef44, + 0x20b0c2, + 0x16d208, + 0x205702, + 0x1d848, + 0x1b4183, + 0x2099c2, + 0x3fc91386, + 0x1320c4, + 0xd95cb, + 0x13eec6, + 0x9807, + 0x232403, + 0x47208, + 0x2e9dc3, + 0xb9b45, + 0x13fb84, + 0x260f83, + 0x4ce87, + 0xd78c4, + 0x205503, + 0x7f1c4, + 0x200983, + 0x2ed844, + 0xd9388, + 0x125c86, + 0x82b48, + 0x6cf05, + 0x1fa49, + 0x2099c2, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x204e83, + 0x200983, + 0x2ec883, + 0x20b0c2, + 0x16d208, + 0x2a84c3, + 
0x232403, + 0x2e9dc3, + 0x24a5c3, + 0x211cc4, + 0x205503, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2d5f04, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x264a86, + 0x232403, + 0x2e9dc3, + 0x176e43, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x9807, + 0x16d208, + 0x2e9dc3, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x426a84c3, + 0x232403, + 0x205503, + 0x200983, + 0x16d208, + 0x205702, + 0x2099c2, + 0x2a84c3, + 0x2e9dc3, + 0x205503, + 0x200442, + 0x200983, + 0x316e87, + 0x33e6cb, + 0x22d703, + 0x241608, + 0x3b1547, + 0x20a7c6, + 0x2c2c45, + 0x372349, + 0x209488, + 0x360d49, + 0x38f790, + 0x360d4b, + 0x39e189, + 0x201b03, + 0x20fb89, + 0x230f06, + 0x230f0c, + 0x326008, + 0x3b4f08, + 0x34af09, + 0x2905ce, + 0x2dd9cb, + 0x2f364c, + 0x2030c3, + 0x263d0c, + 0x207089, + 0x2fee47, + 0x23234c, + 0x3a89ca, + 0x2030c4, + 0x2d084d, + 0x263bc8, + 0x20cf8d, + 0x273846, + 0x28decb, + 0x283349, + 0x3b8b87, + 0x32fd06, + 0x330f89, + 0x351b8a, + 0x30b148, + 0x2ec484, + 0x2fba07, + 0x34f707, + 0x2bab04, + 0x37b5c4, + 0x22a749, + 0x281d49, + 0x22ae48, + 0x210785, + 0x3b4005, + 0x20db86, + 0x2d0709, + 0x24588d, + 0x2f30c8, + 0x20da87, + 0x2c2cc8, + 0x2e1886, + 0x38b6c4, + 0x3523c5, + 0x202986, + 0x204b04, + 0x206f87, + 0x20b8ca, + 0x212244, + 0x2157c6, + 0x216a09, + 0x216a0f, + 0x21788d, + 0x2184c6, + 0x21d450, + 0x21d846, + 0x21df87, + 0x21e4c7, + 0x21e4cf, + 0x21f6c9, + 0x224c46, + 0x225347, + 0x225348, + 0x225809, + 0x246088, + 0x2e52c7, + 0x20cc83, + 0x372986, + 0x3ba948, + 0x29088a, + 0x213c09, + 0x2095c3, + 0x38c246, + 0x25f64a, + 0x29e587, + 0x2fec8a, + 0x313d4e, + 0x21f806, + 0x2a8587, + 0x20e006, + 0x207146, + 0x37de0b, + 0x20414a, + 0x317f0d, + 0x216287, + 0x33ce88, + 0x33ce89, + 0x33ce8f, + 0x2b838c, + 0x27b289, + 0x2e6a0e, + 0x3b098a, + 0x2ba246, + 0x2f4586, + 0x30b58c, + 0x30ce8c, + 0x30dc08, + 0x3439c7, + 0x2b8c45, + 0x351e04, + 0x33c90e, + 0x228d04, + 0x351747, + 0x26030a, + 0x362554, + 0x36dd8f, + 0x21e688, + 0x372848, + 
0x35040d, + 0x35040e, + 0x376ec9, + 0x3a8ec8, + 0x3a8ecf, + 0x23204c, + 0x23204f, + 0x233007, + 0x236dca, + 0x2435cb, + 0x238508, + 0x239cc7, + 0x3690cd, + 0x250406, + 0x2d0a06, + 0x23c149, + 0x394648, + 0x242088, + 0x24208e, + 0x2b5007, + 0x243885, + 0x244bc5, + 0x2063c4, + 0x20aa86, + 0x22ad48, + 0x202203, + 0x2ca10e, + 0x369488, + 0x2a2fcb, + 0x200dc7, + 0x3a4045, + 0x22e206, + 0x2aa0c7, + 0x333d08, + 0x26cd09, + 0x292e45, + 0x284788, + 0x212c06, + 0x38ad4a, + 0x33c809, + 0x232409, + 0x23240b, + 0x38dc48, + 0x2ba9c9, + 0x210846, + 0x22eb8a, + 0x2dc80a, + 0x236fcc, + 0x3a6687, + 0x32c38a, + 0x26ea8b, + 0x26ea99, + 0x3b6a88, + 0x264b05, + 0x2c6086, + 0x211e49, + 0x390746, + 0x28550a, + 0x209686, + 0x202644, + 0x2c620d, + 0x202647, + 0x211149, + 0x246385, + 0x2464c8, + 0x246fc9, + 0x247784, + 0x248387, + 0x248388, + 0x248c87, + 0x261908, + 0x24d487, + 0x26c645, + 0x25488c, + 0x2550c9, + 0x2bc00a, + 0x3937c9, + 0x20fc89, + 0x275a0c, + 0x25774b, + 0x257ec8, + 0x259048, + 0x25c404, + 0x2810c8, + 0x283c89, + 0x3a8a87, + 0x216c46, + 0x2835c7, + 0x2dcac9, + 0x26e6cb, + 0x319407, + 0x200a07, + 0x22b587, + 0x20cf04, + 0x20cf05, + 0x29a545, + 0x341c0b, + 0x39c644, + 0x3b2988, + 0x26614a, + 0x212cc7, + 0x2f6707, + 0x28bed2, + 0x278446, + 0x22f706, + 0x33c24e, + 0x27aa06, + 0x292588, + 0x29374f, + 0x20d348, + 0x37f308, + 0x30eaca, + 0x30ead1, + 0x2a0e8e, + 0x24dd0a, + 0x24dd0c, + 0x21e307, + 0x3a90d0, + 0x200408, + 0x2a1085, + 0x2aa4ca, + 0x204b4c, + 0x29518d, + 0x2f7e46, + 0x2f7e47, + 0x2f7e4c, + 0x300e4c, + 0x3292cc, + 0x2873cb, + 0x284184, + 0x226384, + 0x346d89, + 0x3050c7, + 0x225e49, + 0x37e909, + 0x39f1c7, + 0x3a8846, + 0x3a8849, + 0x2ad1c3, + 0x21c74a, + 0x31a287, + 0x33eb8b, + 0x317d8a, + 0x248844, + 0x22ba46, + 0x27d749, + 0x202b84, + 0x3affca, + 0x348345, + 0x2bdd45, + 0x2bdd4d, + 0x2be08e, + 0x28cc05, + 0x323906, + 0x264687, + 0x3870ca, + 0x39b686, + 0x3616c4, + 0x36d747, + 0x2c3f0b, + 0x2e1947, + 0x33fa84, + 0x24bb86, + 0x24bb8d, + 0x21e1cc, + 0x2053c6, + 
0x2f32ca, + 0x2e03c6, + 0x2ed0c8, + 0x377c47, + 0x23568a, + 0x23d6c6, + 0x216183, + 0x391586, + 0x3ba7c8, + 0x29ac8a, + 0x275807, + 0x275808, + 0x281684, + 0x24b687, + 0x279348, + 0x2bd748, + 0x27c0c8, + 0x38c94a, + 0x2da905, + 0x2cf0c7, + 0x24db53, + 0x31e806, + 0x266348, + 0x221a09, + 0x240c48, + 0x203d0b, + 0x2cb608, + 0x2a5f44, + 0x32ec06, + 0x30bac6, + 0x3027c9, + 0x2c3dc7, + 0x254988, + 0x28af06, + 0x226884, + 0x2cb8c5, + 0x2c55c8, + 0x2c5bca, + 0x2c5e88, + 0x2cbf86, + 0x29944a, + 0x2ac808, + 0x2cf788, + 0x2d18c8, + 0x2d1f06, + 0x2d3e06, + 0x38e18c, + 0x2d43d0, + 0x27d2c5, + 0x20d148, + 0x301950, + 0x20d150, + 0x38f60e, + 0x38de0e, + 0x38de14, + 0x32fe8f, + 0x330246, + 0x332d51, + 0x33d213, + 0x33d688, + 0x3b3445, + 0x241b48, + 0x386245, + 0x329a8c, + 0x291549, + 0x228b49, + 0x3201c7, + 0x236b89, + 0x380887, + 0x2f6146, + 0x3521c7, + 0x269c45, + 0x2120c3, + 0x2023c9, + 0x221cc9, + 0x376e43, + 0x27f384, + 0x32a20d, + 0x206bcf, + 0x2268c5, + 0x329986, + 0x211407, + 0x325d87, + 0x288786, + 0x28878b, + 0x2a2405, + 0x256786, + 0x2f6c07, + 0x24e489, + 0x3a7486, + 0x21d305, + 0x22854b, + 0x235946, + 0x249245, + 0x357988, + 0x306a88, + 0x2c8f0c, + 0x2c8f10, + 0x2d2409, + 0x2ffd07, + 0x32840b, + 0x2e3b86, + 0x2e518a, + 0x2e754b, + 0x2e794a, + 0x2e7bc6, + 0x2e8685, + 0x319fc6, + 0x36c808, + 0x32028a, + 0x35009c, + 0x2ec94c, + 0x2ecc48, + 0x264a85, + 0x34ea07, + 0x26bec6, + 0x274e05, + 0x21afc6, + 0x288948, + 0x2bc507, + 0x2904c8, + 0x2a868a, + 0x33130c, + 0x331589, + 0x38b847, + 0x2198c4, + 0x244c86, + 0x37ee8a, + 0x37ea05, + 0x209f8c, + 0x20e648, + 0x367388, + 0x21a00c, + 0x22550c, + 0x225a09, + 0x225c47, + 0x231d4c, + 0x23aa84, + 0x23c60a, + 0x35e6cc, + 0x26b28b, + 0x242b8b, + 0x2efec6, + 0x24a107, + 0x24c687, + 0x3a930f, + 0x2f8a51, + 0x2d8592, + 0x24c68d, + 0x24c68e, + 0x24c9ce, + 0x330048, + 0x330052, + 0x24fbc8, + 0x3b1187, + 0x24aeca, + 0x3681c8, + 0x27a9c5, + 0x3b57ca, + 0x21dd87, + 0x2e36c4, + 0x201543, + 0x2a57c5, + 0x30ed47, + 0x2f5007, + 0x29538e, + 
0x3382cd, + 0x33af89, + 0x222705, + 0x35c3c3, + 0x3a78c6, + 0x36e745, + 0x2a3208, + 0x205b49, + 0x2983c5, + 0x3692cf, + 0x2d96c7, + 0x372285, + 0x20178a, + 0x2a36c6, + 0x2ed249, + 0x396ccc, + 0x2f51c9, + 0x3abdc6, + 0x265f4c, + 0x322d06, + 0x2f7588, + 0x2f7786, + 0x3b6c06, + 0x3b96c4, + 0x258243, + 0x2a1fca, + 0x327191, + 0x3a9c0a, + 0x27ee85, + 0x265047, + 0x252207, + 0x279444, + 0x27944b, + 0x3b7bc8, + 0x2b7bc6, + 0x362b85, + 0x38b044, + 0x255f09, + 0x31ad84, + 0x254f07, + 0x32f345, + 0x32f347, + 0x33c485, + 0x2a8183, + 0x3b1048, + 0x33b80a, + 0x203043, + 0x325f8a, + 0x203046, + 0x36904f, + 0x2b4f89, + 0x2ca090, + 0x2f1548, + 0x2ccc89, + 0x2971c7, + 0x24bb0f, + 0x336244, + 0x2d5f84, + 0x21d6c6, + 0x22f246, + 0x25708a, + 0x23cc46, + 0x2f58c7, + 0x300788, + 0x300987, + 0x301207, + 0x30370a, + 0x30534b, + 0x2f3dc5, + 0x2d81c8, + 0x21bb03, + 0x23800c, + 0x36f78f, + 0x2b8a4d, + 0x2a7147, + 0x33b0c9, + 0x22bcc7, + 0x24a2c8, + 0x36274c, + 0x2a5e48, + 0x250bc8, + 0x318ace, + 0x32d354, + 0x32d864, + 0x3475ca, + 0x36148b, + 0x380944, + 0x380949, + 0x27bbc8, + 0x245345, + 0x201d0a, + 0x3696c7, + 0x26f744, + 0x38d2c3, + 0x2a84c3, + 0x235ac4, + 0x232403, + 0x2e9dc3, + 0x3b1384, + 0x244183, + 0x209703, + 0x2d43c6, + 0x211cc4, + 0x205503, + 0x200983, + 0x201303, + 0x205702, + 0x38d2c3, + 0x2099c2, + 0x2a84c3, + 0x235ac4, + 0x232403, + 0x2e9dc3, + 0x244183, + 0x2d43c6, + 0x205503, + 0x200983, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2163c3, + 0x205503, + 0x200983, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x211cc4, + 0x205503, + 0x200983, + 0x205702, + 0x2bb143, + 0x2099c2, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x201ec2, + 0x219f02, + 0x2099c2, + 0x2a84c3, + 0x202242, + 0x201fc2, + 0x3b1384, + 0x210444, + 0x227382, + 0x211cc4, + 0x200442, + 0x200983, + 0x201303, + 0x2efec6, + 0x212982, + 0x202dc2, + 0x222f02, + 0x44e0d343, + 0x4521e303, + 0x52d46, + 0x52d46, + 0x25ef44, + 0x204e83, + 0x142abca, + 0x12778c, + 0x102cc, + 0x7dd8d, + 
0x129845, + 0x21347, + 0x18648, + 0x1b887, + 0x20348, + 0x19d4ca, + 0x45ed6a45, + 0x12b809, + 0xaf848, + 0x4a70a, + 0x8a64e, + 0x1440a4b, + 0x1320c4, + 0x77848, + 0x68bc8, + 0x38f47, + 0x12807, + 0x4efc9, + 0x2c07, + 0xd4ac8, + 0x1318c9, + 0x3adc5, + 0x124d4e, + 0xa8a0d, + 0x9688, + 0x4622a586, + 0x46c2a588, + 0x70cc8, + 0x117090, + 0x5f347, + 0x601c7, + 0x64547, + 0x69447, + 0xdb42, + 0x190bc7, + 0x430c, + 0x35fc7, + 0xa4246, + 0xa4909, + 0xa6388, + 0x17f42, + 0x1fc2, + 0xb8fcb, + 0x7f247, + 0x11809, + 0xbb9c9, + 0x17e248, + 0xafd42, + 0x113a49, + 0xcdf8a, + 0xc9e09, + 0xd6fc9, + 0xd7ac8, + 0xd8a47, + 0xda889, + 0xde345, + 0xde6d0, + 0x175b86, + 0x192345, + 0x5e98d, + 0xf986, + 0xe9187, + 0xed858, + 0x1b1a48, + 0xb4c8a, + 0x1c42, + 0x52f4d, + 0x27c2, + 0x5d306, + 0x8d108, + 0x86ec8, + 0x16d0c9, + 0x55b08, + 0x5fb4e, + 0x1a78c7, + 0x19d0d, + 0xf2d05, + 0x190948, + 0x194448, + 0xfacc6, + 0xc2, + 0x125c86, + 0x7b02, + 0x341, + 0x57a07, + 0xc8e83, + 0x466ee0c4, + 0x46a94443, + 0x141, + 0x10986, + 0x141, + 0x1, + 0x10986, + 0xc8e83, + 0x1596bc5, + 0x2030c4, + 0x2a84c3, + 0x249944, + 0x3b1384, + 0x205503, + 0x2218c5, + 0x219503, + 0x23e743, + 0x373605, + 0x25ed03, + 0x47ea84c3, + 0x232403, + 0x2e9dc3, + 0x200041, + 0x209703, + 0x210444, + 0x211cc4, + 0x205503, + 0x200983, + 0x214843, + 0x16d208, + 0x205702, + 0x38d2c3, + 0x2099c2, + 0x2a84c3, + 0x232403, + 0x2163c3, + 0x201fc2, + 0x3b1384, + 0x244183, + 0x209703, + 0x205503, + 0x204e83, + 0x200983, + 0x25ed03, + 0x16d208, + 0x36f502, + 0x99c2, + 0x1456108, + 0x100b4e, + 0x48e016c2, + 0x31a448, + 0x234386, + 0x209cc6, + 0x233d07, + 0x4920c202, + 0x49768ec8, + 0x20884a, + 0x25cc88, + 0x200242, + 0x31a0c9, + 0x2f3e07, + 0x216bc6, + 0x3b0d89, + 0x2cf204, + 0x20a6c6, + 0x2dbcc4, + 0x26ffc4, + 0x2544c9, + 0x326686, + 0x320945, + 0x22c445, + 0x384e07, + 0x2bfb47, + 0x28fa44, + 0x233f46, + 0x2fb005, + 0x2fde45, + 0x3963c5, + 0x3b3dc7, + 0x200c05, + 0x314b49, + 0x312945, + 0x333e44, + 0x39b5c7, + 0x31974e, + 0x32e5c9, + 
0x33c109, + 0x3a64c6, + 0x23d408, + 0x26d98b, + 0x2aeecc, + 0x37f806, + 0x2dd887, + 0x20a305, + 0x37b5ca, + 0x22af49, + 0x20bf49, + 0x24ff86, + 0x2f69c5, + 0x27ce45, + 0x3490c9, + 0x39654b, + 0x273346, + 0x33a786, + 0x202504, + 0x28bb86, + 0x243908, + 0x3ba646, + 0x214386, + 0x207c08, + 0x20bb47, + 0x20bd09, + 0x20c585, + 0x16d208, + 0x212784, + 0x3ada04, + 0x283785, + 0x399a49, + 0x220f07, + 0x220f0b, + 0x22394a, + 0x227a45, + 0x49a08d42, + 0x33ea47, + 0x49e28908, + 0x2afb87, + 0x350e85, + 0x20c1ca, + 0x99c2, + 0x34dfcb, + 0x24d5ca, + 0x221bc6, + 0x282bc3, + 0x28e34d, + 0x3492cc, + 0x35084d, + 0x245c45, + 0x32ae05, + 0x202247, + 0x3aba49, + 0x208746, + 0x23cac5, + 0x2d29c8, + 0x28ba83, + 0x2dfac8, + 0x28ba88, + 0x2c3747, + 0x309708, + 0x3a7209, + 0x2cc447, + 0x33e247, + 0x396a48, + 0x251f44, + 0x251f47, + 0x273748, + 0x3a3ac6, + 0x205f4f, + 0x211a07, + 0x2e5446, + 0x225d85, + 0x223083, + 0x371847, + 0x36c043, + 0x248e46, + 0x24aa86, + 0x24b286, + 0x290c05, + 0x261903, + 0x388208, + 0x36f009, + 0x38224b, + 0x24b408, + 0x24d145, + 0x24f605, + 0x4a248902, + 0x352289, + 0x3b1407, + 0x256805, + 0x2543c7, + 0x2559c6, + 0x365a45, + 0x36e58b, + 0x257ec4, + 0x25c845, + 0x25c987, + 0x272cc6, + 0x273105, + 0x2812c7, + 0x281a07, + 0x2cd884, + 0x289c0a, + 0x28a0c8, + 0x3b8209, + 0x241e85, + 0x207886, + 0x243aca, + 0x22c346, + 0x261e07, + 0x3b7ecd, + 0x29c809, + 0x38d185, + 0x314187, + 0x332288, + 0x33d848, + 0x3b3107, + 0x379d86, + 0x215dc7, + 0x249f43, + 0x341c04, + 0x363485, + 0x392707, + 0x395dc9, + 0x22be48, + 0x344c45, + 0x23cd84, + 0x246245, + 0x24b80d, + 0x200f82, + 0x373746, + 0x25d246, + 0x2c578a, + 0x376546, + 0x37edc5, + 0x33df85, + 0x33df87, + 0x38ab8c, + 0x270b4a, + 0x28b846, + 0x2b9645, + 0x28b9c6, + 0x28bd07, + 0x28e186, + 0x290b0c, + 0x3b0ec9, + 0x4a610e07, + 0x293b05, + 0x293b06, + 0x293ec8, + 0x23b705, + 0x2a2c85, + 0x2a3848, + 0x2a3a4a, + 0x4aa4ecc2, + 0x4ae0ee02, + 0x2e6705, + 0x284f83, + 0x3adf08, + 0x204043, + 0x2a3cc4, + 0x2ed38b, + 0x26dd48, + 0x2e4d48, 
+ 0x4b349909, + 0x2a7dc9, + 0x2a8906, + 0x2a9d48, + 0x2a9f49, + 0x2aab46, + 0x2aacc5, + 0x3843c6, + 0x2ab5c9, + 0x331f47, + 0x23ea86, + 0x233747, + 0x2085c7, + 0x32c8c4, + 0x4b7b1d49, + 0x2cab88, + 0x368dc8, + 0x383447, + 0x2c5246, + 0x226ac9, + 0x209c87, + 0x32e90a, + 0x38c588, + 0x3af5c7, + 0x3b9786, + 0x24f38a, + 0x262708, + 0x2dccc5, + 0x226645, + 0x2ee487, + 0x2f7349, + 0x36510b, + 0x315008, + 0x3129c9, + 0x24bfc7, + 0x2b550c, + 0x2b5c4c, + 0x2b5f4a, + 0x2b61cc, + 0x2c2708, + 0x2c2908, + 0x2c2b04, + 0x2c2ec9, + 0x2c3109, + 0x2c334a, + 0x2c35c9, + 0x2c3907, + 0x3af00c, + 0x241146, + 0x34acc8, + 0x22c406, + 0x32e7c6, + 0x38d087, + 0x3b3288, + 0x39034b, + 0x2afa47, + 0x352489, + 0x3445c9, + 0x249ac7, + 0x278a04, + 0x265187, + 0x2db346, + 0x214a06, + 0x2f3485, + 0x2a5888, + 0x291444, + 0x291446, + 0x270a0b, + 0x21ca49, + 0x214b46, + 0x21c489, + 0x3b3f46, + 0x254688, + 0x223b83, + 0x2f6b45, + 0x22edc9, + 0x261145, + 0x2f9684, + 0x272206, + 0x231545, + 0x228f86, + 0x3056c7, + 0x26e986, + 0x3a304b, + 0x22ea87, + 0x3379c6, + 0x346f06, + 0x384ec6, + 0x28fa09, + 0x2ef14a, + 0x2b3505, + 0x2170cd, + 0x2a3b46, + 0x235546, + 0x2b4e86, + 0x2ed045, + 0x2de9c7, + 0x2e14c7, + 0x3581ce, + 0x209703, + 0x2c5209, + 0x391dc9, + 0x37b9c7, + 0x358f07, + 0x29d645, + 0x27ec45, + 0x4ba2a88f, + 0x2ccec7, + 0x2cd088, + 0x2cd484, + 0x2cde46, + 0x4be44c42, + 0x2d2186, + 0x2d43c6, + 0x391f8e, + 0x2df90a, + 0x357b06, + 0x285eca, + 0x203549, + 0x324105, + 0x398008, + 0x3b5606, + 0x38cec8, + 0x26f088, + 0x28eb8b, + 0x233e05, + 0x200c88, + 0x207d4c, + 0x2bd507, + 0x24ae06, + 0x2e28c8, + 0x20a948, + 0x4c208442, + 0x20a48b, + 0x282549, + 0x329f09, + 0x3bb287, + 0x20f7c8, + 0x4c61bf48, + 0x3511cb, + 0x37e0c9, + 0x234fcd, + 0x2750c8, + 0x224a48, + 0x4ca03ec2, + 0x20e3c4, + 0x4ce1a2c2, + 0x2f4ec6, + 0x4d2004c2, + 0x3813ca, + 0x21c346, + 0x285908, + 0x284488, + 0x2af446, + 0x22d8c6, + 0x2f12c6, + 0x2a3185, + 0x238c04, + 0x4d61e144, + 0x205146, + 0x272707, + 0x4dae8bc7, + 0x35490b, + 0x319b09, + 
0x32ae4a, + 0x391804, + 0x33e0c8, + 0x23e84d, + 0x2eb709, + 0x2eb948, + 0x2ebfc9, + 0x2ed844, + 0x243484, + 0x27c885, + 0x317b4b, + 0x26dcc6, + 0x3424c5, + 0x250149, + 0x234008, + 0x2047c4, + 0x37b749, + 0x208105, + 0x2bfb88, + 0x33e907, + 0x33c508, + 0x27d946, + 0x35e387, + 0x292349, + 0x2286c9, + 0x2492c5, + 0x334ec5, + 0x4de2d902, + 0x333c04, + 0x2049c5, + 0x32c146, + 0x318385, + 0x2b1ac7, + 0x205245, + 0x272d04, + 0x3a6586, + 0x23cb47, + 0x232986, + 0x2dca05, + 0x203188, + 0x234585, + 0x2062c7, + 0x20f1c9, + 0x21cb8a, + 0x2e1b87, + 0x2e1b8c, + 0x320906, + 0x343cc9, + 0x23b385, + 0x23b648, + 0x210803, + 0x210805, + 0x2e8a05, + 0x261607, + 0x4e20c002, + 0x22d0c7, + 0x2e4f06, + 0x342786, + 0x2e7d06, + 0x20a886, + 0x208388, + 0x241c85, + 0x2e5507, + 0x2e550d, + 0x201543, + 0x21ec05, + 0x201547, + 0x22d408, + 0x201105, + 0x218c88, + 0x36c0c6, + 0x32b9c7, + 0x2c4785, + 0x233e86, + 0x26f5c5, + 0x21390a, + 0x2f2e06, + 0x377ac7, + 0x2ca505, + 0x3612c7, + 0x36d6c4, + 0x2f9606, + 0x2fb3c5, + 0x32648b, + 0x2db1c9, + 0x2bb24a, + 0x249348, + 0x301d08, + 0x304a4c, + 0x306287, + 0x3073c8, + 0x310a48, + 0x31e945, + 0x34020a, + 0x35c3c9, + 0x4e600802, + 0x200806, + 0x219d04, + 0x2ea849, + 0x220b49, + 0x269287, + 0x294947, + 0x37e789, + 0x38cb48, + 0x38cb4f, + 0x315d06, + 0x2d670b, + 0x36e8c5, + 0x36e8c7, + 0x385889, + 0x212ac6, + 0x37b6c7, + 0x2d8905, + 0x2303c4, + 0x261006, + 0x211ac4, + 0x2ce4c7, + 0x307048, + 0x4eaf68c8, + 0x2f7085, + 0x2f71c7, + 0x236549, + 0x23e284, + 0x23e288, + 0x4ee2b888, + 0x279444, + 0x231388, + 0x32fdc4, + 0x3ab849, + 0x2173c5, + 0x4f20b0c2, + 0x315d45, + 0x2e4345, + 0x251288, + 0x232e47, + 0x4f601442, + 0x204785, + 0x2cf606, + 0x24b106, + 0x333bc8, + 0x302108, + 0x318346, + 0x327f06, + 0x2e2e49, + 0x3426c6, + 0x21298b, + 0x296305, + 0x368106, + 0x377088, + 0x250506, + 0x292cc6, + 0x21914a, + 0x23084a, + 0x245005, + 0x241d47, + 0x308786, + 0x4fa01682, + 0x201687, + 0x238705, + 0x243a44, + 0x243a45, + 0x391706, + 0x26a447, + 0x219a85, + 0x220c04, + 
0x2c7e88, + 0x292d85, + 0x333a47, + 0x3a1645, + 0x213845, + 0x256e04, + 0x287609, + 0x2fae48, + 0x2e0286, + 0x2d9d06, + 0x2b6e46, + 0x4fefbc88, + 0x2fbe87, + 0x2fc0cd, + 0x2fcb4c, + 0x2fd149, + 0x2fd389, + 0x5035b2c2, + 0x3a8603, + 0x207943, + 0x2db405, + 0x39280a, + 0x327dc6, + 0x302385, + 0x305884, + 0x30588b, + 0x31b70c, + 0x31c14c, + 0x31c455, + 0x31d74d, + 0x320a8f, + 0x320e52, + 0x3212cf, + 0x321692, + 0x321b13, + 0x321fcd, + 0x32258d, + 0x32290e, + 0x322e8e, + 0x3236cc, + 0x323a8c, + 0x323ecb, + 0x32424e, + 0x325392, + 0x327b8c, + 0x328790, + 0x335212, + 0x33640c, + 0x336acd, + 0x336e0c, + 0x339a51, + 0x33a90d, + 0x34084d, + 0x340e4a, + 0x3410cc, + 0x3419cc, + 0x3421cc, + 0x34290c, + 0x344dd3, + 0x345450, + 0x345850, + 0x34610d, + 0x34670c, + 0x347309, + 0x34890d, + 0x348c53, + 0x34a311, + 0x34a753, + 0x34b24f, + 0x34b60c, + 0x34b90f, + 0x34bccd, + 0x34c2cf, + 0x34c690, + 0x34d10e, + 0x3539ce, + 0x353f50, + 0x35518d, + 0x355b0e, + 0x355e8c, + 0x356e93, + 0x35934e, + 0x3599d0, + 0x359dd1, + 0x35a20f, + 0x35a5d3, + 0x35ae4d, + 0x35b18f, + 0x35b54e, + 0x35bc10, + 0x35c009, + 0x35cd90, + 0x35d38f, + 0x35da0f, + 0x35ddd2, + 0x35efce, + 0x35fc4d, + 0x36070d, + 0x360a4d, + 0x36184d, + 0x361b8d, + 0x361ed0, + 0x3622cb, + 0x36324c, + 0x3635cc, + 0x363bcc, + 0x363ece, + 0x371a10, + 0x372dd2, + 0x37324b, + 0x3738ce, + 0x373c4e, + 0x3744ce, + 0x37494b, + 0x50774f56, + 0x37624d, + 0x3766d4, + 0x377e0d, + 0x37b115, + 0x37c40d, + 0x37cd8f, + 0x37d5cf, + 0x38250f, + 0x3828ce, + 0x382e4d, + 0x383f91, + 0x38674c, + 0x386a4c, + 0x386d4b, + 0x38764c, + 0x387a0f, + 0x387dd2, + 0x38878d, + 0x38974c, + 0x389bcc, + 0x389ecd, + 0x38a20f, + 0x38a5ce, + 0x3924cc, + 0x392a8d, + 0x392dcb, + 0x39358c, + 0x393b0d, + 0x393e4e, + 0x3941c9, + 0x394d13, + 0x39524d, + 0x39558d, + 0x395b8c, + 0x39600e, + 0x396fcf, + 0x39738c, + 0x39768d, + 0x3979cf, + 0x397d8c, + 0x39848c, + 0x39890c, + 0x398c0c, + 0x3992cd, + 0x399612, + 0x399c8c, + 0x399f8c, + 0x39a291, + 0x39a6cf, + 0x39aa8f, + 0x39ae53, + 
0x39bcce, + 0x39c04f, + 0x39c40c, + 0x50b9c74e, + 0x39cacf, + 0x39ce96, + 0x39dc12, + 0x39f38c, + 0x39fd0f, + 0x3a038d, + 0x3a06cf, + 0x3a0a8c, + 0x3a0d8d, + 0x3a10cd, + 0x3a254e, + 0x3a4b8c, + 0x3a4e8c, + 0x3a5190, + 0x3a7a91, + 0x3a7ecb, + 0x3a820c, + 0x3a850e, + 0x3aa811, + 0x3aac4e, + 0x3aafcd, + 0x3b53cb, + 0x3b5e8f, + 0x3b6d94, + 0x228782, + 0x228782, + 0x200c83, + 0x228782, + 0x200c83, + 0x228782, + 0x205142, + 0x384405, + 0x3aa50c, + 0x228782, + 0x228782, + 0x205142, + 0x228782, + 0x294545, + 0x21cb85, + 0x228782, + 0x228782, + 0x20b382, + 0x294545, + 0x31f3c9, + 0x34a00c, + 0x228782, + 0x228782, + 0x228782, + 0x228782, + 0x384405, + 0x228782, + 0x228782, + 0x228782, + 0x228782, + 0x20b382, + 0x31f3c9, + 0x228782, + 0x228782, + 0x228782, + 0x21cb85, + 0x228782, + 0x21cb85, + 0x34a00c, + 0x3aa50c, + 0x38d2c3, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x3b1384, + 0x205503, + 0x200983, + 0x2708, + 0x5fc84, + 0xe0e08, + 0x205702, + 0x51a099c2, + 0x23dbc3, + 0x24f2c4, + 0x2032c3, + 0x393304, + 0x22f706, + 0x20e883, + 0x3328c4, + 0x286bc5, + 0x209703, + 0x205503, + 0x200983, + 0x255cca, + 0x2efec6, + 0x373fcc, + 0x16d208, + 0x2099c2, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x227f83, + 0x2d43c6, + 0x205503, + 0x200983, + 0x201303, + 0xa4508, + 0x129845, + 0x14902, + 0x52f86185, + 0x21347, + 0xc93c8, + 0xec0e, + 0x88192, + 0xfe20b, + 0x532d6a45, + 0x536d6a4c, + 0xb007, + 0x16fc07, + 0x1b254a, + 0x3a6d0, + 0x149c05, + 0xd95cb, + 0x68bc8, + 0x38f47, + 0x304cb, + 0x4efc9, + 0x11dd07, + 0x2c07, + 0x73587, + 0x1c106, + 0xd4ac8, + 0x53c1cdc6, + 0xa8a0d, + 0x1b1f10, + 0x5402bb82, + 0x9688, + 0x4a450, + 0x14434c, + 0x5474e88d, + 0x655c7, + 0x78749, + 0x52e06, + 0x940c8, + 0x67e42, + 0x9f54a, + 0x27f07, + 0x35fc7, + 0xa4909, + 0xa6388, + 0xb9b45, + 0xec50e, + 0xb54e, + 0xdecf, + 0x11809, + 0xbb9c9, + 0x43e4b, + 0x7664f, + 0x8780c, + 0x9ef4b, + 0xbbf48, + 0x154807, + 0xcdc48, + 0xfb80b, + 0xf568c, + 0xf640c, + 0xf908c, + 0xfe68d, + 0x17e248, + 0xeab02, + 0x113a49, + 0x185d4b, + 
0xc5446, + 0x116fcb, + 0xd804a, + 0xd8c05, + 0xde6d0, + 0x111806, + 0x192345, + 0xe3f48, + 0xe9187, + 0xe9447, + 0xff487, + 0xf4d0a, + 0xc924a, + 0x5d306, + 0x91a0d, + 0x86ec8, + 0x55b08, + 0x56d49, + 0xb3c45, + 0xf484c, + 0xfe88b, + 0x165044, + 0xfaa89, + 0xfacc6, + 0x1af7c6, + 0x2dc2, + 0x125c86, + 0x107247, + 0x7b02, + 0xc83c5, + 0x29544, + 0x1ec1, + 0x4c983, + 0x53a85146, + 0x94443, + 0xd882, + 0x27f04, + 0x242, + 0x5ef44, + 0x3dc2, + 0x8142, + 0x2502, + 0x10f242, + 0x1ec2, + 0xd6a42, + 0x4142, + 0x1b102, + 0x2cd82, + 0x5742, + 0xdc2, + 0xf882, + 0x32403, + 0x5f02, + 0x7c2, + 0x18342, + 0xfc82, + 0x5e82, + 0x1ae02, + 0x17f42, + 0x15c2, + 0x29c2, + 0x1fc2, + 0x44183, + 0x3942, + 0x6502, + 0xafd42, + 0xbe02, + 0x282, + 0x4bc2, + 0x1f42, + 0xa8542, + 0x2342, + 0x152bc2, + 0x675c2, + 0x2c82, + 0x5503, + 0x8c2, + 0x8442, + 0x33c2, + 0xb482, + 0x49245, + 0xba02, + 0x2d4c2, + 0x3c083, + 0x482, + 0x1c42, + 0x27c2, + 0x3902, + 0x1102, + 0x1442, + 0xc2, + 0x2dc2, + 0x9885, + 0x75c47, + 0x212503, + 0x205702, + 0x2a84c3, + 0x232403, + 0x2163c3, + 0x20ad83, + 0x227f83, + 0x205503, + 0x204e83, + 0x200983, + 0x294483, + 0x169c3, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2163c3, + 0x209703, + 0x205503, + 0x204e83, + 0x200983, + 0x2a84c3, + 0x232403, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x200041, + 0x209703, + 0x205503, + 0x21c2c3, + 0x200983, + 0x38d2c3, + 0x2a84c3, + 0x232403, + 0x209683, + 0x2163c3, + 0x277dc3, + 0x280b83, + 0x21c303, + 0x252c03, + 0x2e9dc3, + 0x3b1384, + 0x205503, + 0x200983, + 0x25ed03, + 0x352e84, + 0x231a03, + 0x30c3, + 0x228483, + 0x37a908, + 0x24f3c4, + 0x3b870a, + 0x2b8ec6, + 0x1b6a04, + 0x39b2c7, + 0x21e7ca, + 0x315bc9, + 0x3ab587, + 0x3b724a, + 0x38d2c3, + 0x2e678b, + 0x2b9fc9, + 0x2bd645, + 0x2d1fc7, + 0x99c2, + 0x2a84c3, + 0x205747, + 0x2e2b85, + 0x2dbdc9, + 0x232403, + 0x233c06, + 0x2c1a43, + 0xdb283, + 0x104e46, + 0x18ec46, + 0xe807, + 0x212e46, + 0x21b185, + 0x282407, + 0x2d5b87, + 0x56ae9dc3, + 0x336647, + 0x365e03, + 0x206a05, + 
0x3b1384, + 0x220688, + 0x38644c, + 0x2ad745, + 0x29c986, + 0x205607, + 0x38b907, + 0x238347, + 0x245108, + 0x303b8f, + 0x315e05, + 0x23dcc7, + 0x26f287, + 0x2a3e0a, + 0x2d2809, + 0x304f85, + 0x30664a, + 0x82a06, + 0x2c1ac5, + 0x374b84, + 0x2843c6, + 0x2f1d47, + 0x2eaa07, + 0x3bb408, + 0x22dc85, + 0x2e2a86, + 0x214305, + 0x3adcc5, + 0x21c984, + 0x2af347, + 0x2081ca, + 0x334808, + 0x35ba86, + 0x27f83, + 0x2da905, + 0x25f906, + 0x3af246, + 0x392246, + 0x209703, + 0x388a07, + 0x26f205, + 0x205503, + 0x2d830d, + 0x204e83, + 0x3bb508, + 0x27f404, + 0x272fc5, + 0x2a3d06, + 0x234d46, + 0x368007, + 0x2a6ec7, + 0x267345, + 0x200983, + 0x21fbc7, + 0x2788c9, + 0x311a49, + 0x22708a, + 0x243002, + 0x2069c4, + 0x2e5084, + 0x390207, + 0x22cf88, + 0x2ea2c9, + 0x21eac9, + 0x2eaf47, + 0x2ba486, + 0xec286, + 0x2ed844, + 0x2ede4a, + 0x2f0d48, + 0x2f1189, + 0x2bdbc6, + 0x2b1445, + 0x3346c8, + 0x2c5f8a, + 0x22ed03, + 0x353006, + 0x2eb047, + 0x223ec5, + 0x3a5e05, + 0x264b83, + 0x250cc4, + 0x226605, + 0x281b07, + 0x2faf85, + 0x2ee346, + 0xfc605, + 0x247d83, + 0x357bc9, + 0x272d8c, + 0x29344c, + 0x2ced08, + 0x293087, + 0x2f7908, + 0x2f7c4a, + 0x2f888b, + 0x2ba108, + 0x234e48, + 0x239586, + 0x390d45, + 0x38da4a, + 0x3a6205, + 0x20b0c2, + 0x2c4647, + 0x25fe86, + 0x35c8c5, + 0x370809, + 0x2f39c5, + 0x27e985, + 0x2ddf09, + 0x351846, + 0x237e88, + 0x33f383, + 0x20f486, + 0x272146, + 0x306445, + 0x306449, + 0x2b6789, + 0x279ac7, + 0x109104, + 0x309107, + 0x21e9c9, + 0x238d05, + 0x413c8, + 0x3b2e85, + 0x330e85, + 0x380509, + 0x201702, + 0x25e544, + 0x201e82, + 0x203942, + 0x31ecc5, + 0x3b6788, + 0x2b3b85, + 0x2c3ac3, + 0x2c3ac5, + 0x2d2383, + 0x20f442, + 0x377804, + 0x2ac783, + 0x2056c2, + 0x379884, + 0x2e5d43, + 0x2082c2, + 0x2b3c03, + 0x28d084, + 0x2e4c83, + 0x248684, + 0x203082, + 0x218943, + 0x22ef03, + 0x200d02, + 0x361782, + 0x2b65c9, + 0x207842, + 0x288d04, + 0x203cc2, + 0x334544, + 0x2ba444, + 0x2b74c4, + 0x202dc2, + 0x2391c2, + 0x225bc3, + 0x2f8403, + 0x23d904, + 0x281c84, + 0x2eb1c4, + 
0x2f0f04, + 0x30a483, + 0x26e543, + 0x282984, + 0x30a2c4, + 0x30aac6, + 0x22a282, + 0x2099c2, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x205702, + 0x38d2c3, + 0x2a84c3, + 0x232403, + 0x2007c3, + 0x2e9dc3, + 0x3b1384, + 0x2b6884, + 0x211cc4, + 0x205503, + 0x200983, + 0x201303, + 0x2ee644, + 0x31a403, + 0x2bd0c3, + 0x34ab84, + 0x3b2c86, + 0x202f03, + 0x16fc07, + 0x222403, + 0x2459c3, + 0x2b0543, + 0x206a43, + 0x227f83, + 0x2d6cc5, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x282c43, + 0x2a5143, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x244183, + 0x205503, + 0x23a504, + 0x200983, + 0x26bec4, + 0x2bf145, + 0x16fc07, + 0x2099c2, + 0x2006c2, + 0x20d882, + 0x200c82, + 0x200442, + 0x2a84c3, + 0x235ac4, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x211cc4, + 0x205503, + 0x200983, + 0x214843, + 0x25ef44, + 0x16d208, + 0x2a84c3, + 0x204e83, + 0x169c3, + 0x2030c4, + 0x16d208, + 0x2a84c3, + 0x249944, + 0x3b1384, + 0x204e83, + 0x203ec2, + 0x200983, + 0x23e743, + 0x50cc4, + 0x373605, + 0x20b0c2, + 0x30a403, + 0x205702, + 0x16d208, + 0x2099c2, + 0x232403, + 0x2e9dc3, + 0x201fc2, + 0x200983, + 0x205702, + 0x1b7407, + 0x12e3c9, + 0x6f83, + 0x16d208, + 0x18ebc3, + 0x5a31fd87, + 0xa84c3, + 0x708, + 0x232403, + 0x2e9dc3, + 0x1ae886, + 0x244183, + 0x8f2c8, + 0xc0e08, + 0x41a46, + 0x209703, + 0xca988, + 0xb1b43, + 0xdf145, + 0x32607, + 0x8003, + 0x174c0a, + 0x11ed83, + 0x308d44, + 0x10398b, + 0x103f48, + 0x8d742, + 0x205702, + 0x2099c2, + 0x2a84c3, + 0x232403, + 0x2d5f04, + 0x2e9dc3, + 0x244183, + 0x209703, + 0x205503, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x227f83, + 0x205503, + 0x200983, + 0x21aa03, + 0x214843, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x169c3, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x3b1384, + 0x227f83, + 0x205503, + 0x200983, + 0x212982, + 0x200141, + 0x205702, + 0x200001, + 
0x320b82, + 0x16d208, + 0x21d445, + 0x201ec1, + 0xa84c3, + 0x200701, + 0x200301, + 0x200081, + 0x298602, + 0x36c044, + 0x384383, + 0x200181, + 0x200401, + 0x200041, + 0x200101, + 0x2e9907, + 0x2eab8f, + 0x340446, + 0x200281, + 0x37f6c6, + 0x200e81, + 0x2008c1, + 0x332a0e, + 0x200441, + 0x200983, + 0x201301, + 0x270e85, + 0x20f942, + 0x264a85, + 0x200341, + 0x200801, + 0x2002c1, + 0x20b0c2, + 0x2000c1, + 0x200201, + 0x200bc1, + 0x2005c1, + 0x201cc1, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x219503, + 0x2a84c3, + 0x2e9dc3, + 0x8d688, + 0x209703, + 0x205503, + 0x20803, + 0x200983, + 0x14e7e88, + 0x16d208, + 0x44e04, + 0x14e7e8a, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x205503, + 0x200983, + 0x2030c3, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2d5f04, + 0x200983, + 0x27a305, + 0x33b804, + 0x2a84c3, + 0x205503, + 0x200983, + 0x225ca, + 0xd5284, + 0x10c9c6, + 0x2099c2, + 0x2a84c3, + 0x230309, + 0x232403, + 0x3034c9, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x2ed648, + 0x22ca47, + 0x373605, + 0x18ed88, + 0x1b7407, + 0x2d20a, + 0xecb, + 0x4ab87, + 0x3d2c8, + 0x1b1b8a, + 0x10a48, + 0x12e3c9, + 0x264c7, + 0x3be87, + 0x152b08, + 0x708, + 0x3df8f, + 0x11d85, + 0xa07, + 0x1ae886, + 0x137607, + 0x3d586, + 0x8f2c8, + 0xa5606, + 0x151647, + 0x19c9, + 0x1aa1c7, + 0xa46c9, + 0xb4a09, + 0xbeec6, + 0xc0e08, + 0xbfcc5, + 0x4eb4a, + 0xca988, + 0xb1b43, + 0xd2648, + 0x32607, + 0x6d505, + 0x69c50, + 0x8003, + 0x1aa047, + 0x15ec5, + 0xe9748, + 0x13ce05, + 0x11ed83, + 0x6fd48, + 0xcd46, + 0x42849, + 0xaa147, + 0x6fa0b, + 0x14ac44, + 0xfa544, + 0x10398b, + 0x103f48, + 0x104d47, + 0x129845, + 0x2a84c3, + 0x232403, + 0x2163c3, + 0x200983, + 0x22a403, + 0x2e9dc3, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x43f8b, + 0x205702, + 0x2099c2, + 0x200983, + 0x16d208, + 0x205702, + 0x2099c2, + 0x20d882, + 0x201fc2, + 0x203d02, + 0x205503, + 0x200442, + 0x205702, + 0x38d2c3, + 0x2099c2, + 0x2a84c3, + 0x232403, + 0x20d882, 
+ 0x2e9dc3, + 0x244183, + 0x209703, + 0x211cc4, + 0x205503, + 0x216b03, + 0x200983, + 0x308d44, + 0x25ed03, + 0x2e9dc3, + 0x2099c2, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x204e83, + 0x200983, + 0x39f847, + 0x2a84c3, + 0x2614c7, + 0x2c7ac6, + 0x219203, + 0x218343, + 0x2e9dc3, + 0x2143c3, + 0x3b1384, + 0x37ef04, + 0x31ea46, + 0x20d143, + 0x205503, + 0x200983, + 0x27a305, + 0x318284, + 0x3b2a43, + 0x38b743, + 0x2c4647, + 0x33e885, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x28e87, + 0x205942, + 0x287003, + 0x2bf143, + 0x38d2c3, + 0x626a84c3, + 0x202242, + 0x232403, + 0x2032c3, + 0x2e9dc3, + 0x3b1384, + 0x353903, + 0x315e03, + 0x209703, + 0x211cc4, + 0x62a04642, + 0x205503, + 0x200983, + 0x2082c3, + 0x229543, + 0x212982, + 0x25ed03, + 0x16d208, + 0x2e9dc3, + 0x169c3, + 0x26f744, + 0x38d2c3, + 0x2099c2, + 0x2a84c3, + 0x235ac4, + 0x232403, + 0x2e9dc3, + 0x3b1384, + 0x244183, + 0x282104, + 0x210444, + 0x2d43c6, + 0x211cc4, + 0x205503, + 0x200983, + 0x201303, + 0x25fe86, + 0x13f08b, + 0x1cdc6, + 0x5eb4a, + 0x107e4a, + 0x16d208, + 0x2142c4, + 0x63ea84c3, + 0x38d284, + 0x232403, + 0x256e84, + 0x2e9dc3, + 0x391683, + 0x209703, + 0x205503, + 0x200983, + 0x56243, + 0x32f78b, + 0x3a140a, + 0x3b9bcc, + 0xda688, + 0x205702, + 0x2099c2, + 0x20d882, + 0x2a9305, + 0x3b1384, + 0x202342, + 0x209703, + 0x210444, + 0x200c82, + 0x200442, + 0x209842, + 0x212982, + 0x18d2c3, + 0x19f02, + 0x2a1cc9, + 0x25d548, + 0x309a89, + 0x337449, + 0x23490a, + 0x23634a, + 0x20cc02, + 0x21b102, + 0x99c2, + 0x2a84c3, + 0x204682, + 0x23de86, + 0x35d882, + 0x201242, + 0x20124e, + 0x21898e, + 0x27b107, + 0x205487, + 0x275d02, + 0x232403, + 0x2e9dc3, + 0x200042, + 0x201fc2, + 0x4a5c3, + 0x2eec0f, + 0x200f42, + 0x32c787, + 0x2c7d07, + 0x2d3907, + 0x2ad24c, + 0x3151cc, + 0x3a3a44, + 0x27c6ca, + 0x2188c2, + 0x20be02, + 0x2b6fc4, + 0x2226c2, + 0x2c2702, + 0x315404, + 0x20cec2, + 0x200282, + 0x6343, + 0x2a5687, + 0x2352c5, + 0x201f42, + 0x2eeb84, + 0x352bc2, + 
0x2da248, + 0x205503, + 0x3b0208, + 0x200d42, + 0x233385, + 0x3b04c6, + 0x200983, + 0x20ba02, + 0x2ea507, + 0xf942, + 0x26b005, + 0x3a9f45, + 0x201642, + 0x242b02, + 0x3b7a8a, + 0x2671ca, + 0x202c42, + 0x2e4744, + 0x2002c2, + 0x206888, + 0x201c82, + 0x30a848, + 0x2feb47, + 0x2ff649, + 0x26b082, + 0x305645, + 0x33bc85, + 0x22dd4b, + 0x2c6c4c, + 0x22e848, + 0x3188c8, + 0x22a282, + 0x35f782, + 0x205702, + 0x16d208, + 0x2099c2, + 0x2a84c3, + 0x20d882, + 0x200c82, + 0x200442, + 0x200983, + 0x209842, + 0x205702, + 0x652099c2, + 0x656e9dc3, + 0x206343, + 0x202342, + 0x205503, + 0x375cc3, + 0x200983, + 0x2e87c3, + 0x275d46, + 0x1614843, + 0x16d208, + 0x192345, + 0xa6a8d, + 0xa4dca, + 0x65c87, + 0x65e011c2, + 0x66200242, + 0x66600ec2, + 0x66a00c02, + 0x66e0de02, + 0x67201ec2, + 0x16fc07, + 0x676099c2, + 0x67a301c2, + 0x67e09982, + 0x68200dc2, + 0x218983, + 0x9e04, + 0x225d83, + 0x686149c2, + 0x68a00182, + 0x49f47, + 0x68e03002, + 0x69202e42, + 0x69600b42, + 0x69a02bc2, + 0x69e029c2, + 0x6a201fc2, + 0xb3985, + 0x234543, + 0x202b84, + 0x6a6226c2, + 0x6aa03a82, + 0x6ae03202, + 0x16c90b, + 0x6b200e82, + 0x6ba49a02, + 0x6be02342, + 0x6c203d02, + 0x6c60f242, + 0x6ca0ec42, + 0x6ce0e602, + 0x6d2675c2, + 0x6d604642, + 0x6da01b42, + 0x6de00c82, + 0x6e2042c2, + 0x6e61c702, + 0x6ea00e42, + 0x7f1c4, + 0x350703, + 0x6ee33082, + 0x6f216982, + 0x6f603402, + 0x6fa089c2, + 0x6fe00442, + 0x702056c2, + 0x44107, + 0x70601302, + 0x70a07302, + 0x70e09842, + 0x71218942, + 0xf484c, + 0x71621c82, + 0x71a3ab02, + 0x71e11602, + 0x72201682, + 0x72601f82, + 0x72a34a82, + 0x72e00202, + 0x7320e8c2, + 0x736724c2, + 0x73a56642, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0xa203, + 0x219f02, + 0x353903, + 
0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x6b753903, + 0x20a203, + 0x2d6d44, + 0x25d446, + 0x2f1743, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x353903, + 0x20a203, + 0x219f02, + 0x219f02, + 0x353903, + 0x20a203, + 0x742a84c3, + 0x232403, + 0x37ac03, + 0x209703, + 0x205503, + 0x200983, + 0x16d208, + 0x2099c2, + 0x2a84c3, + 0x205503, + 0x200983, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x209703, + 0x205503, + 0x200983, + 0x2030c4, + 0x2099c2, + 0x2a84c3, + 0x2028c3, + 0x232403, + 0x249944, + 0x2163c3, + 0x2e9dc3, + 0x3b1384, + 0x244183, + 0x209703, + 0x205503, + 0x200983, + 0x23e743, + 0x373605, + 0x2a1fc3, + 0x25ed03, + 0x2099c2, + 
0x2a84c3, + 0x353903, + 0x205503, + 0x200983, + 0x205702, + 0x38d2c3, + 0x16d208, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x22f706, + 0x3b1384, + 0x244183, + 0x211cc4, + 0x205503, + 0x200983, + 0x201303, + 0x2a84c3, + 0x232403, + 0x205503, + 0x200983, + 0x14bb147, + 0x2a84c3, + 0x1cdc6, + 0x232403, + 0x2e9dc3, + 0xdba46, + 0x205503, + 0x200983, + 0x3149c8, + 0x318709, + 0x328b89, + 0x333808, + 0x37dc48, + 0x37dc49, + 0x24318d, + 0x2ee80f, + 0x251490, + 0x34848d, + 0x3638cc, + 0x37f98b, + 0x98605, + 0x205702, + 0x33e6c5, + 0x200243, + 0x772099c2, + 0x232403, + 0x2e9dc3, + 0x343ec7, + 0x206a43, + 0x209703, + 0x205503, + 0x21c2c3, + 0x20dcc3, + 0x204e83, + 0x200983, + 0x2efec6, + 0x20b0c2, + 0x25ed03, + 0x16d208, + 0x205702, + 0x38d2c3, + 0x2099c2, + 0x2a84c3, + 0x232403, + 0x2e9dc3, + 0x3b1384, + 0x209703, + 0x205503, + 0x200983, + 0x214843, + 0x14f53c6, + 0x205702, + 0x2099c2, + 0x2e9dc3, + 0x209703, + 0x200983, } // children is the list of nodes' children, the parent's wildcard bit and the @@ -8342,416 +8764,490 @@ var nodes = [...]uint32{ // [14 bits] high nodes index (exclusive) of children // [14 bits] low nodes index (inclusive) of children var children = [...]uint32{ - 0x00000000, // c0x0000 (---------------) + - 0x10000000, // c0x0001 (---------------) ! - 0x20000000, // c0x0002 (---------------) o - 0x40000000, // c0x0003 (---------------)* + - 0x50000000, // c0x0004 (---------------)* ! 
- 0x60000000, // c0x0005 (---------------)* o - 0x01834607, // c0x0006 (n0x0607-n0x060d) + - 0x0183860d, // c0x0007 (n0x060d-n0x060e) + - 0x0185860e, // c0x0008 (n0x060e-n0x0616) + - 0x019b4616, // c0x0009 (n0x0616-n0x066d) + - 0x019c866d, // c0x000a (n0x066d-n0x0672) + - 0x019dc672, // c0x000b (n0x0672-n0x0677) + - 0x019ec677, // c0x000c (n0x0677-n0x067b) + - 0x01a0867b, // c0x000d (n0x067b-n0x0682) + - 0x01a0c682, // c0x000e (n0x0682-n0x0683) + - 0x01a24683, // c0x000f (n0x0683-n0x0689) + - 0x01a48689, // c0x0010 (n0x0689-n0x0692) + - 0x01a4c692, // c0x0011 (n0x0692-n0x0693) + - 0x01a64693, // c0x0012 (n0x0693-n0x0699) + - 0x01a68699, // c0x0013 (n0x0699-n0x069a) + - 0x01a8469a, // c0x0014 (n0x069a-n0x06a1) + - 0x01a886a1, // c0x0015 (n0x06a1-n0x06a2) + - 0x01ad06a2, // c0x0016 (n0x06a2-n0x06b4) + - 0x01ad46b4, // c0x0017 (n0x06b4-n0x06b5) + - 0x01af46b5, // c0x0018 (n0x06b5-n0x06bd) + - 0x01b086bd, // c0x0019 (n0x06bd-n0x06c2) + - 0x01b0c6c2, // c0x001a (n0x06c2-n0x06c3) + - 0x01b3c6c3, // c0x001b (n0x06c3-n0x06cf) + - 0x01b686cf, // c0x001c (n0x06cf-n0x06da) + - 0x01b906da, // c0x001d (n0x06da-n0x06e4) + - 0x01b986e4, // c0x001e (n0x06e4-n0x06e6) + - 0x01b9c6e6, // c0x001f (n0x06e6-n0x06e7) + - 0x01c306e7, // c0x0020 (n0x06e7-n0x070c) + - 0x01c4470c, // c0x0021 (n0x070c-n0x0711) + - 0x01c58711, // c0x0022 (n0x0711-n0x0716) + - 0x01c78716, // c0x0023 (n0x0716-n0x071e) + - 0x01c8871e, // c0x0024 (n0x071e-n0x0722) + - 0x01c9c722, // c0x0025 (n0x0722-n0x0727) + - 0x01cc0727, // c0x0026 (n0x0727-n0x0730) + - 0x01dd8730, // c0x0027 (n0x0730-n0x0776) + - 0x01ddc776, // c0x0028 (n0x0776-n0x0777) + - 0x01df0777, // c0x0029 (n0x0777-n0x077c) + - 0x01e0477c, // c0x002a (n0x077c-n0x0781) + - 0x01e0c781, // c0x002b (n0x0781-n0x0783) + - 0x01e1c783, // c0x002c (n0x0783-n0x0787) + - 0x01e20787, // c0x002d (n0x0787-n0x0788) + - 0x01e38788, // c0x002e (n0x0788-n0x078e) + - 0x01e7c78e, // c0x002f (n0x078e-n0x079f) + - 0x01e8c79f, // c0x0030 (n0x079f-n0x07a3) + - 0x01e907a3, // 
c0x0031 (n0x07a3-n0x07a4) + - 0x01e947a4, // c0x0032 (n0x07a4-n0x07a5) + - 0x01e987a5, // c0x0033 (n0x07a5-n0x07a6) + - 0x01ed47a6, // c0x0034 (n0x07a6-n0x07b5) + - 0x61ed87b5, // c0x0035 (n0x07b5-n0x07b6)* o - 0x01eec7b6, // c0x0036 (n0x07b6-n0x07bb) + - 0x01efc7bb, // c0x0037 (n0x07bb-n0x07bf) + - 0x01fb07bf, // c0x0038 (n0x07bf-n0x07ec) + - 0x21fb47ec, // c0x0039 (n0x07ec-n0x07ed) o - 0x01fb87ed, // c0x003a (n0x07ed-n0x07ee) + - 0x01fbc7ee, // c0x003b (n0x07ee-n0x07ef) + - 0x21fc07ef, // c0x003c (n0x07ef-n0x07f0) o - 0x21fc47f0, // c0x003d (n0x07f0-n0x07f1) o - 0x01ff87f1, // c0x003e (n0x07f1-n0x07fe) + - 0x01ffc7fe, // c0x003f (n0x07fe-n0x07ff) + - 0x0233c7ff, // c0x0040 (n0x07ff-n0x08cf) + - 0x223848cf, // c0x0041 (n0x08cf-n0x08e1) o - 0x023a88e1, // c0x0042 (n0x08e1-n0x08ea) + - 0x023b08ea, // c0x0043 (n0x08ea-n0x08ec) + - 0x223b48ec, // c0x0044 (n0x08ec-n0x08ed) o - 0x023d08ed, // c0x0045 (n0x08ed-n0x08f4) + - 0x023e88f4, // c0x0046 (n0x08f4-n0x08fa) + - 0x023ec8fa, // c0x0047 (n0x08fa-n0x08fb) + - 0x023fc8fb, // c0x0048 (n0x08fb-n0x08ff) + - 0x024048ff, // c0x0049 (n0x08ff-n0x0901) + - 0x22438901, // c0x004a (n0x0901-n0x090e) o - 0x0243c90e, // c0x004b (n0x090e-n0x090f) + - 0x0244090f, // c0x004c (n0x090f-n0x0910) + - 0x02460910, // c0x004d (n0x0910-n0x0918) + - 0x02464918, // c0x004e (n0x0918-n0x0919) + - 0x02478919, // c0x004f (n0x0919-n0x091e) + - 0x024a091e, // c0x0050 (n0x091e-n0x0928) + - 0x024c0928, // c0x0051 (n0x0928-n0x0930) + - 0x024f0930, // c0x0052 (n0x0930-n0x093c) + - 0x0251893c, // c0x0053 (n0x093c-n0x0946) + - 0x0251c946, // c0x0054 (n0x0946-n0x0947) + - 0x02540947, // c0x0055 (n0x0947-n0x0950) + - 0x02544950, // c0x0056 (n0x0950-n0x0951) + - 0x02558951, // c0x0057 (n0x0951-n0x0956) + - 0x0255c956, // c0x0058 (n0x0956-n0x0957) + - 0x0257c957, // c0x0059 (n0x0957-n0x095f) + - 0x0258895f, // c0x005a (n0x095f-n0x0962) + - 0x025e8962, // c0x005b (n0x0962-n0x097a) + - 0x0260497a, // c0x005c (n0x097a-n0x0981) + - 0x02610981, // c0x005d 
(n0x0981-n0x0984) + - 0x02624984, // c0x005e (n0x0984-n0x0989) + - 0x0263c989, // c0x005f (n0x0989-n0x098f) + - 0x0265098f, // c0x0060 (n0x098f-n0x0994) + - 0x02668994, // c0x0061 (n0x0994-n0x099a) + - 0x0268099a, // c0x0062 (n0x099a-n0x09a0) + - 0x026989a0, // c0x0063 (n0x09a0-n0x09a6) + - 0x026b49a6, // c0x0064 (n0x09a6-n0x09ad) + - 0x026cc9ad, // c0x0065 (n0x09ad-n0x09b3) + - 0x0272c9b3, // c0x0066 (n0x09b3-n0x09cb) + - 0x027449cb, // c0x0067 (n0x09cb-n0x09d1) + - 0x027589d1, // c0x0068 (n0x09d1-n0x09d6) + - 0x0279c9d6, // c0x0069 (n0x09d6-n0x09e7) + - 0x0281c9e7, // c0x006a (n0x09e7-n0x0a07) + - 0x02848a07, // c0x006b (n0x0a07-n0x0a12) + - 0x0284ca12, // c0x006c (n0x0a12-n0x0a13) + - 0x02854a13, // c0x006d (n0x0a13-n0x0a15) + - 0x02874a15, // c0x006e (n0x0a15-n0x0a1d) + - 0x02878a1d, // c0x006f (n0x0a1d-n0x0a1e) + - 0x02894a1e, // c0x0070 (n0x0a1e-n0x0a25) + - 0x0289ca25, // c0x0071 (n0x0a25-n0x0a27) + - 0x028d0a27, // c0x0072 (n0x0a27-n0x0a34) + - 0x028f8a34, // c0x0073 (n0x0a34-n0x0a3e) + - 0x028fca3e, // c0x0074 (n0x0a3e-n0x0a3f) + - 0x02914a3f, // c0x0075 (n0x0a3f-n0x0a45) + - 0x0292ca45, // c0x0076 (n0x0a45-n0x0a4b) + - 0x02950a4b, // c0x0077 (n0x0a4b-n0x0a54) + - 0x02970a54, // c0x0078 (n0x0a54-n0x0a5c) + - 0x02f34a5c, // c0x0079 (n0x0a5c-n0x0bcd) + - 0x02f40bcd, // c0x007a (n0x0bcd-n0x0bd0) + - 0x02f60bd0, // c0x007b (n0x0bd0-n0x0bd8) + - 0x0311cbd8, // c0x007c (n0x0bd8-n0x0c47) + - 0x031ecc47, // c0x007d (n0x0c47-n0x0c7b) + - 0x0325cc7b, // c0x007e (n0x0c7b-n0x0c97) + - 0x032b4c97, // c0x007f (n0x0c97-n0x0cad) + - 0x0339ccad, // c0x0080 (n0x0cad-n0x0ce7) + - 0x033f4ce7, // c0x0081 (n0x0ce7-n0x0cfd) + - 0x03430cfd, // c0x0082 (n0x0cfd-n0x0d0c) + - 0x0352cd0c, // c0x0083 (n0x0d0c-n0x0d4b) + - 0x035f8d4b, // c0x0084 (n0x0d4b-n0x0d7e) + - 0x03690d7e, // c0x0085 (n0x0d7e-n0x0da4) + - 0x03720da4, // c0x0086 (n0x0da4-n0x0dc8) + - 0x03784dc8, // c0x0087 (n0x0dc8-n0x0de1) + - 0x039bcde1, // c0x0088 (n0x0de1-n0x0e6f) + - 0x03a74e6f, // c0x0089 (n0x0e6f-n0x0e9d) + 
- 0x03b40e9d, // c0x008a (n0x0e9d-n0x0ed0) + - 0x03b8ced0, // c0x008b (n0x0ed0-n0x0ee3) + - 0x03c14ee3, // c0x008c (n0x0ee3-n0x0f05) + - 0x03c50f05, // c0x008d (n0x0f05-n0x0f14) + - 0x03ca0f14, // c0x008e (n0x0f14-n0x0f28) + - 0x03d18f28, // c0x008f (n0x0f28-n0x0f46) + - 0x63d1cf46, // c0x0090 (n0x0f46-n0x0f47)* o - 0x63d20f47, // c0x0091 (n0x0f47-n0x0f48)* o - 0x63d24f48, // c0x0092 (n0x0f48-n0x0f49)* o - 0x03da0f49, // c0x0093 (n0x0f49-n0x0f68) + - 0x03e08f68, // c0x0094 (n0x0f68-n0x0f82) + - 0x03e84f82, // c0x0095 (n0x0f82-n0x0fa1) + - 0x03efcfa1, // c0x0096 (n0x0fa1-n0x0fbf) + - 0x03f80fbf, // c0x0097 (n0x0fbf-n0x0fe0) + - 0x03fecfe0, // c0x0098 (n0x0fe0-n0x0ffb) + - 0x04118ffb, // c0x0099 (n0x0ffb-n0x1046) + - 0x04171046, // c0x009a (n0x1046-n0x105c) + - 0x6417505c, // c0x009b (n0x105c-n0x105d)* o - 0x0420d05d, // c0x009c (n0x105d-n0x1083) + - 0x04295083, // c0x009d (n0x1083-n0x10a5) + - 0x042e10a5, // c0x009e (n0x10a5-n0x10b8) + - 0x043490b8, // c0x009f (n0x10b8-n0x10d2) + - 0x043f10d2, // c0x00a0 (n0x10d2-n0x10fc) + - 0x044b90fc, // c0x00a1 (n0x10fc-n0x112e) + - 0x0452112e, // c0x00a2 (n0x112e-n0x1148) + - 0x04635148, // c0x00a3 (n0x1148-n0x118d) + - 0x6463918d, // c0x00a4 (n0x118d-n0x118e)* o - 0x6463d18e, // c0x00a5 (n0x118e-n0x118f)* o - 0x0469918f, // c0x00a6 (n0x118f-n0x11a6) + - 0x046f51a6, // c0x00a7 (n0x11a6-n0x11bd) + - 0x047851bd, // c0x00a8 (n0x11bd-n0x11e1) + - 0x048011e1, // c0x00a9 (n0x11e1-n0x1200) + - 0x04845200, // c0x00aa (n0x1200-n0x1211) + - 0x04929211, // c0x00ab (n0x1211-n0x124a) + - 0x0495d24a, // c0x00ac (n0x124a-n0x1257) + - 0x049bd257, // c0x00ad (n0x1257-n0x126f) + - 0x04a3126f, // c0x00ae (n0x126f-n0x128c) + - 0x04ab928c, // c0x00af (n0x128c-n0x12ae) + - 0x04af92ae, // c0x00b0 (n0x12ae-n0x12be) + - 0x04b692be, // c0x00b1 (n0x12be-n0x12da) + - 0x64b6d2da, // c0x00b2 (n0x12da-n0x12db)* o - 0x64b712db, // c0x00b3 (n0x12db-n0x12dc)* o - 0x24b752dc, // c0x00b4 (n0x12dc-n0x12dd) o - 0x04b8d2dd, // c0x00b5 (n0x12dd-n0x12e3) + - 
0x04ba92e3, // c0x00b6 (n0x12e3-n0x12ea) + - 0x04bed2ea, // c0x00b7 (n0x12ea-n0x12fb) + - 0x04bfd2fb, // c0x00b8 (n0x12fb-n0x12ff) + - 0x04c152ff, // c0x00b9 (n0x12ff-n0x1305) + - 0x04c8d305, // c0x00ba (n0x1305-n0x1323) + - 0x04ca1323, // c0x00bb (n0x1323-n0x1328) + - 0x04cb9328, // c0x00bc (n0x1328-n0x132e) + - 0x04cdd32e, // c0x00bd (n0x132e-n0x1337) + - 0x04cf1337, // c0x00be (n0x1337-n0x133c) + - 0x04d0933c, // c0x00bf (n0x133c-n0x1342) + - 0x04d0d342, // c0x00c0 (n0x1342-n0x1343) + - 0x04d49343, // c0x00c1 (n0x1343-n0x1352) + - 0x04d5d352, // c0x00c2 (n0x1352-n0x1357) + - 0x04d65357, // c0x00c3 (n0x1357-n0x1359) + - 0x04d6d359, // c0x00c4 (n0x1359-n0x135b) + - 0x04d7135b, // c0x00c5 (n0x135b-n0x135c) + - 0x04d9535c, // c0x00c6 (n0x135c-n0x1365) + - 0x04db9365, // c0x00c7 (n0x1365-n0x136e) + - 0x04dd136e, // c0x00c8 (n0x136e-n0x1374) + - 0x04dd9374, // c0x00c9 (n0x1374-n0x1376) + - 0x04ddd376, // c0x00ca (n0x1376-n0x1377) + - 0x04e11377, // c0x00cb (n0x1377-n0x1384) + - 0x04e35384, // c0x00cc (n0x1384-n0x138d) + - 0x04e5538d, // c0x00cd (n0x138d-n0x1395) + - 0x04e71395, // c0x00ce (n0x1395-n0x139c) + - 0x04e8139c, // c0x00cf (n0x139c-n0x13a0) + - 0x04e953a0, // c0x00d0 (n0x13a0-n0x13a5) + - 0x04e993a5, // c0x00d1 (n0x13a5-n0x13a6) + - 0x04ea13a6, // c0x00d2 (n0x13a6-n0x13a8) + - 0x04eb53a8, // c0x00d3 (n0x13a8-n0x13ad) + - 0x04ec53ad, // c0x00d4 (n0x13ad-n0x13b1) + - 0x04ec93b1, // c0x00d5 (n0x13b1-n0x13b2) + - 0x04ee53b2, // c0x00d6 (n0x13b2-n0x13b9) + - 0x057753b9, // c0x00d7 (n0x13b9-n0x15dd) + - 0x057ad5dd, // c0x00d8 (n0x15dd-n0x15eb) + - 0x057d95eb, // c0x00d9 (n0x15eb-n0x15f6) + - 0x057f15f6, // c0x00da (n0x15f6-n0x15fc) + - 0x058115fc, // c0x00db (n0x15fc-n0x1604) + - 0x65815604, // c0x00dc (n0x1604-n0x1605)* o - 0x05859605, // c0x00dd (n0x1605-n0x1616) + - 0x05861616, // c0x00de (n0x1616-n0x1618) + - 0x25865618, // c0x00df (n0x1618-n0x1619) o - 0x25869619, // c0x00e0 (n0x1619-n0x161a) o - 0x0586d61a, // c0x00e1 (n0x161a-n0x161b) + - 0x0594161b, // 
c0x00e2 (n0x161b-n0x1650) + - 0x25945650, // c0x00e3 (n0x1650-n0x1651) o - 0x2594d651, // c0x00e4 (n0x1651-n0x1653) o - 0x25955653, // c0x00e5 (n0x1653-n0x1655) o - 0x25961655, // c0x00e6 (n0x1655-n0x1658) o - 0x05989658, // c0x00e7 (n0x1658-n0x1662) + - 0x059ad662, // c0x00e8 (n0x1662-n0x166b) + - 0x059b166b, // c0x00e9 (n0x166b-n0x166c) + - 0x259e966c, // c0x00ea (n0x166c-n0x167a) o - 0x059f567a, // c0x00eb (n0x167a-n0x167d) + - 0x0654d67d, // c0x00ec (n0x167d-n0x1953) + - 0x06551953, // c0x00ed (n0x1953-n0x1954) + - 0x06555954, // c0x00ee (n0x1954-n0x1955) + - 0x26559955, // c0x00ef (n0x1955-n0x1956) o - 0x0655d956, // c0x00f0 (n0x1956-n0x1957) + - 0x26561957, // c0x00f1 (n0x1957-n0x1958) o - 0x06565958, // c0x00f2 (n0x1958-n0x1959) + - 0x26571959, // c0x00f3 (n0x1959-n0x195c) o - 0x0657595c, // c0x00f4 (n0x195c-n0x195d) + - 0x0657995d, // c0x00f5 (n0x195d-n0x195e) + - 0x2657d95e, // c0x00f6 (n0x195e-n0x195f) o - 0x0658195f, // c0x00f7 (n0x195f-n0x1960) + - 0x26589960, // c0x00f8 (n0x1960-n0x1962) o - 0x0658d962, // c0x00f9 (n0x1962-n0x1963) + - 0x06591963, // c0x00fa (n0x1963-n0x1964) + - 0x265a1964, // c0x00fb (n0x1964-n0x1968) o - 0x065a5968, // c0x00fc (n0x1968-n0x1969) + - 0x065a9969, // c0x00fd (n0x1969-n0x196a) + - 0x065ad96a, // c0x00fe (n0x196a-n0x196b) + - 0x065b196b, // c0x00ff (n0x196b-n0x196c) + - 0x265b596c, // c0x0100 (n0x196c-n0x196d) o - 0x065b996d, // c0x0101 (n0x196d-n0x196e) + - 0x065bd96e, // c0x0102 (n0x196e-n0x196f) + - 0x065c196f, // c0x0103 (n0x196f-n0x1970) + - 0x065c5970, // c0x0104 (n0x1970-n0x1971) + - 0x265cd971, // c0x0105 (n0x1971-n0x1973) o - 0x065d1973, // c0x0106 (n0x1973-n0x1974) + - 0x065d5974, // c0x0107 (n0x1974-n0x1975) + - 0x065d9975, // c0x0108 (n0x1975-n0x1976) + - 0x265dd976, // c0x0109 (n0x1976-n0x1977) o - 0x065e1977, // c0x010a (n0x1977-n0x1978) + - 0x265e9978, // c0x010b (n0x1978-n0x197a) o - 0x265ed97a, // c0x010c (n0x197a-n0x197b) o - 0x0660997b, // c0x010d (n0x197b-n0x1982) + - 0x06615982, // c0x010e 
(n0x1982-n0x1985) + - 0x06655985, // c0x010f (n0x1985-n0x1995) + - 0x06659995, // c0x0110 (n0x1995-n0x1996) + - 0x0667d996, // c0x0111 (n0x1996-n0x199f) + - 0x0677199f, // c0x0112 (n0x199f-n0x19dc) + - 0x267799dc, // c0x0113 (n0x19dc-n0x19de) o - 0x2677d9de, // c0x0114 (n0x19de-n0x19df) o - 0x267819df, // c0x0115 (n0x19df-n0x19e0) o - 0x067899e0, // c0x0116 (n0x19e0-n0x19e2) + - 0x068659e2, // c0x0117 (n0x19e2-n0x1a19) + - 0x06891a19, // c0x0118 (n0x1a19-n0x1a24) + - 0x068b1a24, // c0x0119 (n0x1a24-n0x1a2c) + - 0x068bda2c, // c0x011a (n0x1a2c-n0x1a2f) + - 0x068dda2f, // c0x011b (n0x1a2f-n0x1a37) + - 0x06915a37, // c0x011c (n0x1a37-n0x1a45) + - 0x06ba9a45, // c0x011d (n0x1a45-n0x1aea) + - 0x06c65aea, // c0x011e (n0x1aea-n0x1b19) + - 0x06c79b19, // c0x011f (n0x1b19-n0x1b1e) + - 0x06cadb1e, // c0x0120 (n0x1b1e-n0x1b2b) + - 0x06cc9b2b, // c0x0121 (n0x1b2b-n0x1b32) + - 0x06ce5b32, // c0x0122 (n0x1b32-n0x1b39) + - 0x06d09b39, // c0x0123 (n0x1b39-n0x1b42) + - 0x06d21b42, // c0x0124 (n0x1b42-n0x1b48) + - 0x06d3db48, // c0x0125 (n0x1b48-n0x1b4f) + - 0x06d61b4f, // c0x0126 (n0x1b4f-n0x1b58) + - 0x06d71b58, // c0x0127 (n0x1b58-n0x1b5c) + - 0x06da1b5c, // c0x0128 (n0x1b5c-n0x1b68) + - 0x06dbdb68, // c0x0129 (n0x1b68-n0x1b6f) + - 0x06fc9b6f, // c0x012a (n0x1b6f-n0x1bf2) + - 0x06fedbf2, // c0x012b (n0x1bf2-n0x1bfb) + - 0x0700dbfb, // c0x012c (n0x1bfb-n0x1c03) + - 0x07021c03, // c0x012d (n0x1c03-n0x1c08) + - 0x07035c08, // c0x012e (n0x1c08-n0x1c0d) + - 0x07055c0d, // c0x012f (n0x1c0d-n0x1c15) + - 0x070f9c15, // c0x0130 (n0x1c15-n0x1c3e) + - 0x07115c3e, // c0x0131 (n0x1c3e-n0x1c45) + - 0x07131c45, // c0x0132 (n0x1c45-n0x1c4c) + - 0x07135c4c, // c0x0133 (n0x1c4c-n0x1c4d) + - 0x07139c4d, // c0x0134 (n0x1c4d-n0x1c4e) + - 0x0714dc4e, // c0x0135 (n0x1c4e-n0x1c53) + - 0x0716dc53, // c0x0136 (n0x1c53-n0x1c5b) + - 0x07179c5b, // c0x0137 (n0x1c5b-n0x1c5e) + - 0x071a9c5e, // c0x0138 (n0x1c5e-n0x1c6a) + - 0x07229c6a, // c0x0139 (n0x1c6a-n0x1c8a) + - 0x0723dc8a, // c0x013a (n0x1c8a-n0x1c8f) + 
- 0x07241c8f, // c0x013b (n0x1c8f-n0x1c90) + - 0x07259c90, // c0x013c (n0x1c90-n0x1c96) + - 0x07265c96, // c0x013d (n0x1c96-n0x1c99) + - 0x07269c99, // c0x013e (n0x1c99-n0x1c9a) + - 0x07285c9a, // c0x013f (n0x1c9a-n0x1ca1) + - 0x072c1ca1, // c0x0140 (n0x1ca1-n0x1cb0) + - 0x072c5cb0, // c0x0141 (n0x1cb0-n0x1cb1) + - 0x072e5cb1, // c0x0142 (n0x1cb1-n0x1cb9) + - 0x07335cb9, // c0x0143 (n0x1cb9-n0x1ccd) + - 0x0734dccd, // c0x0144 (n0x1ccd-n0x1cd3) + - 0x073a1cd3, // c0x0145 (n0x1cd3-n0x1ce8) + - 0x073a5ce8, // c0x0146 (n0x1ce8-n0x1ce9) + - 0x073a9ce9, // c0x0147 (n0x1ce9-n0x1cea) + - 0x073edcea, // c0x0148 (n0x1cea-n0x1cfb) + - 0x073fdcfb, // c0x0149 (n0x1cfb-n0x1cff) + - 0x07435cff, // c0x014a (n0x1cff-n0x1d0d) + - 0x07465d0d, // c0x014b (n0x1d0d-n0x1d19) + - 0x075a1d19, // c0x014c (n0x1d19-n0x1d68) + - 0x075c5d68, // c0x014d (n0x1d68-n0x1d71) + - 0x075f1d71, // c0x014e (n0x1d71-n0x1d7c) + - 0x075f5d7c, // c0x014f (n0x1d7c-n0x1d7d) + - 0x075f9d7d, // c0x0150 (n0x1d7d-n0x1d7e) + - 0x076f5d7e, // c0x0151 (n0x1d7e-n0x1dbd) + - 0x07701dbd, // c0x0152 (n0x1dbd-n0x1dc0) + - 0x0770ddc0, // c0x0153 (n0x1dc0-n0x1dc3) + - 0x07719dc3, // c0x0154 (n0x1dc3-n0x1dc6) + - 0x07725dc6, // c0x0155 (n0x1dc6-n0x1dc9) + - 0x07731dc9, // c0x0156 (n0x1dc9-n0x1dcc) + - 0x0773ddcc, // c0x0157 (n0x1dcc-n0x1dcf) + - 0x07749dcf, // c0x0158 (n0x1dcf-n0x1dd2) + - 0x07755dd2, // c0x0159 (n0x1dd2-n0x1dd5) + - 0x07761dd5, // c0x015a (n0x1dd5-n0x1dd8) + - 0x0776ddd8, // c0x015b (n0x1dd8-n0x1ddb) + - 0x07779ddb, // c0x015c (n0x1ddb-n0x1dde) + - 0x07785dde, // c0x015d (n0x1dde-n0x1de1) + - 0x07791de1, // c0x015e (n0x1de1-n0x1de4) + - 0x07799de4, // c0x015f (n0x1de4-n0x1de6) + - 0x077a5de6, // c0x0160 (n0x1de6-n0x1de9) + - 0x077b1de9, // c0x0161 (n0x1de9-n0x1dec) + - 0x077bddec, // c0x0162 (n0x1dec-n0x1def) + - 0x077c9def, // c0x0163 (n0x1def-n0x1df2) + - 0x077d5df2, // c0x0164 (n0x1df2-n0x1df5) + - 0x077e1df5, // c0x0165 (n0x1df5-n0x1df8) + - 0x077eddf8, // c0x0166 (n0x1df8-n0x1dfb) + - 0x077f9dfb, // 
c0x0167 (n0x1dfb-n0x1dfe) + - 0x07805dfe, // c0x0168 (n0x1dfe-n0x1e01) + - 0x07811e01, // c0x0169 (n0x1e01-n0x1e04) + - 0x0781de04, // c0x016a (n0x1e04-n0x1e07) + - 0x07829e07, // c0x016b (n0x1e07-n0x1e0a) + - 0x07835e0a, // c0x016c (n0x1e0a-n0x1e0d) + - 0x07841e0d, // c0x016d (n0x1e0d-n0x1e10) + - 0x0784de10, // c0x016e (n0x1e10-n0x1e13) + - 0x07859e13, // c0x016f (n0x1e13-n0x1e16) + - 0x07865e16, // c0x0170 (n0x1e16-n0x1e19) + - 0x0786de19, // c0x0171 (n0x1e19-n0x1e1b) + - 0x07879e1b, // c0x0172 (n0x1e1b-n0x1e1e) + - 0x07885e1e, // c0x0173 (n0x1e1e-n0x1e21) + - 0x07891e21, // c0x0174 (n0x1e21-n0x1e24) + - 0x0789de24, // c0x0175 (n0x1e24-n0x1e27) + - 0x078a9e27, // c0x0176 (n0x1e27-n0x1e2a) + - 0x078b5e2a, // c0x0177 (n0x1e2a-n0x1e2d) + - 0x078c1e2d, // c0x0178 (n0x1e2d-n0x1e30) + - 0x078cde30, // c0x0179 (n0x1e30-n0x1e33) + - 0x078d9e33, // c0x017a (n0x1e33-n0x1e36) + - 0x078e5e36, // c0x017b (n0x1e36-n0x1e39) + - 0x078f1e39, // c0x017c (n0x1e39-n0x1e3c) + - 0x078fde3c, // c0x017d (n0x1e3c-n0x1e3f) + - 0x07909e3f, // c0x017e (n0x1e3f-n0x1e42) + - 0x07911e42, // c0x017f (n0x1e42-n0x1e44) + - 0x0791de44, // c0x0180 (n0x1e44-n0x1e47) + - 0x07929e47, // c0x0181 (n0x1e47-n0x1e4a) + - 0x07935e4a, // c0x0182 (n0x1e4a-n0x1e4d) + - 0x07941e4d, // c0x0183 (n0x1e4d-n0x1e50) + - 0x0794de50, // c0x0184 (n0x1e50-n0x1e53) + - 0x07959e53, // c0x0185 (n0x1e53-n0x1e56) + - 0x07965e56, // c0x0186 (n0x1e56-n0x1e59) + - 0x07971e59, // c0x0187 (n0x1e59-n0x1e5c) + - 0x07975e5c, // c0x0188 (n0x1e5c-n0x1e5d) + - 0x07981e5d, // c0x0189 (n0x1e5d-n0x1e60) + - 0x07999e60, // c0x018a (n0x1e60-n0x1e66) + - 0x0799de66, // c0x018b (n0x1e66-n0x1e67) + - 0x079ade67, // c0x018c (n0x1e67-n0x1e6b) + - 0x079c5e6b, // c0x018d (n0x1e6b-n0x1e71) + - 0x07a09e71, // c0x018e (n0x1e71-n0x1e82) + - 0x07a1de82, // c0x018f (n0x1e82-n0x1e87) + - 0x07a51e87, // c0x0190 (n0x1e87-n0x1e94) + - 0x07a61e94, // c0x0191 (n0x1e94-n0x1e98) + - 0x07a7de98, // c0x0192 (n0x1e98-n0x1e9f) + - 0x07a95e9f, // c0x0193 
(n0x1e9f-n0x1ea5) + - 0x27ad9ea5, // c0x0194 (n0x1ea5-n0x1eb6) o - 0x07addeb6, // c0x0195 (n0x1eb6-n0x1eb7) + + 0x0, + 0x10000000, + 0x20000000, + 0x40000000, + 0x50000000, + 0x60000000, + 0x184c60d, + 0x1850613, + 0x1870614, + 0x19cc61c, + 0x19e0673, + 0x19f4678, + 0x1a0467d, + 0x1a20681, + 0x1a24688, + 0x1a3c689, + 0x1a6468f, + 0x1a68699, + 0x1a8069a, + 0x1a846a0, + 0x1a886a1, + 0x1ab06a2, + 0x1ab46ac, + 0x21abc6ad, + 0x1b046af, + 0x1b086c1, + 0x1b286c2, + 0x1b3c6ca, + 0x1b406cf, + 0x1b706d0, + 0x1b8c6dc, + 0x1bb46e3, + 0x1bc06ed, + 0x1bc46f0, + 0x1c5c6f1, + 0x1c70717, + 0x1c8471c, + 0x1cb4721, + 0x1cc472d, + 0x1cd8731, + 0x1cfc736, + 0x1e3473f, + 0x1e3878d, + 0x1ea478e, + 0x1f107a9, + 0x1f247c4, + 0x1f387c9, + 0x1f407ce, + 0x1f507d0, + 0x1f547d4, + 0x1f6c7d5, + 0x1fb87db, + 0x1fd47ee, + 0x1fd87f5, + 0x1fdc7f6, + 0x1fe87f7, + 0x20247fa, + 0x62028809, + 0x203c80a, + 0x205080f, + 0x2054814, + 0x2064815, + 0x2114819, + 0x2118845, + 0x22124846, + 0x2212c849, + 0x216484b, + 0x2168859, + 0x25b885a, + 0x2265896e, + 0x2265c996, + 0x22660997, + 0x2266c998, + 0x2267099b, + 0x2267c99c, + 0x2268099f, + 0x226849a0, + 0x226889a1, + 0x2268c9a2, + 0x226909a3, + 0x2269c9a4, + 0x226a09a7, + 0x226ac9a8, + 0x226b09ab, + 0x226b49ac, + 0x226b89ad, + 0x226c49ae, + 0x226c89b1, + 0x226cc9b2, + 0x226d09b3, + 0x26d49b4, + 0x226d89b5, + 0x226e49b6, + 0x226e89b9, + 0x26f09ba, + 0x227089bc, + 0x2270c9c2, + 0x27189c3, + 0x2271c9c6, + 0x27209c7, + 0x227249c8, + 0x27409c9, + 0x27589d0, + 0x275c9d6, + 0x276c9d7, + 0x27749db, + 0x27a89dd, + 0x27ac9ea, + 0x27bc9eb, + 0x28609ef, + 0x22864a18, + 0x286ca19, + 0x2870a1b, + 0x2888a1c, + 0x289ca22, + 0x28c4a27, + 0x28e4a31, + 0x2914a39, + 0x293ca45, + 0x2940a4f, + 0x2964a50, + 0x2968a59, + 0x297ca5a, + 0x2980a5f, + 0x2984a60, + 0x29a4a61, + 0x29c0a69, + 0x29c4a70, + 0x229c8a71, + 0x29cca72, + 0x29d0a73, + 0x29e0a74, + 0x29e4a78, + 0x2a5ca79, + 0x2a78a97, + 0x2a88a9e, + 0x2a9caa2, + 0x2ab4aa7, + 0x2ac8aad, + 0x2ae0ab2, + 0x2ae4ab8, + 0x2afcab9, + 
0x2b14abf, + 0x2b30ac5, + 0x2b48acc, + 0x2ba8ad2, + 0x2bc0aea, + 0x2bc4af0, + 0x2bd8af1, + 0x2c1caf6, + 0x2c9cb07, + 0x2cc8b27, + 0x2cccb32, + 0x2cd4b33, + 0x2cf4b35, + 0x2cf8b3d, + 0x2d18b3e, + 0x2d20b46, + 0x2d5cb48, + 0x2d9cb57, + 0x2da0b67, + 0x2e00b68, + 0x2e04b80, + 0x22e08b81, + 0x2e20b82, + 0x2e44b88, + 0x2e64b91, + 0x3428b99, + 0x3434d0a, + 0x3454d0d, + 0x3610d15, + 0x36e0d84, + 0x3750db8, + 0x37a8dd4, + 0x3890dea, + 0x38e8e24, + 0x3924e3a, + 0x3a20e49, + 0x3aece88, + 0x3b84ebb, + 0x3c14ee1, + 0x3c78f05, + 0x3eb0f1e, + 0x3f68fac, + 0x4034fda, + 0x408100d, + 0x4109020, + 0x4145042, + 0x4195051, + 0x420d065, + 0x64211083, + 0x64215084, + 0x64219085, + 0x4295086, + 0x42f10a5, + 0x436d0bc, + 0x43e50db, + 0x44650f9, + 0x44d1119, + 0x45fd134, + 0x465517f, + 0x64659195, + 0x46f1196, + 0x47791bc, + 0x47c51de, + 0x482d1f1, + 0x48d520b, + 0x499d235, + 0x4a05267, + 0x4b19281, + 0x64b1d2c6, + 0x64b212c7, + 0x4b7d2c8, + 0x4bd92df, + 0x4c692f6, + 0x4ce531a, + 0x4d29339, + 0x4e0d34a, + 0x4e41383, + 0x4ea1390, + 0x4f153a8, + 0x4f9d3c5, + 0x4fdd3e7, + 0x504d3f7, + 0x65051413, + 0x65055414, + 0x25059415, + 0x5071416, + 0x508d41c, + 0x50d1423, + 0x50e1434, + 0x50f9438, + 0x517143e, + 0x517945c, + 0x518d45e, + 0x51a5463, + 0x51cd469, + 0x51d1473, + 0x51d9474, + 0x51ed476, + 0x520947b, + 0x520d482, + 0x5215483, + 0x5251485, + 0x5265494, + 0x526d499, + 0x527549b, + 0x527949d, + 0x529d49e, + 0x52c14a7, + 0x52d94b0, + 0x52dd4b6, + 0x52e54b7, + 0x52e94b9, + 0x534d4ba, + 0x53514d3, + 0x53754d4, + 0x53954dd, + 0x53b14e5, + 0x53c14ec, + 0x53d54f0, + 0x53d94f5, + 0x53e14f6, + 0x53f54f8, + 0x54054fd, + 0x5409501, + 0x5425502, + 0x5cb5509, + 0x5ced72d, + 0x5d1973b, + 0x5d31746, + 0x5d5174c, + 0x5d71754, + 0x5db575c, + 0x5dbd76d, + 0x25dc176f, + 0x25dc5770, + 0x5dcd771, + 0x5f29773, + 0x25f2d7ca, + 0x25f3d7cb, + 0x25f457cf, + 0x25f517d1, + 0x5f557d4, + 0x5f597d5, + 0x5f817d6, + 0x5fa97e0, + 0x5fad7ea, + 0x5fe57eb, + 0x5ff97f9, + 0x6b517fe, + 0x6b55ad4, + 0x6b59ad5, + 0x26b5dad6, + 
0x6b61ad7, + 0x26b65ad8, + 0x6b69ad9, + 0x26b75ada, + 0x6b79add, + 0x6b7dade, + 0x26b81adf, + 0x6b85ae0, + 0x26b8dae1, + 0x6b91ae3, + 0x6b95ae4, + 0x26ba5ae5, + 0x6ba9ae9, + 0x6badaea, + 0x6bb1aeb, + 0x6bb5aec, + 0x26bb9aed, + 0x6bbdaee, + 0x6bc1aef, + 0x6bc5af0, + 0x6bc9af1, + 0x26bd1af2, + 0x6bd5af4, + 0x6bd9af5, + 0x6bddaf6, + 0x26be1af7, + 0x6be5af8, + 0x26bedaf9, + 0x26bf1afb, + 0x6c0dafc, + 0x6c19b03, + 0x6c59b06, + 0x6c5db16, + 0x6c81b17, + 0x6c85b20, + 0x6c89b21, + 0x6e01b22, + 0x26e05b80, + 0x26e0db81, + 0x26e11b83, + 0x26e15b84, + 0x6e1db85, + 0x6ef9b87, + 0x26efdbbe, + 0x6f01bbf, + 0x6f2dbc0, + 0x6f31bcb, + 0x6f51bcc, + 0x6f5dbd4, + 0x6f7dbd7, + 0x6fb5bdf, + 0x724dbed, + 0x7309c93, + 0x731dcc2, + 0x7351cc7, + 0x7381cd4, + 0x739dce0, + 0x73c1ce7, + 0x73ddcf0, + 0x73f9cf7, + 0x741dcfe, + 0x742dd07, + 0x7431d0b, + 0x7465d0c, + 0x7481d19, + 0x74edd20, + 0x274f1d3b, + 0x7515d3c, + 0x7535d45, + 0x7549d4d, + 0x755dd52, + 0x7561d57, + 0x7581d58, + 0x7625d60, + 0x7641d89, + 0x7661d90, + 0x7665d98, + 0x766dd99, + 0x7671d9b, + 0x7685d9c, + 0x76a5da1, + 0x76b1da9, + 0x76bddac, + 0x76eddaf, + 0x77bddbb, + 0x77c1def, + 0x77d5df0, + 0x77d9df5, + 0x77f1df6, + 0x77f5dfc, + 0x7801dfd, + 0x7805e00, + 0x7821e01, + 0x785de08, + 0x7861e17, + 0x7881e18, + 0x78d1e20, + 0x78ede34, + 0x7941e3b, + 0x7945e50, + 0x7949e51, + 0x794de52, + 0x7991e53, + 0x79a1e64, + 0x79dde68, + 0x79e1e77, + 0x7a11e78, + 0x7b59e84, + 0x7b7ded6, + 0x7ba9edf, + 0x7bb5eea, + 0x7bbdeed, + 0x7ccdeef, + 0x7cd9f33, + 0x7ce5f36, + 0x7cf1f39, + 0x7cfdf3c, + 0x7d09f3f, + 0x7d15f42, + 0x7d21f45, + 0x7d2df48, + 0x7d39f4b, + 0x7d45f4e, + 0x7d51f51, + 0x7d5df54, + 0x7d69f57, + 0x7d71f5a, + 0x7d7df5c, + 0x7d89f5f, + 0x7d95f62, + 0x7da1f65, + 0x7dadf68, + 0x7db9f6b, + 0x7dc5f6e, + 0x7dd1f71, + 0x7dddf74, + 0x7de9f77, + 0x7df5f7a, + 0x7e01f7d, + 0x7e0df80, + 0x7e19f83, + 0x7e25f86, + 0x7e31f89, + 0x7e3df8c, + 0x7e45f8f, + 0x7e51f91, + 0x7e5df94, + 0x7e69f97, + 0x7e75f9a, + 0x7e81f9d, + 0x7e8dfa0, + 0x7e99fa3, + 
0x7ea5fa6, + 0x7eb1fa9, + 0x7ebdfac, + 0x7ec9faf, + 0x7ed5fb2, + 0x7ee1fb5, + 0x7ee9fb8, + 0x7ef5fba, + 0x7f01fbd, + 0x7f0dfc0, + 0x7f19fc3, + 0x7f25fc6, + 0x7f31fc9, + 0x7f3dfcc, + 0x7f49fcf, + 0x7f4dfd2, + 0x7f59fd3, + 0x7f71fd6, + 0x7f75fdc, + 0x7f85fdd, + 0x7f9dfe1, + 0x7fe1fe7, + 0x7ff5ff8, + 0x8029ffd, + 0x803a00a, + 0x805a00e, + 0x8072016, + 0x808a01c, + 0x808e022, + 0x280d2023, + 0x80d6034, + 0x8102035, + 0x8106040, + 0x811a041, } -// max children 405 (capacity 511) -// max text offset 26986 (capacity 32767) +// max children 479 (capacity 511) +// max text offset 28411 (capacity 32767) // max text length 36 (capacity 63) -// max hi 7863 (capacity 16383) -// max lo 7862 (capacity 16383) +// max hi 8262 (capacity 16383) +// max lo 8257 (capacity 16383) diff --git a/fn/vendor/golang.org/x/net/publicsuffix/table_test.go b/fn/vendor/golang.org/x/net/publicsuffix/table_test.go index 46cc5470a..416512cb9 100644 --- a/fn/vendor/golang.org/x/net/publicsuffix/table_test.go +++ b/fn/vendor/golang.org/x/net/publicsuffix/table_test.go @@ -148,6 +148,7 @@ var rules = [...]string{ "gov.ar", "int.ar", "mil.ar", + "musica.ar", "net.ar", "org.ar", "tur.ar", @@ -215,16 +216,12 @@ var rules = [...]string{ "pro.az", "biz.az", "ba", - "org.ba", - "net.ba", + "com.ba", "edu.ba", "gov.ba", "mil.ba", - "unsa.ba", - "unbi.ba", - "co.ba", - "com.ba", - "rs.ba", + "net.ba", + "org.ba", "bb", "biz.bb", "co.bb", @@ -321,6 +318,7 @@ var rules = [...]string{ "art.br", "ato.br", "b.br", + "belem.br", "bio.br", "blog.br", "bmd.br", @@ -329,6 +327,8 @@ var rules = [...]string{ "cnt.br", "com.br", "coop.br", + "cri.br", + "def.br", "ecn.br", "eco.br", "edu.br", @@ -339,6 +339,7 @@ var rules = [...]string{ "eti.br", "far.br", "flog.br", + "floripa.br", "fm.br", "fnd.br", "fot.br", @@ -346,9 +347,37 @@ var rules = [...]string{ "g12.br", "ggf.br", "gov.br", + "ac.gov.br", + "al.gov.br", + "am.gov.br", + "ap.gov.br", + "ba.gov.br", + "ce.gov.br", + "df.gov.br", + "es.gov.br", + "go.gov.br", + 
"ma.gov.br", + "mg.gov.br", + "ms.gov.br", + "mt.gov.br", + "pa.gov.br", + "pb.gov.br", + "pe.gov.br", + "pi.gov.br", + "pr.gov.br", + "rj.gov.br", + "rn.gov.br", + "ro.gov.br", + "rr.gov.br", + "rs.gov.br", + "sc.gov.br", + "se.gov.br", + "sp.gov.br", + "to.gov.br", "imb.br", "ind.br", "inf.br", + "jampa.br", "jor.br", "jus.br", "leg.br", @@ -364,6 +393,7 @@ var rules = [...]string{ "ntr.br", "odo.br", "org.br", + "poa.br", "ppg.br", "pro.br", "psc.br", @@ -371,6 +401,7 @@ var rules = [...]string{ "qsl.br", "radio.br", "rec.br", + "recife.br", "slg.br", "srv.br", "taxi.br", @@ -380,6 +411,7 @@ var rules = [...]string{ "tur.br", "tv.br", "vet.br", + "vix.br", "vlog.br", "wiki.br", "zlg.br", @@ -545,6 +577,7 @@ var rules = [...]string{ "org.cw", "cx", "gov.cx", + "cy", "ac.cy", "biz.cy", "com.cy", @@ -2211,11 +2244,8 @@ var rules = [...]string{ "aso.kumamoto.jp", "choyo.kumamoto.jp", "gyokuto.kumamoto.jp", - "hitoyoshi.kumamoto.jp", "kamiamakusa.kumamoto.jp", - "kashima.kumamoto.jp", "kikuchi.kumamoto.jp", - "kosa.kumamoto.jp", "kumamoto.kumamoto.jp", "mashiki.kumamoto.jp", "mifune.kumamoto.jp", @@ -2300,7 +2330,6 @@ var rules = [...]string{ "kakuda.miyagi.jp", "kami.miyagi.jp", "kawasaki.miyagi.jp", - "kesennuma.miyagi.jp", "marumori.miyagi.jp", "matsushima.miyagi.jp", "minamisanriku.miyagi.jp", @@ -3920,8 +3949,15 @@ var rules = [...]string{ "edu.my", "mil.my", "name.my", - "*.mz", - "!teledata.mz", + "mz", + "ac.mz", + "adv.mz", + "co.mz", + "edu.mz", + "gov.mz", + "mil.mz", + "net.mz", + "org.mz", "na", "info.na", "pro.na", @@ -3943,6 +3979,7 @@ var rules = [...]string{ "name", "nc", "asso.nc", + "nom.nc", "ne", "net", "nf", @@ -3959,27 +3996,29 @@ var rules = [...]string{ "ng", "com.ng", "edu.ng", + "gov.ng", + "i.ng", + "mil.ng", + "mobi.ng", "name.ng", "net.ng", "org.ng", "sch.ng", - "gov.ng", - "mil.ng", - "mobi.ng", - "com.ni", - "gob.ni", - "edu.ni", - "org.ni", - "nom.ni", - "net.ni", - "mil.ni", - "co.ni", - "biz.ni", - "web.ni", - "int.ni", + "ni", 
"ac.ni", + "biz.ni", + "co.ni", + "com.ni", + "edu.ni", + "gob.ni", "in.ni", "info.ni", + "int.ni", + "mil.ni", + "net.ni", + "nom.ni", + "org.ni", + "web.ni", "nl", "bv.nl", "no", @@ -4773,6 +4812,7 @@ var rules = [...]string{ "net.om", "org.om", "pro.om", + "onion", "org", "pa", "ac.pa", @@ -5047,13 +5087,17 @@ var rules = [...]string{ "prof.pr", "ac.pr", "pro", + "aaa.pro", "aca.pro", + "acct.pro", + "avocat.pro", "bar.pro", "cpa.pro", + "eng.pro", "jur.pro", "law.pro", "med.pro", - "eng.pro", + "recht.pro", "ps", "edu.ps", "gov.ps", @@ -5096,157 +5140,33 @@ var rules = [...]string{ "org.qa", "sch.qa", "re", - "com.re", "asso.re", + "com.re", "nom.re", "ro", - "com.ro", - "org.ro", - "tm.ro", - "nt.ro", - "nom.ro", - "info.ro", - "rec.ro", "arts.ro", + "com.ro", "firm.ro", + "info.ro", + "nom.ro", + "nt.ro", + "org.ro", + "rec.ro", "store.ro", + "tm.ro", "www.ro", "rs", - "co.rs", - "org.rs", - "edu.rs", "ac.rs", + "co.rs", + "edu.rs", "gov.rs", "in.rs", + "org.rs", "ru", "ac.ru", - "com.ru", "edu.ru", - "int.ru", - "net.ru", - "org.ru", - "pp.ru", - "adygeya.ru", - "altai.ru", - "amur.ru", - "arkhangelsk.ru", - "astrakhan.ru", - "bashkiria.ru", - "belgorod.ru", - "bir.ru", - "bryansk.ru", - "buryatia.ru", - "cbg.ru", - "chel.ru", - "chelyabinsk.ru", - "chita.ru", - "chukotka.ru", - "chuvashia.ru", - "dagestan.ru", - "dudinka.ru", - "e-burg.ru", - "grozny.ru", - "irkutsk.ru", - "ivanovo.ru", - "izhevsk.ru", - "jar.ru", - "joshkar-ola.ru", - "kalmykia.ru", - "kaluga.ru", - "kamchatka.ru", - "karelia.ru", - "kazan.ru", - "kchr.ru", - "kemerovo.ru", - "khabarovsk.ru", - "khakassia.ru", - "khv.ru", - "kirov.ru", - "koenig.ru", - "komi.ru", - "kostroma.ru", - "krasnoyarsk.ru", - "kuban.ru", - "kurgan.ru", - "kursk.ru", - "lipetsk.ru", - "magadan.ru", - "mari.ru", - "mari-el.ru", - "marine.ru", - "mordovia.ru", - "msk.ru", - "murmansk.ru", - "nalchik.ru", - "nnov.ru", - "nov.ru", - "novosibirsk.ru", - "nsk.ru", - "omsk.ru", - "orenburg.ru", - "oryol.ru", - 
"palana.ru", - "penza.ru", - "perm.ru", - "ptz.ru", - "rnd.ru", - "ryazan.ru", - "sakhalin.ru", - "samara.ru", - "saratov.ru", - "simbirsk.ru", - "smolensk.ru", - "spb.ru", - "stavropol.ru", - "stv.ru", - "surgut.ru", - "tambov.ru", - "tatarstan.ru", - "tom.ru", - "tomsk.ru", - "tsaritsyn.ru", - "tsk.ru", - "tula.ru", - "tuva.ru", - "tver.ru", - "tyumen.ru", - "udm.ru", - "udmurtia.ru", - "ulan-ude.ru", - "vladikavkaz.ru", - "vladimir.ru", - "vladivostok.ru", - "volgograd.ru", - "vologda.ru", - "voronezh.ru", - "vrn.ru", - "vyatka.ru", - "yakutia.ru", - "yamal.ru", - "yaroslavl.ru", - "yekaterinburg.ru", - "yuzhno-sakhalinsk.ru", - "amursk.ru", - "baikal.ru", - "cmw.ru", - "fareast.ru", - "jamal.ru", - "kms.ru", - "k-uralsk.ru", - "kustanai.ru", - "kuzbass.ru", - "mytis.ru", - "nakhodka.ru", - "nkz.ru", - "norilsk.ru", - "oskol.ru", - "pyatigorsk.ru", - "rubtsovsk.ru", - "snz.ru", - "syzran.ru", - "vdonsk.ru", - "zgrad.ru", "gov.ru", + "int.ru", "mil.ru", "test.ru", "rw", @@ -5379,38 +5299,6 @@ var rules = [...]string{ "saotome.st", "store.st", "su", - "adygeya.su", - "arkhangelsk.su", - "balashov.su", - "bashkiria.su", - "bryansk.su", - "dagestan.su", - "grozny.su", - "ivanovo.su", - "kalmykia.su", - "kaluga.su", - "karelia.su", - "khakassia.su", - "krasnodar.su", - "kurgan.su", - "lenug.su", - "mordovia.su", - "msk.su", - "murmansk.su", - "nalchik.su", - "nov.su", - "obninsk.su", - "penza.su", - "pokrovsk.su", - "sochi.su", - "spb.su", - "togliatti.su", - "troitsk.su", - "tula.su", - "tuva.su", - "vladikavkaz.su", - "vladimir.su", - "vologda.su", "sv", "com.sv", "edu.sv", @@ -5843,7 +5731,6 @@ var rules = [...]string{ "lib.co.us", "lib.ct.us", "lib.dc.us", - "lib.de.us", "lib.fl.us", "lib.ga.us", "lib.gu.us", @@ -5970,6 +5857,7 @@ var rules = [...]string{ "xn--fiqz9s", "xn--lgbbat1ad8j", "xn--wgbh1c", + "xn--e1a4c", "xn--node", "xn--qxam", "xn--j6w193g", @@ -6017,6 +5905,12 @@ var rules = [...]string{ "xn--ogbpf8fl", "xn--mgbtf8fl", "xn--o3cw4h", + 
"xn--12c1fe0br.xn--o3cw4h", + "xn--12co0c3b4eva.xn--o3cw4h", + "xn--h3cuzk1di.xn--o3cw4h", + "xn--o3cyx2a.xn--o3cw4h", + "xn--m3ch0j3a.xn--o3cw4h", + "xn--12cfi8ixb8l.xn--o3cw4h", "xn--pgbs0dh", "xn--kpry57d", "xn--kprw13d", @@ -6042,8 +5936,24 @@ var rules = [...]string{ "school.za", "tm.za", "web.za", - "*.zm", - "*.zw", + "zm", + "ac.zm", + "biz.zm", + "co.zm", + "com.zm", + "edu.zm", + "gov.zm", + "info.zm", + "mil.zm", + "net.zm", + "org.zm", + "sch.zm", + "zw", + "ac.zw", + "co.zw", + "gov.zw", + "mil.zw", + "org.zw", "aaa", "aarp", "abarth", @@ -6069,7 +5979,6 @@ var rules = [...]string{ "afamilycompany", "afl", "africa", - "africamagic", "agakhan", "agency", "aig", @@ -6105,6 +6014,7 @@ var rules = [...]string{ "aramco", "archi", "army", + "art", "arte", "asda", "associates", @@ -6203,6 +6113,7 @@ var rules = [...]string{ "cal", "call", "calvinklein", + "cam", "camera", "camp", "cancerresearch", @@ -6303,6 +6214,7 @@ var rules = [...]string{ "dabur", "dad", "dance", + "data", "date", "dating", "datsun", @@ -6335,15 +6247,14 @@ var rules = [...]string{ "diy", "dnp", "docs", + "doctor", "dodge", "dog", "doha", "domains", - "doosan", "dot", "download", "drive", - "dstv", "dtv", "dubai", "duck", @@ -6352,14 +6263,14 @@ var rules = [...]string{ "dupont", "durban", "dvag", - "dwg", + "dvr", "earth", "eat", + "eco", "edeka", "education", "email", "emerck", - "emerson", "energy", "engineer", "engineering", @@ -6416,9 +6327,9 @@ var rules = [...]string{ "flir", "florist", "flowers", - "flsmidth", "fly", "foo", + "food", "foodnetwork", "football", "ford", @@ -6436,6 +6347,7 @@ var rules = [...]string{ "ftr", "fujitsu", "fujixerox", + "fun", "fund", "furniture", "futbol", @@ -6465,6 +6377,7 @@ var rules = [...]string{ "global", "globo", "gmail", + "gmbh", "gmo", "gmx", "godaddy", @@ -6478,12 +6391,12 @@ var rules = [...]string{ "google", "gop", "got", - "gotv", "grainger", "graphics", "gratis", "green", "gripe", + "grocery", "group", "guardian", "gucci", @@ -6520,10 
+6433,12 @@ var rules = [...]string{ "honda", "honeywell", "horse", + "hospital", "host", "hosting", "hot", "hoteles", + "hotels", "hotmail", "house", "how", @@ -6538,7 +6453,6 @@ var rules = [...]string{ "icu", "ieee", "ifm", - "iinet", "ikano", "imamat", "imdb", @@ -6604,7 +6518,6 @@ var rules = [...]string{ "krd", "kred", "kuokgroup", - "kyknet", "kyoto", "lacaixa", "ladbrokes", @@ -6673,6 +6586,7 @@ var rules = [...]string{ "man", "management", "mango", + "map", "market", "marketing", "markets", @@ -6693,6 +6607,7 @@ var rules = [...]string{ "men", "menu", "meo", + "merckmsd", "metlife", "miami", "microsoft", @@ -6703,7 +6618,7 @@ var rules = [...]string{ "mlb", "mls", "mma", - "mnet", + "mobile", "mobily", "moda", "moe", @@ -6726,14 +6641,10 @@ var rules = [...]string{ "mtn", "mtpc", "mtr", - "multichoice", "mutual", - "mutuelle", - "mzansimagic", "nab", "nadex", "nagoya", - "naspers", "nationwide", "natura", "navy", @@ -6788,7 +6699,6 @@ var rules = [...]string{ "oracle", "orange", "organic", - "orientexpress", "origins", "osaka", "otsuka", @@ -6805,12 +6715,13 @@ var rules = [...]string{ "party", "passagens", "pay", - "payu", "pccw", "pet", "pfizer", "pharmacy", + "phd", "philips", + "phone", "photo", "photography", "photos", @@ -6856,6 +6767,7 @@ var rules = [...]string{ "quest", "qvc", "racing", + "radio", "raid", "read", "realestate", @@ -6895,6 +6807,7 @@ var rules = [...]string{ "rogers", "room", "rsvp", + "rugby", "ruhr", "run", "rwe", @@ -6930,6 +6843,7 @@ var rules = [...]string{ "scjohnson", "scor", "scot", + "search", "seat", "secure", "security", @@ -6950,6 +6864,8 @@ var rules = [...]string{ "shia", "shiksha", "shoes", + "shop", + "shopping", "shouji", "show", "showtime", @@ -6994,11 +6910,11 @@ var rules = [...]string{ "stockholm", "storage", "store", + "stream", "studio", "study", "style", "sucks", - "supersport", "supplies", "supply", "support", @@ -7034,7 +6950,6 @@ var rules = [...]string{ "thd", "theater", "theatre", - "theguardian", 
"tiaa", "tickets", "tienda", @@ -7159,7 +7074,6 @@ var rules = [...]string{ "xn--42c2d9a", "xn--45q11c", "xn--4gbrim", - "xn--4gq48lf9j", "xn--55qw42g", "xn--55qx5d", "xn--5su34j936bgsg", @@ -7262,41 +7176,96 @@ var rules = [...]string{ "zippo", "zone", "zuerich", + "cc.ua", + "inf.ua", + "ltd.ua", + "beep.pl", + "*.compute.estate", + "*.alces.network", + "*.alwaysdata.net", "cloudfront.net", - "ap-northeast-1.compute.amazonaws.com", - "ap-southeast-1.compute.amazonaws.com", - "ap-southeast-2.compute.amazonaws.com", - "cn-north-1.compute.amazonaws.cn", - "compute.amazonaws.cn", - "compute.amazonaws.com", - "compute-1.amazonaws.com", - "eu-west-1.compute.amazonaws.com", - "eu-central-1.compute.amazonaws.com", - "sa-east-1.compute.amazonaws.com", + "*.compute.amazonaws.com", + "*.compute-1.amazonaws.com", + "*.compute.amazonaws.com.cn", "us-east-1.amazonaws.com", - "us-gov-west-1.compute.amazonaws.com", - "us-west-1.compute.amazonaws.com", - "us-west-2.compute.amazonaws.com", - "z-1.compute-1.amazonaws.com", - "z-2.compute-1.amazonaws.com", - "elasticbeanstalk.com", - "elb.amazonaws.com", + "elasticbeanstalk.cn-north-1.amazonaws.com.cn", + "*.elasticbeanstalk.com", + "*.elb.amazonaws.com", + "*.elb.amazonaws.com.cn", "s3.amazonaws.com", "s3-ap-northeast-1.amazonaws.com", + "s3-ap-northeast-2.amazonaws.com", + "s3-ap-south-1.amazonaws.com", "s3-ap-southeast-1.amazonaws.com", "s3-ap-southeast-2.amazonaws.com", - "s3-external-1.amazonaws.com", - "s3-external-2.amazonaws.com", - "s3-fips-us-gov-west-1.amazonaws.com", + "s3-ca-central-1.amazonaws.com", "s3-eu-central-1.amazonaws.com", "s3-eu-west-1.amazonaws.com", + "s3-eu-west-2.amazonaws.com", + "s3-external-1.amazonaws.com", + "s3-fips-us-gov-west-1.amazonaws.com", "s3-sa-east-1.amazonaws.com", "s3-us-gov-west-1.amazonaws.com", + "s3-us-east-2.amazonaws.com", "s3-us-west-1.amazonaws.com", "s3-us-west-2.amazonaws.com", + "s3.ap-northeast-2.amazonaws.com", + "s3.ap-south-1.amazonaws.com", 
"s3.cn-north-1.amazonaws.com.cn", + "s3.ca-central-1.amazonaws.com", "s3.eu-central-1.amazonaws.com", + "s3.eu-west-2.amazonaws.com", + "s3.us-east-2.amazonaws.com", + "s3.dualstack.ap-northeast-1.amazonaws.com", + "s3.dualstack.ap-northeast-2.amazonaws.com", + "s3.dualstack.ap-south-1.amazonaws.com", + "s3.dualstack.ap-southeast-1.amazonaws.com", + "s3.dualstack.ap-southeast-2.amazonaws.com", + "s3.dualstack.ca-central-1.amazonaws.com", + "s3.dualstack.eu-central-1.amazonaws.com", + "s3.dualstack.eu-west-1.amazonaws.com", + "s3.dualstack.eu-west-2.amazonaws.com", + "s3.dualstack.sa-east-1.amazonaws.com", + "s3.dualstack.us-east-1.amazonaws.com", + "s3.dualstack.us-east-2.amazonaws.com", + "s3-website-us-east-1.amazonaws.com", + "s3-website-us-west-1.amazonaws.com", + "s3-website-us-west-2.amazonaws.com", + "s3-website-ap-northeast-1.amazonaws.com", + "s3-website-ap-southeast-1.amazonaws.com", + "s3-website-ap-southeast-2.amazonaws.com", + "s3-website-eu-west-1.amazonaws.com", + "s3-website-sa-east-1.amazonaws.com", + "s3-website.ap-northeast-2.amazonaws.com", + "s3-website.ap-south-1.amazonaws.com", + "s3-website.ca-central-1.amazonaws.com", + "s3-website.eu-central-1.amazonaws.com", + "s3-website.eu-west-2.amazonaws.com", + "s3-website.us-east-2.amazonaws.com", + "t3l3p0rt.net", + "tele.amune.org", + "on-aptible.com", + "user.party.eus", + "pimienta.org", + "poivron.org", + "potager.org", + "sweetpepper.org", + "myasustor.com", + "myfritz.net", + "*.awdev.ca", + "*.advisor.ws", + "backplaneapp.io", "betainabox.com", + "bnr.la", + "boxfuse.io", + "square7.ch", + "bplaced.com", + "bplaced.de", + "square7.de", + "bplaced.net", + "square7.net", + "browsersafetymark.io", + "mycd.eu", "ae.org", "ar.com", "br.com", @@ -7331,20 +7300,62 @@ var rules = [...]string{ "us.org", "co.com", "c.la", + "certmgr.org", + "xenapponazure.com", + "virtueeldomein.nl", + "c66.me", "cloudcontrolled.com", "cloudcontrolapp.com", "co.ca", + "co.cz", "c.cdn77.org", "cdn77-ssl.net", 
"r.cdn77.net", "rsc.cdn77.org", "ssl.origin.cdn77-secure.org", + "cloudns.asia", + "cloudns.biz", + "cloudns.club", + "cloudns.cc", + "cloudns.eu", + "cloudns.in", + "cloudns.info", + "cloudns.org", + "cloudns.pro", + "cloudns.pw", + "cloudns.us", "co.nl", "co.no", - "*.platform.sh", + "dyn.cosidns.de", + "dynamisches-dns.de", + "dnsupdater.de", + "internet-dns.de", + "l-o-g-i-n.de", + "dynamic-dns.info", + "feste-ip.net", + "knx-server.net", + "static-access.net", + "realm.cz", + "*.cryptonomic.net", "cupcake.is", + "cyon.link", + "cyon.site", + "daplie.me", + "localhost.daplie.me", + "biz.dk", + "co.dk", + "firm.dk", + "reg.dk", + "store.dk", + "dedyn.io", + "dnshome.de", "dreamhosters.com", + "mydrobo.com", + "drud.io", + "drud.us", "duckdns.org", + "dy.fi", + "tunk.org", "dyndns-at-home.com", "dyndns-at-work.com", "dyndns-blog.com", @@ -7624,6 +7635,21 @@ var rules = [...]string{ "webhop.org", "worse-than.tv", "writesthisblog.com", + "ddnss.de", + "dyn.ddnss.de", + "dyndns.ddnss.de", + "dyndns1.de", + "dyn-ip24.de", + "home-webserver.de", + "dyn.home-webserver.de", + "myhome-server.de", + "ddnss.org", + "definima.net", + "definima.io", + "dynv6.net", + "e4.cz", + "enonic.io", + "customer.enonic.io", "eu.org", "al.eu.org", "asso.eu.org", @@ -7680,17 +7706,132 @@ var rules = [...]string{ "tr.eu.org", "uk.eu.org", "us.eu.org", + "eu-1.evennode.com", + "eu-2.evennode.com", + "eu-3.evennode.com", + "us-1.evennode.com", + "us-2.evennode.com", + "us-3.evennode.com", + "twmail.cc", + "twmail.net", + "twmail.org", + "mymailer.com.tw", + "url.tw", + "apps.fbsbx.com", + "ru.net", + "adygeya.ru", + "bashkiria.ru", + "bir.ru", + "cbg.ru", + "com.ru", + "dagestan.ru", + "grozny.ru", + "kalmykia.ru", + "kustanai.ru", + "marine.ru", + "mordovia.ru", + "msk.ru", + "mytis.ru", + "nalchik.ru", + "nov.ru", + "pyatigorsk.ru", + "spb.ru", + "vladikavkaz.ru", + "vladimir.ru", + "abkhazia.su", + "adygeya.su", + "aktyubinsk.su", + "arkhangelsk.su", + "armenia.su", + "ashgabad.su", + 
"azerbaijan.su", + "balashov.su", + "bashkiria.su", + "bryansk.su", + "bukhara.su", + "chimkent.su", + "dagestan.su", + "east-kazakhstan.su", + "exnet.su", + "georgia.su", + "grozny.su", + "ivanovo.su", + "jambyl.su", + "kalmykia.su", + "kaluga.su", + "karacol.su", + "karaganda.su", + "karelia.su", + "khakassia.su", + "krasnodar.su", + "kurgan.su", + "kustanai.su", + "lenug.su", + "mangyshlak.su", + "mordovia.su", + "msk.su", + "murmansk.su", + "nalchik.su", + "navoi.su", + "north-kazakhstan.su", + "nov.su", + "obninsk.su", + "penza.su", + "pokrovsk.su", + "sochi.su", + "spb.su", + "tashkent.su", + "termez.su", + "togliatti.su", + "troitsk.su", + "tselinograd.su", + "tula.su", + "tuva.su", + "vladikavkaz.su", + "vladimir.su", + "vologda.su", + "fastlylb.net", + "map.fastlylb.net", + "freetls.fastly.net", + "map.fastly.net", + "a.prod.fastly.net", + "global.prod.fastly.net", "a.ssl.fastly.net", "b.ssl.fastly.net", "global.ssl.fastly.net", - "a.prod.fastly.net", - "global.prod.fastly.net", + "fhapp.xyz", + "fedorainfracloud.org", + "fedorapeople.org", + "cloud.fedoraproject.org", + "filegear.me", "firebaseapp.com", "flynnhub.com", + "freebox-os.com", + "freeboxos.com", + "fbx-os.fr", + "fbxos.fr", + "freebox-os.fr", + "freeboxos.fr", + "myfusion.cloud", + "futurehosting.at", + "futuremailing.at", + "*.ex.ortsinfo.at", + "*.kunden.ortsinfo.at", + "*.statics.cloud", "service.gov.uk", "github.io", "githubusercontent.com", - "ro.com", + "githubcloud.com", + "*.api.githubcloud.com", + "*.ext.githubcloud.com", + "gist.githubcloud.com", + "*.githubcloudusercontent.com", + "gitlab.io", + "homeoffice.gov.uk", + "ro.im", + "shop.ro", + "goip.de", + "*.0emm.com", "appspot.com", "blogspot.ae", "blogspot.al", @@ -7767,18 +7908,70 @@ var rules = [...]string{ "blogspot.ug", "blogspot.vn", "cloudfunctions.net", + "cloud.goog", "codespot.com", "googleapis.com", "googlecode.com", "pagespeedmobilizer.com", + "publishproxy.com", "withgoogle.com", "withyoutube.com", "hashbang.sh", + 
"hasura-app.io", + "hepforge.org", "herokuapp.com", "herokussl.com", + "moonscale.net", "iki.fi", "biz.at", "info.at", + "ac.leg.br", + "al.leg.br", + "am.leg.br", + "ap.leg.br", + "ba.leg.br", + "ce.leg.br", + "df.leg.br", + "es.leg.br", + "go.leg.br", + "ma.leg.br", + "mg.leg.br", + "ms.leg.br", + "mt.leg.br", + "pa.leg.br", + "pb.leg.br", + "pe.leg.br", + "pi.leg.br", + "pr.leg.br", + "rj.leg.br", + "rn.leg.br", + "ro.leg.br", + "rr.leg.br", + "rs.leg.br", + "sc.leg.br", + "se.leg.br", + "sp.leg.br", + "to.leg.br", + "ipifony.net", + "*.triton.zone", + "*.cns.joyent.com", + "js.org", + "keymachine.de", + "knightpoint.systems", + "co.krd", + "edu.krd", + "barsy.bg", + "barsyonline.com", + "barsy.de", + "barsy.eu", + "barsy.in", + "barsy.net", + "barsy.online", + "barsy.support", + "*.magentosite.cloud", + "hb.cldmail.ru", + "meteorapp.com", + "eu.meteorapp.com", "co.pl", "azurewebsites.net", "azure-mobile.net", @@ -7787,10 +7980,104 @@ var rules = [...]string{ "4u.com", "ngrok.io", "nfshost.com", + "nsupdate.info", + "nerdpol.ovh", + "blogsyte.com", + "brasilia.me", + "cable-modem.org", + "ciscofreak.com", + "collegefan.org", + "couchpotatofries.org", + "damnserver.com", + "ddns.me", + "ditchyourip.com", + "dnsfor.me", + "dnsiskinky.com", + "dvrcam.info", + "dynns.com", + "eating-organic.net", + "fantasyleague.cc", + "geekgalaxy.com", + "golffan.us", + "health-carereform.com", + "homesecuritymac.com", + "homesecuritypc.com", + "hopto.me", + "ilovecollege.info", + "loginto.me", + "mlbfan.org", + "mmafan.biz", + "myactivedirectory.com", + "mydissent.net", + "myeffect.net", + "mymediapc.net", + "mypsx.net", + "mysecuritycamera.com", + "mysecuritycamera.net", + "mysecuritycamera.org", + "net-freaks.com", + "nflfan.org", + "nhlfan.net", + "no-ip.ca", + "no-ip.co.uk", + "no-ip.net", + "noip.us", + "onthewifi.com", + "pgafan.net", + "point2this.com", + "pointto.us", + "privatizehealthinsurance.net", + "quicksytes.com", + "read-books.org", + "securitytactics.com", + 
"serveexchange.com", + "servehumour.com", + "servep2p.com", + "servesarcasm.com", + "stufftoread.com", + "ufcfan.org", + "unusualperson.com", + "workisboring.com", + "3utilities.com", + "bounceme.net", + "ddns.net", + "ddnsking.com", + "gotdns.ch", + "hopto.org", + "myftp.biz", + "myftp.org", + "myvnc.com", + "no-ip.biz", + "no-ip.info", + "no-ip.org", + "noip.me", + "redirectme.net", + "servebeer.com", + "serveblog.net", + "servecounterstrike.com", + "serveftp.com", + "servegame.com", + "servehalflife.com", + "servehttp.com", + "serveirc.com", + "serveminecraft.net", + "servemp3.com", + "servepics.com", + "servequake.com", + "sytes.net", + "webhop.me", + "zapto.org", + "nodum.co", + "nodum.io", "nyc.mn", + "cya.gg", "nid.io", + "opencraft.hosting", "operaunite.com", "outsystemscloud.com", + "ownprovider.com", + "oy.lc", + "pgfog.com", "pagefrontapp.com", "art.pl", "gliwice.pl", @@ -7798,20 +8085,65 @@ var rules = [...]string{ "poznan.pl", "wroc.pl", "zakopane.pl", - "pantheon.io", + "pantheonsite.io", "gotpantheon.com", + "mypep.link", + "on-web.fr", + "*.platform.sh", + "*.platformsh.site", + "xen.prgmr.com", "priv.at", + "protonet.io", + "chirurgiens-dentistes-en-france.fr", "qa2.com", + "dev-myqnapcloud.com", + "alpha-myqnapcloud.com", + "myqnapcloud.com", + "*.quipelements.com", + "vapor.cloud", + "vaporcloud.io", "rackmaze.com", "rackmaze.net", "rhcloud.com", + "hzc.io", + "wellbeingzone.eu", + "ptplus.fit", + "wellbeingzone.co.uk", "sandcats.io", + "logoip.de", + "logoip.com", + "firewall-gateway.com", + "firewall-gateway.de", + "my-gateway.de", + "my-router.de", + "spdns.de", + "spdns.eu", + "firewall-gateway.net", + "my-firewall.org", + "myfirewall.org", + "spdns.org", + "*.sensiosite.cloud", "biz.ua", "co.ua", "pp.ua", + "shiftedit.io", + "myshopblocks.com", + "1kapp.com", + "appchizi.com", + "applinzi.com", "sinaapp.com", "vipsinaapp.com", - "1kapp.com", + "bounty-full.com", + "alpha.bounty-full.com", + "beta.bounty-full.com", + "static.land", + 
"dev.static.land", + "sites.static.land", + "apps.lair.io", + "*.stolos.io", + "spacekit.io", + "stackspace.space", + "storj.farm", "diskstation.me", "dscloud.biz", "dscloud.me", @@ -7825,18 +8157,55 @@ var rules = [...]string{ "i234.me", "myds.me", "synology.me", + "vpnplus.to", + "taifun-dns.de", "gda.pl", "gdansk.pl", "gdynia.pl", "med.pl", "sopot.pl", + "bloxcms.com", + "townnews-staging.com", + "*.transurl.be", + "*.transurl.eu", + "*.transurl.nl", + "tuxfamily.org", + "dd-dns.de", + "diskstation.eu", + "diskstation.org", + "dray-dns.de", + "draydns.de", + "dyn-vpn.de", + "dynvpn.de", + "mein-vigor.de", + "my-vigor.de", + "my-wan.de", + "syno-ds.de", + "synology-diskstation.de", + "synology-ds.de", + "uber.space", "hk.com", "hk.org", "ltd.hk", "inc.hk", + "lib.de.us", + "router.management", + "wedeploy.io", + "wedeploy.me", + "remotewd.com", + "wmflabs.org", + "xs4all.space", "yolasite.com", + "ybo.faith", + "yombo.me", + "homelink.one", + "ybo.party", + "ybo.review", + "ybo.science", + "ybo.trade", "za.net", "za.org", + "now.sh", } var nodeLabels = [...]string{ @@ -7870,7 +8239,6 @@ var nodeLabels = [...]string{ "afamilycompany", "afl", "africa", - "africamagic", "ag", "agakhan", "agency", @@ -7914,6 +8282,7 @@ var nodeLabels = [...]string{ "archi", "army", "arpa", + "art", "arte", "as", "asda", @@ -8040,6 +8409,7 @@ var nodeLabels = [...]string{ "cal", "call", "calvinklein", + "cam", "camera", "camp", "cancerresearch", @@ -8161,6 +8531,7 @@ var nodeLabels = [...]string{ "dabur", "dad", "dance", + "data", "date", "dating", "datsun", @@ -8198,15 +8569,14 @@ var nodeLabels = [...]string{ "dnp", "do", "docs", + "doctor", "dodge", "dog", "doha", "domains", - "doosan", "dot", "download", "drive", - "dstv", "dtv", "dubai", "duck", @@ -8215,11 +8585,12 @@ var nodeLabels = [...]string{ "dupont", "durban", "dvag", - "dwg", + "dvr", "dz", "earth", "eat", "ec", + "eco", "edeka", "edu", "education", @@ -8227,7 +8598,6 @@ var nodeLabels = [...]string{ "eg", "email", 
"emerck", - "emerson", "energy", "engineer", "engineering", @@ -8291,11 +8661,11 @@ var nodeLabels = [...]string{ "flir", "florist", "flowers", - "flsmidth", "fly", "fm", "fo", "foo", + "food", "foodnetwork", "football", "ford", @@ -8314,6 +8684,7 @@ var nodeLabels = [...]string{ "ftr", "fujitsu", "fujixerox", + "fun", "fund", "furniture", "futbol", @@ -8353,6 +8724,7 @@ var nodeLabels = [...]string{ "globo", "gm", "gmail", + "gmbh", "gmo", "gmx", "gn", @@ -8367,7 +8739,6 @@ var nodeLabels = [...]string{ "google", "gop", "got", - "gotv", "gov", "gp", "gq", @@ -8377,6 +8748,7 @@ var nodeLabels = [...]string{ "gratis", "green", "gripe", + "grocery", "group", "gs", "gt", @@ -8421,10 +8793,12 @@ var nodeLabels = [...]string{ "honda", "honeywell", "horse", + "hospital", "host", "hosting", "hot", "hoteles", + "hotels", "hotmail", "house", "how", @@ -8444,7 +8818,6 @@ var nodeLabels = [...]string{ "ie", "ieee", "ifm", - "iinet", "ikano", "il", "im", @@ -8535,7 +8908,6 @@ var nodeLabels = [...]string{ "kuokgroup", "kw", "ky", - "kyknet", "kyoto", "kz", "la", @@ -8617,6 +8989,7 @@ var nodeLabels = [...]string{ "man", "management", "mango", + "map", "market", "marketing", "markets", @@ -8640,6 +9013,7 @@ var nodeLabels = [...]string{ "men", "menu", "meo", + "merckmsd", "metlife", "mg", "mh", @@ -8657,9 +9031,9 @@ var nodeLabels = [...]string{ "mm", "mma", "mn", - "mnet", "mo", "mobi", + "mobile", "mobily", "moda", "moe", @@ -8688,22 +9062,18 @@ var nodeLabels = [...]string{ "mtpc", "mtr", "mu", - "multichoice", "museum", "mutual", - "mutuelle", "mv", "mw", "mx", "my", "mz", - "mzansimagic", "na", "nab", "nadex", "nagoya", "name", - "naspers", "nationwide", "natura", "navy", @@ -8763,6 +9133,7 @@ var nodeLabels = [...]string{ "omega", "one", "ong", + "onion", "onl", "online", "onyourside", @@ -8772,7 +9143,6 @@ var nodeLabels = [...]string{ "orange", "org", "organic", - "orientexpress", "origins", "osaka", "otsuka", @@ -8790,7 +9160,6 @@ var nodeLabels = [...]string{ "party", 
"passagens", "pay", - "payu", "pccw", "pe", "pet", @@ -8799,7 +9168,9 @@ var nodeLabels = [...]string{ "pg", "ph", "pharmacy", + "phd", "philips", + "phone", "photo", "photography", "photos", @@ -8857,6 +9228,7 @@ var nodeLabels = [...]string{ "quest", "qvc", "racing", + "radio", "raid", "re", "read", @@ -8900,6 +9272,7 @@ var nodeLabels = [...]string{ "rs", "rsvp", "ru", + "rugby", "ruhr", "run", "rw", @@ -8941,6 +9314,7 @@ var nodeLabels = [...]string{ "scot", "sd", "se", + "search", "seat", "secure", "security", @@ -8963,6 +9337,8 @@ var nodeLabels = [...]string{ "shia", "shiksha", "shoes", + "shop", + "shopping", "shouji", "show", "showtime", @@ -9016,12 +9392,12 @@ var nodeLabels = [...]string{ "stockholm", "storage", "store", + "stream", "studio", "study", "style", "su", "sucks", - "supersport", "supplies", "supply", "support", @@ -9067,7 +9443,6 @@ var nodeLabels = [...]string{ "thd", "theater", "theatre", - "theguardian", "tiaa", "tickets", "tienda", @@ -9221,7 +9596,6 @@ var nodeLabels = [...]string{ "xn--45brj9c", "xn--45q11c", "xn--4gbrim", - "xn--4gq48lf9j", "xn--54b7fta0cc", "xn--55qw42g", "xn--55qx5d", @@ -9252,6 +9626,7 @@ var nodeLabels = [...]string{ "xn--czru2d", "xn--d1acj3b", "xn--d1alf", + "xn--e1a4c", "xn--eckvdtc9d", "xn--efvy88h", "xn--estv75g", @@ -9519,6 +9894,7 @@ var nodeLabels = [...]string{ "gov", "int", "mil", + "musica", "net", "org", "tur", @@ -9530,14 +9906,20 @@ var nodeLabels = [...]string{ "uri", "urn", "gov", + "cloudns", "ac", "biz", "co", + "futurehosting", + "futuremailing", "gv", "info", "or", + "ortsinfo", "priv", "blogspot", + "ex", + "kunden", "act", "asn", "com", @@ -9584,16 +9966,12 @@ var nodeLabels = [...]string{ "pp", "pro", "blogspot", - "co", "com", "edu", "gov", "mil", "net", "org", - "rs", - "unbi", - "unsa", "biz", "co", "com", @@ -9606,6 +9984,7 @@ var nodeLabels = [...]string{ "tv", "ac", "blogspot", + "transurl", "gov", "0", "1", @@ -9619,6 +9998,7 @@ var nodeLabels = [...]string{ "9", "a", "b", + "barsy", 
"blogspot", "c", "d", @@ -9654,12 +10034,16 @@ var nodeLabels = [...]string{ "edu", "or", "org", + "cloudns", "dscloud", "dyndns", "for-better", "for-more", "for-some", "for-the", + "mmafan", + "myftp", + "no-ip", "selfip", "webhop", "asso", @@ -9688,6 +10072,7 @@ var nodeLabels = [...]string{ "art", "ato", "b", + "belem", "bio", "blog", "bmd", @@ -9696,6 +10081,8 @@ var nodeLabels = [...]string{ "cnt", "com", "coop", + "cri", + "def", "ecn", "eco", "edu", @@ -9706,6 +10093,7 @@ var nodeLabels = [...]string{ "eti", "far", "flog", + "floripa", "fm", "fnd", "fot", @@ -9716,6 +10104,7 @@ var nodeLabels = [...]string{ "imb", "ind", "inf", + "jampa", "jor", "jus", "leg", @@ -9731,6 +10120,7 @@ var nodeLabels = [...]string{ "ntr", "odo", "org", + "poa", "ppg", "pro", "psc", @@ -9738,6 +10128,7 @@ var nodeLabels = [...]string{ "qsl", "radio", "rec", + "recife", "slg", "srv", "taxi", @@ -9747,10 +10138,65 @@ var nodeLabels = [...]string{ "tur", "tv", "vet", + "vix", "vlog", "wiki", "zlg", "blogspot", + "ac", + "al", + "am", + "ap", + "ba", + "ce", + "df", + "es", + "go", + "ma", + "mg", + "ms", + "mt", + "pa", + "pb", + "pe", + "pi", + "pr", + "rj", + "rn", + "ro", + "rr", + "rs", + "sc", + "se", + "sp", + "to", + "ac", + "al", + "am", + "ap", + "ba", + "ce", + "df", + "es", + "go", + "ma", + "mg", + "ms", + "mt", + "pa", + "pb", + "pe", + "pi", + "pr", + "rj", + "rn", + "ro", + "rr", + "rs", + "sc", + "se", + "sp", + "to", "com", "edu", "gov", @@ -9775,6 +10221,7 @@ var nodeLabels = [...]string{ "org", "za", "ab", + "awdev", "bc", "blogspot", "co", @@ -9783,6 +10230,7 @@ var nodeLabels = [...]string{ "nb", "nf", "nl", + "no-ip", "ns", "nt", "nu", @@ -9791,13 +10239,18 @@ var nodeLabels = [...]string{ "qc", "sk", "yk", + "cloudns", + "fantasyleague", "ftpaccess", "game-server", "myphotos", "scrapping", + "twmail", "gov", "blogspot", "blogspot", + "gotdns", + "square7", "ac", "asso", "co", @@ -9819,13 +10272,18 @@ var nodeLabels = [...]string{ "gob", "gov", "mil", + 
"magentosite", + "myfusion", + "sensiosite", + "statics", + "vapor", + "cloudns", "co", "com", "gov", "net", "ac", "ah", - "amazonaws", "bj", "com", "cq", @@ -9868,10 +10326,11 @@ var nodeLabels = [...]string{ "xz", "yn", "zj", - "compute", - "cn-north-1", "amazonaws", "cn-north-1", + "compute", + "elb", + "elasticbeanstalk", "s3", "arts", "com", @@ -9882,30 +10341,47 @@ var nodeLabels = [...]string{ "int", "mil", "net", + "nodum", "nom", "org", "rec", "web", "blogspot", + "0emm", "1kapp", + "3utilities", "4u", "africa", + "alpha-myqnapcloud", "amazonaws", + "appchizi", + "applinzi", "appspot", "ar", + "barsyonline", "betainabox", "blogdns", "blogspot", + "blogsyte", + "bloxcms", + "bounty-full", + "bplaced", "br", "cechire", + "ciscofreak", "cloudcontrolapp", "cloudcontrolled", "cn", "co", "codespot", + "damnserver", + "ddnsking", "de", + "dev-myqnapcloud", + "ditchyourip", "dnsalias", "dnsdojo", + "dnsiskinky", "doesntexist", "dontexist", "doomdns", @@ -9927,15 +10403,21 @@ var nodeLabels = [...]string{ "dyndns-web", "dyndns-wiki", "dyndns-work", + "dynns", "elasticbeanstalk", "est-a-la-maison", "est-a-la-masion", "est-le-patron", "est-mon-blogueur", "eu", + "evennode", "familyds", + "fbsbx", "firebaseapp", + "firewall-gateway", "flynnhub", + "freebox-os", + "freeboxos", "from-ak", "from-al", "from-ar", @@ -9984,18 +10466,24 @@ var nodeLabels = [...]string{ "from-wv", "from-wy", "gb", + "geekgalaxy", "getmyip", + "githubcloud", + "githubcloudusercontent", "githubusercontent", "googleapis", "googlecode", "gotdns", "gotpantheon", "gr", + "health-carereform", "herokuapp", "herokussl", "hk", "hobby-site", "homelinux", + "homesecuritymac", + "homesecuritypc", "homeunix", "hu", "iamallama", @@ -10057,74 +10545,175 @@ var nodeLabels = [...]string{ "isa-geek", "isa-hockeynut", "issmarterthanyou", + "joyent", "jpn", "kr", "likes-pie", "likescandy", + "logoip", + "meteorapp", "mex", + "myactivedirectory", + "myasustor", + "mydrobo", + "myqnapcloud", + "mysecuritycamera", + 
"myshopblocks", + "myvnc", "neat-url", + "net-freaks", "nfshost", "no", + "on-aptible", + "onthewifi", "operaunite", "outsystemscloud", + "ownprovider", "pagefrontapp", "pagespeedmobilizer", + "pgfog", + "point2this", + "prgmr", + "publishproxy", "qa2", "qc", + "quicksytes", + "quipelements", "rackmaze", + "remotewd", "rhcloud", - "ro", "ru", "sa", "saves-the-whales", "se", + "securitytactics", "selfip", "sells-for-less", "sells-for-u", "servebbs", + "servebeer", + "servecounterstrike", + "serveexchange", + "serveftp", + "servegame", + "servehalflife", + "servehttp", + "servehumour", + "serveirc", + "servemp3", + "servep2p", + "servepics", + "servequake", + "servesarcasm", "simple-url", "sinaapp", "space-to-rent", + "stufftoread", "teaches-yoga", + "townnews-staging", "uk", + "unusualperson", "us", "uy", "vipsinaapp", "withgoogle", "withyoutube", + "workisboring", "writesthisblog", + "xenapponazure", "yolasite", "za", + "ap-northeast-1", + "ap-northeast-2", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", "compute", "compute-1", "elb", "eu-central-1", + "eu-west-1", + "eu-west-2", "s3", "s3-ap-northeast-1", + "s3-ap-northeast-2", + "s3-ap-south-1", "s3-ap-southeast-1", "s3-ap-southeast-2", + "s3-ca-central-1", "s3-eu-central-1", "s3-eu-west-1", + "s3-eu-west-2", "s3-external-1", - "s3-external-2", "s3-fips-us-gov-west-1", "s3-sa-east-1", + "s3-us-east-2", "s3-us-gov-west-1", "s3-us-west-1", "s3-us-west-2", - "us-east-1", - "ap-northeast-1", - "ap-southeast-1", - "ap-southeast-2", - "eu-central-1", - "eu-west-1", + "s3-website-ap-northeast-1", + "s3-website-ap-southeast-1", + "s3-website-ap-southeast-2", + "s3-website-eu-west-1", + "s3-website-sa-east-1", + "s3-website-us-east-1", + "s3-website-us-west-1", + "s3-website-us-west-2", "sa-east-1", - "us-gov-west-1", - "us-west-1", - "us-west-2", - "z-1", - "z-2", + "us-east-1", + "us-east-2", + "dualstack", "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + 
"s3-website", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "alpha", + "beta", + "eu-1", + "eu-2", + "eu-3", + "us-1", + "us-2", + "us-3", + "apps", + "api", + "ext", + "gist", + "cns", + "eu", + "xen", "ac", "co", "ed", @@ -10160,15 +10749,60 @@ var nodeLabels = [...]string{ "tm", "blogspot", "blogspot", + "co", + "e4", + "realm", + "barsy", "blogspot", + "bplaced", "com", + "cosidns", + "dd-dns", + "ddnss", + "dnshome", + "dnsupdater", + "dray-dns", + "draydns", + "dyn-ip24", + "dyn-vpn", + "dynamisches-dns", + "dyndns1", + "dynvpn", + "firewall-gateway", "fuettertdasnetz", + "goip", + "home-webserver", + "internet-dns", "isteingeek", "istmein", + "keymachine", + "l-o-g-i-n", "lebtimnetz", "leitungsen", + "logoip", + "mein-vigor", + "my-gateway", + "my-router", + "my-vigor", + "my-wan", + "myhome-server", + "spdns", + "square7", + "syno-ds", + "synology-diskstation", + "synology-ds", + "taifun-dns", "traeumtgerade", + "dyn", + "dyn", + "dyndns", + "dyn", + "biz", "blogspot", + "co", + "firm", + "reg", + "store", "com", "edu", "gov", @@ -10231,6 +10865,7 @@ var nodeLabels = [...]string{ "nom", "org", "blogspot", + "compute", "biz", "com", "edu", @@ -10239,9 +10874,22 @@ var nodeLabels = [...]string{ "name", "net", "org", + "barsy", + "cloudns", + "diskstation", + "mycd", + "spdns", + "transurl", + "wellbeingzone", + "party", + "user", + "ybo", + "storj", "aland", "blogspot", + "dy", "iki", + "ptplus", "aeroport", "assedic", "asso", @@ -10251,8 +10899,13 @@ var nodeLabels = [...]string{ "cci", "chambagri", "chirurgiens-dentistes", + "chirurgiens-dentistes-en-france", "com", "experts-comptables", + "fbx-os", + "fbxos", + "freebox-os", + "freeboxos", "geometre-expert", "gouv", "greta", @@ -10260,6 
+10913,7 @@ var nodeLabels = [...]string{ "medecin", "nom", "notaires", + "on-web", "pharmacien", "port", "prd", @@ -10274,6 +10928,7 @@ var nodeLabels = [...]string{ "org", "pvt", "co", + "cya", "net", "org", "com", @@ -10298,6 +10953,7 @@ var nodeLabels = [...]string{ "gov", "net", "org", + "cloud", "asso", "com", "edu", @@ -10353,6 +11009,7 @@ var nodeLabels = [...]string{ "mil", "net", "org", + "opencraft", "blogspot", "com", "from", @@ -10435,12 +11092,15 @@ var nodeLabels = [...]string{ "com", "net", "org", + "ro", "tt", "tv", "ltd", "plc", "ac", + "barsy", "blogspot", + "cloudns", "co", "edu", "firm", @@ -10454,21 +11114,47 @@ var nodeLabels = [...]string{ "res", "barrel-of-knowledge", "barrell-of-knowledge", + "cloudns", + "dvrcam", + "dynamic-dns", "dyndns", "for-our", "groks-the", "groks-this", "here-for-more", + "ilovecollege", "knowsitall", + "no-ip", + "nsupdate", "selfip", "webhop", "eu", + "backplaneapp", + "boxfuse", + "browsersafetymark", "com", + "dedyn", + "definima", + "drud", + "enonic", "github", + "gitlab", + "hasura-app", + "hzc", + "lair", "ngrok", "nid", - "pantheon", + "nodum", + "pantheonsite", + "protonet", "sandcats", + "shiftedit", + "spacekit", + "stolos", + "vaporcloud", + "wedeploy", + "customer", + "apps", "com", "edu", "gov", @@ -11789,11 +12475,8 @@ var nodeLabels = [...]string{ "aso", "choyo", "gyokuto", - "hitoyoshi", "kamiamakusa", - "kashima", "kikuchi", - "kosa", "kumamoto", "mashiki", "mifune", @@ -11878,7 +12561,6 @@ var nodeLabels = [...]string{ "kakuda", "kami", "kawasaki", - "kesennuma", "marumori", "matsushima", "minamisanriku", @@ -12739,6 +13421,8 @@ var nodeLabels = [...]string{ "sc", "seoul", "ulsan", + "co", + "edu", "com", "edu", "gov", @@ -12750,6 +13434,7 @@ var nodeLabels = [...]string{ "mil", "net", "org", + "bnr", "c", "com", "edu", @@ -12759,6 +13444,9 @@ var nodeLabels = [...]string{ "net", "org", "per", + "static", + "dev", + "sites", "com", "edu", "gov", @@ -12770,7 +13458,10 @@ var nodeLabels = 
[...]string{ "gov", "net", "org", + "oy", "blogspot", + "cyon", + "mypep", "ac", "assn", "com", @@ -12820,22 +13511,36 @@ var nodeLabels = [...]string{ "net", "org", "press", + "router", "asso", "tm", "blogspot", "ac", + "brasilia", + "c66", "co", + "daplie", + "ddns", "diskstation", + "dnsfor", "dscloud", "edu", + "filegear", "gov", + "hopto", "i234", "its", + "loginto", "myds", "net", + "noip", "org", "priv", "synology", + "webhop", + "wedeploy", + "yombo", + "localhost", "co", "com", "edu", @@ -13476,7 +14181,14 @@ var nodeLabels = [...]string{ "name", "net", "org", - "teledata", + "ac", + "adv", + "co", + "edu", + "gov", + "mil", + "net", + "org", "ca", "cc", "co", @@ -13499,10 +14211,15 @@ var nodeLabels = [...]string{ "forgot", "forgot", "asso", + "nom", + "alwaysdata", "at-band-camp", "azure-mobile", "azurewebsites", + "barsy", "blogdns", + "bounceme", + "bplaced", "broke-it", "buyshouses", "cdn77", @@ -13510,6 +14227,9 @@ var nodeLabels = [...]string{ "cloudapp", "cloudfront", "cloudfunctions", + "cryptonomic", + "ddns", + "definima", "dnsalias", "dnsdojo", "does-it", @@ -13517,9 +14237,14 @@ var nodeLabels = [...]string{ "dsmynas", "dynalias", "dynathome", + "dynv6", + "eating-organic", "endofinternet", "familyds", "fastly", + "fastlylb", + "feste-ip", + "firewall-gateway", "from-az", "from-co", "from-la", @@ -13534,25 +14259,49 @@ var nodeLabels = [...]string{ "hu", "in", "in-the-band", + "ipifony", "is-a-chef", "is-a-geek", "isa-geek", "jp", "kicks-ass", + "knx-server", + "moonscale", + "mydissent", + "myeffect", + "myfritz", + "mymediapc", + "mypsx", + "mysecuritycamera", + "nhlfan", + "no-ip", "office-on-the", + "pgafan", "podzone", + "privatizehealthinsurance", "rackmaze", + "redirectme", + "ru", "scrapper-site", "se", "selfip", "sells-it", "servebbs", + "serveblog", "serveftp", + "serveminecraft", + "square7", + "static-access", + "sytes", + "t3l3p0rt", "thruhere", + "twmail", "uk", "webhop", "za", "r", + "freetls", + "map", "prod", "ssl", "a", @@ 
-13560,6 +14309,8 @@ var nodeLabels = [...]string{ "a", "b", "global", + "map", + "alces", "arts", "com", "firm", @@ -13573,6 +14324,7 @@ var nodeLabels = [...]string{ "com", "edu", "gov", + "i", "mil", "mobi", "name", @@ -13597,6 +14349,8 @@ var nodeLabels = [...]string{ "blogspot", "bv", "co", + "transurl", + "virtueeldomein", "aa", "aarborte", "aejrie", @@ -14399,13 +15153,23 @@ var nodeLabels = [...]string{ "net", "org", "pro", + "homelink", + "barsy", "ae", + "amune", "blogdns", "blogsite", "bmoattachments", "boldlygoingnowhere", + "cable-modem", "cdn77", "cdn77-secure", + "certmgr", + "cloudns", + "collegefan", + "couchpotatofries", + "ddnss", + "diskstation", "dnsalias", "dnsdojo", "doesntexist", @@ -14420,15 +15184,20 @@ var nodeLabels = [...]string{ "endoftheinternet", "eu", "familyds", + "fedorainfracloud", + "fedorapeople", + "fedoraproject", "from-me", "game-host", "gotdns", + "hepforge", "hk", "hobby-site", "homedns", "homeftp", "homelinux", "homeunix", + "hopto", "is-a-bruinsfan", "is-a-candidate", "is-a-celticsfan", @@ -14447,19 +15216,40 @@ var nodeLabels = [...]string{ "is-very-nice", "is-very-sweet", "isa-geek", + "js", "kicks-ass", "misconfused", + "mlbfan", + "my-firewall", + "myfirewall", + "myftp", + "mysecuritycamera", + "nflfan", + "no-ip", + "pimienta", "podzone", + "poivron", + "potager", + "read-books", "readmyblog", "selfip", "sellsyourhome", "servebbs", "serveftp", "servegame", + "spdns", "stuff-4-sale", + "sweetpepper", + "tunk", + "tuxfamily", + "twmail", + "ufcfan", "us", "webhop", + "wmflabs", "za", + "zapto", + "tele", "c", "rsc", "origin", @@ -14521,6 +15311,8 @@ var nodeLabels = [...]string{ "tr", "uk", "us", + "cloud", + "nerdpol", "abo", "ac", "com", @@ -14532,6 +15324,7 @@ var nodeLabels = [...]string{ "nom", "org", "sld", + "ybo", "blogspot", "com", "edu", @@ -14573,6 +15366,7 @@ var nodeLabels = [...]string{ "auto", "babia-gora", "bedzin", + "beep", "beskidy", "bialowieza", "bialystok", @@ -14795,13 +15589,18 @@ var 
nodeLabels = [...]string{ "org", "pro", "prof", + "aaa", "aca", + "acct", + "avocat", "bar", + "cloudns", "cpa", "eng", "jur", "law", "med", + "recht", "com", "edu", "gov", @@ -14819,6 +15618,7 @@ var nodeLabels = [...]string{ "org", "publ", "belau", + "cloudns", "co", "ed", "go", @@ -14844,6 +15644,7 @@ var nodeLabels = [...]string{ "blogspot", "com", "nom", + "ybo", "arts", "blogspot", "com", @@ -14853,6 +15654,7 @@ var nodeLabels = [...]string{ "nt", "org", "rec", + "shop", "store", "tm", "www", @@ -14865,135 +15667,32 @@ var nodeLabels = [...]string{ "org", "ac", "adygeya", - "altai", - "amur", - "amursk", - "arkhangelsk", - "astrakhan", - "baikal", "bashkiria", - "belgorod", "bir", "blogspot", - "bryansk", - "buryatia", "cbg", - "chel", - "chelyabinsk", - "chita", - "chukotka", - "chuvashia", - "cmw", + "cldmail", "com", "dagestan", - "dudinka", - "e-burg", "edu", - "fareast", "gov", "grozny", "int", - "irkutsk", - "ivanovo", - "izhevsk", - "jamal", - "jar", - "joshkar-ola", - "k-uralsk", "kalmykia", - "kaluga", - "kamchatka", - "karelia", - "kazan", - "kchr", - "kemerovo", - "khabarovsk", - "khakassia", - "khv", - "kirov", - "kms", - "koenig", - "komi", - "kostroma", - "krasnoyarsk", - "kuban", - "kurgan", - "kursk", "kustanai", - "kuzbass", - "lipetsk", - "magadan", - "mari", - "mari-el", "marine", "mil", "mordovia", "msk", - "murmansk", "mytis", - "nakhodka", "nalchik", - "net", - "nkz", - "nnov", - "norilsk", "nov", - "novosibirsk", - "nsk", - "omsk", - "orenburg", - "org", - "oryol", - "oskol", - "palana", - "penza", - "perm", - "pp", - "ptz", "pyatigorsk", - "rnd", - "rubtsovsk", - "ryazan", - "sakhalin", - "samara", - "saratov", - "simbirsk", - "smolensk", - "snz", "spb", - "stavropol", - "stv", - "surgut", - "syzran", - "tambov", - "tatarstan", "test", - "tom", - "tomsk", - "tsaritsyn", - "tsk", - "tula", - "tuva", - "tver", - "tyumen", - "udm", - "udmurtia", - "ulan-ude", - "vdonsk", "vladikavkaz", "vladimir", - "vladivostok", - "volgograd", - 
"vologda", - "voronezh", - "vrn", - "vyatka", - "yakutia", - "yamal", - "yaroslavl", - "yekaterinburg", - "yuzhno-sakhalinsk", - "zgrad", + "hb", "ac", "co", "com", @@ -15021,6 +15720,7 @@ var nodeLabels = [...]string{ "gov", "net", "org", + "ybo", "com", "edu", "gov", @@ -15082,9 +15782,12 @@ var nodeLabels = [...]string{ "hashbang", "mil", "net", + "now", "org", "platform", "blogspot", + "cyon", + "platformsh", "blogspot", "com", "edu", @@ -15102,6 +15805,9 @@ var nodeLabels = [...]string{ "com", "net", "org", + "stackspace", + "uber", + "xs4all", "co", "com", "consulado", @@ -15114,38 +15820,59 @@ var nodeLabels = [...]string{ "principe", "saotome", "store", + "abkhazia", "adygeya", + "aktyubinsk", "arkhangelsk", + "armenia", + "ashgabad", + "azerbaijan", "balashov", "bashkiria", "bryansk", + "bukhara", + "chimkent", "dagestan", + "east-kazakhstan", + "exnet", + "georgia", "grozny", "ivanovo", + "jambyl", "kalmykia", "kaluga", + "karacol", + "karaganda", "karelia", "khakassia", "krasnodar", "kurgan", + "kustanai", "lenug", + "mangyshlak", "mordovia", "msk", "murmansk", "nalchik", + "navoi", + "north-kazakhstan", "nov", "obninsk", "penza", "pokrovsk", "sochi", "spb", + "tashkent", + "termez", "togliatti", "troitsk", + "tselinograd", "tula", "tuva", "vladikavkaz", "vladimir", "vologda", + "barsy", "com", "edu", "gob", @@ -15158,6 +15885,7 @@ var nodeLabels = [...]string{ "mil", "net", "org", + "knightpoint", "ac", "co", "org", @@ -15219,6 +15947,7 @@ var nodeLabels = [...]string{ "mil", "net", "org", + "vpnplus", "av", "bbs", "bel", @@ -15242,6 +15971,7 @@ var nodeLabels = [...]string{ "web", "blogspot", "gov", + "ybo", "aero", "biz", "co", @@ -15274,9 +16004,11 @@ var nodeLabels = [...]string{ "mil", "net", "org", + "url", "xn--czrw28b", "xn--uc0atv", "xn--zf0ao64a", + "mymailer", "ac", "co", "go", @@ -15290,6 +16022,7 @@ var nodeLabels = [...]string{ "sc", "tv", "biz", + "cc", "cherkassy", "cherkasy", "chernigov", @@ -15313,6 +16046,7 @@ var nodeLabels = 
[...]string{ "gov", "if", "in", + "inf", "ivano-frankivsk", "kh", "kharkiv", @@ -15330,6 +16064,7 @@ var nodeLabels = [...]string{ "kyiv", "lg", "lt", + "ltd", "lugansk", "lutsk", "lv", @@ -15389,6 +16124,9 @@ var nodeLabels = [...]string{ "police", "sch", "blogspot", + "no-ip", + "wellbeingzone", + "homeoffice", "service", "ak", "al", @@ -15396,14 +16134,17 @@ var nodeLabels = [...]string{ "as", "az", "ca", + "cloudns", "co", "ct", "dc", "de", "dni", + "drud", "fed", "fl", "ga", + "golffan", "gu", "hi", "ia", @@ -15431,6 +16172,7 @@ var nodeLabels = [...]string{ "nh", "nj", "nm", + "noip", "nsn", "nv", "ny", @@ -15438,6 +16180,7 @@ var nodeLabels = [...]string{ "ok", "or", "pa", + "pointto", "pr", "ri", "sc", @@ -15672,6 +16415,7 @@ var nodeLabels = [...]string{ "edu", "net", "org", + "advisor", "com", "dyndns", "edu", @@ -15685,6 +16429,13 @@ var nodeLabels = [...]string{ "xn--d1at", "xn--o1ac", "xn--o1ach", + "xn--12c1fe0br", + "xn--12cfi8ixb8l", + "xn--12co0c3b4eva", + "xn--h3cuzk1di", + "xn--m3ch0j3a", + "xn--o3cyx2a", + "fhapp", "ac", "agric", "alt", @@ -15703,4 +16454,21 @@ var nodeLabels = [...]string{ "tm", "web", "blogspot", + "ac", + "biz", + "co", + "com", + "edu", + "gov", + "info", + "mil", + "net", + "org", + "sch", + "triton", + "ac", + "co", + "gov", + "mil", + "org", } diff --git a/fn/vendor/golang.org/x/net/route/address.go b/fn/vendor/golang.org/x/net/route/address.go new file mode 100644 index 000000000..e6bfa39e9 --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/address.go @@ -0,0 +1,425 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import "runtime" + +// An Addr represents an address associated with packet routing. +type Addr interface { + // Family returns an address family. + Family() int +} + +// A LinkAddr represents a link-layer address. 
+type LinkAddr struct { + Index int // interface index when attached + Name string // interface name when attached + Addr []byte // link-layer address when attached +} + +// Family implements the Family method of Addr interface. +func (a *LinkAddr) Family() int { return sysAF_LINK } + +func (a *LinkAddr) lenAndSpace() (int, int) { + l := 8 + len(a.Name) + len(a.Addr) + return l, roundup(l) +} + +func (a *LinkAddr) marshal(b []byte) (int, error) { + l, ll := a.lenAndSpace() + if len(b) < ll { + return 0, errShortBuffer + } + nlen, alen := len(a.Name), len(a.Addr) + if nlen > 255 || alen > 255 { + return 0, errInvalidAddr + } + b[0] = byte(l) + b[1] = sysAF_LINK + if a.Index > 0 { + nativeEndian.PutUint16(b[2:4], uint16(a.Index)) + } + data := b[8:] + if nlen > 0 { + b[5] = byte(nlen) + copy(data[:nlen], a.Addr) + data = data[nlen:] + } + if alen > 0 { + b[6] = byte(alen) + copy(data[:alen], a.Name) + data = data[alen:] + } + return ll, nil +} + +func parseLinkAddr(b []byte) (Addr, error) { + if len(b) < 8 { + return nil, errInvalidAddr + } + _, a, err := parseKernelLinkAddr(sysAF_LINK, b[4:]) + if err != nil { + return nil, err + } + a.(*LinkAddr).Index = int(nativeEndian.Uint16(b[2:4])) + return a, nil +} + +// parseKernelLinkAddr parses b as a link-layer address in +// conventional BSD kernel form. +func parseKernelLinkAddr(_ int, b []byte) (int, Addr, error) { + // The encoding looks like the following: + // +----------------------------+ + // | Type (1 octet) | + // +----------------------------+ + // | Name length (1 octet) | + // +----------------------------+ + // | Address length (1 octet) | + // +----------------------------+ + // | Selector length (1 octet) | + // +----------------------------+ + // | Data (variable) | + // +----------------------------+ + // + // On some platforms, all-bit-one of length field means "don't + // care". 
+ nlen, alen, slen := int(b[1]), int(b[2]), int(b[3]) + if nlen == 0xff { + nlen = 0 + } + if alen == 0xff { + alen = 0 + } + if slen == 0xff { + slen = 0 + } + l := 4 + nlen + alen + slen + if len(b) < l { + return 0, nil, errInvalidAddr + } + data := b[4:] + var name string + var addr []byte + if nlen > 0 { + name = string(data[:nlen]) + data = data[nlen:] + } + if alen > 0 { + addr = data[:alen] + data = data[alen:] + } + return l, &LinkAddr{Name: name, Addr: addr}, nil +} + +// An Inet4Addr represents an internet address for IPv4. +type Inet4Addr struct { + IP [4]byte // IP address +} + +// Family implements the Family method of Addr interface. +func (a *Inet4Addr) Family() int { return sysAF_INET } + +func (a *Inet4Addr) lenAndSpace() (int, int) { + return sizeofSockaddrInet, roundup(sizeofSockaddrInet) +} + +func (a *Inet4Addr) marshal(b []byte) (int, error) { + l, ll := a.lenAndSpace() + if len(b) < ll { + return 0, errShortBuffer + } + b[0] = byte(l) + b[1] = sysAF_INET + copy(b[4:8], a.IP[:]) + return ll, nil +} + +// An Inet6Addr represents an internet address for IPv6. +type Inet6Addr struct { + IP [16]byte // IP address + ZoneID int // zone identifier +} + +// Family implements the Family method of Addr interface. +func (a *Inet6Addr) Family() int { return sysAF_INET6 } + +func (a *Inet6Addr) lenAndSpace() (int, int) { + return sizeofSockaddrInet6, roundup(sizeofSockaddrInet6) +} + +func (a *Inet6Addr) marshal(b []byte) (int, error) { + l, ll := a.lenAndSpace() + if len(b) < ll { + return 0, errShortBuffer + } + b[0] = byte(l) + b[1] = sysAF_INET6 + copy(b[8:24], a.IP[:]) + if a.ZoneID > 0 { + nativeEndian.PutUint32(b[24:28], uint32(a.ZoneID)) + } + return ll, nil +} + +// parseInetAddr parses b as an internet address for IPv4 or IPv6. 
+func parseInetAddr(af int, b []byte) (Addr, error) { + switch af { + case sysAF_INET: + if len(b) < sizeofSockaddrInet { + return nil, errInvalidAddr + } + a := &Inet4Addr{} + copy(a.IP[:], b[4:8]) + return a, nil + case sysAF_INET6: + if len(b) < sizeofSockaddrInet6 { + return nil, errInvalidAddr + } + a := &Inet6Addr{ZoneID: int(nativeEndian.Uint32(b[24:28]))} + copy(a.IP[:], b[8:24]) + if a.IP[0] == 0xfe && a.IP[1]&0xc0 == 0x80 || a.IP[0] == 0xff && (a.IP[1]&0x0f == 0x01 || a.IP[1]&0x0f == 0x02) { + // KAME based IPv6 protocol stack usually + // embeds the interface index in the + // interface-local or link-local address as + // the kernel-internal form. + id := int(bigEndian.Uint16(a.IP[2:4])) + if id != 0 { + a.ZoneID = id + a.IP[2], a.IP[3] = 0, 0 + } + } + return a, nil + default: + return nil, errInvalidAddr + } +} + +// parseKernelInetAddr parses b as an internet address in conventional +// BSD kernel form. +func parseKernelInetAddr(af int, b []byte) (int, Addr, error) { + // The encoding looks similar to the NLRI encoding. + // +----------------------------+ + // | Length (1 octet) | + // +----------------------------+ + // | Address prefix (variable) | + // +----------------------------+ + // + // The differences between the kernel form and the NLRI + // encoding are: + // + // - The length field of the kernel form indicates the prefix + // length in bytes, not in bits + // + // - In the kernel form, zero value of the length field + // doesn't mean 0.0.0.0/0 or ::/0 + // + // - The kernel form appends leading bytes to the prefix field + // to make the tuple to be conformed with + // the routing message boundary + l := int(b[0]) + if runtime.GOOS == "darwin" { + // On Darwn, an address in the kernel form is also + // used as a message filler. + if l == 0 || len(b) > roundup(l) { + l = roundup(l) + } + } else { + l = roundup(l) + } + if len(b) < l { + return 0, nil, errInvalidAddr + } + // Don't reorder case expressions. 
+ // The case expressions for IPv6 must come first. + const ( + off4 = 4 // offset of in_addr + off6 = 8 // offset of in6_addr + ) + switch { + case b[0] == sizeofSockaddrInet6: + a := &Inet6Addr{} + copy(a.IP[:], b[off6:off6+16]) + return int(b[0]), a, nil + case af == sysAF_INET6: + a := &Inet6Addr{} + if l-1 < off6 { + copy(a.IP[:], b[1:l]) + } else { + copy(a.IP[:], b[l-off6:l]) + } + return int(b[0]), a, nil + case b[0] == sizeofSockaddrInet: + a := &Inet4Addr{} + copy(a.IP[:], b[off4:off4+4]) + return int(b[0]), a, nil + default: // an old fashion, AF_UNSPEC or unknown means AF_INET + a := &Inet4Addr{} + if l-1 < off4 { + copy(a.IP[:], b[1:l]) + } else { + copy(a.IP[:], b[l-off4:l]) + } + return int(b[0]), a, nil + } +} + +// A DefaultAddr represents an address of various operating +// system-specific features. +type DefaultAddr struct { + af int + Raw []byte // raw format of address +} + +// Family implements the Family method of Addr interface. +func (a *DefaultAddr) Family() int { return a.af } + +func (a *DefaultAddr) lenAndSpace() (int, int) { + l := len(a.Raw) + return l, roundup(l) +} + +func (a *DefaultAddr) marshal(b []byte) (int, error) { + l, ll := a.lenAndSpace() + if len(b) < ll { + return 0, errShortBuffer + } + if l > 255 { + return 0, errInvalidAddr + } + b[1] = byte(l) + copy(b[:l], a.Raw) + return ll, nil +} + +func parseDefaultAddr(b []byte) (Addr, error) { + if len(b) < 2 || len(b) < int(b[0]) { + return nil, errInvalidAddr + } + a := &DefaultAddr{af: int(b[1]), Raw: b[:b[0]]} + return a, nil +} + +func addrsSpace(as []Addr) int { + var l int + for _, a := range as { + switch a := a.(type) { + case *LinkAddr: + _, ll := a.lenAndSpace() + l += ll + case *Inet4Addr: + _, ll := a.lenAndSpace() + l += ll + case *Inet6Addr: + _, ll := a.lenAndSpace() + l += ll + case *DefaultAddr: + _, ll := a.lenAndSpace() + l += ll + } + } + return l +} + +// marshalAddrs marshals as and returns a bitmap indicating which +// address is stored in b. 
+func marshalAddrs(b []byte, as []Addr) (uint, error) { + var attrs uint + for i, a := range as { + switch a := a.(type) { + case *LinkAddr: + l, err := a.marshal(b) + if err != nil { + return 0, err + } + b = b[l:] + attrs |= 1 << uint(i) + case *Inet4Addr: + l, err := a.marshal(b) + if err != nil { + return 0, err + } + b = b[l:] + attrs |= 1 << uint(i) + case *Inet6Addr: + l, err := a.marshal(b) + if err != nil { + return 0, err + } + b = b[l:] + attrs |= 1 << uint(i) + case *DefaultAddr: + l, err := a.marshal(b) + if err != nil { + return 0, err + } + b = b[l:] + attrs |= 1 << uint(i) + } + } + return attrs, nil +} + +func parseAddrs(attrs uint, fn func(int, []byte) (int, Addr, error), b []byte) ([]Addr, error) { + var as [sysRTAX_MAX]Addr + af := int(sysAF_UNSPEC) + for i := uint(0); i < sysRTAX_MAX && len(b) >= roundup(0); i++ { + if attrs&(1<> 8) +} + +func (binaryLittleEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func (binaryLittleEndian) PutUint32(b []byte, v uint32) { + _ = b[3] // early bounds check to guarantee safety of writes below + b[0] = byte(v) + b[1] = byte(v >> 8) + b[2] = byte(v >> 16) + b[3] = byte(v >> 24) +} + +func (binaryLittleEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +type binaryBigEndian struct{} + +func (binaryBigEndian) Uint16(b []byte) uint16 { + _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 + return uint16(b[1]) | uint16(b[0])<<8 +} + +func (binaryBigEndian) PutUint16(b []byte, v uint16) { + _ = b[1] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 8) + b[1] = byte(v) +} + +func (binaryBigEndian) Uint32(b []byte) uint32 
{ + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 +} + +func (binaryBigEndian) PutUint32(b []byte, v uint32) { + _ = b[3] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 24) + b[1] = byte(v >> 16) + b[2] = byte(v >> 8) + b[3] = byte(v) +} + +func (binaryBigEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} diff --git a/fn/vendor/golang.org/x/net/route/defs_darwin.go b/fn/vendor/golang.org/x/net/route/defs_darwin.go new file mode 100644 index 000000000..e7716442d --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/defs_darwin.go @@ -0,0 +1,114 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_STAT = C.NET_RT_STAT + sysNET_RT_TRASH = C.NET_RT_TRASH + sysNET_RT_IFLIST2 = C.NET_RT_IFLIST2 + sysNET_RT_DUMP2 = C.NET_RT_DUMP2 + sysNET_RT_MAXID = C.NET_RT_MAXID +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_VFS = C.CTL_VFS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_USER = C.CTL_USER + sysCTL_MAXID = C.CTL_MAXID +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_OLDADD = C.RTM_OLDADD + sysRTM_OLDDEL = C.RTM_OLDDEL + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_NEWMADDR = C.RTM_NEWMADDR + sysRTM_DELMADDR = C.RTM_DELMADDR + sysRTM_IFINFO2 = C.RTM_IFINFO2 + sysRTM_NEWMADDR2 = C.RTM_NEWMADDR2 + sysRTM_GET2 = C.RTM_GET2 + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + 
sysRTAX_BRD = C.RTAX_BRD + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofIfMsghdrDarwin15 = C.sizeof_struct_if_msghdr + sizeofIfaMsghdrDarwin15 = C.sizeof_struct_ifa_msghdr + sizeofIfmaMsghdrDarwin15 = C.sizeof_struct_ifma_msghdr + sizeofIfMsghdr2Darwin15 = C.sizeof_struct_if_msghdr2 + sizeofIfmaMsghdr2Darwin15 = C.sizeof_struct_ifma_msghdr2 + sizeofIfDataDarwin15 = C.sizeof_struct_if_data + sizeofIfData64Darwin15 = C.sizeof_struct_if_data64 + + sizeofRtMsghdrDarwin15 = C.sizeof_struct_rt_msghdr + sizeofRtMsghdr2Darwin15 = C.sizeof_struct_rt_msghdr2 + sizeofRtMetricsDarwin15 = C.sizeof_struct_rt_metrics + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/fn/vendor/golang.org/x/net/route/defs_dragonfly.go b/fn/vendor/golang.org/x/net/route/defs_dragonfly.go new file mode 100644 index 000000000..dd31de269 --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/defs_dragonfly.go @@ -0,0 +1,113 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_MAXID = C.NET_RT_MAXID +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_VFS = C.CTL_VFS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_USER = C.CTL_USER + sysCTL_P1003_1B = C.CTL_P1003_1B + sysCTL_LWKT = C.CTL_LWKT + sysCTL_MAXID = C.CTL_MAXID +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_OLDADD = C.RTM_OLDADD + sysRTM_OLDDEL = C.RTM_OLDDEL + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_NEWMADDR = C.RTM_NEWMADDR + sysRTM_DELMADDR = C.RTM_DELMADDR + sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE + sysRTM_IEEE80211 = C.RTM_IEEE80211 + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + sysRTA_MPLS1 = C.RTA_MPLS1 + sysRTA_MPLS2 = C.RTA_MPLS2 + sysRTA_MPLS3 = C.RTA_MPLS3 + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = 
C.RTAX_BRD + sysRTAX_MPLS1 = C.RTAX_MPLS1 + sysRTAX_MPLS2 = C.RTAX_MPLS2 + sysRTAX_MPLS3 = C.RTAX_MPLS3 + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofIfMsghdrDragonFlyBSD4 = C.sizeof_struct_if_msghdr + sizeofIfaMsghdrDragonFlyBSD4 = C.sizeof_struct_ifa_msghdr + sizeofIfmaMsghdrDragonFlyBSD4 = C.sizeof_struct_ifma_msghdr + sizeofIfAnnouncemsghdrDragonFlyBSD4 = C.sizeof_struct_if_announcemsghdr + + sizeofRtMsghdrDragonFlyBSD4 = C.sizeof_struct_rt_msghdr + sizeofRtMetricsDragonFlyBSD4 = C.sizeof_struct_rt_metrics + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/fn/vendor/golang.org/x/net/route/defs_freebsd.go b/fn/vendor/golang.org/x/net/route/defs_freebsd.go new file mode 100644 index 000000000..d95594d8e --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/defs_freebsd.go @@ -0,0 +1,337 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include + +#include + +struct if_data_freebsd7 { + u_char ifi_type; + u_char ifi_physical; + u_char ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_spare_char1; + u_char ifi_spare_char2; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + u_long ifi_hwassist; + time_t __ifi_epoch; + struct timeval __ifi_lastchange; +}; + +struct if_data_freebsd8 { + u_char ifi_type; + u_char ifi_physical; + u_char ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_spare_char1; + u_char ifi_spare_char2; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + u_long ifi_hwassist; + time_t __ifi_epoch; + struct timeval __ifi_lastchange; +}; + +struct if_data_freebsd9 { + u_char ifi_type; + u_char ifi_physical; + u_char ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_spare_char1; + u_char ifi_spare_char2; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + u_long ifi_hwassist; + time_t __ifi_epoch; + struct timeval __ifi_lastchange; +}; + +struct if_data_freebsd10 { + u_char ifi_type; + u_char ifi_physical; + u_char 
ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_vhid; + u_char ifi_baudrate_pf; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + uint64_t ifi_hwassist; + time_t __ifi_epoch; + struct timeval __ifi_lastchange; +}; + +struct if_data_freebsd11 { + uint8_t ifi_type; + uint8_t ifi_physical; + uint8_t ifi_addrlen; + uint8_t ifi_hdrlen; + uint8_t ifi_link_state; + uint8_t ifi_vhid; + uint16_t ifi_datalen; + uint32_t ifi_mtu; + uint32_t ifi_metric; + uint64_t ifi_baudrate; + uint64_t ifi_ipackets; + uint64_t ifi_ierrors; + uint64_t ifi_opackets; + uint64_t ifi_oerrors; + uint64_t ifi_collisions; + uint64_t ifi_ibytes; + uint64_t ifi_obytes; + uint64_t ifi_imcasts; + uint64_t ifi_omcasts; + uint64_t ifi_iqdrops; + uint64_t ifi_oqdrops; + uint64_t ifi_noproto; + uint64_t ifi_hwassist; + union { + time_t tt; + uint64_t ph; + } __ifi_epoch; + union { + struct timeval tv; + struct { + uint64_t ph1; + uint64_t ph2; + } ph; + } __ifi_lastchange; +}; + +struct if_msghdr_freebsd7 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd7 ifm_data; +}; + +struct if_msghdr_freebsd8 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd8 ifm_data; +}; + +struct if_msghdr_freebsd9 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd9 ifm_data; +}; + +struct if_msghdr_freebsd10 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd10 
ifm_data; +}; + +struct if_msghdr_freebsd11 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd11 ifm_data; +}; +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_IFMALIST = C.NET_RT_IFMALIST + sysNET_RT_IFLISTL = C.NET_RT_IFLISTL +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_VFS = C.CTL_VFS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_USER = C.CTL_USER + sysCTL_P1003_1B = C.CTL_P1003_1B +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_NEWMADDR = C.RTM_NEWMADDR + sysRTM_DELMADDR = C.RTM_DELMADDR + sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE + sysRTM_IEEE80211 = C.RTM_IEEE80211 + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + 
sizeofIfMsghdrlFreeBSD10 = C.sizeof_struct_if_msghdrl + sizeofIfaMsghdrFreeBSD10 = C.sizeof_struct_ifa_msghdr + sizeofIfaMsghdrlFreeBSD10 = C.sizeof_struct_ifa_msghdrl + sizeofIfmaMsghdrFreeBSD10 = C.sizeof_struct_ifma_msghdr + sizeofIfAnnouncemsghdrFreeBSD10 = C.sizeof_struct_if_announcemsghdr + + sizeofRtMsghdrFreeBSD10 = C.sizeof_struct_rt_msghdr + sizeofRtMetricsFreeBSD10 = C.sizeof_struct_rt_metrics + + sizeofIfMsghdrFreeBSD7 = C.sizeof_struct_if_msghdr_freebsd7 + sizeofIfMsghdrFreeBSD8 = C.sizeof_struct_if_msghdr_freebsd8 + sizeofIfMsghdrFreeBSD9 = C.sizeof_struct_if_msghdr_freebsd9 + sizeofIfMsghdrFreeBSD10 = C.sizeof_struct_if_msghdr_freebsd10 + sizeofIfMsghdrFreeBSD11 = C.sizeof_struct_if_msghdr_freebsd11 + + sizeofIfDataFreeBSD7 = C.sizeof_struct_if_data_freebsd7 + sizeofIfDataFreeBSD8 = C.sizeof_struct_if_data_freebsd8 + sizeofIfDataFreeBSD9 = C.sizeof_struct_if_data_freebsd9 + sizeofIfDataFreeBSD10 = C.sizeof_struct_if_data_freebsd10 + sizeofIfDataFreeBSD11 = C.sizeof_struct_if_data_freebsd11 + + sizeofIfMsghdrlFreeBSD10Emu = C.sizeof_struct_if_msghdrl + sizeofIfaMsghdrFreeBSD10Emu = C.sizeof_struct_ifa_msghdr + sizeofIfaMsghdrlFreeBSD10Emu = C.sizeof_struct_ifa_msghdrl + sizeofIfmaMsghdrFreeBSD10Emu = C.sizeof_struct_ifma_msghdr + sizeofIfAnnouncemsghdrFreeBSD10Emu = C.sizeof_struct_if_announcemsghdr + + sizeofRtMsghdrFreeBSD10Emu = C.sizeof_struct_rt_msghdr + sizeofRtMetricsFreeBSD10Emu = C.sizeof_struct_rt_metrics + + sizeofIfMsghdrFreeBSD7Emu = C.sizeof_struct_if_msghdr_freebsd7 + sizeofIfMsghdrFreeBSD8Emu = C.sizeof_struct_if_msghdr_freebsd8 + sizeofIfMsghdrFreeBSD9Emu = C.sizeof_struct_if_msghdr_freebsd9 + sizeofIfMsghdrFreeBSD10Emu = C.sizeof_struct_if_msghdr_freebsd10 + sizeofIfMsghdrFreeBSD11Emu = C.sizeof_struct_if_msghdr_freebsd11 + + sizeofIfDataFreeBSD7Emu = C.sizeof_struct_if_data_freebsd7 + sizeofIfDataFreeBSD8Emu = C.sizeof_struct_if_data_freebsd8 + sizeofIfDataFreeBSD9Emu = C.sizeof_struct_if_data_freebsd9 + sizeofIfDataFreeBSD10Emu = 
C.sizeof_struct_if_data_freebsd10 + sizeofIfDataFreeBSD11Emu = C.sizeof_struct_if_data_freebsd11 + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/fn/vendor/golang.org/x/net/route/defs_netbsd.go b/fn/vendor/golang.org/x/net/route/defs_netbsd.go new file mode 100644 index 000000000..b0abd549a --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/defs_netbsd.go @@ -0,0 +1,112 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_MAXID = C.NET_RT_MAXID +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_VFS = C.CTL_VFS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_USER = C.CTL_USER + sysCTL_DDB = C.CTL_DDB + sysCTL_PROC = C.CTL_PROC + sysCTL_VENDOR = C.CTL_VENDOR + sysCTL_EMUL = C.CTL_EMUL + sysCTL_SECURITY = C.CTL_SECURITY + sysCTL_MAXID = C.CTL_MAXID +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_OLDADD = C.RTM_OLDADD + sysRTM_OLDDEL = C.RTM_OLDDEL + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + 
sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE + sysRTM_IEEE80211 = C.RTM_IEEE80211 + sysRTM_SETGATE = C.RTM_SETGATE + sysRTM_LLINFO_UPD = C.RTM_LLINFO_UPD + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_CHGADDR = C.RTM_CHGADDR + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + sysRTA_TAG = C.RTA_TAG + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_TAG = C.RTAX_TAG + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofIfMsghdrNetBSD7 = C.sizeof_struct_if_msghdr + sizeofIfaMsghdrNetBSD7 = C.sizeof_struct_ifa_msghdr + sizeofIfAnnouncemsghdrNetBSD7 = C.sizeof_struct_if_announcemsghdr + + sizeofRtMsghdrNetBSD7 = C.sizeof_struct_rt_msghdr + sizeofRtMetricsNetBSD7 = C.sizeof_struct_rt_metrics + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/fn/vendor/golang.org/x/net/route/defs_openbsd.go b/fn/vendor/golang.org/x/net/route/defs_openbsd.go new file mode 100644 index 000000000..0f66d3619 --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/defs_openbsd.go @@ -0,0 +1,105 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_STATS = C.NET_RT_STATS + sysNET_RT_TABLE = C.NET_RT_TABLE + sysNET_RT_IFNAMES = C.NET_RT_IFNAMES + sysNET_RT_MAXID = C.NET_RT_MAXID +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_FS = C.CTL_FS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_DDB = C.CTL_DDB + sysCTL_VFS = C.CTL_VFS + sysCTL_MAXID = C.CTL_MAXID +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE + sysRTM_DESYNC = C.RTM_DESYNC + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + sysRTA_SRC = C.RTA_SRC + sysRTA_SRCMASK = C.RTA_SRCMASK + sysRTA_LABEL = C.RTA_LABEL + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_SRC = C.RTAX_SRC + sysRTAX_SRCMASK = C.RTAX_SRCMASK 
+ sysRTAX_LABEL = C.RTAX_LABEL + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofRtMsghdr = C.sizeof_struct_rt_msghdr + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/fn/vendor/golang.org/x/net/route/interface.go b/fn/vendor/golang.org/x/net/route/interface.go new file mode 100644 index 000000000..854906d9c --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/interface.go @@ -0,0 +1,64 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +// An InterfaceMessage represents an interface message. +type InterfaceMessage struct { + Version int // message version + Type int // message type + Flags int // interface flags + Index int // interface index + Name string // interface name + Addrs []Addr // addresses + + extOff int // offset of header extension + raw []byte // raw message +} + +// An InterfaceAddrMessage represents an interface address message. +type InterfaceAddrMessage struct { + Version int // message version + Type int // message type + Flags int // interface flags + Index int // interface index + Addrs []Addr // addresses + + raw []byte // raw message +} + +// Sys implements the Sys method of Message interface. +func (m *InterfaceAddrMessage) Sys() []Sys { return nil } + +// An InterfaceMulticastAddrMessage represents an interface multicast +// address message. +type InterfaceMulticastAddrMessage struct { + Version int // message version + Type int // messsage type + Flags int // interface flags + Index int // interface index + Addrs []Addr // addresses + + raw []byte // raw message +} + +// Sys implements the Sys method of Message interface. 
+func (m *InterfaceMulticastAddrMessage) Sys() []Sys { return nil } + +// An InterfaceAnnounceMessage represents an interface announcement +// message. +type InterfaceAnnounceMessage struct { + Version int // message version + Type int // message type + Index int // interface index + Name string // interface name + What int // what type of announcement + + raw []byte // raw message +} + +// Sys implements the Sys method of Message interface. +func (m *InterfaceAnnounceMessage) Sys() []Sys { return nil } diff --git a/fn/vendor/golang.org/x/net/route/interface_announce.go b/fn/vendor/golang.org/x/net/route/interface_announce.go new file mode 100644 index 000000000..520d657b5 --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/interface_announce.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build dragonfly freebsd netbsd + +package route + +func (w *wireFormat) parseInterfaceAnnounceMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceAnnounceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Index: int(nativeEndian.Uint16(b[4:6])), + What: int(nativeEndian.Uint16(b[22:24])), + raw: b[:l], + } + for i := 0; i < 16; i++ { + if b[6+i] != 0 { + continue + } + m.Name = string(b[6 : 6+i]) + break + } + return m, nil +} diff --git a/fn/vendor/golang.org/x/net/route/interface_classic.go b/fn/vendor/golang.org/x/net/route/interface_classic.go new file mode 100644 index 000000000..ac4e7a680 --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/interface_classic.go @@ -0,0 +1,66 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly netbsd + +package route + +import "runtime" + +func (w *wireFormat) parseInterfaceMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + attrs := uint(nativeEndian.Uint32(b[4:8])) + if attrs&sysRTA_IFP == 0 { + return nil, nil + } + m := &InterfaceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Addrs: make([]Addr, sysRTAX_MAX), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + extOff: w.extOff, + raw: b[:l], + } + a, err := parseLinkAddr(b[w.bodyOff:]) + if err != nil { + return nil, err + } + m.Addrs[sysRTAX_IFP] = a + m.Name = a.(*LinkAddr).Name + return m, nil +} + +func (w *wireFormat) parseInterfaceAddrMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceAddrMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + raw: b[:l], + } + if runtime.GOOS == "netbsd" { + m.Index = int(nativeEndian.Uint16(b[16:18])) + } else { + m.Index = int(nativeEndian.Uint16(b[12:14])) + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[w.bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/fn/vendor/golang.org/x/net/route/interface_freebsd.go b/fn/vendor/golang.org/x/net/route/interface_freebsd.go new file mode 100644 index 000000000..9f6f50c00 --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/interface_freebsd.go @@ -0,0 +1,78 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package route + +func (w *wireFormat) parseInterfaceMessage(typ RIBType, b []byte) (Message, error) { + var extOff, bodyOff int + if typ == sysNET_RT_IFLISTL { + if len(b) < 20 { + return nil, errMessageTooShort + } + extOff = int(nativeEndian.Uint16(b[18:20])) + bodyOff = int(nativeEndian.Uint16(b[16:18])) + } else { + extOff = w.extOff + bodyOff = w.bodyOff + } + if len(b) < extOff || len(b) < bodyOff { + return nil, errInvalidMessage + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + attrs := uint(nativeEndian.Uint32(b[4:8])) + if attrs&sysRTA_IFP == 0 { + return nil, nil + } + m := &InterfaceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + Addrs: make([]Addr, sysRTAX_MAX), + extOff: extOff, + raw: b[:l], + } + a, err := parseLinkAddr(b[bodyOff:]) + if err != nil { + return nil, err + } + m.Addrs[sysRTAX_IFP] = a + m.Name = a.(*LinkAddr).Name + return m, nil +} + +func (w *wireFormat) parseInterfaceAddrMessage(typ RIBType, b []byte) (Message, error) { + var bodyOff int + if typ == sysNET_RT_IFLISTL { + if len(b) < 24 { + return nil, errMessageTooShort + } + bodyOff = int(nativeEndian.Uint16(b[16:18])) + } else { + bodyOff = w.bodyOff + } + if len(b) < bodyOff { + return nil, errInvalidMessage + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceAddrMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + raw: b[:l], + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/fn/vendor/golang.org/x/net/route/interface_multicast.go b/fn/vendor/golang.org/x/net/route/interface_multicast.go new file mode 100644 index 000000000..1e99a9cc6 --- /dev/null +++ 
b/fn/vendor/golang.org/x/net/route/interface_multicast.go @@ -0,0 +1,30 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd + +package route + +func (w *wireFormat) parseInterfaceMulticastAddrMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceMulticastAddrMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + raw: b[:l], + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[w.bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/fn/vendor/golang.org/x/net/route/interface_openbsd.go b/fn/vendor/golang.org/x/net/route/interface_openbsd.go new file mode 100644 index 000000000..e4a143c1c --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/interface_openbsd.go @@ -0,0 +1,90 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package route + +func (*wireFormat) parseInterfaceMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < 32 { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + attrs := uint(nativeEndian.Uint32(b[12:16])) + if attrs&sysRTA_IFP == 0 { + return nil, nil + } + m := &InterfaceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[16:20])), + Index: int(nativeEndian.Uint16(b[6:8])), + Addrs: make([]Addr, sysRTAX_MAX), + raw: b[:l], + } + ll := int(nativeEndian.Uint16(b[4:6])) + if len(b) < ll { + return nil, errInvalidMessage + } + a, err := parseLinkAddr(b[ll:]) + if err != nil { + return nil, err + } + m.Addrs[sysRTAX_IFP] = a + m.Name = a.(*LinkAddr).Name + return m, nil +} + +func (*wireFormat) parseInterfaceAddrMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < 24 { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + bodyOff := int(nativeEndian.Uint16(b[4:6])) + if len(b) < bodyOff { + return nil, errInvalidMessage + } + m := &InterfaceAddrMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[12:16])), + Index: int(nativeEndian.Uint16(b[6:8])), + raw: b[:l], + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} + +func (*wireFormat) parseInterfaceAnnounceMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < 26 { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceAnnounceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Index: int(nativeEndian.Uint16(b[6:8])), + What: int(nativeEndian.Uint16(b[8:10])), + raw: b[:l], + } + for i := 0; i < 16; i++ { + if b[10+i] != 0 { + continue + } + m.Name = 
string(b[10 : 10+i]) + break + } + return m, nil +} diff --git a/fn/vendor/golang.org/x/net/route/message.go b/fn/vendor/golang.org/x/net/route/message.go new file mode 100644 index 000000000..0fa7e09f4 --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/message.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +// A Message represents a routing message. +type Message interface { + // Sys returns operating system-specific information. + Sys() []Sys +} + +// A Sys reprensents operating system-specific information. +type Sys interface { + // SysType returns a type of operating system-specific + // information. + SysType() SysType +} + +// A SysType represents a type of operating system-specific +// information. +type SysType int + +const ( + SysMetrics SysType = iota + SysStats +) + +// ParseRIB parses b as a routing information base and returns a list +// of routing messages. +func ParseRIB(typ RIBType, b []byte) ([]Message, error) { + if !typ.parseable() { + return nil, errUnsupportedMessage + } + var msgs []Message + nmsgs, nskips := 0, 0 + for len(b) > 4 { + nmsgs++ + l := int(nativeEndian.Uint16(b[:2])) + if l == 0 { + return nil, errInvalidMessage + } + if len(b) < l { + return nil, errMessageTooShort + } + if b[2] != sysRTM_VERSION { + b = b[l:] + continue + } + if w, ok := wireFormats[int(b[3])]; !ok { + nskips++ + } else { + m, err := w.parse(typ, b) + if err != nil { + return nil, err + } + if m == nil { + nskips++ + } else { + msgs = append(msgs, m) + } + } + b = b[l:] + } + // We failed to parse any of the messages - version mismatch? 
+ if nmsgs != len(msgs)+nskips { + return nil, errMessageMismatch + } + return msgs, nil +} diff --git a/fn/vendor/golang.org/x/net/route/message_darwin_test.go b/fn/vendor/golang.org/x/net/route/message_darwin_test.go new file mode 100644 index 000000000..316aa7507 --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/message_darwin_test.go @@ -0,0 +1,34 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import "testing" + +func TestFetchAndParseRIBOnDarwin(t *testing.T) { + for _, typ := range []RIBType{sysNET_RT_FLAGS, sysNET_RT_DUMP2, sysNET_RT_IFLIST2} { + var lastErr error + var ms []Message + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + rs, err := fetchAndParseRIB(af, typ) + if err != nil { + lastErr = err + continue + } + ms = append(ms, rs...) + } + if len(ms) == 0 && lastErr != nil { + t.Error(typ, lastErr) + continue + } + ss, err := msgs(ms).validate() + if err != nil { + t.Error(typ, err) + continue + } + for _, s := range ss { + t.Log(s) + } + } +} diff --git a/fn/vendor/golang.org/x/net/route/message_freebsd_test.go b/fn/vendor/golang.org/x/net/route/message_freebsd_test.go new file mode 100644 index 000000000..db4b56752 --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/message_freebsd_test.go @@ -0,0 +1,92 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import ( + "testing" + "unsafe" +) + +func TestFetchAndParseRIBOnFreeBSD(t *testing.T) { + for _, typ := range []RIBType{sysNET_RT_IFMALIST} { + var lastErr error + var ms []Message + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + rs, err := fetchAndParseRIB(af, typ) + if err != nil { + lastErr = err + continue + } + ms = append(ms, rs...) 
+ } + if len(ms) == 0 && lastErr != nil { + t.Error(typ, lastErr) + continue + } + ss, err := msgs(ms).validate() + if err != nil { + t.Error(typ, err) + continue + } + for _, s := range ss { + t.Log(s) + } + } +} + +func TestFetchAndParseRIBOnFreeBSD10AndAbove(t *testing.T) { + if _, err := FetchRIB(sysAF_UNSPEC, sysNET_RT_IFLISTL, 0); err != nil { + t.Skip("NET_RT_IFLISTL not supported") + } + var p uintptr + if kernelAlign != int(unsafe.Sizeof(p)) { + t.Skip("NET_RT_IFLIST vs. NET_RT_IFLISTL doesn't work for 386 emulation on amd64") + } + + var tests = [2]struct { + typ RIBType + b []byte + msgs []Message + ss []string + }{ + {typ: sysNET_RT_IFLIST}, + {typ: sysNET_RT_IFLISTL}, + } + for i := range tests { + var lastErr error + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + rs, err := fetchAndParseRIB(af, tests[i].typ) + if err != nil { + lastErr = err + continue + } + tests[i].msgs = append(tests[i].msgs, rs...) + } + if len(tests[i].msgs) == 0 && lastErr != nil { + t.Error(tests[i].typ, lastErr) + continue + } + tests[i].ss, lastErr = msgs(tests[i].msgs).validate() + if lastErr != nil { + t.Error(tests[i].typ, lastErr) + continue + } + for _, s := range tests[i].ss { + t.Log(s) + } + } + for i := len(tests) - 1; i > 0; i-- { + if len(tests[i].ss) != len(tests[i-1].ss) { + t.Errorf("got %v; want %v", tests[i].ss, tests[i-1].ss) + continue + } + for j, s1 := range tests[i].ss { + s0 := tests[i-1].ss[j] + if s1 != s0 { + t.Errorf("got %s; want %s", s1, s0) + } + } + } +} diff --git a/fn/vendor/golang.org/x/net/route/message_test.go b/fn/vendor/golang.org/x/net/route/message_test.go new file mode 100644 index 000000000..e848dabf4 --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/message_test.go @@ -0,0 +1,239 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import ( + "os" + "syscall" + "testing" + "time" +) + +func TestFetchAndParseRIB(t *testing.T) { + for _, typ := range []RIBType{sysNET_RT_DUMP, sysNET_RT_IFLIST} { + var lastErr error + var ms []Message + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + rs, err := fetchAndParseRIB(af, typ) + if err != nil { + lastErr = err + continue + } + ms = append(ms, rs...) + } + if len(ms) == 0 && lastErr != nil { + t.Error(typ, lastErr) + continue + } + ss, err := msgs(ms).validate() + if err != nil { + t.Error(typ, err) + continue + } + for _, s := range ss { + t.Log(typ, s) + } + } +} + +var ( + rtmonSock int + rtmonErr error +) + +func init() { + // We need to keep rtmonSock alive to avoid treading on + // recycled socket descriptors. + rtmonSock, rtmonErr = syscall.Socket(sysAF_ROUTE, sysSOCK_RAW, sysAF_UNSPEC) +} + +// TestMonitorAndParseRIB leaks a worker goroutine and a socket +// descriptor but that's intentional. +func TestMonitorAndParseRIB(t *testing.T) { + if testing.Short() || os.Getuid() != 0 { + t.Skip("must be root") + } + + if rtmonErr != nil { + t.Fatal(rtmonErr) + } + + // We suppose that using an IPv4 link-local address and the + // dot1Q ID for Token Ring and FDDI doesn't harm anyone. + pv := &propVirtual{addr: "169.254.0.1", mask: "255.255.255.0"} + if err := pv.configure(1002); err != nil { + t.Skip(err) + } + if err := pv.setup(); err != nil { + t.Skip(err) + } + pv.teardown() + + go func() { + b := make([]byte, os.Getpagesize()) + for { + // There's no easy way to unblock this read + // call because the routing message exchange + // over routing socket is a connectionless + // message-oriented protocol, no control plane + // for signaling connectivity, and we cannot + // use the net package of standard library due + // to the lack of support for routing socket + // and circular dependency. 
+ n, err := syscall.Read(rtmonSock, b) + if err != nil { + return + } + ms, err := ParseRIB(0, b[:n]) + if err != nil { + t.Error(err) + return + } + ss, err := msgs(ms).validate() + if err != nil { + t.Error(err) + return + } + for _, s := range ss { + t.Log(s) + } + } + }() + + for _, vid := range []int{1002, 1003, 1004, 1005} { + pv := &propVirtual{addr: "169.254.0.1", mask: "255.255.255.0"} + if err := pv.configure(vid); err != nil { + t.Fatal(err) + } + if err := pv.setup(); err != nil { + t.Fatal(err) + } + time.Sleep(200 * time.Millisecond) + if err := pv.teardown(); err != nil { + t.Fatal(err) + } + time.Sleep(200 * time.Millisecond) + } +} + +func TestParseRIBWithFuzz(t *testing.T) { + for _, fuzz := range []string{ + "0\x00\x05\x050000000000000000" + + "00000000000000000000" + + "00000000000000000000" + + "00000000000000000000" + + "0000000000000\x02000000" + + "00000000", + "\x02\x00\x05\f0000000000000000" + + "0\x0200000000000000", + "\x02\x00\x05\x100000000000000\x1200" + + "0\x00\xff\x00", + "\x02\x00\x05\f0000000000000000" + + "0\x12000\x00\x02\x0000", + "\x00\x00\x00\x01\x00", + "00000", + } { + for typ := RIBType(0); typ < 256; typ++ { + ParseRIB(typ, []byte(fuzz)) + } + } +} + +func TestRouteMessage(t *testing.T) { + s, err := syscall.Socket(sysAF_ROUTE, sysSOCK_RAW, sysAF_UNSPEC) + if err != nil { + t.Fatal(err) + } + defer syscall.Close(s) + + var ms []RouteMessage + for _, af := range []int{sysAF_INET, sysAF_INET6} { + if _, err := fetchAndParseRIB(af, sysNET_RT_DUMP); err != nil { + t.Log(err) + continue + } + switch af { + case sysAF_INET: + ms = append(ms, []RouteMessage{ + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet4Addr{IP: [4]byte{127, 0, 0, 1}}, + nil, + nil, + nil, + &LinkAddr{}, + &Inet4Addr{}, + nil, + &Inet4Addr{}, + }, + }, + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet4Addr{IP: [4]byte{127, 0, 0, 1}}, + }, + }, + }...) 
+ case sysAF_INET6: + ms = append(ms, []RouteMessage{ + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet6Addr{IP: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, + nil, + nil, + nil, + &LinkAddr{}, + &Inet6Addr{}, + nil, + &Inet6Addr{}, + }, + }, + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet6Addr{IP: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, + }, + }, + }...) + } + } + for i, m := range ms { + m.ID = uintptr(os.Getpid()) + m.Seq = i + 1 + wb, err := m.Marshal() + if err != nil { + t.Fatalf("%v: %v", m, err) + } + if _, err := syscall.Write(s, wb); err != nil { + t.Fatalf("%v: %v", m, err) + } + rb := make([]byte, os.Getpagesize()) + n, err := syscall.Read(s, rb) + if err != nil { + t.Fatalf("%v: %v", m, err) + } + rms, err := ParseRIB(0, rb[:n]) + if err != nil { + t.Fatalf("%v: %v", m, err) + } + for _, rm := range rms { + err := rm.(*RouteMessage).Err + if err != nil { + t.Errorf("%v: %v", m, err) + } + } + ss, err := msgs(rms).validate() + if err != nil { + t.Fatalf("%v: %v", m, err) + } + for _, s := range ss { + t.Log(s) + } + } +} diff --git a/fn/vendor/golang.org/x/net/route/route.go b/fn/vendor/golang.org/x/net/route/route.go new file mode 100644 index 000000000..081da0d5c --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/route.go @@ -0,0 +1,123 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +// Package route provides basic functions for the manipulation of +// packet routing facilities on BSD variants. +// +// The package supports any version of Darwin, any version of +// DragonFly BSD, FreeBSD 7 through 11, NetBSD 6 and above, and +// OpenBSD 5.6 and above. 
+package route + +import ( + "errors" + "os" + "syscall" +) + +var ( + errUnsupportedMessage = errors.New("unsupported message") + errMessageMismatch = errors.New("message mismatch") + errMessageTooShort = errors.New("message too short") + errInvalidMessage = errors.New("invalid message") + errInvalidAddr = errors.New("invalid address") + errShortBuffer = errors.New("short buffer") +) + +// A RouteMessage represents a message conveying an address prefix, a +// nexthop address and an output interface. +// +// Unlike other messages, this message can be used to query adjacency +// information for the given address prefix, to add a new route, and +// to delete or modify the existing route from the routing information +// base inside the kernel by writing and reading route messages on a +// routing socket. +// +// For the manipulation of routing information, the route message must +// contain appropriate fields that include: +// +// Version = +// Type = +// Flags = +// Index = +// ID = +// Seq = +// Addrs = +// +// The Type field specifies a type of manipulation, the Flags field +// specifies a class of target information and the Addrs field +// specifies target information like the following: +// +// route.RouteMessage{ +// Version: RTM_VERSION, +// Type: RTM_GET, +// Flags: RTF_UP | RTF_HOST, +// ID: uintptr(os.Getpid()), +// Seq: 1, +// Addrs: []route.Addrs{ +// RTAX_DST: &route.Inet4Addr{ ... }, +// RTAX_IFP: &route.LinkAddr{ ... }, +// RTAX_BRD: &route.Inet4Addr{ ... }, +// }, +// } +// +// The values for the above fields depend on the implementation of +// each operating system. +// +// The Err field on a response message contains an error value on the +// requested operation. If non-nil, the requested operation is failed. 
+type RouteMessage struct { + Version int // message version + Type int // message type + Flags int // route flags + Index int // interface index when atatched + ID uintptr // sender's identifier; usually process ID + Seq int // sequence number + Err error // error on requested operation + Addrs []Addr // addresses + + extOff int // offset of header extension + raw []byte // raw message +} + +// Marshal returns the binary encoding of m. +func (m *RouteMessage) Marshal() ([]byte, error) { + return m.marshal() +} + +// A RIBType reprensents a type of routing information base. +type RIBType int + +const ( + RIBTypeRoute RIBType = syscall.NET_RT_DUMP + RIBTypeInterface RIBType = syscall.NET_RT_IFLIST +) + +// FetchRIB fetches a routing information base from the operating +// system. +// +// The provided af must be an address family. +// +// The provided arg must be a RIBType-specific argument. +// When RIBType is related to routes, arg might be a set of route +// flags. When RIBType is related to network interfaces, arg might be +// an interface index or a set of interface flags. In most cases, zero +// means a wildcard. +func FetchRIB(af int, typ RIBType, arg int) ([]byte, error) { + mib := [6]int32{sysCTL_NET, sysAF_ROUTE, 0, int32(af), int32(typ), int32(arg)} + n := uintptr(0) + if err := sysctl(mib[:], nil, &n, nil, 0); err != nil { + return nil, os.NewSyscallError("sysctl", err) + } + if n == 0 { + return nil, nil + } + b := make([]byte, n) + if err := sysctl(mib[:], &b[0], &n, nil, 0); err != nil { + return nil, os.NewSyscallError("sysctl", err) + } + return b[:n], nil +} diff --git a/fn/vendor/golang.org/x/net/route/route_classic.go b/fn/vendor/golang.org/x/net/route/route_classic.go new file mode 100644 index 000000000..61b2bb4ad --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/route_classic.go @@ -0,0 +1,67 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd + +package route + +import "syscall" + +func (m *RouteMessage) marshal() ([]byte, error) { + w, ok := wireFormats[m.Type] + if !ok { + return nil, errUnsupportedMessage + } + l := w.bodyOff + addrsSpace(m.Addrs) + b := make([]byte, l) + nativeEndian.PutUint16(b[:2], uint16(l)) + if m.Version == 0 { + b[2] = sysRTM_VERSION + } else { + b[2] = byte(m.Version) + } + b[3] = byte(m.Type) + nativeEndian.PutUint32(b[8:12], uint32(m.Flags)) + nativeEndian.PutUint16(b[4:6], uint16(m.Index)) + nativeEndian.PutUint32(b[16:20], uint32(m.ID)) + nativeEndian.PutUint32(b[20:24], uint32(m.Seq)) + attrs, err := marshalAddrs(b[w.bodyOff:], m.Addrs) + if err != nil { + return nil, err + } + if attrs > 0 { + nativeEndian.PutUint32(b[12:16], uint32(attrs)) + } + return b, nil +} + +func (w *wireFormat) parseRouteMessage(typ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &RouteMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[4:6])), + ID: uintptr(nativeEndian.Uint32(b[16:20])), + Seq: int(nativeEndian.Uint32(b[20:24])), + extOff: w.extOff, + raw: b[:l], + } + errno := syscall.Errno(nativeEndian.Uint32(b[28:32])) + if errno != 0 { + m.Err = errno + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[w.bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/fn/vendor/golang.org/x/net/route/route_openbsd.go b/fn/vendor/golang.org/x/net/route/route_openbsd.go new file mode 100644 index 000000000..daf2e90c4 --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/route_openbsd.go @@ -0,0 +1,65 @@ +// Copyright 2016 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import "syscall" + +func (m *RouteMessage) marshal() ([]byte, error) { + l := sizeofRtMsghdr + addrsSpace(m.Addrs) + b := make([]byte, l) + nativeEndian.PutUint16(b[:2], uint16(l)) + if m.Version == 0 { + b[2] = sysRTM_VERSION + } else { + b[2] = byte(m.Version) + } + b[3] = byte(m.Type) + nativeEndian.PutUint16(b[4:6], uint16(sizeofRtMsghdr)) + nativeEndian.PutUint32(b[16:20], uint32(m.Flags)) + nativeEndian.PutUint16(b[6:8], uint16(m.Index)) + nativeEndian.PutUint32(b[24:28], uint32(m.ID)) + nativeEndian.PutUint32(b[28:32], uint32(m.Seq)) + attrs, err := marshalAddrs(b[sizeofRtMsghdr:], m.Addrs) + if err != nil { + return nil, err + } + if attrs > 0 { + nativeEndian.PutUint32(b[12:16], uint32(attrs)) + } + return b, nil +} + +func (*wireFormat) parseRouteMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < sizeofRtMsghdr { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &RouteMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[16:20])), + Index: int(nativeEndian.Uint16(b[6:8])), + ID: uintptr(nativeEndian.Uint32(b[24:28])), + Seq: int(nativeEndian.Uint32(b[28:32])), + raw: b[:l], + } + ll := int(nativeEndian.Uint16(b[4:6])) + if len(b) < ll { + return nil, errInvalidMessage + } + errno := syscall.Errno(nativeEndian.Uint32(b[32:36])) + if errno != 0 { + m.Err = errno + } + as, err := parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[ll:]) + if err != nil { + return nil, err + } + m.Addrs = as + return m, nil +} diff --git a/fn/vendor/golang.org/x/net/route/route_test.go b/fn/vendor/golang.org/x/net/route/route_test.go new file mode 100644 index 000000000..63fd8c561 --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/route_test.go @@ -0,0 +1,386 @@ +// Copyright 2016 The 
Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import ( + "fmt" + "os/exec" + "runtime" + "time" +) + +func (m *RouteMessage) String() string { + return fmt.Sprintf("%s", addrAttrs(nativeEndian.Uint32(m.raw[12:16]))) +} + +func (m *InterfaceMessage) String() string { + var attrs addrAttrs + if runtime.GOOS == "openbsd" { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16])) + } else { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8])) + } + return fmt.Sprintf("%s", attrs) +} + +func (m *InterfaceAddrMessage) String() string { + var attrs addrAttrs + if runtime.GOOS == "openbsd" { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16])) + } else { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8])) + } + return fmt.Sprintf("%s", attrs) +} + +func (m *InterfaceMulticastAddrMessage) String() string { + return fmt.Sprintf("%s", addrAttrs(nativeEndian.Uint32(m.raw[4:8]))) +} + +func (m *InterfaceAnnounceMessage) String() string { + what := "" + switch m.What { + case 0: + what = "arrival" + case 1: + what = "departure" + } + return fmt.Sprintf("(%d %s %s)", m.Index, m.Name, what) +} + +func (m *InterfaceMetrics) String() string { + return fmt.Sprintf("(type=%d mtu=%d)", m.Type, m.MTU) +} + +func (m *RouteMetrics) String() string { + return fmt.Sprintf("(pmtu=%d)", m.PathMTU) +} + +type addrAttrs uint + +var addrAttrNames = [...]string{ + "dst", + "gateway", + "netmask", + "genmask", + "ifp", + "ifa", + "author", + "brd", + "df:mpls1-n:tag-o:src", // mpls1 for dragonfly, tag for netbsd, src for openbsd + "df:mpls2-o:srcmask", // mpls2 for dragonfly, srcmask for openbsd + "df:mpls3-o:label", // mpls3 for dragonfly, label for openbsd +} + +func (attrs addrAttrs) String() string { + var s string + for i, name := range addrAttrNames { + if attrs&(1<" + } + return s +} + +type msgs []Message + +func (ms msgs) 
validate() ([]string, error) { + var ss []string + for _, m := range ms { + switch m := m.(type) { + case *RouteMessage: + if err := addrs(m.Addrs).match(addrAttrs(nativeEndian.Uint32(m.raw[12:16]))); err != nil { + return nil, err + } + sys := m.Sys() + if sys == nil { + return nil, fmt.Errorf("no sys for %s", m.String()) + } + ss = append(ss, m.String()+" "+syss(sys).String()+" "+addrs(m.Addrs).String()) + case *InterfaceMessage: + var attrs addrAttrs + if runtime.GOOS == "openbsd" { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16])) + } else { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8])) + } + if err := addrs(m.Addrs).match(attrs); err != nil { + return nil, err + } + sys := m.Sys() + if sys == nil { + return nil, fmt.Errorf("no sys for %s", m.String()) + } + ss = append(ss, m.String()+" "+syss(sys).String()+" "+addrs(m.Addrs).String()) + case *InterfaceAddrMessage: + var attrs addrAttrs + if runtime.GOOS == "openbsd" { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16])) + } else { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8])) + } + if err := addrs(m.Addrs).match(attrs); err != nil { + return nil, err + } + ss = append(ss, m.String()+" "+addrs(m.Addrs).String()) + case *InterfaceMulticastAddrMessage: + if err := addrs(m.Addrs).match(addrAttrs(nativeEndian.Uint32(m.raw[4:8]))); err != nil { + return nil, err + } + ss = append(ss, m.String()+" "+addrs(m.Addrs).String()) + case *InterfaceAnnounceMessage: + ss = append(ss, m.String()) + default: + ss = append(ss, fmt.Sprintf("%+v", m)) + } + } + return ss, nil +} + +type syss []Sys + +func (sys syss) String() string { + var s string + for _, sy := range sys { + switch sy := sy.(type) { + case *InterfaceMetrics: + if len(s) > 0 { + s += " " + } + s += sy.String() + case *RouteMetrics: + if len(s) > 0 { + s += " " + } + s += sy.String() + } + } + return s +} + +type addrFamily int + +func (af addrFamily) String() string { + switch af { + case sysAF_UNSPEC: + return "unspec" + case sysAF_LINK: + 
return "link" + case sysAF_INET: + return "inet4" + case sysAF_INET6: + return "inet6" + default: + return fmt.Sprintf("%d", af) + } +} + +const hexDigit = "0123456789abcdef" + +type llAddr []byte + +func (a llAddr) String() string { + if len(a) == 0 { + return "" + } + buf := make([]byte, 0, len(a)*3-1) + for i, b := range a { + if i > 0 { + buf = append(buf, ':') + } + buf = append(buf, hexDigit[b>>4]) + buf = append(buf, hexDigit[b&0xF]) + } + return string(buf) +} + +type ipAddr []byte + +func (a ipAddr) String() string { + if len(a) == 0 { + return "" + } + if len(a) == 4 { + return fmt.Sprintf("%d.%d.%d.%d", a[0], a[1], a[2], a[3]) + } + if len(a) == 16 { + return fmt.Sprintf("%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15]) + } + s := make([]byte, len(a)*2) + for i, tn := range a { + s[i*2], s[i*2+1] = hexDigit[tn>>4], hexDigit[tn&0xf] + } + return string(s) +} + +func (a *LinkAddr) String() string { + name := a.Name + if name == "" { + name = "" + } + lla := llAddr(a.Addr).String() + if lla == "" { + lla = "" + } + return fmt.Sprintf("(%v %d %s %s)", addrFamily(a.Family()), a.Index, name, lla) +} + +func (a *Inet4Addr) String() string { + return fmt.Sprintf("(%v %v)", addrFamily(a.Family()), ipAddr(a.IP[:])) +} + +func (a *Inet6Addr) String() string { + return fmt.Sprintf("(%v %v %d)", addrFamily(a.Family()), ipAddr(a.IP[:]), a.ZoneID) +} + +func (a *DefaultAddr) String() string { + return fmt.Sprintf("(%v %s)", addrFamily(a.Family()), ipAddr(a.Raw[2:]).String()) +} + +type addrs []Addr + +func (as addrs) String() string { + var s string + for _, a := range as { + if a == nil { + continue + } + if len(s) > 0 { + s += " " + } + switch a := a.(type) { + case *LinkAddr: + s += a.String() + case *Inet4Addr: + s += a.String() + case *Inet6Addr: + s += a.String() + case *DefaultAddr: + s += a.String() + } + } + if s == "" { + return "" + 
} + return s +} + +func (as addrs) match(attrs addrAttrs) error { + var ts addrAttrs + af := sysAF_UNSPEC + for i := range as { + if as[i] != nil { + ts |= 1 << uint(i) + } + switch as[i].(type) { + case *Inet4Addr: + if af == sysAF_UNSPEC { + af = sysAF_INET + } + if af != sysAF_INET { + return fmt.Errorf("got %v; want %v", addrs(as), addrFamily(af)) + } + case *Inet6Addr: + if af == sysAF_UNSPEC { + af = sysAF_INET6 + } + if af != sysAF_INET6 { + return fmt.Errorf("got %v; want %v", addrs(as), addrFamily(af)) + } + } + } + if ts != attrs && ts > attrs { + return fmt.Errorf("%v not included in %v", ts, attrs) + } + return nil +} + +func fetchAndParseRIB(af int, typ RIBType) ([]Message, error) { + var err error + var b []byte + for i := 0; i < 3; i++ { + if b, err = FetchRIB(af, typ, 0); err != nil { + time.Sleep(10 * time.Millisecond) + continue + } + break + } + if err != nil { + return nil, fmt.Errorf("%v %d %v", addrFamily(af), typ, err) + } + ms, err := ParseRIB(typ, b) + if err != nil { + return nil, fmt.Errorf("%v %d %v", addrFamily(af), typ, err) + } + return ms, nil +} + +// propVirtual is a proprietary virtual network interface. 
+type propVirtual struct { + name string + addr, mask string + setupCmds []*exec.Cmd + teardownCmds []*exec.Cmd +} + +func (pv *propVirtual) setup() error { + for _, cmd := range pv.setupCmds { + if err := cmd.Run(); err != nil { + pv.teardown() + return err + } + } + return nil +} + +func (pv *propVirtual) teardown() error { + for _, cmd := range pv.teardownCmds { + if err := cmd.Run(); err != nil { + return err + } + } + return nil +} + +func (pv *propVirtual) configure(suffix int) error { + if runtime.GOOS == "openbsd" { + pv.name = fmt.Sprintf("vether%d", suffix) + } else { + pv.name = fmt.Sprintf("vlan%d", suffix) + } + xname, err := exec.LookPath("ifconfig") + if err != nil { + return err + } + pv.setupCmds = append(pv.setupCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", pv.name, "create"}, + }) + if runtime.GOOS == "netbsd" { + // NetBSD requires an underlying dot1Q-capable network + // interface. + pv.setupCmds = append(pv.setupCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", pv.name, "vlan", fmt.Sprintf("%d", suffix&0xfff), "vlanif", "wm0"}, + }) + } + pv.setupCmds = append(pv.setupCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", pv.name, "inet", pv.addr, "netmask", pv.mask}, + }) + pv.teardownCmds = append(pv.teardownCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", pv.name, "destroy"}, + }) + return nil +} diff --git a/fn/vendor/golang.org/x/net/route/sys.go b/fn/vendor/golang.org/x/net/route/sys.go new file mode 100644 index 000000000..3d0ee9b14 --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/sys.go @@ -0,0 +1,39 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import "unsafe" + +var ( + nativeEndian binaryByteOrder + kernelAlign int + wireFormats map[int]*wireFormat +) + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + nativeEndian = littleEndian + } else { + nativeEndian = bigEndian + } + kernelAlign, wireFormats = probeRoutingStack() +} + +func roundup(l int) int { + if l == 0 { + return kernelAlign + } + return (l + kernelAlign - 1) & ^(kernelAlign - 1) +} + +type wireFormat struct { + extOff int // offset of header extension + bodyOff int // offset of message body + parse func(RIBType, []byte) (Message, error) +} diff --git a/fn/vendor/golang.org/x/net/route/sys_darwin.go b/fn/vendor/golang.org/x/net/route/sys_darwin.go new file mode 100644 index 000000000..e742c919d --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/sys_darwin.go @@ -0,0 +1,87 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +func (typ RIBType) parseable() bool { + switch typ { + case sysNET_RT_STAT, sysNET_RT_TRASH: + return false + default: + return true + } +} + +// A RouteMetrics represents route metrics. +type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint32(m.raw[m.extOff+4 : m.extOff+8])), + }, + } +} + +// A InterfaceMetrics represents interface metrics. +type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. 
+func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[m.extOff]), + MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), + }, + } +} + +func probeRoutingStack() (int, map[int]*wireFormat) { + rtm := &wireFormat{extOff: 36, bodyOff: sizeofRtMsghdrDarwin15} + rtm.parse = rtm.parseRouteMessage + rtm2 := &wireFormat{extOff: 36, bodyOff: sizeofRtMsghdr2Darwin15} + rtm2.parse = rtm2.parseRouteMessage + ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrDarwin15} + ifm.parse = ifm.parseInterfaceMessage + ifm2 := &wireFormat{extOff: 32, bodyOff: sizeofIfMsghdr2Darwin15} + ifm2.parse = ifm2.parseInterfaceMessage + ifam := &wireFormat{extOff: sizeofIfaMsghdrDarwin15, bodyOff: sizeofIfaMsghdrDarwin15} + ifam.parse = ifam.parseInterfaceAddrMessage + ifmam := &wireFormat{extOff: sizeofIfmaMsghdrDarwin15, bodyOff: sizeofIfmaMsghdrDarwin15} + ifmam.parse = ifmam.parseInterfaceMulticastAddrMessage + ifmam2 := &wireFormat{extOff: sizeofIfmaMsghdr2Darwin15, bodyOff: sizeofIfmaMsghdr2Darwin15} + ifmam2.parse = ifmam2.parseInterfaceMulticastAddrMessage + // Darwin kernels require 32-bit aligned access to routing facilities. 
+ return 4, map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFINFO: ifm, + sysRTM_NEWMADDR: ifmam, + sysRTM_DELMADDR: ifmam, + sysRTM_IFINFO2: ifm2, + sysRTM_NEWMADDR2: ifmam2, + sysRTM_GET2: rtm2, + } +} diff --git a/fn/vendor/golang.org/x/net/route/sys_dragonfly.go b/fn/vendor/golang.org/x/net/route/sys_dragonfly.go new file mode 100644 index 000000000..b175cb18c --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/sys_dragonfly.go @@ -0,0 +1,76 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import "unsafe" + +func (typ RIBType) parseable() bool { return true } + +// A RouteMetrics represents route metrics. +type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])), + }, + } +} + +// A InterfaceMetrics represents interface metrics. +type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. 
+func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[m.extOff]), + MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), + }, + } +} + +func probeRoutingStack() (int, map[int]*wireFormat) { + var p uintptr + rtm := &wireFormat{extOff: 40, bodyOff: sizeofRtMsghdrDragonFlyBSD4} + rtm.parse = rtm.parseRouteMessage + ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrDragonFlyBSD4} + ifm.parse = ifm.parseInterfaceMessage + ifam := &wireFormat{extOff: sizeofIfaMsghdrDragonFlyBSD4, bodyOff: sizeofIfaMsghdrDragonFlyBSD4} + ifam.parse = ifam.parseInterfaceAddrMessage + ifmam := &wireFormat{extOff: sizeofIfmaMsghdrDragonFlyBSD4, bodyOff: sizeofIfmaMsghdrDragonFlyBSD4} + ifmam.parse = ifmam.parseInterfaceMulticastAddrMessage + ifanm := &wireFormat{extOff: sizeofIfAnnouncemsghdrDragonFlyBSD4, bodyOff: sizeofIfAnnouncemsghdrDragonFlyBSD4} + ifanm.parse = ifanm.parseInterfaceAnnounceMessage + return int(unsafe.Sizeof(p)), map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFINFO: ifm, + sysRTM_NEWMADDR: ifmam, + sysRTM_DELMADDR: ifmam, + sysRTM_IFANNOUNCE: ifanm, + } +} diff --git a/fn/vendor/golang.org/x/net/route/sys_freebsd.go b/fn/vendor/golang.org/x/net/route/sys_freebsd.go new file mode 100644 index 000000000..010d4ae78 --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/sys_freebsd.go @@ -0,0 +1,155 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import ( + "syscall" + "unsafe" +) + +func (typ RIBType) parseable() bool { return true } + +// A RouteMetrics represents route metrics. 
+type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + if kernelAlign == 8 { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])), + }, + } + } + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint32(m.raw[m.extOff+4 : m.extOff+8])), + }, + } +} + +// A InterfaceMetrics represents interface metrics. +type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[m.extOff]), + MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), + }, + } +} + +func probeRoutingStack() (int, map[int]*wireFormat) { + var p uintptr + wordSize := int(unsafe.Sizeof(p)) + align := int(unsafe.Sizeof(p)) + // In the case of kern.supported_archs="amd64 i386", we need + // to know the underlying kernel's architecture because the + // alignment for routing facilities are set at the build time + // of the kernel. 
+ conf, _ := syscall.Sysctl("kern.conftxt") + for i, j := 0, 0; j < len(conf); j++ { + if conf[j] != '\n' { + continue + } + s := conf[i:j] + i = j + 1 + if len(s) > len("machine") && s[:len("machine")] == "machine" { + s = s[len("machine"):] + for k := 0; k < len(s); k++ { + if s[k] == ' ' || s[k] == '\t' { + s = s[1:] + } + break + } + if s == "amd64" { + align = 8 + } + break + } + } + var rtm, ifm, ifam, ifmam, ifanm *wireFormat + if align != wordSize { // 386 emulation on amd64 + rtm = &wireFormat{extOff: sizeofRtMsghdrFreeBSD10Emu - sizeofRtMetricsFreeBSD10Emu, bodyOff: sizeofRtMsghdrFreeBSD10Emu} + ifm = &wireFormat{extOff: 16} + ifam = &wireFormat{extOff: sizeofIfaMsghdrFreeBSD10Emu, bodyOff: sizeofIfaMsghdrFreeBSD10Emu} + ifmam = &wireFormat{extOff: sizeofIfmaMsghdrFreeBSD10Emu, bodyOff: sizeofIfmaMsghdrFreeBSD10Emu} + ifanm = &wireFormat{extOff: sizeofIfAnnouncemsghdrFreeBSD10Emu, bodyOff: sizeofIfAnnouncemsghdrFreeBSD10Emu} + } else { + rtm = &wireFormat{extOff: sizeofRtMsghdrFreeBSD10 - sizeofRtMetricsFreeBSD10, bodyOff: sizeofRtMsghdrFreeBSD10} + ifm = &wireFormat{extOff: 16} + ifam = &wireFormat{extOff: sizeofIfaMsghdrFreeBSD10, bodyOff: sizeofIfaMsghdrFreeBSD10} + ifmam = &wireFormat{extOff: sizeofIfmaMsghdrFreeBSD10, bodyOff: sizeofIfmaMsghdrFreeBSD10} + ifanm = &wireFormat{extOff: sizeofIfAnnouncemsghdrFreeBSD10, bodyOff: sizeofIfAnnouncemsghdrFreeBSD10} + } + rel, _ := syscall.SysctlUint32("kern.osreldate") + switch { + case rel < 800000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD7Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD7 + } + case 800000 <= rel && rel < 900000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD8Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD8 + } + case 900000 <= rel && rel < 1000000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD9Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD9 + } 
+ case 1000000 <= rel && rel < 1100000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD10Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD10 + } + default: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD11Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD11 + } + } + rtm.parse = rtm.parseRouteMessage + ifm.parse = ifm.parseInterfaceMessage + ifam.parse = ifam.parseInterfaceAddrMessage + ifmam.parse = ifmam.parseInterfaceMulticastAddrMessage + ifanm.parse = ifanm.parseInterfaceAnnounceMessage + return align, map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFINFO: ifm, + sysRTM_NEWMADDR: ifmam, + sysRTM_DELMADDR: ifmam, + sysRTM_IFANNOUNCE: ifanm, + } +} diff --git a/fn/vendor/golang.org/x/net/route/sys_netbsd.go b/fn/vendor/golang.org/x/net/route/sys_netbsd.go new file mode 100644 index 000000000..b4e330140 --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/sys_netbsd.go @@ -0,0 +1,71 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +func (typ RIBType) parseable() bool { return true } + +// A RouteMetrics represents route metrics. +type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])), + }, + } +} + +// A InterfaceMetrics represents interface metrics. 
+type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[m.extOff]), + MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), + }, + } +} + +func probeRoutingStack() (int, map[int]*wireFormat) { + rtm := &wireFormat{extOff: 40, bodyOff: sizeofRtMsghdrNetBSD7} + rtm.parse = rtm.parseRouteMessage + ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrNetBSD7} + ifm.parse = ifm.parseInterfaceMessage + ifam := &wireFormat{extOff: sizeofIfaMsghdrNetBSD7, bodyOff: sizeofIfaMsghdrNetBSD7} + ifam.parse = ifam.parseInterfaceAddrMessage + ifanm := &wireFormat{extOff: sizeofIfAnnouncemsghdrNetBSD7, bodyOff: sizeofIfAnnouncemsghdrNetBSD7} + ifanm.parse = ifanm.parseInterfaceAnnounceMessage + // NetBSD 6 and above kernels require 64-bit aligned access to + // routing facilities. + return 8, map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFANNOUNCE: ifanm, + sysRTM_IFINFO: ifm, + } +} diff --git a/fn/vendor/golang.org/x/net/route/sys_openbsd.go b/fn/vendor/golang.org/x/net/route/sys_openbsd.go new file mode 100644 index 000000000..8798dc4ca --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/sys_openbsd.go @@ -0,0 +1,79 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package route + +import "unsafe" + +func (typ RIBType) parseable() bool { + switch typ { + case sysNET_RT_STATS, sysNET_RT_TABLE: + return false + default: + return true + } +} + +// A RouteMetrics represents route metrics. +type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint32(m.raw[60:64])), + }, + } +} + +// A InterfaceMetrics represents interface metrics. +type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[24]), + MTU: int(nativeEndian.Uint32(m.raw[28:32])), + }, + } +} + +func probeRoutingStack() (int, map[int]*wireFormat) { + var p uintptr + rtm := &wireFormat{extOff: -1, bodyOff: -1} + rtm.parse = rtm.parseRouteMessage + ifm := &wireFormat{extOff: -1, bodyOff: -1} + ifm.parse = ifm.parseInterfaceMessage + ifam := &wireFormat{extOff: -1, bodyOff: -1} + ifam.parse = ifam.parseInterfaceAddrMessage + ifanm := &wireFormat{extOff: -1, bodyOff: -1} + ifanm.parse = ifanm.parseInterfaceAnnounceMessage + return int(unsafe.Sizeof(p)), map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFINFO: ifm, + sysRTM_IFANNOUNCE: ifanm, + } +} diff --git a/fn/vendor/golang.org/x/net/route/syscall.go 
b/fn/vendor/golang.org/x/net/route/syscall.go new file mode 100644 index 000000000..c211188b1 --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/syscall.go @@ -0,0 +1,28 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import ( + "syscall" + "unsafe" +) + +var zero uintptr + +func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + var p unsafe.Pointer + if len(mib) > 0 { + p = unsafe.Pointer(&mib[0]) + } else { + p = unsafe.Pointer(&zero) + } + _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if errno != 0 { + return error(errno) + } + return nil +} diff --git a/fn/vendor/golang.org/x/net/route/zsys_darwin.go b/fn/vendor/golang.org/x/net/route/zsys_darwin.go new file mode 100644 index 000000000..4e2e1ab09 --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/zsys_darwin.go @@ -0,0 +1,99 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_STAT = 0x4 + sysNET_RT_TRASH = 0x5 + sysNET_RT_IFLIST2 = 0x6 + sysNET_RT_DUMP2 = 0x7 + sysNET_RT_MAXID = 0xa +) + +const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_MAXID = 0x9 +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 
+ sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_OLDADD = 0x9 + sysRTM_OLDDEL = 0xa + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFINFO2 = 0x12 + sysRTM_NEWMADDR2 = 0x13 + sysRTM_GET2 = 0x14 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrDarwin15 = 0x70 + sizeofIfaMsghdrDarwin15 = 0x14 + sizeofIfmaMsghdrDarwin15 = 0x10 + sizeofIfMsghdr2Darwin15 = 0xa0 + sizeofIfmaMsghdr2Darwin15 = 0x14 + sizeofIfDataDarwin15 = 0x60 + sizeofIfData64Darwin15 = 0x80 + + sizeofRtMsghdrDarwin15 = 0x5c + sizeofRtMsghdr2Darwin15 = 0x5c + sizeofRtMetricsDarwin15 = 0x38 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/route/zsys_dragonfly.go b/fn/vendor/golang.org/x/net/route/zsys_dragonfly.go new file mode 100644 index 000000000..719c88d11 --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/zsys_dragonfly.go @@ -0,0 +1,98 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_MAXID = 0x4 +) + +const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 + sysCTL_LWKT = 0xa + sysCTL_MAXID = 0xb +) + +const ( + sysRTM_VERSION = 0x6 
+ + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_OLDADD = 0x9 + sysRTM_OLDDEL = 0xa + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + sysRTA_MPLS1 = 0x100 + sysRTA_MPLS2 = 0x200 + sysRTA_MPLS3 = 0x400 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MPLS1 = 0x8 + sysRTAX_MPLS2 = 0x9 + sysRTAX_MPLS3 = 0xa + sysRTAX_MAX = 0xb +) + +const ( + sizeofIfMsghdrDragonFlyBSD4 = 0xb0 + sizeofIfaMsghdrDragonFlyBSD4 = 0x14 + sizeofIfmaMsghdrDragonFlyBSD4 = 0x10 + sizeofIfAnnouncemsghdrDragonFlyBSD4 = 0x18 + + sizeofRtMsghdrDragonFlyBSD4 = 0x98 + sizeofRtMetricsDragonFlyBSD4 = 0x70 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/route/zsys_freebsd_386.go b/fn/vendor/golang.org/x/net/route/zsys_freebsd_386.go new file mode 100644 index 000000000..b03bc01f6 --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/zsys_freebsd_386.go @@ -0,0 +1,126 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_IFMALIST = 0x4 + sysNET_RT_IFLISTL = 0x5 +) + +const ( + sysCTL_MAXNAME = 0x18 + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET 
= 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrlFreeBSD10 = 0x68 + sizeofIfaMsghdrFreeBSD10 = 0x14 + sizeofIfaMsghdrlFreeBSD10 = 0x6c + sizeofIfmaMsghdrFreeBSD10 = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10 = 0x18 + + sizeofRtMsghdrFreeBSD10 = 0x5c + sizeofRtMetricsFreeBSD10 = 0x38 + + sizeofIfMsghdrFreeBSD7 = 0x60 + sizeofIfMsghdrFreeBSD8 = 0x60 + sizeofIfMsghdrFreeBSD9 = 0x60 + sizeofIfMsghdrFreeBSD10 = 0x64 + sizeofIfMsghdrFreeBSD11 = 0xa8 + + sizeofIfDataFreeBSD7 = 0x50 + sizeofIfDataFreeBSD8 = 0x50 + sizeofIfDataFreeBSD9 = 0x50 + sizeofIfDataFreeBSD10 = 0x54 + sizeofIfDataFreeBSD11 = 0x98 + + // MODIFIED BY HAND FOR 386 EMULATION ON AMD64 + // 386 EMULATION USES THE UNDERLYING RAW DATA LAYOUT + + sizeofIfMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfaMsghdrFreeBSD10Emu = 0x14 + sizeofIfaMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfmaMsghdrFreeBSD10Emu = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18 + + sizeofRtMsghdrFreeBSD10Emu = 0x98 + sizeofRtMetricsFreeBSD10Emu = 0x70 + + sizeofIfMsghdrFreeBSD7Emu = 0xa8 + sizeofIfMsghdrFreeBSD8Emu = 0xa8 + sizeofIfMsghdrFreeBSD9Emu = 0xa8 + sizeofIfMsghdrFreeBSD10Emu = 0xa8 + 
sizeofIfMsghdrFreeBSD11Emu = 0xa8 + + sizeofIfDataFreeBSD7Emu = 0x98 + sizeofIfDataFreeBSD8Emu = 0x98 + sizeofIfDataFreeBSD9Emu = 0x98 + sizeofIfDataFreeBSD10Emu = 0x98 + sizeofIfDataFreeBSD11Emu = 0x98 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go b/fn/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go new file mode 100644 index 000000000..0b675b3d3 --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go @@ -0,0 +1,123 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_IFMALIST = 0x4 + sysNET_RT_IFLISTL = 0x5 +) + +const ( + sysCTL_MAXNAME = 0x18 + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrlFreeBSD10 = 0xb0 + 
sizeofIfaMsghdrFreeBSD10 = 0x14 + sizeofIfaMsghdrlFreeBSD10 = 0xb0 + sizeofIfmaMsghdrFreeBSD10 = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10 = 0x18 + + sizeofRtMsghdrFreeBSD10 = 0x98 + sizeofRtMetricsFreeBSD10 = 0x70 + + sizeofIfMsghdrFreeBSD7 = 0xa8 + sizeofIfMsghdrFreeBSD8 = 0xa8 + sizeofIfMsghdrFreeBSD9 = 0xa8 + sizeofIfMsghdrFreeBSD10 = 0xa8 + sizeofIfMsghdrFreeBSD11 = 0xa8 + + sizeofIfDataFreeBSD7 = 0x98 + sizeofIfDataFreeBSD8 = 0x98 + sizeofIfDataFreeBSD9 = 0x98 + sizeofIfDataFreeBSD10 = 0x98 + sizeofIfDataFreeBSD11 = 0x98 + + sizeofIfMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfaMsghdrFreeBSD10Emu = 0x14 + sizeofIfaMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfmaMsghdrFreeBSD10Emu = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18 + + sizeofRtMsghdrFreeBSD10Emu = 0x98 + sizeofRtMetricsFreeBSD10Emu = 0x70 + + sizeofIfMsghdrFreeBSD7Emu = 0xa8 + sizeofIfMsghdrFreeBSD8Emu = 0xa8 + sizeofIfMsghdrFreeBSD9Emu = 0xa8 + sizeofIfMsghdrFreeBSD10Emu = 0xa8 + sizeofIfMsghdrFreeBSD11Emu = 0xa8 + + sizeofIfDataFreeBSD7Emu = 0x98 + sizeofIfDataFreeBSD8Emu = 0x98 + sizeofIfDataFreeBSD9Emu = 0x98 + sizeofIfDataFreeBSD10Emu = 0x98 + sizeofIfDataFreeBSD11Emu = 0x98 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/route/zsys_freebsd_arm.go b/fn/vendor/golang.org/x/net/route/zsys_freebsd_arm.go new file mode 100644 index 000000000..58f8ea16f --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/zsys_freebsd_arm.go @@ -0,0 +1,123 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_IFMALIST = 0x4 + sysNET_RT_IFLISTL = 0x5 +) + +const ( + sysCTL_MAXNAME = 0x18 + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + 
sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrlFreeBSD10 = 0x68 + sizeofIfaMsghdrFreeBSD10 = 0x14 + sizeofIfaMsghdrlFreeBSD10 = 0x6c + sizeofIfmaMsghdrFreeBSD10 = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10 = 0x18 + + sizeofRtMsghdrFreeBSD10 = 0x5c + sizeofRtMetricsFreeBSD10 = 0x38 + + sizeofIfMsghdrFreeBSD7 = 0x70 + sizeofIfMsghdrFreeBSD8 = 0x70 + sizeofIfMsghdrFreeBSD9 = 0x70 + sizeofIfMsghdrFreeBSD10 = 0x70 + sizeofIfMsghdrFreeBSD11 = 0xa8 + + sizeofIfDataFreeBSD7 = 0x60 + sizeofIfDataFreeBSD8 = 0x60 + sizeofIfDataFreeBSD9 = 0x60 + sizeofIfDataFreeBSD10 = 0x60 + sizeofIfDataFreeBSD11 = 0x98 + + sizeofIfMsghdrlFreeBSD10Emu = 0x68 + sizeofIfaMsghdrFreeBSD10Emu = 0x14 + sizeofIfaMsghdrlFreeBSD10Emu = 0x6c + sizeofIfmaMsghdrFreeBSD10Emu = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18 + + sizeofRtMsghdrFreeBSD10Emu = 0x5c + sizeofRtMetricsFreeBSD10Emu = 0x38 + + sizeofIfMsghdrFreeBSD7Emu = 0x70 + sizeofIfMsghdrFreeBSD8Emu = 0x70 + sizeofIfMsghdrFreeBSD9Emu = 0x70 + sizeofIfMsghdrFreeBSD10Emu = 0x70 + sizeofIfMsghdrFreeBSD11Emu = 0xa8 + + sizeofIfDataFreeBSD7Emu = 0x60 + sizeofIfDataFreeBSD8Emu = 0x60 + sizeofIfDataFreeBSD9Emu = 
0x60 + sizeofIfDataFreeBSD10Emu = 0x60 + sizeofIfDataFreeBSD11Emu = 0x98 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/route/zsys_netbsd.go b/fn/vendor/golang.org/x/net/route/zsys_netbsd.go new file mode 100644 index 000000000..e0df45e8b --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/zsys_netbsd.go @@ -0,0 +1,97 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x22 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x5 + sysNET_RT_MAXID = 0x6 +) + +const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_DDB = 0x9 + sysCTL_PROC = 0xa + sysCTL_VENDOR = 0xb + sysCTL_EMUL = 0xc + sysCTL_SECURITY = 0xd + sysCTL_MAXID = 0xe +) + +const ( + sysRTM_VERSION = 0x4 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_OLDADD = 0x9 + sysRTM_OLDDEL = 0xa + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFANNOUNCE = 0x10 + sysRTM_IEEE80211 = 0x11 + sysRTM_SETGATE = 0x12 + sysRTM_LLINFO_UPD = 0x13 + sysRTM_IFINFO = 0x14 + sysRTM_CHGADDR = 0x15 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + sysRTA_TAG = 0x100 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_TAG = 0x8 + sysRTAX_MAX = 0x9 +) + +const ( + sizeofIfMsghdrNetBSD7 = 
0x98 + sizeofIfaMsghdrNetBSD7 = 0x18 + sizeofIfAnnouncemsghdrNetBSD7 = 0x18 + + sizeofRtMsghdrNetBSD7 = 0x78 + sizeofRtMetricsNetBSD7 = 0x50 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/route/zsys_openbsd.go b/fn/vendor/golang.org/x/net/route/zsys_openbsd.go new file mode 100644 index 000000000..f5a1ff967 --- /dev/null +++ b/fn/vendor/golang.org/x/net/route/zsys_openbsd.go @@ -0,0 +1,90 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_STATS = 0x4 + sysNET_RT_TABLE = 0x5 + sysNET_RT_IFNAMES = 0x6 + sysNET_RT_MAXID = 0x7 +) + +const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_FS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_DDB = 0x9 + sysCTL_VFS = 0xa + sysCTL_MAXID = 0xb +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_IFANNOUNCE = 0xf + sysRTM_DESYNC = 0x10 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + sysRTA_SRC = 0x100 + sysRTA_SRCMASK = 0x200 + sysRTA_LABEL = 0x400 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_SRC = 0x8 + sysRTAX_SRCMASK = 0x9 + sysRTAX_LABEL = 0xa + sysRTAX_MAX = 0xb +) + +const 
( + sizeofRtMsghdr = 0x60 + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/fn/vendor/golang.org/x/net/trace/events.go b/fn/vendor/golang.org/x/net/trace/events.go index e66c7e328..c646a6952 100644 --- a/fn/vendor/golang.org/x/net/trace/events.go +++ b/fn/vendor/golang.org/x/net/trace/events.go @@ -21,11 +21,6 @@ import ( "time" ) -var eventsTmpl = template.Must(template.New("events").Funcs(template.FuncMap{ - "elapsed": elapsed, - "trimSpace": strings.TrimSpace, -}).Parse(eventsHTML)) - const maxEventsPerLog = 100 type bucket struct { @@ -44,9 +39,9 @@ var buckets = []bucket{ } // RenderEvents renders the HTML page typically served at /debug/events. -// It does not do any auth checking; see AuthRequest for the default auth check -// used by the handler registered on http.DefaultServeMux. -// req may be nil. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { now := time.Now() data := &struct { @@ -101,7 +96,7 @@ func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { famMu.RLock() defer famMu.RUnlock() - if err := eventsTmpl.Execute(w, data); err != nil { + if err := eventsTmpl().Execute(w, data); err != nil { log.Printf("net/trace: Failed executing template: %v", err) } } @@ -421,6 +416,19 @@ func freeEventLog(el *eventLog) { } } +var eventsTmplCache *template.Template +var eventsTmplOnce sync.Once + +func eventsTmpl() *template.Template { + eventsTmplOnce.Do(func() { + eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, + }).Parse(eventsHTML)) + }) + return eventsTmplCache +} + const eventsHTML = ` diff --git a/fn/vendor/golang.org/x/net/trace/histogram.go b/fn/vendor/golang.org/x/net/trace/histogram.go index bb42aa532..9bf4286c7 100644 --- 
a/fn/vendor/golang.org/x/net/trace/histogram.go +++ b/fn/vendor/golang.org/x/net/trace/histogram.go @@ -12,6 +12,7 @@ import ( "html/template" "log" "math" + "sync" "golang.org/x/net/internal/timeseries" ) @@ -320,15 +321,20 @@ func (h *histogram) newData() *data { func (h *histogram) html() template.HTML { buf := new(bytes.Buffer) - if err := distTmpl.Execute(buf, h.newData()); err != nil { + if err := distTmpl().Execute(buf, h.newData()); err != nil { buf.Reset() log.Printf("net/trace: couldn't execute template: %v", err) } return template.HTML(buf.String()) } -// Input: data -var distTmpl = template.Must(template.New("distTmpl").Parse(` +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` @@ -354,3 +360,6 @@ var distTmpl = template.Must(template.New("distTmpl").Parse(` {{end}}
    Count: {{.Count}}
    `)) + }) + return distTmplCache +} diff --git a/fn/vendor/golang.org/x/net/trace/trace.go b/fn/vendor/golang.org/x/net/trace/trace.go index 0767c8c69..bb72a527e 100644 --- a/fn/vendor/golang.org/x/net/trace/trace.go +++ b/fn/vendor/golang.org/x/net/trace/trace.go @@ -77,7 +77,6 @@ import ( "sync/atomic" "time" - "golang.org/x/net/context" "golang.org/x/net/internal/timeseries" ) @@ -91,15 +90,19 @@ var DebugUseAfterFinish = false // It returns two bools; the first indicates whether the page may be viewed at all, // and the second indicates whether sensitive events will be shown. // -// AuthRequest may be replaced by a program to customise its authorisation requirements. +// AuthRequest may be replaced by a program to customize its authorization requirements. // -// The default AuthRequest function returns (true, true) iff the request comes from localhost/127.0.0.1/[::1]. +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. host, _, err := net.SplitHostPort(req.RemoteAddr) - switch { - case err != nil: // Badly formed address; fail closed. 
- return false, false - case host == "localhost" || host == "127.0.0.1" || host == "::1": + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": return true, true default: return false, false @@ -107,30 +110,46 @@ var AuthRequest = func(req *http.Request) (any, sensitive bool) { } func init() { - http.HandleFunc("/debug/requests", func(w http.ResponseWriter, req *http.Request) { - any, sensitive := AuthRequest(req) - if !any { - http.Error(w, "not allowed", http.StatusUnauthorized) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - Render(w, req, sensitive) - }) - http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) { - any, sensitive := AuthRequest(req) - if !any { - http.Error(w, "not allowed", http.StatusUnauthorized) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - RenderEvents(w, req, sensitive) - }) + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. + http.HandleFunc("/debug/requests", Traces) + http.HandleFunc("/debug/events", Events) +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. 
+func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) } // Render renders the HTML page typically served at /debug/requests. -// It does not do any auth checking; see AuthRequest for the default auth check -// used by the handler registered on http.DefaultServeMux. -// req may be nil. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Traces handler. func Render(w io.Writer, req *http.Request, sensitive bool) { data := &struct { Families []string @@ -234,7 +253,7 @@ func Render(w io.Writer, req *http.Request, sensitive bool) { completedMu.RLock() defer completedMu.RUnlock() - if err := pageTmpl.ExecuteTemplate(w, "Page", data); err != nil { + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { log.Printf("net/trace: Failed executing template: %v", err) } } @@ -267,18 +286,6 @@ type contextKeyT string var contextKey = contextKeyT("golang.org/x/net/trace.Trace") -// NewContext returns a copy of the parent context -// and associates it with a Trace. -func NewContext(ctx context.Context, tr Trace) context.Context { - return context.WithValue(ctx, contextKey, tr) -} - -// FromContext returns the Trace bound to the context, if any. -func FromContext(ctx context.Context) (tr Trace, ok bool) { - tr, ok = ctx.Value(contextKey).(Trace) - return -} - // Trace represents an active request. type Trace interface { // LazyLog adds x to the event log. 
It will be evaluated each time the @@ -329,7 +336,8 @@ func New(family, title string) Trace { tr.ref() tr.Family, tr.Title = family, title tr.Start = time.Now() - tr.events = make([]event, 0, maxEventsPerTrace) + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] activeMu.RLock() s := activeTraces[tr.Family] @@ -646,8 +654,8 @@ type event struct { Elapsed time.Duration // since previous event in trace NewDay bool // whether this event is on a different day to the previous event Recyclable bool // whether this event was passed via LazyLog - What interface{} // string or fmt.Stringer Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer } // WhenString returns a string representation of the elapsed time of the event. @@ -688,14 +696,17 @@ type trace struct { IsError bool // Append-only sequence of events (modulo discards). - mu sync.RWMutex - events []event + mu sync.RWMutex + events []event + maxEvents int refs int32 // how many buckets this is in recycler func(interface{}) disc discarded // scratch space to avoid allocation finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events } func (tr *trace) reset() { @@ -707,11 +718,15 @@ func (tr *trace) reset() { tr.traceID = 0 tr.spanID = 0 tr.IsError = false + tr.maxEvents = 0 tr.events = nil tr.refs = 0 tr.recycler = nil tr.disc = 0 tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } } // delta returns the elapsed time since the last event or the trace start, @@ -740,7 +755,7 @@ func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { and very unlikely to be the fault of this code. The most likely scenario is that some code elsewhere is using - a requestz.Trace after its Finish method is called. + a trace.Trace after its Finish method is called. 
You can temporarily set the DebugUseAfterFinish var to help discover where that is; do not leave that var set, since it makes this package much less efficient. @@ -749,11 +764,11 @@ func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} tr.mu.Lock() e.Elapsed, e.NewDay = tr.delta(e.When) - if len(tr.events) < cap(tr.events) { + if len(tr.events) < tr.maxEvents { tr.events = append(tr.events, e) } else { // Discard the middle events. - di := int((cap(tr.events) - 1) / 2) + di := int((tr.maxEvents - 1) / 2) if d, ok := tr.events[di].What.(*discarded); ok { (*d)++ } else { @@ -773,7 +788,7 @@ func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { go tr.recycler(tr.events[di+1].What) } copy(tr.events[di+1:], tr.events[di+2:]) - tr.events[cap(tr.events)-1] = e + tr.events[tr.maxEvents-1] = e } tr.mu.Unlock() } @@ -799,7 +814,7 @@ func (tr *trace) SetTraceInfo(traceID, spanID uint64) { func (tr *trace) SetMaxEvents(m int) { // Always keep at least three events: first, discarded count, last. 
if len(tr.events) == 0 && m > 3 { - tr.events = make([]event, 0, m) + tr.maxEvents = m } } @@ -890,10 +905,18 @@ func elapsed(d time.Duration) string { return string(b) } -var pageTmpl = template.Must(template.New("Page").Funcs(template.FuncMap{ - "elapsed": elapsed, - "add": func(a, b int) int { return a + b }, -}).Parse(pageHTML)) +var pageTmplCache *template.Template +var pageTmplOnce sync.Once + +func pageTmpl() *template.Template { + pageTmplOnce.Do(func() { + pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, + }).Parse(pageHTML)) + }) + return pageTmplCache +} const pageHTML = ` {{template "Prolog" .}} diff --git a/fn/vendor/golang.org/x/net/trace/trace_go16.go b/fn/vendor/golang.org/x/net/trace/trace_go16.go new file mode 100644 index 000000000..d60819118 --- /dev/null +++ b/fn/vendor/golang.org/x/net/trace/trace_go16.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package trace + +import "golang.org/x/net/context" + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} diff --git a/fn/vendor/golang.org/x/net/trace/trace_go17.go b/fn/vendor/golang.org/x/net/trace/trace_go17.go new file mode 100644 index 000000000..df6e1fba7 --- /dev/null +++ b/fn/vendor/golang.org/x/net/trace/trace_go17.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.7 + +package trace + +import "context" + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} diff --git a/fn/vendor/golang.org/x/net/trace/trace_test.go b/fn/vendor/golang.org/x/net/trace/trace_test.go index c2f5fcbaa..bfd9dfe94 100644 --- a/fn/vendor/golang.org/x/net/trace/trace_test.go +++ b/fn/vendor/golang.org/x/net/trace/trace_test.go @@ -5,6 +5,7 @@ package trace import ( + "net/http" "reflect" "testing" ) @@ -44,3 +45,134 @@ func TestResetLog(t *testing.T) { t.Errorf("reset didn't clear all fields: %+v", el) } } + +func TestAuthRequest(t *testing.T) { + testCases := []struct { + host string + want bool + }{ + {host: "192.168.23.1", want: false}, + {host: "192.168.23.1:8080", want: false}, + {host: "malformed remote addr", want: false}, + {host: "localhost", want: true}, + {host: "localhost:8080", want: true}, + {host: "127.0.0.1", want: true}, + {host: "127.0.0.1:8080", want: true}, + {host: "::1", want: true}, + {host: "[::1]:8080", want: true}, + } + for _, tt := range testCases { + req := &http.Request{RemoteAddr: tt.host} + any, sensitive := AuthRequest(req) + if any != tt.want || sensitive != tt.want { + t.Errorf("AuthRequest(%q) = %t, %t; want %t, %t", tt.host, any, sensitive, tt.want, tt.want) + } + } +} + +// TestParseTemplate checks that all templates used by this package are valid +// as they are parsed on first usage +func TestParseTemplate(t *testing.T) { + if tmpl := distTmpl(); tmpl == nil { + t.Error("invalid template returned from distTmpl()") + } + if tmpl := pageTmpl(); tmpl == nil { + t.Error("invalid template returned from pageTmpl()") + } + if tmpl := eventsTmpl(); tmpl == nil { + 
t.Error("invalid template returned from eventsTmpl()") + } +} + +func benchmarkTrace(b *testing.B, maxEvents, numEvents int) { + numSpans := (b.N + numEvents + 1) / numEvents + + for i := 0; i < numSpans; i++ { + tr := New("test", "test") + tr.SetMaxEvents(maxEvents) + for j := 0; j < numEvents; j++ { + tr.LazyPrintf("%d", j) + } + tr.Finish() + } +} + +func BenchmarkTrace_Default_2(b *testing.B) { + benchmarkTrace(b, 0, 2) +} + +func BenchmarkTrace_Default_10(b *testing.B) { + benchmarkTrace(b, 0, 10) +} + +func BenchmarkTrace_Default_100(b *testing.B) { + benchmarkTrace(b, 0, 100) +} + +func BenchmarkTrace_Default_1000(b *testing.B) { + benchmarkTrace(b, 0, 1000) +} + +func BenchmarkTrace_Default_10000(b *testing.B) { + benchmarkTrace(b, 0, 10000) +} + +func BenchmarkTrace_10_2(b *testing.B) { + benchmarkTrace(b, 10, 2) +} + +func BenchmarkTrace_10_10(b *testing.B) { + benchmarkTrace(b, 10, 10) +} + +func BenchmarkTrace_10_100(b *testing.B) { + benchmarkTrace(b, 10, 100) +} + +func BenchmarkTrace_10_1000(b *testing.B) { + benchmarkTrace(b, 10, 1000) +} + +func BenchmarkTrace_10_10000(b *testing.B) { + benchmarkTrace(b, 10, 10000) +} + +func BenchmarkTrace_100_2(b *testing.B) { + benchmarkTrace(b, 100, 2) +} + +func BenchmarkTrace_100_10(b *testing.B) { + benchmarkTrace(b, 100, 10) +} + +func BenchmarkTrace_100_100(b *testing.B) { + benchmarkTrace(b, 100, 100) +} + +func BenchmarkTrace_100_1000(b *testing.B) { + benchmarkTrace(b, 100, 1000) +} + +func BenchmarkTrace_100_10000(b *testing.B) { + benchmarkTrace(b, 100, 10000) +} + +func BenchmarkTrace_1000_2(b *testing.B) { + benchmarkTrace(b, 1000, 2) +} + +func BenchmarkTrace_1000_10(b *testing.B) { + benchmarkTrace(b, 1000, 10) +} + +func BenchmarkTrace_1000_100(b *testing.B) { + benchmarkTrace(b, 1000, 100) +} + +func BenchmarkTrace_1000_1000(b *testing.B) { + benchmarkTrace(b, 1000, 1000) +} + +func BenchmarkTrace_1000_10000(b *testing.B) { + benchmarkTrace(b, 1000, 10000) +} diff --git 
a/fn/vendor/golang.org/x/net/webdav/file.go b/fn/vendor/golang.org/x/net/webdav/file.go index 9ba1ca16e..748118dd3 100644 --- a/fn/vendor/golang.org/x/net/webdav/file.go +++ b/fn/vendor/golang.org/x/net/webdav/file.go @@ -5,6 +5,7 @@ package webdav import ( + "encoding/xml" "io" "net/http" "os" @@ -14,7 +15,7 @@ import ( "sync" "time" - "golang.org/x/net/webdav/internal/xml" + "golang.org/x/net/context" ) // slashClean is equivalent to but slightly more efficient than @@ -37,11 +38,11 @@ func slashClean(name string) string { // might apply". In particular, whether or not renaming a file or directory // overwriting another existing file or directory is an error is OS-dependent. type FileSystem interface { - Mkdir(name string, perm os.FileMode) error - OpenFile(name string, flag int, perm os.FileMode) (File, error) - RemoveAll(name string) error - Rename(oldName, newName string) error - Stat(name string) (os.FileInfo, error) + Mkdir(ctx context.Context, name string, perm os.FileMode) error + OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) + RemoveAll(ctx context.Context, name string) error + Rename(ctx context.Context, oldName, newName string) error + Stat(ctx context.Context, name string) (os.FileInfo, error) } // A File is returned by a FileSystem's OpenFile method and can be served by a @@ -77,14 +78,14 @@ func (d Dir) resolve(name string) string { return filepath.Join(dir, filepath.FromSlash(slashClean(name))) } -func (d Dir) Mkdir(name string, perm os.FileMode) error { +func (d Dir) Mkdir(ctx context.Context, name string, perm os.FileMode) error { if name = d.resolve(name); name == "" { return os.ErrNotExist } return os.Mkdir(name, perm) } -func (d Dir) OpenFile(name string, flag int, perm os.FileMode) (File, error) { +func (d Dir) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) { if name = d.resolve(name); name == "" { return nil, os.ErrNotExist } @@ -95,7 +96,7 @@ func (d Dir) 
OpenFile(name string, flag int, perm os.FileMode) (File, error) { return f, nil } -func (d Dir) RemoveAll(name string) error { +func (d Dir) RemoveAll(ctx context.Context, name string) error { if name = d.resolve(name); name == "" { return os.ErrNotExist } @@ -106,7 +107,7 @@ func (d Dir) RemoveAll(name string) error { return os.RemoveAll(name) } -func (d Dir) Rename(oldName, newName string) error { +func (d Dir) Rename(ctx context.Context, oldName, newName string) error { if oldName = d.resolve(oldName); oldName == "" { return os.ErrNotExist } @@ -120,7 +121,7 @@ func (d Dir) Rename(oldName, newName string) error { return os.Rename(oldName, newName) } -func (d Dir) Stat(name string) (os.FileInfo, error) { +func (d Dir) Stat(ctx context.Context, name string) (os.FileInfo, error) { if name = d.resolve(name); name == "" { return nil, os.ErrNotExist } @@ -238,7 +239,7 @@ func (fs *memFS) find(op, fullname string) (parent *memFSNode, frag string, err return parent, frag, err } -func (fs *memFS) Mkdir(name string, perm os.FileMode) error { +func (fs *memFS) Mkdir(ctx context.Context, name string, perm os.FileMode) error { fs.mu.Lock() defer fs.mu.Unlock() @@ -261,7 +262,7 @@ func (fs *memFS) Mkdir(name string, perm os.FileMode) error { return nil } -func (fs *memFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) { +func (fs *memFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) { fs.mu.Lock() defer fs.mu.Unlock() @@ -315,7 +316,7 @@ func (fs *memFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) }, nil } -func (fs *memFS) RemoveAll(name string) error { +func (fs *memFS) RemoveAll(ctx context.Context, name string) error { fs.mu.Lock() defer fs.mu.Unlock() @@ -331,7 +332,7 @@ func (fs *memFS) RemoveAll(name string) error { return nil } -func (fs *memFS) Rename(oldName, newName string) error { +func (fs *memFS) Rename(ctx context.Context, oldName, newName string) error { fs.mu.Lock() defer 
fs.mu.Unlock() @@ -382,7 +383,7 @@ func (fs *memFS) Rename(oldName, newName string) error { return nil } -func (fs *memFS) Stat(name string) (os.FileInfo, error) { +func (fs *memFS) Stat(ctx context.Context, name string) (os.FileInfo, error) { fs.mu.Lock() defer fs.mu.Unlock() @@ -600,9 +601,9 @@ func (f *memFile) Write(p []byte) (int, error) { // moveFiles moves files and/or directories from src to dst. // // See section 9.9.4 for when various HTTP status codes apply. -func moveFiles(fs FileSystem, src, dst string, overwrite bool) (status int, err error) { +func moveFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool) (status int, err error) { created := false - if _, err := fs.Stat(dst); err != nil { + if _, err := fs.Stat(ctx, dst); err != nil { if !os.IsNotExist(err) { return http.StatusForbidden, err } @@ -612,13 +613,13 @@ func moveFiles(fs FileSystem, src, dst string, overwrite bool) (status int, err // and the Overwrite header is "T", then prior to performing the move, // the server must perform a DELETE with "Depth: infinity" on the // destination resource. - if err := fs.RemoveAll(dst); err != nil { + if err := fs.RemoveAll(ctx, dst); err != nil { return http.StatusForbidden, err } } else { return http.StatusPreconditionFailed, os.ErrExist } - if err := fs.Rename(src, dst); err != nil { + if err := fs.Rename(ctx, src, dst); err != nil { return http.StatusForbidden, err } if created { @@ -651,7 +652,7 @@ func copyProps(dst, src File) error { // copyFiles copies files and/or directories from src to dst. // // See section 9.8.5 for when various HTTP status codes apply. 
-func copyFiles(fs FileSystem, src, dst string, overwrite bool, depth int, recursion int) (status int, err error) { +func copyFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool, depth int, recursion int) (status int, err error) { if recursion == 1000 { return http.StatusInternalServerError, errRecursionTooDeep } @@ -660,7 +661,7 @@ func copyFiles(fs FileSystem, src, dst string, overwrite bool, depth int, recurs // TODO: section 9.8.3 says that "Note that an infinite-depth COPY of /A/ // into /A/B/ could lead to infinite recursion if not handled correctly." - srcFile, err := fs.OpenFile(src, os.O_RDONLY, 0) + srcFile, err := fs.OpenFile(ctx, src, os.O_RDONLY, 0) if err != nil { if os.IsNotExist(err) { return http.StatusNotFound, err @@ -678,7 +679,7 @@ func copyFiles(fs FileSystem, src, dst string, overwrite bool, depth int, recurs srcPerm := srcStat.Mode() & os.ModePerm created := false - if _, err := fs.Stat(dst); err != nil { + if _, err := fs.Stat(ctx, dst); err != nil { if os.IsNotExist(err) { created = true } else { @@ -688,13 +689,13 @@ func copyFiles(fs FileSystem, src, dst string, overwrite bool, depth int, recurs if !overwrite { return http.StatusPreconditionFailed, os.ErrExist } - if err := fs.RemoveAll(dst); err != nil && !os.IsNotExist(err) { + if err := fs.RemoveAll(ctx, dst); err != nil && !os.IsNotExist(err) { return http.StatusForbidden, err } } if srcStat.IsDir() { - if err := fs.Mkdir(dst, srcPerm); err != nil { + if err := fs.Mkdir(ctx, dst, srcPerm); err != nil { return http.StatusForbidden, err } if depth == infiniteDepth { @@ -706,7 +707,7 @@ func copyFiles(fs FileSystem, src, dst string, overwrite bool, depth int, recurs name := c.Name() s := path.Join(src, name) d := path.Join(dst, name) - cStatus, cErr := copyFiles(fs, s, d, overwrite, depth, recursion) + cStatus, cErr := copyFiles(ctx, fs, s, d, overwrite, depth, recursion) if cErr != nil { // TODO: MultiStatus. 
return cStatus, cErr @@ -715,7 +716,7 @@ func copyFiles(fs FileSystem, src, dst string, overwrite bool, depth int, recurs } } else { - dstFile, err := fs.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, srcPerm) + dstFile, err := fs.OpenFile(ctx, dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, srcPerm) if err != nil { if os.IsNotExist(err) { return http.StatusConflict, err @@ -748,7 +749,7 @@ func copyFiles(fs FileSystem, src, dst string, overwrite bool, depth int, recurs // Allowed values for depth are 0, 1 or infiniteDepth. For each visited node, // walkFS calls walkFn. If a visited file system node is a directory and // walkFn returns filepath.SkipDir, walkFS will skip traversal of this node. -func walkFS(fs FileSystem, depth int, name string, info os.FileInfo, walkFn filepath.WalkFunc) error { +func walkFS(ctx context.Context, fs FileSystem, depth int, name string, info os.FileInfo, walkFn filepath.WalkFunc) error { // This implementation is based on Walk's code in the standard path/filepath package. err := walkFn(name, info, nil) if err != nil { @@ -765,7 +766,7 @@ func walkFS(fs FileSystem, depth int, name string, info os.FileInfo, walkFn file } // Read directory names. 
- f, err := fs.OpenFile(name, os.O_RDONLY, 0) + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) if err != nil { return walkFn(name, info, err) } @@ -777,13 +778,13 @@ func walkFS(fs FileSystem, depth int, name string, info os.FileInfo, walkFn file for _, fileInfo := range fileInfos { filename := path.Join(name, fileInfo.Name()) - fileInfo, err := fs.Stat(filename) + fileInfo, err := fs.Stat(ctx, filename) if err != nil { if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { return err } } else { - err = walkFS(fs, depth, filename, fileInfo, walkFn) + err = walkFS(ctx, fs, depth, filename, fileInfo, walkFn) if err != nil { if !fileInfo.IsDir() || err != filepath.SkipDir { return err diff --git a/fn/vendor/golang.org/x/net/webdav/file_go1.6.go b/fn/vendor/golang.org/x/net/webdav/file_go1.6.go new file mode 100644 index 000000000..fa387700d --- /dev/null +++ b/fn/vendor/golang.org/x/net/webdav/file_go1.6.go @@ -0,0 +1,17 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package webdav + +import ( + "net/http" + + "golang.org/x/net/context" +) + +func getContext(r *http.Request) context.Context { + return context.Background() +} diff --git a/fn/vendor/golang.org/x/net/webdav/file_go1.7.go b/fn/vendor/golang.org/x/net/webdav/file_go1.7.go new file mode 100644 index 000000000..d1c3de832 --- /dev/null +++ b/fn/vendor/golang.org/x/net/webdav/file_go1.7.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.7 + +package webdav + +import ( + "context" + "net/http" +) + +func getContext(r *http.Request) context.Context { + return r.Context() +} diff --git a/fn/vendor/golang.org/x/net/webdav/file_test.go b/fn/vendor/golang.org/x/net/webdav/file_test.go index 99547e16b..bfd96e193 100644 --- a/fn/vendor/golang.org/x/net/webdav/file_test.go +++ b/fn/vendor/golang.org/x/net/webdav/file_test.go @@ -5,6 +5,7 @@ package webdav import ( + "encoding/xml" "fmt" "io" "io/ioutil" @@ -18,7 +19,7 @@ import ( "strings" "testing" - "golang.org/x/net/webdav/internal/xml" + "golang.org/x/net/context" ) func TestSlashClean(t *testing.T) { @@ -196,13 +197,15 @@ func TestWalk(t *testing.T) { }}, } + ctx := context.Background() + for _, tc := range testCases { fs := NewMemFS().(*memFS) parts := strings.Split(tc.dir, "/") for p := 2; p < len(parts); p++ { d := strings.Join(parts[:p], "/") - if err := fs.Mkdir(d, 0666); err != nil { + if err := fs.Mkdir(ctx, d, 0666); err != nil { t.Errorf("tc.dir=%q: mkdir: %q: %v", tc.dir, d, err) } } @@ -232,14 +235,14 @@ func TestWalk(t *testing.T) { // analogous to the Unix find command. // // The returned strings are not guaranteed to be in any particular order. 
-func find(ss []string, fs FileSystem, name string) ([]string, error) { - stat, err := fs.Stat(name) +func find(ctx context.Context, ss []string, fs FileSystem, name string) ([]string, error) { + stat, err := fs.Stat(ctx, name) if err != nil { return nil, err } ss = append(ss, name) if stat.IsDir() { - f, err := fs.OpenFile(name, os.O_RDONLY, 0) + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) if err != nil { return nil, err } @@ -249,7 +252,7 @@ func find(ss []string, fs FileSystem, name string) ([]string, error) { return nil, err } for _, c := range children { - ss, err = find(ss, fs, path.Join(name, c.Name())) + ss, err = find(ctx, ss, fs, path.Join(name, c.Name())) if err != nil { return nil, err } @@ -404,6 +407,8 @@ func testFS(t *testing.T, fs FileSystem) { "copy__ o=F d=∞ /d/y /d/x want errExist", } + ctx := context.Background() + for i, tc := range testCases { tc = strings.TrimSpace(tc) j := strings.IndexByte(tc, ' ') @@ -421,7 +426,7 @@ func testFS(t *testing.T, fs FileSystem) { if len(parts) != 4 || parts[2] != "want" { t.Fatalf("test case #%d %q: invalid write", i, tc) } - f, opErr := fs.OpenFile(parts[0], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + f, opErr := fs.OpenFile(ctx, parts[0], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) if got := errStr(opErr); got != parts[3] { t.Fatalf("test case #%d %q: OpenFile: got %q (%v), want %q", i, tc, got, opErr, parts[3]) } @@ -435,7 +440,7 @@ func testFS(t *testing.T, fs FileSystem) { } case "find": - got, err := find(nil, fs, "/") + got, err := find(ctx, nil, fs, "/") if err != nil { t.Fatalf("test case #%d %q: find: %v", i, tc, err) } @@ -465,17 +470,17 @@ func testFS(t *testing.T, fs FileSystem) { if parts[1] == "d=∞" { depth = infiniteDepth } - _, opErr = copyFiles(fs, parts[2], parts[3], parts[0] == "o=T", depth, 0) + _, opErr = copyFiles(ctx, fs, parts[2], parts[3], parts[0] == "o=T", depth, 0) case "mk-dir": - opErr = fs.Mkdir(parts[0], 0777) + opErr = fs.Mkdir(ctx, parts[0], 0777) case "move__": - _, opErr = 
moveFiles(fs, parts[1], parts[2], parts[0] == "o=T") + _, opErr = moveFiles(ctx, fs, parts[1], parts[2], parts[0] == "o=T") case "rm-all": - opErr = fs.RemoveAll(parts[0]) + opErr = fs.RemoveAll(ctx, parts[0]) case "stat": var stat os.FileInfo fileName := parts[0] - if stat, opErr = fs.Stat(fileName); opErr == nil { + if stat, opErr = fs.Stat(ctx, fileName); opErr == nil { if stat.IsDir() { got = "dir" } else { @@ -527,9 +532,10 @@ func TestMemFS(t *testing.T) { } func TestMemFSRoot(t *testing.T) { + ctx := context.Background() fs := NewMemFS() for i := 0; i < 5; i++ { - stat, err := fs.Stat("/") + stat, err := fs.Stat(ctx, "/") if err != nil { t.Fatalf("i=%d: Stat: %v", i, err) } @@ -537,7 +543,7 @@ func TestMemFSRoot(t *testing.T) { t.Fatalf("i=%d: Stat.IsDir is false, want true", i) } - f, err := fs.OpenFile("/", os.O_RDONLY, 0) + f, err := fs.OpenFile(ctx, "/", os.O_RDONLY, 0) if err != nil { t.Fatalf("i=%d: OpenFile: %v", i, err) } @@ -554,19 +560,20 @@ func TestMemFSRoot(t *testing.T) { t.Fatalf("i=%d: Write: got nil error, want non-nil", i) } - if err := fs.Mkdir(fmt.Sprintf("/dir%d", i), 0777); err != nil { + if err := fs.Mkdir(ctx, fmt.Sprintf("/dir%d", i), 0777); err != nil { t.Fatalf("i=%d: Mkdir: %v", i, err) } } } func TestMemFileReaddir(t *testing.T) { + ctx := context.Background() fs := NewMemFS() - if err := fs.Mkdir("/foo", 0777); err != nil { + if err := fs.Mkdir(ctx, "/foo", 0777); err != nil { t.Fatalf("Mkdir: %v", err) } readdir := func(count int) ([]os.FileInfo, error) { - f, err := fs.OpenFile("/foo", os.O_RDONLY, 0) + f, err := fs.OpenFile(ctx, "/foo", os.O_RDONLY, 0) if err != nil { t.Fatalf("OpenFile: %v", err) } @@ -650,9 +657,11 @@ func TestMemFile(t *testing.T) { "seek cur -99 want err", } + ctx := context.Background() + const filename = "/foo" fs := NewMemFS() - f, err := fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + f, err := fs.OpenFile(ctx, filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) if err != nil { 
t.Fatalf("OpenFile: %v", err) } @@ -746,7 +755,7 @@ func TestMemFile(t *testing.T) { } case "wantData": - g, err := fs.OpenFile(filename, os.O_RDONLY, 0666) + g, err := fs.OpenFile(ctx, filename, os.O_RDONLY, 0666) if err != nil { t.Fatalf("test case #%d %q: OpenFile: %v", i, tc, err) } @@ -772,7 +781,7 @@ func TestMemFile(t *testing.T) { if err != nil { t.Fatalf("test case #%d %q: invalid size %q", i, tc, arg) } - fi, err := fs.Stat(filename) + fi, err := fs.Stat(ctx, filename) if err != nil { t.Fatalf("test case #%d %q: Stat: %v", i, tc, err) } @@ -787,8 +796,12 @@ func TestMemFile(t *testing.T) { // memFile doesn't allocate a new buffer for each of those N times. Otherwise, // calling io.Copy(aMemFile, src) is likely to have quadratic complexity. func TestMemFileWriteAllocs(t *testing.T) { + if runtime.Compiler == "gccgo" { + t.Skip("gccgo allocates here") + } + ctx := context.Background() fs := NewMemFS() - f, err := fs.OpenFile("/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + f, err := fs.OpenFile(ctx, "/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) if err != nil { t.Fatalf("OpenFile: %v", err) } @@ -810,6 +823,7 @@ func TestMemFileWriteAllocs(t *testing.T) { } func BenchmarkMemFileWrite(b *testing.B) { + ctx := context.Background() fs := NewMemFS() xxx := make([]byte, 1024) for i := range xxx { @@ -818,7 +832,7 @@ func BenchmarkMemFileWrite(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - f, err := fs.OpenFile("/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + f, err := fs.OpenFile(ctx, "/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) if err != nil { b.Fatalf("OpenFile: %v", err) } @@ -828,16 +842,17 @@ func BenchmarkMemFileWrite(b *testing.B) { if err := f.Close(); err != nil { b.Fatalf("Close: %v", err) } - if err := fs.RemoveAll("/xxx"); err != nil { + if err := fs.RemoveAll(ctx, "/xxx"); err != nil { b.Fatalf("RemoveAll: %v", err) } } } func TestCopyMoveProps(t *testing.T) { + ctx := context.Background() fs := NewMemFS() create := func(name 
string) error { - f, err := fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + f, err := fs.OpenFile(ctx, name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) if err != nil { return err } @@ -849,7 +864,7 @@ func TestCopyMoveProps(t *testing.T) { return cErr } patch := func(name string, patches ...Proppatch) error { - f, err := fs.OpenFile(name, os.O_RDWR, 0666) + f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0666) if err != nil { return err } @@ -861,7 +876,7 @@ func TestCopyMoveProps(t *testing.T) { return cErr } props := func(name string) (map[xml.Name]Property, error) { - f, err := fs.OpenFile(name, os.O_RDWR, 0666) + f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0666) if err != nil { return nil, err } @@ -899,10 +914,10 @@ func TestCopyMoveProps(t *testing.T) { if err := patch("/src", Proppatch{Props: []Property{p0, p1}}); err != nil { t.Fatalf("patch /src +p0 +p1: %v", err) } - if _, err := copyFiles(fs, "/src", "/tmp", true, infiniteDepth, 0); err != nil { + if _, err := copyFiles(ctx, fs, "/src", "/tmp", true, infiniteDepth, 0); err != nil { t.Fatalf("copyFiles /src /tmp: %v", err) } - if _, err := moveFiles(fs, "/tmp", "/dst", true); err != nil { + if _, err := moveFiles(ctx, fs, "/tmp", "/dst", true); err != nil { t.Fatalf("moveFiles /tmp /dst: %v", err) } if err := patch("/src", Proppatch{Props: []Property{p0}, Remove: true}); err != nil { @@ -1097,6 +1112,7 @@ func TestWalkFS(t *testing.T) { "/a/b/z", }, }} + ctx := context.Background() for _, tc := range testCases { fs, err := buildTestFS(tc.buildfs) if err != nil { @@ -1113,11 +1129,11 @@ func TestWalkFS(t *testing.T) { got = append(got, path) return nil } - fi, err := fs.Stat(tc.startAt) + fi, err := fs.Stat(ctx, tc.startAt) if err != nil { t.Fatalf("%s: cannot stat: %v", tc.desc, err) } - err = walkFS(fs, tc.depth, tc.startAt, fi, traceFn) + err = walkFS(ctx, fs, tc.depth, tc.startAt, fi, traceFn) if err != nil { t.Errorf("%s:\ngot error %v, want nil", tc.desc, err) continue @@ -1134,23 +1150,24 @@ 
func TestWalkFS(t *testing.T) { func buildTestFS(buildfs []string) (FileSystem, error) { // TODO: Could this be merged with the build logic in TestFS? + ctx := context.Background() fs := NewMemFS() for _, b := range buildfs { op := strings.Split(b, " ") switch op[0] { case "mkdir": - err := fs.Mkdir(op[1], os.ModeDir|0777) + err := fs.Mkdir(ctx, op[1], os.ModeDir|0777) if err != nil { return nil, err } case "touch": - f, err := fs.OpenFile(op[1], os.O_RDWR|os.O_CREATE, 0666) + f, err := fs.OpenFile(ctx, op[1], os.O_RDWR|os.O_CREATE, 0666) if err != nil { return nil, err } f.Close() case "write": - f, err := fs.OpenFile(op[1], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + f, err := fs.OpenFile(ctx, op[1], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) if err != nil { return nil, err } diff --git a/fn/vendor/golang.org/x/net/webdav/internal/xml/example_test.go b/fn/vendor/golang.org/x/net/webdav/internal/xml/example_test.go index becedd583..21b48dea5 100644 --- a/fn/vendor/golang.org/x/net/webdav/internal/xml/example_test.go +++ b/fn/vendor/golang.org/x/net/webdav/internal/xml/example_test.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/fn/vendor/golang.org/x/net/webdav/internal/xml/marshal.go b/fn/vendor/golang.org/x/net/webdav/internal/xml/marshal.go index 3c3b6aca5..cb82ec214 100644 --- a/fn/vendor/golang.org/x/net/webdav/internal/xml/marshal.go +++ b/fn/vendor/golang.org/x/net/webdav/internal/xml/marshal.go @@ -26,9 +26,9 @@ const ( // // Marshal handles an array or slice by marshalling each of the elements. // Marshal handles a pointer by marshalling the value it points at or, if the -// pointer is nil, by writing nothing. Marshal handles an interface value by +// pointer is nil, by writing nothing. 
Marshal handles an interface value by // marshalling the value it contains or, if the interface value is nil, by -// writing nothing. Marshal handles all other data by writing one or more XML +// writing nothing. Marshal handles all other data by writing one or more XML // elements containing the data. // // The name for the XML elements is taken from, in order of preference: @@ -61,7 +61,7 @@ const ( // value were part of the outer struct. // // If a field uses a tag "a>b>c", then the element c will be nested inside -// parent elements a and b. Fields that appear next to each other that name +// parent elements a and b. Fields that appear next to each other that name // the same parent will be enclosed in one XML element. // // See MarshalIndent for an example. @@ -222,7 +222,7 @@ func (enc *Encoder) EncodeToken(t Token) error { return p.cachedWriteError() case ProcInst: // First token to be encoded which is also a ProcInst with target of xml - // is the xml declaration. The only ProcInst where target of xml is allowed. + // is the xml declaration. The only ProcInst where target of xml is allowed. 
if t.Target == "xml" && p.Buffered() != 0 { return fmt.Errorf("xml: EncodeToken of ProcInst xml target only valid for xml declaration, first token encoded") } diff --git a/fn/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go b/fn/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go index 5dc78e748..226cfd013 100644 --- a/fn/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go +++ b/fn/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go @@ -1868,7 +1868,7 @@ func TestRace9796(t *testing.T) { for i := 0; i < 2; i++ { wg.Add(1) go func() { - Marshal(B{[]A{A{}}}) + Marshal(B{[]A{{}}}) wg.Done() }() } diff --git a/fn/vendor/golang.org/x/net/webdav/internal/xml/read.go b/fn/vendor/golang.org/x/net/webdav/internal/xml/read.go index 75b9f2ba1..4089056a1 100644 --- a/fn/vendor/golang.org/x/net/webdav/internal/xml/read.go +++ b/fn/vendor/golang.org/x/net/webdav/internal/xml/read.go @@ -1,4 +1,4 @@ -// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -27,7 +27,7 @@ import ( // discarded. // // Because Unmarshal uses the reflect package, it can only assign -// to exported (upper case) fields. Unmarshal uses a case-sensitive +// to exported (upper case) fields. Unmarshal uses a case-sensitive // comparison to match XML element names to tag values and struct // field names. // @@ -37,7 +37,7 @@ import ( // // * If the struct has a field of type []byte or string with tag // ",innerxml", Unmarshal accumulates the raw XML nested inside the -// element in that field. The rest of the rules still apply. +// element in that field. The rest of the rules still apply. // // * If the struct has a field named XMLName of type xml.Name, // Unmarshal records the element name in that field. 
@@ -59,7 +59,7 @@ import ( // // * If the XML element contains comments, they are accumulated in // the first struct field that has tag ",comment". The struct -// field may have type []byte or string. If there is no such +// field may have type []byte or string. If there is no such // field, the comments are discarded. // // * If the XML element contains a sub-element whose name matches @@ -102,7 +102,7 @@ import ( // // Unmarshal maps an XML element or attribute value to an integer or // floating-point field by setting the field to the result of -// interpreting the string value in decimal. There is no check for +// interpreting the string value in decimal. There is no check for // overflow. // // Unmarshal maps an XML element to an xml.Name by recording the diff --git a/fn/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go b/fn/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go index c9a6421f2..fdde288bc 100644 --- a/fn/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go +++ b/fn/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go @@ -1,4 +1,4 @@ -// Copyright 2011 The Go Authors. All rights reserved. +// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/fn/vendor/golang.org/x/net/webdav/internal/xml/xml.go b/fn/vendor/golang.org/x/net/webdav/internal/xml/xml.go index ffab4a70c..5b79cbecb 100644 --- a/fn/vendor/golang.org/x/net/webdav/internal/xml/xml.go +++ b/fn/vendor/golang.org/x/net/webdav/internal/xml/xml.go @@ -252,7 +252,7 @@ func NewDecoder(r io.Reader) *Decoder { // // Slices of bytes in the returned token data refer to the // parser's internal buffer and remain valid only until the next -// call to Token. To acquire a copy of the bytes, call CopyToken +// call to Token. To acquire a copy of the bytes, call CopyToken // or the token's Copy method. // // Token expands self-closing elements such as
    @@ -360,7 +360,7 @@ func (d *Decoder) switchToReader(r io.Reader) { } // Parsing state - stack holds old name space translations -// and the current set of open elements. The translations to pop when +// and the current set of open elements. The translations to pop when // ending a given tag are *below* it on the stack, which is // more work but forced on us by XML. type stack struct { @@ -1253,7 +1253,7 @@ func isNameString(s string) bool { // These tables were generated by cut and paste from Appendix B of // the XML spec at http://www.xml.com/axml/testaxml.htm -// and then reformatting. First corresponds to (Letter | '_' | ':') +// and then reformatting. First corresponds to (Letter | '_' | ':') // and second corresponds to NameChar. var first = &unicode.RangeTable{ diff --git a/fn/vendor/golang.org/x/net/webdav/internal/xml/xml_test.go b/fn/vendor/golang.org/x/net/webdav/internal/xml/xml_test.go index 312a7c98a..af4cf8ea8 100644 --- a/fn/vendor/golang.org/x/net/webdav/internal/xml/xml_test.go +++ b/fn/vendor/golang.org/x/net/webdav/internal/xml/xml_test.go @@ -1,4 +1,4 @@ -// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/fn/vendor/golang.org/x/net/webdav/prop.go b/fn/vendor/golang.org/x/net/webdav/prop.go index 88b9a3a35..e36a3b31d 100644 --- a/fn/vendor/golang.org/x/net/webdav/prop.go +++ b/fn/vendor/golang.org/x/net/webdav/prop.go @@ -5,6 +5,8 @@ package webdav import ( + "bytes" + "encoding/xml" "fmt" "io" "mime" @@ -13,7 +15,7 @@ import ( "path/filepath" "strconv" - "golang.org/x/net/webdav/internal/xml" + "golang.org/x/net/context" ) // Proppatch describes a property update instruction as defined in RFC 4918. @@ -101,39 +103,46 @@ type DeadPropsHolder interface { var liveProps = map[xml.Name]struct { // findFn implements the propfind function of this property. 
If nil, // it indicates a hidden property. - findFn func(FileSystem, LockSystem, string, os.FileInfo) (string, error) + findFn func(context.Context, FileSystem, LockSystem, string, os.FileInfo) (string, error) // dir is true if the property applies to directories. dir bool }{ - xml.Name{Space: "DAV:", Local: "resourcetype"}: { + {Space: "DAV:", Local: "resourcetype"}: { findFn: findResourceType, dir: true, }, - xml.Name{Space: "DAV:", Local: "displayname"}: { + {Space: "DAV:", Local: "displayname"}: { findFn: findDisplayName, dir: true, }, - xml.Name{Space: "DAV:", Local: "getcontentlength"}: { + {Space: "DAV:", Local: "getcontentlength"}: { findFn: findContentLength, dir: false, }, - xml.Name{Space: "DAV:", Local: "getlastmodified"}: { + {Space: "DAV:", Local: "getlastmodified"}: { findFn: findLastModified, - dir: false, + // http://webdav.org/specs/rfc4918.html#PROPERTY_getlastmodified + // suggests that getlastmodified should only apply to GETable + // resources, and this package does not support GET on directories. + // + // Nonetheless, some WebDAV clients expect child directories to be + // sortable by getlastmodified date, so this value is true, not false. + // See golang.org/issue/15334. + dir: true, }, - xml.Name{Space: "DAV:", Local: "creationdate"}: { + {Space: "DAV:", Local: "creationdate"}: { findFn: nil, dir: false, }, - xml.Name{Space: "DAV:", Local: "getcontentlanguage"}: { + {Space: "DAV:", Local: "getcontentlanguage"}: { findFn: nil, dir: false, }, - xml.Name{Space: "DAV:", Local: "getcontenttype"}: { + {Space: "DAV:", Local: "getcontenttype"}: { findFn: findContentType, dir: false, }, - xml.Name{Space: "DAV:", Local: "getetag"}: { + {Space: "DAV:", Local: "getetag"}: { findFn: findETag, // findETag implements ETag as the concatenated hex values of a file's // modification time and size. 
This is not a reliable synchronization @@ -144,8 +153,8 @@ var liveProps = map[xml.Name]struct { // TODO: The lockdiscovery property requires LockSystem to list the // active locks on a resource. - xml.Name{Space: "DAV:", Local: "lockdiscovery"}: {}, - xml.Name{Space: "DAV:", Local: "supportedlock"}: { + {Space: "DAV:", Local: "lockdiscovery"}: {}, + {Space: "DAV:", Local: "supportedlock"}: { findFn: findSupportedLock, dir: true, }, @@ -157,8 +166,8 @@ var liveProps = map[xml.Name]struct { // // Each Propstat has a unique status and each property name will only be part // of one Propstat element. -func props(fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Propstat, error) { - f, err := fs.OpenFile(name, os.O_RDONLY, 0) +func props(ctx context.Context, fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Propstat, error) { + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) if err != nil { return nil, err } @@ -187,7 +196,7 @@ func props(fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Prop } // Otherwise, it must either be a live property or we don't know it. if prop := liveProps[pn]; prop.findFn != nil && (prop.dir || !isDir) { - innerXML, err := prop.findFn(fs, ls, name, fi) + innerXML, err := prop.findFn(ctx, fs, ls, name, fi) if err != nil { return nil, err } @@ -205,8 +214,8 @@ func props(fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Prop } // Propnames returns the property names defined for resource name. -func propnames(fs FileSystem, ls LockSystem, name string) ([]xml.Name, error) { - f, err := fs.OpenFile(name, os.O_RDONLY, 0) +func propnames(ctx context.Context, fs FileSystem, ls LockSystem, name string) ([]xml.Name, error) { + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) if err != nil { return nil, err } @@ -245,8 +254,8 @@ func propnames(fs FileSystem, ls LockSystem, name string) ([]xml.Name, error) { // returned if they are named in 'include'. 
// // See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND -func allprop(fs FileSystem, ls LockSystem, name string, include []xml.Name) ([]Propstat, error) { - pnames, err := propnames(fs, ls, name) +func allprop(ctx context.Context, fs FileSystem, ls LockSystem, name string, include []xml.Name) ([]Propstat, error) { + pnames, err := propnames(ctx, fs, ls, name) if err != nil { return nil, err } @@ -260,12 +269,12 @@ func allprop(fs FileSystem, ls LockSystem, name string, include []xml.Name) ([]P pnames = append(pnames, pn) } } - return props(fs, ls, name, pnames) + return props(ctx, fs, ls, name, pnames) } // Patch patches the properties of resource name. The return values are // constrained in the same manner as DeadPropsHolder.Patch. -func patch(fs FileSystem, ls LockSystem, name string, patches []Proppatch) ([]Propstat, error) { +func patch(ctx context.Context, fs FileSystem, ls LockSystem, name string, patches []Proppatch) ([]Propstat, error) { conflict := false loop: for _, patch := range patches { @@ -296,7 +305,7 @@ loop: return makePropstats(pstatForbidden, pstatFailedDep), nil } - f, err := fs.OpenFile(name, os.O_RDWR, 0) + f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0) if err != nil { return nil, err } @@ -327,31 +336,51 @@ loop: return []Propstat{pstat}, nil } -func findResourceType(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { +func escapeXML(s string) string { + for i := 0; i < len(s); i++ { + // As an optimization, if s contains only ASCII letters, digits or a + // few special characters, the escaped value is s itself and we don't + // need to allocate a buffer and convert between string and []byte. + switch c := s[i]; { + case c == ' ' || c == '_' || + ('+' <= c && c <= '9') || // Digits as well as + , - . and / + ('A' <= c && c <= 'Z') || + ('a' <= c && c <= 'z'): + continue + } + // Otherwise, go through the full escaping process. 
+ var buf bytes.Buffer + xml.EscapeText(&buf, []byte(s)) + return buf.String() + } + return s +} + +func findResourceType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { if fi.IsDir() { return ``, nil } return "", nil } -func findDisplayName(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { +func findDisplayName(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { if slashClean(name) == "/" { // Hide the real name of a possibly prefixed root directory. return "", nil } - return fi.Name(), nil + return escapeXML(fi.Name()), nil } -func findContentLength(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { +func findContentLength(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { return strconv.FormatInt(fi.Size(), 10), nil } -func findLastModified(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { +func findLastModified(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { return fi.ModTime().Format(http.TimeFormat), nil } -func findContentType(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { - f, err := fs.OpenFile(name, os.O_RDONLY, 0) +func findContentType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) if err != nil { return "", err } @@ -373,14 +402,14 @@ func findContentType(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) return ctype, err } -func findETag(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { +func findETag(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { // The Apache http 2.4 web server by default concatenates the // modification time and size of a file. 
We replicate the heuristic // with nanosecond granularity. return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.Size()), nil } -func findSupportedLock(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { +func findSupportedLock(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { return `` + `` + `` + diff --git a/fn/vendor/golang.org/x/net/webdav/prop_test.go b/fn/vendor/golang.org/x/net/webdav/prop_test.go index ad4ec5b12..57d0e826f 100644 --- a/fn/vendor/golang.org/x/net/webdav/prop_test.go +++ b/fn/vendor/golang.org/x/net/webdav/prop_test.go @@ -5,6 +5,7 @@ package webdav import ( + "encoding/xml" "fmt" "net/http" "os" @@ -12,14 +13,15 @@ import ( "sort" "testing" - "golang.org/x/net/webdav/internal/xml" + "golang.org/x/net/context" ) func TestMemPS(t *testing.T) { + ctx := context.Background() // calcProps calculates the getlastmodified and getetag DAV: property // values in pstats for resource name in file-system fs. 
calcProps := func(name string, fs FileSystem, ls LockSystem, pstats []Propstat) error { - fi, err := fs.Stat(name) + fi, err := fs.Stat(ctx, name) if err != nil { return err } @@ -33,7 +35,7 @@ func TestMemPS(t *testing.T) { if fi.IsDir() { continue } - etag, err := findETag(fs, ls, name, fi) + etag, err := findETag(ctx, fs, ls, name, fi) if err != nil { return err } @@ -75,21 +77,22 @@ func TestMemPS(t *testing.T) { op: "propname", name: "/dir", wantPnames: []xml.Name{ - xml.Name{Space: "DAV:", Local: "resourcetype"}, - xml.Name{Space: "DAV:", Local: "displayname"}, - xml.Name{Space: "DAV:", Local: "supportedlock"}, + {Space: "DAV:", Local: "resourcetype"}, + {Space: "DAV:", Local: "displayname"}, + {Space: "DAV:", Local: "supportedlock"}, + {Space: "DAV:", Local: "getlastmodified"}, }, }, { op: "propname", name: "/file", wantPnames: []xml.Name{ - xml.Name{Space: "DAV:", Local: "resourcetype"}, - xml.Name{Space: "DAV:", Local: "displayname"}, - xml.Name{Space: "DAV:", Local: "getcontentlength"}, - xml.Name{Space: "DAV:", Local: "getlastmodified"}, - xml.Name{Space: "DAV:", Local: "getcontenttype"}, - xml.Name{Space: "DAV:", Local: "getetag"}, - xml.Name{Space: "DAV:", Local: "supportedlock"}, + {Space: "DAV:", Local: "resourcetype"}, + {Space: "DAV:", Local: "displayname"}, + {Space: "DAV:", Local: "getcontentlength"}, + {Space: "DAV:", Local: "getlastmodified"}, + {Space: "DAV:", Local: "getcontenttype"}, + {Space: "DAV:", Local: "getetag"}, + {Space: "DAV:", Local: "supportedlock"}, }, }}, }, { @@ -106,6 +109,9 @@ func TestMemPS(t *testing.T) { }, { XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, InnerXML: []byte("dir"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"}, + InnerXML: nil, // Calculated during test. 
}, { XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"}, InnerXML: []byte(lockEntry), @@ -452,14 +458,14 @@ func TestMemPS(t *testing.T) { op: "propname", name: "/file", wantPnames: []xml.Name{ - xml.Name{Space: "DAV:", Local: "resourcetype"}, - xml.Name{Space: "DAV:", Local: "displayname"}, - xml.Name{Space: "DAV:", Local: "getcontentlength"}, - xml.Name{Space: "DAV:", Local: "getlastmodified"}, - xml.Name{Space: "DAV:", Local: "getcontenttype"}, - xml.Name{Space: "DAV:", Local: "getetag"}, - xml.Name{Space: "DAV:", Local: "supportedlock"}, - xml.Name{Space: "foo", Local: "bar"}, + {Space: "DAV:", Local: "resourcetype"}, + {Space: "DAV:", Local: "displayname"}, + {Space: "DAV:", Local: "getcontentlength"}, + {Space: "DAV:", Local: "getlastmodified"}, + {Space: "DAV:", Local: "getcontenttype"}, + {Space: "DAV:", Local: "getetag"}, + {Space: "DAV:", Local: "supportedlock"}, + {Space: "foo", Local: "bar"}, }, }}, }, { @@ -516,7 +522,7 @@ func TestMemPS(t *testing.T) { var propstats []Propstat switch op.op { case "propname": - pnames, err := propnames(fs, ls, op.name) + pnames, err := propnames(ctx, fs, ls, op.name) if err != nil { t.Errorf("%s: got error %v, want nil", desc, err) continue @@ -528,11 +534,11 @@ func TestMemPS(t *testing.T) { } continue case "allprop": - propstats, err = allprop(fs, ls, op.name, op.pnames) + propstats, err = allprop(ctx, fs, ls, op.name, op.pnames) case "propfind": - propstats, err = props(fs, ls, op.name, op.pnames) + propstats, err = props(ctx, fs, ls, op.name, op.pnames) case "proppatch": - propstats, err = patch(fs, ls, op.name, op.patches) + propstats, err = patch(ctx, fs, ls, op.name, op.patches) default: t.Fatalf("%s: %s not implemented", desc, op.op) } @@ -585,8 +591,8 @@ type noDeadPropsFS struct { FileSystem } -func (fs noDeadPropsFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - f, err := fs.FileSystem.OpenFile(name, flag, perm) +func (fs noDeadPropsFS) OpenFile(ctx context.Context, name string, 
flag int, perm os.FileMode) (File, error) { + f, err := fs.FileSystem.OpenFile(ctx, name, flag, perm) if err != nil { return nil, err } diff --git a/fn/vendor/golang.org/x/net/webdav/webdav.go b/fn/vendor/golang.org/x/net/webdav/webdav.go index df6ef4501..7b56687fc 100644 --- a/fn/vendor/golang.org/x/net/webdav/webdav.go +++ b/fn/vendor/golang.org/x/net/webdav/webdav.go @@ -2,42 +2,21 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package webdav etc etc TODO. +// Package webdav provides a WebDAV server implementation. package webdav // import "golang.org/x/net/webdav" import ( "errors" "fmt" "io" - "log" "net/http" "net/url" "os" "path" - "runtime" "strings" "time" ) -// Package webdav's XML output requires the standard library's encoding/xml -// package version 1.5 or greater. Otherwise, it will produce malformed XML. -// -// As of May 2015, the Go stable release is version 1.4, so we print a message -// to let users know that this golang.org/x/etc package won't work yet. -// -// This package also won't work with Go 1.3 and earlier, but making this -// runtime version check catch all the earlier versions too, and not just -// "1.4.x", isn't worth the complexity. -// -// TODO: delete this check at some point after Go 1.5 is released. -var go1Dot4 = strings.HasPrefix(runtime.Version(), "go1.4.") - -func init() { - if go1Dot4 { - log.Println("package webdav requires Go version 1.5 or greater") - } -} - type Handler struct { // Prefix is the URL path prefix to strip from WebDAV resource paths. Prefix string @@ -169,7 +148,10 @@ func (h *Handler) confirmLocks(r *http.Request, src, dst string) (release func() if u.Host != r.Host { continue } - lsrc = u.Path + lsrc, status, err = h.stripPrefix(u.Path) + if err != nil { + return nil, status, err + } } release, err = h.LockSystem.Confirm(time.Now(), lsrc, dst, l.conditions...) 
if err == ErrConfirmationFailed { @@ -192,8 +174,9 @@ func (h *Handler) handleOptions(w http.ResponseWriter, r *http.Request) (status if err != nil { return status, err } + ctx := getContext(r) allow := "OPTIONS, LOCK, PUT, MKCOL" - if fi, err := h.FileSystem.Stat(reqPath); err == nil { + if fi, err := h.FileSystem.Stat(ctx, reqPath); err == nil { if fi.IsDir() { allow = "OPTIONS, LOCK, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND" } else { @@ -214,7 +197,8 @@ func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (sta return status, err } // TODO: check locks for read-only access?? - f, err := h.FileSystem.OpenFile(reqPath, os.O_RDONLY, 0) + ctx := getContext(r) + f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDONLY, 0) if err != nil { return http.StatusNotFound, err } @@ -226,7 +210,7 @@ func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (sta if fi.IsDir() { return http.StatusMethodNotAllowed, nil } - etag, err := findETag(h.FileSystem, h.LockSystem, reqPath, fi) + etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi) if err != nil { return http.StatusInternalServerError, err } @@ -247,18 +231,20 @@ func (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request) (status i } defer release() + ctx := getContext(r) + // TODO: return MultiStatus where appropriate. // "godoc os RemoveAll" says that "If the path does not exist, RemoveAll // returns nil (no error)." WebDAV semantics are that it should return a // "404 Not Found". We therefore have to Stat before we RemoveAll. 
- if _, err := h.FileSystem.Stat(reqPath); err != nil { + if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil { if os.IsNotExist(err) { return http.StatusNotFound, err } return http.StatusMethodNotAllowed, err } - if err := h.FileSystem.RemoveAll(reqPath); err != nil { + if err := h.FileSystem.RemoveAll(ctx, reqPath); err != nil { return http.StatusMethodNotAllowed, err } return http.StatusNoContent, nil @@ -276,8 +262,9 @@ func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int, defer release() // TODO(rost): Support the If-Match, If-None-Match headers? See bradfitz' // comments in http.checkEtag. + ctx := getContext(r) - f, err := h.FileSystem.OpenFile(reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) if err != nil { return http.StatusNotFound, err } @@ -294,7 +281,7 @@ func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int, if closeErr != nil { return http.StatusMethodNotAllowed, closeErr } - etag, err := findETag(h.FileSystem, h.LockSystem, reqPath, fi) + etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi) if err != nil { return http.StatusInternalServerError, err } @@ -313,10 +300,12 @@ func (h *Handler) handleMkcol(w http.ResponseWriter, r *http.Request) (status in } defer release() + ctx := getContext(r) + if r.ContentLength > 0 { return http.StatusUnsupportedMediaType, nil } - if err := h.FileSystem.Mkdir(reqPath, 0777); err != nil { + if err := h.FileSystem.Mkdir(ctx, reqPath, 0777); err != nil { if os.IsNotExist(err) { return http.StatusConflict, err } @@ -355,6 +344,8 @@ func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status return http.StatusForbidden, errDestinationEqualsSource } + ctx := getContext(r) + if r.Method == "COPY" { // Section 7.5.1 says that a COPY only needs to lock the destination, // not both destination and source. 
Strictly speaking, this is racy, @@ -378,7 +369,7 @@ func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status return http.StatusBadRequest, errInvalidDepth } } - return copyFiles(h.FileSystem, src, dst, r.Header.Get("Overwrite") != "F", depth, 0) + return copyFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") != "F", depth, 0) } release, status, err := h.confirmLocks(r, src, dst) @@ -395,7 +386,7 @@ func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status return http.StatusBadRequest, errInvalidDepth } } - return moveFiles(h.FileSystem, src, dst, r.Header.Get("Overwrite") == "T") + return moveFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") == "T") } func (h *Handler) handleLock(w http.ResponseWriter, r *http.Request) (retStatus int, retErr error) { @@ -408,6 +399,7 @@ func (h *Handler) handleLock(w http.ResponseWriter, r *http.Request) (retStatus return status, err } + ctx := getContext(r) token, ld, now, created := "", LockDetails{}, time.Now(), false if li == (lockInfo{}) { // An empty lockInfo means to refresh the lock. @@ -465,8 +457,8 @@ func (h *Handler) handleLock(w http.ResponseWriter, r *http.Request) (retStatus }() // Create the resource if it didn't previously exist. - if _, err := h.FileSystem.Stat(reqPath); err != nil { - f, err := h.FileSystem.OpenFile(reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil { + f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) if err != nil { // TODO: detect missing intermediate dirs and return http.StatusConflict? 
return http.StatusInternalServerError, err @@ -519,7 +511,8 @@ func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status if err != nil { return status, err } - fi, err := h.FileSystem.Stat(reqPath) + ctx := getContext(r) + fi, err := h.FileSystem.Stat(ctx, reqPath) if err != nil { if os.IsNotExist(err) { return http.StatusNotFound, err @@ -546,7 +539,7 @@ func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status } var pstats []Propstat if pf.Propname != nil { - pnames, err := propnames(h.FileSystem, h.LockSystem, reqPath) + pnames, err := propnames(ctx, h.FileSystem, h.LockSystem, reqPath) if err != nil { return err } @@ -556,9 +549,9 @@ func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status } pstats = append(pstats, pstat) } else if pf.Allprop != nil { - pstats, err = allprop(h.FileSystem, h.LockSystem, reqPath, pf.Prop) + pstats, err = allprop(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop) } else { - pstats, err = props(h.FileSystem, h.LockSystem, reqPath, pf.Prop) + pstats, err = props(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop) } if err != nil { return err @@ -566,7 +559,7 @@ func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status return mw.write(makePropstatResponse(path.Join(h.Prefix, reqPath), pstats)) } - walkErr := walkFS(h.FileSystem, depth, reqPath, fi, walkFn) + walkErr := walkFS(ctx, h.FileSystem, depth, reqPath, fi, walkFn) closeErr := mw.close() if walkErr != nil { return http.StatusInternalServerError, walkErr @@ -588,7 +581,9 @@ func (h *Handler) handleProppatch(w http.ResponseWriter, r *http.Request) (statu } defer release() - if _, err := h.FileSystem.Stat(reqPath); err != nil { + ctx := getContext(r) + + if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil { if os.IsNotExist(err) { return http.StatusNotFound, err } @@ -598,7 +593,7 @@ func (h *Handler) handleProppatch(w http.ResponseWriter, r *http.Request) (statu if err != nil { 
return status, err } - pstats, err := patch(h.FileSystem, h.LockSystem, reqPath, patches) + pstats, err := patch(ctx, h.FileSystem, h.LockSystem, reqPath, patches) if err != nil { return http.StatusInternalServerError, err } diff --git a/fn/vendor/golang.org/x/net/webdav/webdav_test.go b/fn/vendor/golang.org/x/net/webdav/webdav_test.go index 70a3bf2c7..25e0d5421 100644 --- a/fn/vendor/golang.org/x/net/webdav/webdav_test.go +++ b/fn/vendor/golang.org/x/net/webdav/webdav_test.go @@ -18,30 +18,47 @@ import ( "sort" "strings" "testing" + + "golang.org/x/net/context" ) // TODO: add tests to check XML responses with the expected prefix path func TestPrefix(t *testing.T) { const dst, blah = "Destination", "blah blah blah" - do := func(method, urlStr string, body io.Reader, wantStatusCode int, headers ...string) error { - req, err := http.NewRequest(method, urlStr, body) + // createLockBody comes from the example in Section 9.10.7. + const createLockBody = ` + + + + + http://example.org/~ejw/contact.html + + + ` + + do := func(method, urlStr string, body string, wantStatusCode int, headers ...string) (http.Header, error) { + var bodyReader io.Reader + if body != "" { + bodyReader = strings.NewReader(body) + } + req, err := http.NewRequest(method, urlStr, bodyReader) if err != nil { - return err + return nil, err } for len(headers) >= 2 { req.Header.Add(headers[0], headers[1]) headers = headers[2:] } - res, err := http.DefaultClient.Do(req) + res, err := http.DefaultTransport.RoundTrip(req) if err != nil { - return err + return nil, err } defer res.Body.Close() if res.StatusCode != wantStatusCode { - return fmt.Errorf("got status code %d, want %d", res.StatusCode, wantStatusCode) + return nil, fmt.Errorf("got status code %d, want %d", res.StatusCode, wantStatusCode) } - return nil + return res.Header, nil } prefixes := []string{ @@ -50,6 +67,7 @@ func TestPrefix(t *testing.T) { "/a/b/", "/a/b/c/", } + ctx := context.Background() for _, prefix := range prefixes { fs := 
NewMemFS() h := &Handler{ @@ -71,8 +89,10 @@ func TestPrefix(t *testing.T) { // COPY /a/b/c /a/b/d // MKCOL /a/b/e // MOVE /a/b/d /a/b/e/f - // which should yield the (possibly stripped) filenames /a/b/c and - // /a/b/e/f, plus their parent directories. + // LOCK /a/b/e/g + // PUT /a/b/e/g + // which should yield the (possibly stripped) filenames /a/b/c, + // /a/b/e/f and /a/b/e/g, plus their parent directories. wantA := map[string]int{ "/": http.StatusCreated, @@ -80,7 +100,7 @@ func TestPrefix(t *testing.T) { "/a/b/": http.StatusNotFound, "/a/b/c/": http.StatusNotFound, }[prefix] - if err := do("MKCOL", srv.URL+"/a", nil, wantA); err != nil { + if _, err := do("MKCOL", srv.URL+"/a", "", wantA); err != nil { t.Errorf("prefix=%-9q MKCOL /a: %v", prefix, err) continue } @@ -91,7 +111,7 @@ func TestPrefix(t *testing.T) { "/a/b/": http.StatusMovedPermanently, "/a/b/c/": http.StatusNotFound, }[prefix] - if err := do("MKCOL", srv.URL+"/a/b", nil, wantB); err != nil { + if _, err := do("MKCOL", srv.URL+"/a/b", "", wantB); err != nil { t.Errorf("prefix=%-9q MKCOL /a/b: %v", prefix, err) continue } @@ -102,7 +122,7 @@ func TestPrefix(t *testing.T) { "/a/b/": http.StatusCreated, "/a/b/c/": http.StatusMovedPermanently, }[prefix] - if err := do("PUT", srv.URL+"/a/b/c", strings.NewReader(blah), wantC); err != nil { + if _, err := do("PUT", srv.URL+"/a/b/c", blah, wantC); err != nil { t.Errorf("prefix=%-9q PUT /a/b/c: %v", prefix, err) continue } @@ -113,7 +133,7 @@ func TestPrefix(t *testing.T) { "/a/b/": http.StatusCreated, "/a/b/c/": http.StatusMovedPermanently, }[prefix] - if err := do("COPY", srv.URL+"/a/b/c", nil, wantD, dst, srv.URL+"/a/b/d"); err != nil { + if _, err := do("COPY", srv.URL+"/a/b/c", "", wantD, dst, srv.URL+"/a/b/d"); err != nil { t.Errorf("prefix=%-9q COPY /a/b/c /a/b/d: %v", prefix, err) continue } @@ -124,7 +144,7 @@ func TestPrefix(t *testing.T) { "/a/b/": http.StatusCreated, "/a/b/c/": http.StatusNotFound, }[prefix] - if err := do("MKCOL", 
srv.URL+"/a/b/e", nil, wantE); err != nil { + if _, err := do("MKCOL", srv.URL+"/a/b/e", "", wantE); err != nil { t.Errorf("prefix=%-9q MKCOL /a/b/e: %v", prefix, err) continue } @@ -135,22 +155,48 @@ func TestPrefix(t *testing.T) { "/a/b/": http.StatusCreated, "/a/b/c/": http.StatusNotFound, }[prefix] - if err := do("MOVE", srv.URL+"/a/b/d", nil, wantF, dst, srv.URL+"/a/b/e/f"); err != nil { + if _, err := do("MOVE", srv.URL+"/a/b/d", "", wantF, dst, srv.URL+"/a/b/e/f"); err != nil { t.Errorf("prefix=%-9q MOVE /a/b/d /a/b/e/f: %v", prefix, err) continue } - got, err := find(nil, fs, "/") + var lockToken string + wantG := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if h, err := do("LOCK", srv.URL+"/a/b/e/g", createLockBody, wantG); err != nil { + t.Errorf("prefix=%-9q LOCK /a/b/e/g: %v", prefix, err) + continue + } else { + lockToken = h.Get("Lock-Token") + } + + ifHeader := fmt.Sprintf("<%s/a/b/e/g> (%s)", srv.URL, lockToken) + wantH := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("PUT", srv.URL+"/a/b/e/g", blah, wantH, "If", ifHeader); err != nil { + t.Errorf("prefix=%-9q PUT /a/b/e/g: %v", prefix, err) + continue + } + + got, err := find(ctx, nil, fs, "/") if err != nil { t.Errorf("prefix=%-9q find: %v", prefix, err) continue } sort.Strings(got) want := map[string][]string{ - "/": []string{"/", "/a", "/a/b", "/a/b/c", "/a/b/e", "/a/b/e/f"}, - "/a/": []string{"/", "/b", "/b/c", "/b/e", "/b/e/f"}, - "/a/b/": []string{"/", "/c", "/e", "/e/f"}, - "/a/b/c/": []string{"/"}, + "/": {"/", "/a", "/a/b", "/a/b/c", "/a/b/e", "/a/b/e/f", "/a/b/e/g"}, + "/a/": {"/", "/b", "/b/c", "/b/e", "/b/e/f", "/b/e/g"}, + "/a/b/": {"/", "/c", "/e", "/e/f", "/e/g"}, + "/a/b/c/": {"/"}, }[prefix] if !reflect.DeepEqual(got, want) { t.Errorf("prefix=%-9q find:\ngot 
%v\nwant %v", prefix, got, want) @@ -159,57 +205,110 @@ func TestPrefix(t *testing.T) { } } +func TestEscapeXML(t *testing.T) { + // These test cases aren't exhaustive, and there is more than one way to + // escape e.g. a quot (as """ or """) or an apos. We presume that + // the encoding/xml package tests xml.EscapeText more thoroughly. This test + // here is just a sanity check for this package's escapeXML function, and + // its attempt to provide a fast path (and avoid a bytes.Buffer allocation) + // when escaping filenames is obviously a no-op. + testCases := map[string]string{ + "": "", + " ": " ", + "&": "&", + "*": "*", + "+": "+", + ",": ",", + "-": "-", + ".": ".", + "/": "/", + "0": "0", + "9": "9", + ":": ":", + "<": "<", + ">": ">", + "A": "A", + "_": "_", + "a": "a", + "~": "~", + "\u0201": "\u0201", + "&": "&amp;", + "foo&baz": "foo&<b/ar>baz", + } + + for in, want := range testCases { + if got := escapeXML(in); got != want { + t.Errorf("in=%q: got %q, want %q", in, got, want) + } + } +} + func TestFilenameEscape(t *testing.T) { - re := regexp.MustCompile(`([^<]*)`) - do := func(method, urlStr string) (string, error) { + hrefRe := regexp.MustCompile(`([^<]*)`) + displayNameRe := regexp.MustCompile(`([^<]*)`) + do := func(method, urlStr string) (string, string, error) { req, err := http.NewRequest(method, urlStr, nil) if err != nil { - return "", err + return "", "", err } res, err := http.DefaultClient.Do(req) if err != nil { - return "", err + return "", "", err } defer res.Body.Close() b, err := ioutil.ReadAll(res.Body) if err != nil { - return "", err + return "", "", err } - m := re.FindStringSubmatch(string(b)) - if len(m) != 2 { - return "", errors.New("D:href not found") + hrefMatch := hrefRe.FindStringSubmatch(string(b)) + if len(hrefMatch) != 2 { + return "", "", errors.New("D:href not found") + } + displayNameMatch := displayNameRe.FindStringSubmatch(string(b)) + if len(displayNameMatch) != 2 { + return "", "", errors.New("D:displayname not 
found") } - return m[1], nil + return hrefMatch[1], displayNameMatch[1], nil } testCases := []struct { - name, want string + name, wantHref, wantDisplayName string }{{ - name: `/foo%bar`, - want: `/foo%25bar`, + name: `/foo%bar`, + wantHref: `/foo%25bar`, + wantDisplayName: `foo%bar`, }, { - name: `/こんにちわ世界`, - want: `/%E3%81%93%E3%82%93%E3%81%AB%E3%81%A1%E3%82%8F%E4%B8%96%E7%95%8C`, + name: `/こんにちわ世界`, + wantHref: `/%E3%81%93%E3%82%93%E3%81%AB%E3%81%A1%E3%82%8F%E4%B8%96%E7%95%8C`, + wantDisplayName: `こんにちわ世界`, }, { - name: `/Program Files/`, - want: `/Program%20Files`, + name: `/Program Files/`, + wantHref: `/Program%20Files`, + wantDisplayName: `Program Files`, }, { - name: `/go+lang`, - want: `/go+lang`, + name: `/go+lang`, + wantHref: `/go+lang`, + wantDisplayName: `go+lang`, }, { - name: `/go&lang`, - want: `/go&lang`, + name: `/go&lang`, + wantHref: `/go&lang`, + wantDisplayName: `go&lang`, + }, { + name: `/goexclusive"` Shared *struct{} `xml:"lockscope>shared"` Write *struct{} `xml:"locktype>write"` @@ -33,7 +51,7 @@ type owner struct { func readLockInfo(r io.Reader) (li lockInfo, status int, err error) { c := &countingReader{r: r} - if err = xml.NewDecoder(c).Decode(&li); err != nil { + if err = ixml.NewDecoder(c).Decode(&li); err != nil { if err == io.EOF { if c.n == 0 { // An empty body means to refresh the lock. @@ -88,7 +106,7 @@ func escape(s string) string { switch s[i] { case '"', '&', '\'', '<', '>': b := bytes.NewBuffer(nil) - xml.EscapeText(b, []byte(s)) + ixml.EscapeText(b, []byte(s)) return b.String() } } @@ -100,14 +118,14 @@ func escape(s string) string { // and directives. 
// http://www.webdav.org/specs/rfc4918.html#property_values // http://www.webdav.org/specs/rfc4918.html#xml-extensibility -func next(d *xml.Decoder) (xml.Token, error) { +func next(d *ixml.Decoder) (ixml.Token, error) { for { t, err := d.Token() if err != nil { return t, err } switch t.(type) { - case xml.Comment, xml.Directive, xml.ProcInst: + case ixml.Comment, ixml.Directive, ixml.ProcInst: continue default: return t, nil @@ -122,35 +140,35 @@ type propfindProps []xml.Name // // It returns an error if start does not contain any properties or if // properties contain values. Character data between properties is ignored. -func (pn *propfindProps) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { +func (pn *propfindProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { for { t, err := next(d) if err != nil { return err } switch t.(type) { - case xml.EndElement: + case ixml.EndElement: if len(*pn) == 0 { return fmt.Errorf("%s must not be empty", start.Name.Local) } return nil - case xml.StartElement: - name := t.(xml.StartElement).Name + case ixml.StartElement: + name := t.(ixml.StartElement).Name t, err = next(d) if err != nil { return err } - if _, ok := t.(xml.EndElement); !ok { + if _, ok := t.(ixml.EndElement); !ok { return fmt.Errorf("unexpected token %T", t) } - *pn = append(*pn, name) + *pn = append(*pn, xml.Name(name)) } } } // http://www.webdav.org/specs/rfc4918.html#ELEMENT_propfind type propfind struct { - XMLName xml.Name `xml:"DAV: propfind"` + XMLName ixml.Name `xml:"DAV: propfind"` Allprop *struct{} `xml:"DAV: allprop"` Propname *struct{} `xml:"DAV: propname"` Prop propfindProps `xml:"DAV: prop"` @@ -159,7 +177,7 @@ type propfind struct { func readPropfind(r io.Reader) (pf propfind, status int, err error) { c := countingReader{r: r} - if err = xml.NewDecoder(&c).Decode(&pf); err != nil { + if err = ixml.NewDecoder(&c).Decode(&pf); err != nil { if err == io.EOF { if c.n == 0 { // An empty body means to propfind allprop. 
@@ -206,11 +224,19 @@ type Property struct { InnerXML []byte `xml:",innerxml"` } +// ixmlProperty is the same as the Property type except it holds an ixml.Name +// instead of an xml.Name. +type ixmlProperty struct { + XMLName ixml.Name + Lang string `xml:"xml:lang,attr,omitempty"` + InnerXML []byte `xml:",innerxml"` +} + // http://www.webdav.org/specs/rfc4918.html#ELEMENT_error // See multistatusWriter for the "D:" namespace prefix. type xmlError struct { - XMLName xml.Name `xml:"D:error"` - InnerXML []byte `xml:",innerxml"` + XMLName ixml.Name `xml:"D:error"` + InnerXML []byte `xml:",innerxml"` } // http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat @@ -222,24 +248,48 @@ type propstat struct { ResponseDescription string `xml:"D:responsedescription,omitempty"` } +// ixmlPropstat is the same as the propstat type except it holds an ixml.Name +// instead of an xml.Name. +type ixmlPropstat struct { + Prop []ixmlProperty `xml:"D:prop>_ignored_"` + Status string `xml:"D:status"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` +} + // MarshalXML prepends the "D:" namespace prefix on properties in the DAV: namespace // before encoding. See multistatusWriter. -func (ps propstat) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (ps propstat) MarshalXML(e *ixml.Encoder, start ixml.StartElement) error { + // Convert from a propstat to an ixmlPropstat. 
+ ixmlPs := ixmlPropstat{ + Prop: make([]ixmlProperty, len(ps.Prop)), + Status: ps.Status, + Error: ps.Error, + ResponseDescription: ps.ResponseDescription, + } for k, prop := range ps.Prop { + ixmlPs.Prop[k] = ixmlProperty{ + XMLName: ixml.Name(prop.XMLName), + Lang: prop.Lang, + InnerXML: prop.InnerXML, + } + } + + for k, prop := range ixmlPs.Prop { if prop.XMLName.Space == "DAV:" { - prop.XMLName = xml.Name{Space: "", Local: "D:" + prop.XMLName.Local} - ps.Prop[k] = prop + prop.XMLName = ixml.Name{Space: "", Local: "D:" + prop.XMLName.Local} + ixmlPs.Prop[k] = prop } } // Distinct type to avoid infinite recursion of MarshalXML. - type newpropstat propstat - return e.EncodeElement(newpropstat(ps), start) + type newpropstat ixmlPropstat + return e.EncodeElement(newpropstat(ixmlPs), start) } // http://www.webdav.org/specs/rfc4918.html#ELEMENT_response // See multistatusWriter for the "D:" namespace prefix. type response struct { - XMLName xml.Name `xml:"D:response"` + XMLName ixml.Name `xml:"D:response"` Href []string `xml:"D:href"` Propstat []propstat `xml:"D:propstat"` Status string `xml:"D:status,omitempty"` @@ -264,7 +314,7 @@ type multistatusWriter struct { responseDescription string w http.ResponseWriter - enc *xml.Encoder + enc *ixml.Encoder } // Write validates and emits a DAV response as part of a multistatus response @@ -308,14 +358,14 @@ func (w *multistatusWriter) writeHeader() error { if err != nil { return err } - w.enc = xml.NewEncoder(w.w) - return w.enc.EncodeToken(xml.StartElement{ - Name: xml.Name{ + w.enc = ixml.NewEncoder(w.w) + return w.enc.EncodeToken(ixml.StartElement{ + Name: ixml.Name{ Space: "DAV:", Local: "multistatus", }, - Attr: []xml.Attr{{ - Name: xml.Name{Space: "xmlns", Local: "D"}, + Attr: []ixml.Attr{{ + Name: ixml.Name{Space: "xmlns", Local: "D"}, Value: "DAV:", }}, }) @@ -329,17 +379,17 @@ func (w *multistatusWriter) close() error { if w.enc == nil { return nil } - var end []xml.Token + var end []ixml.Token if 
w.responseDescription != "" { - name := xml.Name{Space: "DAV:", Local: "responsedescription"} + name := ixml.Name{Space: "DAV:", Local: "responsedescription"} end = append(end, - xml.StartElement{Name: name}, - xml.CharData(w.responseDescription), - xml.EndElement{Name: name}, + ixml.StartElement{Name: name}, + ixml.CharData(w.responseDescription), + ixml.EndElement{Name: name}, ) } - end = append(end, xml.EndElement{ - Name: xml.Name{Space: "DAV:", Local: "multistatus"}, + end = append(end, ixml.EndElement{ + Name: ixml.Name{Space: "DAV:", Local: "multistatus"}, }) for _, t := range end { err := w.enc.EncodeToken(t) @@ -350,12 +400,9 @@ func (w *multistatusWriter) close() error { return w.enc.Flush() } -// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch) -type proppatchProps []Property +var xmlLangName = ixml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"} -var xmlLangName = xml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"} - -func xmlLang(s xml.StartElement, d string) string { +func xmlLang(s ixml.StartElement, d string) string { for _, attr := range s.Attr { if attr.Name == xmlLangName { return attr.Value @@ -366,19 +413,19 @@ func xmlLang(s xml.StartElement, d string) string { type xmlValue []byte -func (v *xmlValue) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { +func (v *xmlValue) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { // The XML value of a property can be arbitrary, mixed-content XML. // To make sure that the unmarshalled value contains all required // namespaces, we encode all the property value XML tokens into a // buffer. This forces the encoder to redeclare any used namespaces. 
var b bytes.Buffer - e := xml.NewEncoder(&b) + e := ixml.NewEncoder(&b) for { t, err := next(d) if err != nil { return err } - if e, ok := t.(xml.EndElement); ok && e.Name == start.Name { + if e, ok := t.(ixml.EndElement); ok && e.Name == start.Name { break } if err = e.EncodeToken(t); err != nil { @@ -393,6 +440,9 @@ func (v *xmlValue) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { return nil } +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch) +type proppatchProps []Property + // UnmarshalXML appends the property names and values enclosed within start // to ps. // @@ -401,7 +451,7 @@ func (v *xmlValue) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { // // UnmarshalXML returns an error if start does not contain any properties or if // property values contain syntactically incorrect XML. -func (ps *proppatchProps) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { +func (ps *proppatchProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { lang := xmlLang(start, "") for { t, err := next(d) @@ -409,15 +459,15 @@ func (ps *proppatchProps) UnmarshalXML(d *xml.Decoder, start xml.StartElement) e return err } switch elem := t.(type) { - case xml.EndElement: + case ixml.EndElement: if len(*ps) == 0 { return fmt.Errorf("%s must not be empty", start.Name.Local) } return nil - case xml.StartElement: + case ixml.StartElement: p := Property{ - XMLName: t.(xml.StartElement).Name, - Lang: xmlLang(t.(xml.StartElement), lang), + XMLName: xml.Name(t.(ixml.StartElement).Name), + Lang: xmlLang(t.(ixml.StartElement), lang), } err = d.DecodeElement(((*xmlValue)(&p.InnerXML)), &elem) if err != nil { @@ -431,29 +481,29 @@ func (ps *proppatchProps) UnmarshalXML(d *xml.Decoder, start xml.StartElement) e // http://www.webdav.org/specs/rfc4918.html#ELEMENT_set // http://www.webdav.org/specs/rfc4918.html#ELEMENT_remove type setRemove struct { - XMLName xml.Name + XMLName ixml.Name Lang string 
`xml:"xml:lang,attr,omitempty"` Prop proppatchProps `xml:"DAV: prop"` } // http://www.webdav.org/specs/rfc4918.html#ELEMENT_propertyupdate type propertyupdate struct { - XMLName xml.Name `xml:"DAV: propertyupdate"` + XMLName ixml.Name `xml:"DAV: propertyupdate"` Lang string `xml:"xml:lang,attr,omitempty"` SetRemove []setRemove `xml:",any"` } func readProppatch(r io.Reader) (patches []Proppatch, status int, err error) { var pu propertyupdate - if err = xml.NewDecoder(r).Decode(&pu); err != nil { + if err = ixml.NewDecoder(r).Decode(&pu); err != nil { return nil, http.StatusBadRequest, err } for _, op := range pu.SetRemove { remove := false switch op.XMLName { - case xml.Name{Space: "DAV:", Local: "set"}: + case ixml.Name{Space: "DAV:", Local: "set"}: // No-op. - case xml.Name{Space: "DAV:", Local: "remove"}: + case ixml.Name{Space: "DAV:", Local: "remove"}: for _, p := range op.Prop { if len(p.InnerXML) > 0 { return nil, http.StatusBadRequest, errInvalidProppatch diff --git a/fn/vendor/golang.org/x/net/webdav/xml_test.go b/fn/vendor/golang.org/x/net/webdav/xml_test.go index bc5641f45..a3d9e1ed8 100644 --- a/fn/vendor/golang.org/x/net/webdav/xml_test.go +++ b/fn/vendor/golang.org/x/net/webdav/xml_test.go @@ -6,6 +6,7 @@ package webdav import ( "bytes" + "encoding/xml" "fmt" "io" "net/http" @@ -15,7 +16,7 @@ import ( "strings" "testing" - "golang.org/x/net/webdav/internal/xml" + ixml "golang.org/x/net/webdav/internal/xml" ) func TestReadLockInfo(t *testing.T) { @@ -86,7 +87,7 @@ func TestReadLockInfo(t *testing.T) { " gopher\n" + "", lockInfo{ - XMLName: xml.Name{Space: "DAV:", Local: "lockinfo"}, + XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"}, Exclusive: new(struct{}), Write: new(struct{}), Owner: owner{ @@ -105,7 +106,7 @@ func TestReadLockInfo(t *testing.T) { " \n" + "", lockInfo{ - XMLName: xml.Name{Space: "DAV:", Local: "lockinfo"}, + XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"}, Exclusive: new(struct{}), Write: new(struct{}), Owner: owner{ @@ 
-147,7 +148,7 @@ func TestReadPropfind(t *testing.T) { " \n" + "", wantPF: propfind{ - XMLName: xml.Name{Space: "DAV:", Local: "propfind"}, + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, Propname: new(struct{}), }, }, { @@ -163,7 +164,7 @@ func TestReadPropfind(t *testing.T) { " \n" + "", wantPF: propfind{ - XMLName: xml.Name{Space: "DAV:", Local: "propfind"}, + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, Allprop: new(struct{}), }, }, { @@ -174,7 +175,7 @@ func TestReadPropfind(t *testing.T) { " \n" + "", wantPF: propfind{ - XMLName: xml.Name{Space: "DAV:", Local: "propfind"}, + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, Allprop: new(struct{}), Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, }, @@ -186,7 +187,7 @@ func TestReadPropfind(t *testing.T) { " \n" + "", wantPF: propfind{ - XMLName: xml.Name{Space: "DAV:", Local: "propfind"}, + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, Allprop: new(struct{}), Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, }, @@ -197,7 +198,7 @@ func TestReadPropfind(t *testing.T) { " \n" + "", wantPF: propfind{ - XMLName: xml.Name{Space: "DAV:", Local: "propfind"}, + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, }, }, { @@ -210,7 +211,7 @@ func TestReadPropfind(t *testing.T) { " \n" + "", wantPF: propfind{ - XMLName: xml.Name{Space: "DAV:", Local: "propfind"}, + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, }, }, { @@ -220,7 +221,7 @@ func TestReadPropfind(t *testing.T) { " \n" + "", wantPF: propfind{ - XMLName: xml.Name{Space: "DAV:", Local: "propfind"}, + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, }, }, { @@ -230,7 +231,7 @@ func TestReadPropfind(t *testing.T) { " foobar\n" + "", wantPF: propfind{ - XMLName: xml.Name{Space: 
"DAV:", Local: "propfind"}, + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, }, }, { @@ -241,7 +242,7 @@ func TestReadPropfind(t *testing.T) { " *boss*\n" + "", wantPF: propfind{ - XMLName: xml.Name{Space: "DAV:", Local: "propfind"}, + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, Propname: new(struct{}), }, }, { @@ -348,10 +349,6 @@ func TestReadPropfind(t *testing.T) { } func TestMultistatusWriter(t *testing.T) { - if go1Dot4 { - t.Skip("TestMultistatusWriter requires Go version 1.5 or greater") - } - ///The "section x.y.z" test cases come from section x.y.z of the spec at // http://www.webdav.org/specs/rfc4918.html testCases := []struct { @@ -802,7 +799,7 @@ func TestUnmarshalXMLValue(t *testing.T) { var n xmlNormalizer for _, tc := range testCases { - d := xml.NewDecoder(strings.NewReader(tc.input)) + d := ixml.NewDecoder(strings.NewReader(tc.input)) var v xmlValue if err := d.Decode(&v); err != nil { t.Errorf("%s: got error %v, want nil", tc.desc, err) @@ -840,8 +837,8 @@ type xmlNormalizer struct { // * Remove comments, if instructed to do so. 
// func (n *xmlNormalizer) normalize(w io.Writer, r io.Reader) error { - d := xml.NewDecoder(r) - e := xml.NewEncoder(w) + d := ixml.NewDecoder(r) + e := ixml.NewEncoder(w) for { t, err := d.Token() if err != nil { @@ -851,18 +848,18 @@ func (n *xmlNormalizer) normalize(w io.Writer, r io.Reader) error { return err } switch val := t.(type) { - case xml.Directive, xml.ProcInst: + case ixml.Directive, ixml.ProcInst: continue - case xml.Comment: + case ixml.Comment: if n.omitComments { continue } - case xml.CharData: + case ixml.CharData: if n.omitWhitespace && len(bytes.TrimSpace(val)) == 0 { continue } - case xml.StartElement: - start, _ := xml.CopyToken(val).(xml.StartElement) + case ixml.StartElement: + start, _ := ixml.CopyToken(val).(ixml.StartElement) attr := start.Attr[:0] for _, a := range start.Attr { if a.Name.Space == "xmlns" || a.Name.Local == "xmlns" { @@ -897,7 +894,7 @@ func (n *xmlNormalizer) equalXML(a, b io.Reader) (bool, error) { return normA == normB, nil } -type byName []xml.Attr +type byName []ixml.Attr func (a byName) Len() int { return len(a) } func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/fn/vendor/golang.org/x/net/websocket/client.go b/fn/vendor/golang.org/x/net/websocket/client.go index 20d1e1e38..69a4ac7ee 100644 --- a/fn/vendor/golang.org/x/net/websocket/client.go +++ b/fn/vendor/golang.org/x/net/websocket/client.go @@ -6,7 +6,6 @@ package websocket import ( "bufio" - "crypto/tls" "io" "net" "net/http" @@ -87,20 +86,14 @@ func DialConfig(config *Config) (ws *Conn, err error) { if config.Origin == nil { return nil, &DialError{config, ErrBadWebSocketOrigin} } - switch config.Location.Scheme { - case "ws": - client, err = net.Dial("tcp", parseAuthority(config.Location)) - - case "wss": - client, err = tls.Dial("tcp", parseAuthority(config.Location), config.TlsConfig) - - default: - err = ErrBadScheme + dialer := config.Dialer + if dialer == nil { + dialer = &net.Dialer{} } + client, err = dialWithDialer(dialer, 
config) if err != nil { goto Error } - ws, err = NewClient(config, client) if err != nil { client.Close() diff --git a/fn/vendor/golang.org/x/net/websocket/dial.go b/fn/vendor/golang.org/x/net/websocket/dial.go new file mode 100644 index 000000000..2dab943a4 --- /dev/null +++ b/fn/vendor/golang.org/x/net/websocket/dial.go @@ -0,0 +1,24 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/tls" + "net" +) + +func dialWithDialer(dialer *net.Dialer, config *Config) (conn net.Conn, err error) { + switch config.Location.Scheme { + case "ws": + conn, err = dialer.Dial("tcp", parseAuthority(config.Location)) + + case "wss": + conn, err = tls.DialWithDialer(dialer, "tcp", parseAuthority(config.Location), config.TlsConfig) + + default: + err = ErrBadScheme + } + return +} diff --git a/fn/vendor/golang.org/x/net/websocket/dial_test.go b/fn/vendor/golang.org/x/net/websocket/dial_test.go new file mode 100644 index 000000000..aa03e30dd --- /dev/null +++ b/fn/vendor/golang.org/x/net/websocket/dial_test.go @@ -0,0 +1,43 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/tls" + "fmt" + "log" + "net" + "net/http/httptest" + "testing" + "time" +) + +// This test depend on Go 1.3+ because in earlier versions the Dialer won't be +// used in TLS connections and a timeout won't be triggered. 
+func TestDialConfigTLSWithDialer(t *testing.T) { + tlsServer := httptest.NewTLSServer(nil) + tlsServerAddr := tlsServer.Listener.Addr().String() + log.Print("Test TLS WebSocket server listening on ", tlsServerAddr) + defer tlsServer.Close() + config, _ := NewConfig(fmt.Sprintf("wss://%s/echo", tlsServerAddr), "http://localhost") + config.Dialer = &net.Dialer{ + Deadline: time.Now().Add(-time.Minute), + } + config.TlsConfig = &tls.Config{ + InsecureSkipVerify: true, + } + _, err := DialConfig(config) + dialerr, ok := err.(*DialError) + if !ok { + t.Fatalf("DialError expected, got %#v", err) + } + neterr, ok := dialerr.Err.(*net.OpError) + if !ok { + t.Fatalf("net.OpError error expected, got %#v", dialerr.Err) + } + if !neterr.Timeout() { + t.Fatalf("expected timeout error, got %#v", neterr) + } +} diff --git a/fn/vendor/golang.org/x/net/websocket/hybi.go b/fn/vendor/golang.org/x/net/websocket/hybi.go index 60bbc8418..8cffdd16c 100644 --- a/fn/vendor/golang.org/x/net/websocket/hybi.go +++ b/fn/vendor/golang.org/x/net/websocket/hybi.go @@ -81,9 +81,6 @@ type hybiFrameReader struct { func (frame *hybiFrameReader) Read(msg []byte) (n int, err error) { n, err = frame.reader.Read(msg) - if err != nil { - return 0, err - } if frame.header.MaskingKey != nil { for i := 0; i < n; i++ { msg[i] = msg[i] ^ frame.header.MaskingKey[frame.pos%4] diff --git a/fn/vendor/golang.org/x/net/websocket/websocket.go b/fn/vendor/golang.org/x/net/websocket/websocket.go index 606840098..e242c89a7 100644 --- a/fn/vendor/golang.org/x/net/websocket/websocket.go +++ b/fn/vendor/golang.org/x/net/websocket/websocket.go @@ -4,6 +4,12 @@ // Package websocket implements a client and server for the WebSocket protocol // as specified in RFC 6455. 
+// +// This package currently lacks some features found in an alternative +// and more actively maintained WebSocket package: +// +// https://godoc.org/github.com/gorilla/websocket +// package websocket // import "golang.org/x/net/websocket" import ( @@ -32,6 +38,8 @@ const ( PingFrame = 9 PongFrame = 10 UnknownFrame = 255 + + DefaultMaxPayloadBytes = 32 << 20 // 32MB ) // ProtocolError represents WebSocket protocol errors. @@ -58,6 +66,10 @@ var ( ErrNotSupported = &ProtocolError{"not supported"} ) +// ErrFrameTooLarge is returned by Codec's Receive method if payload size +// exceeds limit set by Conn.MaxPayloadBytes +var ErrFrameTooLarge = errors.New("websocket: frame payload size exceeds limit") + // Addr is an implementation of net.Addr for WebSocket. type Addr struct { *url.URL @@ -86,6 +98,9 @@ type Config struct { // Additional header fields to be sent in WebSocket opening handshake. Header http.Header + // Dialer used when opening websocket connections. + Dialer *net.Dialer + handshakeData map[string]string } @@ -144,6 +159,8 @@ type frameHandler interface { } // Conn represents a WebSocket connection. +// +// Multiple goroutines may invoke methods on a Conn simultaneously. type Conn struct { config *Config request *http.Request @@ -161,6 +178,10 @@ type Conn struct { frameHandler PayloadType byte defaultCloseStatus int + + // MaxPayloadBytes limits the size of frame payload received over Conn + // by Codec's Receive method. If zero, DefaultMaxPayloadBytes is used. + MaxPayloadBytes int } // Read implements the io.Reader interface: @@ -207,9 +228,6 @@ func (ws *Conn) Write(msg []byte) (n int, err error) { } n, err = w.Write(msg) w.Close() - if err != nil { - return n, err - } return n, err } @@ -300,7 +318,12 @@ func (cd Codec) Send(ws *Conn, v interface{}) (err error) { return err } -// Receive receives single frame from ws, unmarshaled by cd.Unmarshal and stores in v. 
+// Receive receives single frame from ws, unmarshaled by cd.Unmarshal and stores +// in v. The whole frame payload is read to an in-memory buffer; max size of +// payload is defined by ws.MaxPayloadBytes. If frame payload size exceeds +// limit, ErrFrameTooLarge is returned; in this case frame is not read off wire +// completely. The next call to Receive would read and discard leftover data of +// previous oversized frame before processing next frame. func (cd Codec) Receive(ws *Conn, v interface{}) (err error) { ws.rio.Lock() defer ws.rio.Unlock() @@ -323,6 +346,19 @@ again: if frame == nil { goto again } + maxPayloadBytes := ws.MaxPayloadBytes + if maxPayloadBytes == 0 { + maxPayloadBytes = DefaultMaxPayloadBytes + } + if hf, ok := frame.(*hybiFrameReader); ok && hf.header.Length > int64(maxPayloadBytes) { + // payload size exceeds limit, no need to call Unmarshal + // + // set frameReader to current oversized frame so that + // the next call to this function can drain leftover + // data before processing the next frame + ws.frameReader = frame + return ErrFrameTooLarge + } payloadType := frame.PayloadType() data, err := ioutil.ReadAll(frame) if err != nil { diff --git a/fn/vendor/golang.org/x/net/websocket/websocket_test.go b/fn/vendor/golang.org/x/net/websocket/websocket_test.go index 05b7e5356..2054ce85a 100644 --- a/fn/vendor/golang.org/x/net/websocket/websocket_test.go +++ b/fn/vendor/golang.org/x/net/websocket/websocket_test.go @@ -6,6 +6,7 @@ package websocket import ( "bytes" + "crypto/rand" "fmt" "io" "log" @@ -357,6 +358,26 @@ func TestDialConfigBadVersion(t *testing.T) { } } +func TestDialConfigWithDialer(t *testing.T) { + once.Do(startServer) + config := newConfig(t, "/echo") + config.Dialer = &net.Dialer{ + Deadline: time.Now().Add(-time.Minute), + } + _, err := DialConfig(config) + dialerr, ok := err.(*DialError) + if !ok { + t.Fatalf("DialError expected, got %#v", err) + } + neterr, ok := dialerr.Err.(*net.OpError) + if !ok { + 
t.Fatalf("net.OpError error expected, got %#v", dialerr.Err) + } + if !neterr.Timeout() { + t.Fatalf("expected timeout error, got %#v", neterr) + } +} + func TestSmallBuffer(t *testing.T) { // http://code.google.com/p/go/issues/detail?id=1145 // Read should be able to handle reading a fragment of a frame. @@ -585,3 +606,60 @@ func TestCtrlAndData(t *testing.T) { } } } + +func TestCodec_ReceiveLimited(t *testing.T) { + const limit = 2048 + var payloads [][]byte + for _, size := range []int{ + 1024, + 2048, + 4096, // receive of this message would be interrupted due to limit + 2048, // this one is to make sure next receive recovers discarding leftovers + } { + b := make([]byte, size) + rand.Read(b) + payloads = append(payloads, b) + } + handlerDone := make(chan struct{}) + limitedHandler := func(ws *Conn) { + defer close(handlerDone) + ws.MaxPayloadBytes = limit + defer ws.Close() + for i, p := range payloads { + t.Logf("payload #%d (size %d, exceeds limit: %v)", i, len(p), len(p) > limit) + var recv []byte + err := Message.Receive(ws, &recv) + switch err { + case nil: + case ErrFrameTooLarge: + if len(p) <= limit { + t.Fatalf("unexpected frame size limit: expected %d bytes of payload having limit at %d", len(p), limit) + } + continue + default: + t.Fatalf("unexpected error: %v (want either nil or ErrFrameTooLarge)", err) + } + if len(recv) > limit { + t.Fatalf("received %d bytes of payload having limit at %d", len(recv), limit) + } + if !bytes.Equal(p, recv) { + t.Fatalf("received payload differs:\ngot:\t%v\nwant:\t%v", recv, p) + } + } + } + server := httptest.NewServer(Handler(limitedHandler)) + defer server.CloseClientConnections() + defer server.Close() + addr := server.Listener.Addr().String() + ws, err := Dial("ws://"+addr+"/", "", "http://localhost/") + if err != nil { + t.Fatal(err) + } + defer ws.Close() + for i, p := range payloads { + if err := Message.Send(ws, p); err != nil { + t.Fatalf("payload #%d (size %d): %v", i, len(p), err) + } + } + 
<-handlerDone +} diff --git a/fn/vendor/golang.org/x/net/xsrftoken/xsrf.go b/fn/vendor/golang.org/x/net/xsrftoken/xsrf.go index 8d2187872..bc861e1f3 100644 --- a/fn/vendor/golang.org/x/net/xsrftoken/xsrf.go +++ b/fn/vendor/golang.org/x/net/xsrftoken/xsrf.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -27,15 +27,18 @@ func clean(s string) string { // Generate returns a URL-safe secure XSRF token that expires in 24 hours. // -// key is a secret key for your application. -// userID is a unique identifier for the user. -// actionID is the action the user is taking (e.g. POSTing to a particular path). +// key is a secret key for your application; it must be non-empty. +// userID is an optional unique identifier for the user. +// actionID is an optional action the user is taking (e.g. POSTing to a particular path). func Generate(key, userID, actionID string) string { return generateTokenAtTime(key, userID, actionID, time.Now()) } // generateTokenAtTime is like Generate, but returns a token that expires 24 hours from now. func generateTokenAtTime(key, userID, actionID string, now time.Time) string { + if len(key) == 0 { + panic("zero length xsrf secret key") + } // Round time up and convert to milliseconds. milliTime := (now.UnixNano() + 1e6 - 1) / 1e6 @@ -57,6 +60,9 @@ func Valid(token, key, userID, actionID string) bool { // validTokenAtTime reports whether a token is valid at the given time. func validTokenAtTime(token, key, userID, actionID string, now time.Time) bool { + if len(key) == 0 { + panic("zero length xsrf secret key") + } // Extract the issue time of the token. 
sep := strings.LastIndex(token, ":") if sep < 0 { diff --git a/fn/vendor/golang.org/x/net/xsrftoken/xsrf_test.go b/fn/vendor/golang.org/x/net/xsrftoken/xsrf_test.go index 9933f8671..6c8e7d9b5 100644 --- a/fn/vendor/golang.org/x/net/xsrftoken/xsrf_test.go +++ b/fn/vendor/golang.org/x/net/xsrftoken/xsrf_test.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/fn/vendor/golang.org/x/sys/unix/linux/types.go b/fn/vendor/golang.org/x/sys/unix/linux/types.go index 7236b7292..9b69e271c 100644 --- a/fn/vendor/golang.org/x/sys/unix/linux/types.go +++ b/fn/vendor/golang.org/x/sys/unix/linux/types.go @@ -47,6 +47,7 @@ package unix #include #include #include +#include #include #include #include @@ -533,6 +534,8 @@ type Sigset_t C.sigset_t const RNDGETENTCNT = C.RNDGETENTCNT +const PERF_IOC_FLAG_GROUP = C.PERF_IOC_FLAG_GROUP + // sysconf information const _SC_PAGESIZE = C._SC_PAGESIZE diff --git a/fn/vendor/golang.org/x/sys/unix/mkerrors.sh b/fn/vendor/golang.org/x/sys/unix/mkerrors.sh index 40c2058ea..efdacb706 100755 --- a/fn/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/fn/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -166,6 +166,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -404,6 +405,7 @@ ccflags="$@" $2 ~ /^GRND_/ || $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || $2 ~ /^KEYCTL_/ || + $2 ~ /^PERF_EVENT_IOC_/ || $2 ~ /^SECCOMP_MODE_/ || $2 ~ /^SPLICE_/ || $2 ~ /^(VM|VMADDR)_/ || diff --git a/fn/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/fn/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 02a0861d3..4723162f7 100644 --- a/fn/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/fn/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -1056,6 +1056,16 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 
0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80042407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40042406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 diff --git a/fn/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/fn/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 538193c19..b5c978ece 100644 --- a/fn/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/fn/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -1056,6 +1056,16 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 diff --git a/fn/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/fn/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index ac9a8b328..0ae0e8c41 100644 --- a/fn/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/fn/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -1055,6 +1055,16 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80042407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40042406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 diff --git a/fn/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go 
b/fn/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 90cda517c..3c53a84c3 100644 --- a/fn/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/fn/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -1056,6 +1056,16 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 diff --git a/fn/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/fn/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 04345249f..23e845e57 100644 --- a/fn/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/fn/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -1056,6 +1056,16 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40042407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80042406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 diff --git a/fn/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/fn/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 9cf876d53..d27b373c3 100644 --- a/fn/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/fn/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -1056,6 +1056,16 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + 
PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 diff --git a/fn/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/fn/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 7a070a9dd..b31460180 100644 --- a/fn/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/fn/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -1056,6 +1056,16 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 diff --git a/fn/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/fn/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index c48ec18de..aa69fe628 100644 --- a/fn/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/fn/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -1056,6 +1056,16 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40042407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80042406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 diff --git a/fn/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/fn/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go 
index 0949773b8..6438fc855 100644 --- a/fn/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/fn/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -1057,6 +1057,16 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 diff --git a/fn/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/fn/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index d195b6347..00c9942d3 100644 --- a/fn/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/fn/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -1057,6 +1057,16 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 diff --git a/fn/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/fn/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 9e9049b2e..89674f3aa 100644 --- a/fn/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/fn/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -1055,6 +1055,16 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 
0x40082404 + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 diff --git a/fn/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/fn/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 7fc1eb2d9..811120659 100644 --- a/fn/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/fn/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -662,6 +662,8 @@ type Sigset_t struct { const RNDGETENTCNT = 0x80045200 +const PERF_IOC_FLAG_GROUP = 0x1 + const _SC_PAGESIZE = 0x1e type Termios struct { diff --git a/fn/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/fn/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 60a26eeea..075d9c561 100644 --- a/fn/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/fn/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -680,6 +680,8 @@ type Sigset_t struct { const RNDGETENTCNT = 0x80045200 +const PERF_IOC_FLAG_GROUP = 0x1 + const _SC_PAGESIZE = 0x1e type Termios struct { diff --git a/fn/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/fn/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index b994baa3e..a66c1603b 100644 --- a/fn/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/fn/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -651,6 +651,8 @@ type Sigset_t struct { const RNDGETENTCNT = 0x80045200 +const PERF_IOC_FLAG_GROUP = 0x1 + const _SC_PAGESIZE = 0x1e type Termios struct { diff --git a/fn/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/fn/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index c19c47881..b3b506a6d 100644 --- a/fn/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/fn/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -659,6 +659,8 @@ type Sigset_t struct { const RNDGETENTCNT = 0x80045200 +const PERF_IOC_FLAG_GROUP = 0x1 + const _SC_PAGESIZE = 0x1e type Termios struct { diff --git 
a/fn/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/fn/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index c84e4620c..5c654f552 100644 --- a/fn/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/fn/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -656,6 +656,8 @@ type Sigset_t struct { const RNDGETENTCNT = 0x40045200 +const PERF_IOC_FLAG_GROUP = 0x1 + const _SC_PAGESIZE = 0x1e type Termios struct { diff --git a/fn/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/fn/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 0c75cb937..3f11fb657 100644 --- a/fn/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/fn/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -661,6 +661,8 @@ type Sigset_t struct { const RNDGETENTCNT = 0x40045200 +const PERF_IOC_FLAG_GROUP = 0x1 + const _SC_PAGESIZE = 0x1e type Termios struct { diff --git a/fn/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/fn/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index c75f75a2c..1a4ad57e4 100644 --- a/fn/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/fn/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -661,6 +661,8 @@ type Sigset_t struct { const RNDGETENTCNT = 0x40045200 +const PERF_IOC_FLAG_GROUP = 0x1 + const _SC_PAGESIZE = 0x1e type Termios struct { diff --git a/fn/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/fn/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index cfc219f1b..b3f0f30fd 100644 --- a/fn/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/fn/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -656,6 +656,8 @@ type Sigset_t struct { const RNDGETENTCNT = 0x40045200 +const PERF_IOC_FLAG_GROUP = 0x1 + const _SC_PAGESIZE = 0x1e type Termios struct { diff --git a/fn/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/fn/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 4c285227c..aeee27e04 100644 --- a/fn/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ 
b/fn/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -669,6 +669,8 @@ type Sigset_t struct { const RNDGETENTCNT = 0x40045200 +const PERF_IOC_FLAG_GROUP = 0x1 + const _SC_PAGESIZE = 0x1e type Termios struct { diff --git a/fn/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/fn/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 1b511be22..b8cb2c3b2 100644 --- a/fn/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/fn/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -669,6 +669,8 @@ type Sigset_t struct { const RNDGETENTCNT = 0x40045200 +const PERF_IOC_FLAG_GROUP = 0x1 + const _SC_PAGESIZE = 0x1e type Termios struct { diff --git a/fn/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/fn/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index b408752d3..58883f92b 100644 --- a/fn/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/fn/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -686,6 +686,8 @@ type Sigset_t struct { const RNDGETENTCNT = 0x80045200 +const PERF_IOC_FLAG_GROUP = 0x1 + const _SC_PAGESIZE = 0x1e type Termios struct { diff --git a/fn/version.go b/fn/version.go index b6d893c7b..0321166c2 100644 --- a/fn/version.go +++ b/fn/version.go @@ -10,7 +10,7 @@ import ( ) // Version of Functions CLI -var Version = "0.3.13" +var Version = "0.3.15" func version() cli.Command { r := versionCmd{VersionApi: functions.NewVersionApi()}